Terraform Learnings: Deploy an OVA Using the vSphere Provider

Once I got my head around the basics of Terraform I wanted to play with the vSphere provider to see what it was capable of. A basic use case that everyone needs is deploying a VM, so my first use case is to deploy a VM from an OVA. The vSphere provider documentation for deploying an OVA uses William Lam’s nested ESXi OVA as an example. This is a great example of how to use the provider, but seeing as I also plan to play with the NSX-T provider, I decided to use the NSX-T Manager OVA as my source to deploy.

So the first thing to do is set up your provider. Every provider in the Terraform registry has a Use Provider button on the provider page that pops up a How to use this provider box. This shows you what you need to put in your required_providers and provider blocks. In my case I will use a providers.tf file that looks like the example below. Note that you can only have one required_providers block in your configuration, but you can have multiple providers: all required providers go in the same required_providers block, and each provider gets its own provider block.

# providers.tf

terraform {
  required_providers {
    vsphere = {
      source  = "hashicorp/vsphere"
      version = "~> 2.1.1"
    }
  }
}
provider "vsphere" {
  user                 = var.vsphere_user
  password             = var.vsphere_password
  vsphere_server       = var.vsphere_server
  allow_unverified_ssl = true
}

To authenticate to our chosen provider (in this case vSphere) we need to provide credentials. If you read my initial post on Terraform you will have seen me mention a terraform.tfvars file, which can be used for sensitive variables. We will declare these as variables later in the variables.tf file, but this is where we assign the values. My terraform.tfvars file looks like this:

# terraform.tfvars

# vSphere Provider Credentials
vsphere_user     = "administrator@vsphere.local"
vsphere_password = "VMw@re1!"

Next we need variables to enable us to deploy our NSX-T Manager appliance, so we create a variables.tf file and populate it with our variables. Note: variables that have a default value are considered optional, and the default value will be used if no value is passed.

# variables.tf

# vSphere Infrastructure Details
variable "data_center" { default = "sfo-m01-dc01" }
variable "cluster" { default = "sfo-m01-cl01" }
variable "vds" { default = "sfo-m01-vds01" }
variable "workload_datastore" { default = "vsanDatastore" }
variable "compute_pool" { default = "sfo-m01-cl01" }
variable "compute_host" { default = "sfo01-m01-esx01.sfo.rainpole.io" }
variable "vsphere_server" { default = "sfo-m01-vc01.sfo.rainpole.io" }

# vCenter Credential Variables
variable "vsphere_user" {}
variable "vsphere_password" {}

# NSX-T Manager Deployment
variable "mgmt_pg" { default = "sfo-m01-vds01-pg-mgmt" }
variable "vm_name" { default = "sfo-m01-nsx01a" }
variable "local_ovf_path" { default = "F:\\OVAs\\nsx-unified-appliance-3.1.3.5.0.19068437.ova" }
variable "deployment_option" { default = "extra_small" } # valid deployments are: extra_small, small, medium, large
variable "nsx_role" { default = "NSX Manager" }          # valid roles are NSX Manager, NSX Global Manager
variable "nsx_ip_0" { default = "172.16.225.66" }
variable "nsx_netmask_0" { default = "255.255.255.0" }
variable "nsx_gateway_0" { default = "172.16.225.1" }
variable "nsx_dns1_0" { default = "172.16.225.4" }
variable "nsx_domain_0" { default = "sfo.rainpole.io" }
variable "nsx_ntp_0" { default = "ntp.sfo.rainpole.io" }
variable "nsx_isSSHEnabled" { default = "True" }
variable "nsx_allowSSHRootLogin" { default = "True" }
variable "nsx_passwd_0" { default = "VMw@re1!VMw@re1!" }
variable "nsx_cli_passwd_0" { default = "VMw@re1!VMw@re1!" }
variable "nsx_cli_audit_passwd_0" { default = "VMw@re1!VMw@re1!" }
variable "nsx_hostname" { default = "sfo-m01-nsx01a.sfo.rainpole.io" }

Now that we have our provider & variables in place, we need our main configuration to deploy the NSX-T Manager OVA, including the data sources we need to pull information from and the resource we are going to create.

# main.tf

# Data source for vCenter Datacenter
data "vsphere_datacenter" "datacenter" {
  name = var.data_center
}

# Data source for vCenter Cluster
data "vsphere_compute_cluster" "cluster" {
  name          = var.cluster
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

# Data source for vCenter Datastore
data "vsphere_datastore" "datastore" {
  name          = var.workload_datastore
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

# Data source for vCenter Portgroup
data "vsphere_network" "mgmt" {
  name          = var.mgmt_pg
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

# Data source for vCenter Resource Pool. In our case we will use the root resource pool
data "vsphere_resource_pool" "pool" {
  name          = format("%s%s", data.vsphere_compute_cluster.cluster.name, "/Resources")
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

# Data source for ESXi host to deploy to
data "vsphere_host" "host" {
  name          = var.compute_host
  datacenter_id = data.vsphere_datacenter.datacenter.id
}

# Data source for the OVF to read the required OVF Properties
data "vsphere_ovf_vm_template" "ovfLocal" {
  name             = var.vm_name
  resource_pool_id = data.vsphere_resource_pool.pool.id
  datastore_id     = data.vsphere_datastore.datastore.id
  host_system_id   = data.vsphere_host.host.id
  local_ovf_path   = var.local_ovf_path
  ovf_network_map = {
    "Network 1" = data.vsphere_network.mgmt.id
  }
}

# Deployment of VM from Local OVA
resource "vsphere_virtual_machine" "nsxt01" {
  name                 = var.vm_name
  datacenter_id        = data.vsphere_datacenter.datacenter.id
  datastore_id         = data.vsphere_ovf_vm_template.ovfLocal.datastore_id
  host_system_id       = data.vsphere_ovf_vm_template.ovfLocal.host_system_id
  resource_pool_id     = data.vsphere_ovf_vm_template.ovfLocal.resource_pool_id
  num_cpus             = data.vsphere_ovf_vm_template.ovfLocal.num_cpus
  num_cores_per_socket = data.vsphere_ovf_vm_template.ovfLocal.num_cores_per_socket
  memory               = data.vsphere_ovf_vm_template.ovfLocal.memory
  guest_id             = data.vsphere_ovf_vm_template.ovfLocal.guest_id
  scsi_type            = data.vsphere_ovf_vm_template.ovfLocal.scsi_type
  dynamic "network_interface" {
    for_each = data.vsphere_ovf_vm_template.ovfLocal.ovf_network_map
    content {
      network_id = network_interface.value
    }
  }

  wait_for_guest_net_timeout = 5

  ovf_deploy {
    allow_unverified_ssl_cert = true
    local_ovf_path            = var.local_ovf_path
    disk_provisioning         = "thin"
    deployment_option         = var.deployment_option

  }
  vapp {
    properties = {
      "nsx_role"               = var.nsx_role,
      "nsx_ip_0"               = var.nsx_ip_0,
      "nsx_netmask_0"          = var.nsx_netmask_0,
      "nsx_gateway_0"          = var.nsx_gateway_0,
      "nsx_dns1_0"             = var.nsx_dns1_0,
      "nsx_domain_0"           = var.nsx_domain_0,
      "nsx_ntp_0"              = var.nsx_ntp_0,
      "nsx_isSSHEnabled"       = var.nsx_isSSHEnabled,
      "nsx_allowSSHRootLogin"  = var.nsx_allowSSHRootLogin,
      "nsx_passwd_0"           = var.nsx_passwd_0,
      "nsx_cli_passwd_0"       = var.nsx_cli_passwd_0,
      "nsx_cli_audit_passwd_0" = var.nsx_cli_audit_passwd_0,
      "nsx_hostname"           = var.nsx_hostname
    }
  }
  lifecycle {
    ignore_changes = [
      #vapp # Enable this to ignore all vapp properties if the plan is re-run
      vapp[0].properties["nsx_role"], # Avoid unwanted changes to specific vApp properties.
      vapp[0].properties["nsx_passwd_0"],
      vapp[0].properties["nsx_cli_passwd_0"],
      vapp[0].properties["nsx_cli_audit_passwd_0"],
      host_system_id # Avoids moving the VM back to the host it was deployed to if DRS has relocated it
    ]
  }
}

Once we have all of the above we can run the following to validate the configuration and save the plan:

terraform plan -out=nsxt01

If your plan is successful you should see output summarizing the resources that will be created.

Once your plan is successful, run the command below to apply it:

terraform apply nsxt01

If the stars align, your NSX-T Manager appliance should deploy successfully. Once it's deployed, if you re-run the plan you should see Terraform report that no changes are required.

One of the key pieces to this is the lifecycle block in the plan. The lifecycle block enables you to call out things that Terraform should ignore when it is re-applying a plan, such as tags or other items that may get updated by other systems. In our case we want Terraform to ignore the vApp properties, as it would otherwise try to re-apply the password properties every time, which would entail powering down the VM, making the change, and powering the VM back on.

lifecycle {
  ignore_changes = [
    #vapp # Enable this to ignore all vapp properties if the plan is re-run
    vapp[0].properties["nsx_role"], # Avoid unwanted changes to specific vApp properties.
    vapp[0].properties["nsx_passwd_0"],
    vapp[0].properties["nsx_cli_passwd_0"],
    vapp[0].properties["nsx_cli_audit_passwd_0"],
    host_system_id # Avoids moving the VM back to the host it was deployed to if DRS has relocated it
  ]
}

Hopefully this was useful. I’m sure there are more efficient ways of doing this; I will update the post if I find them. Keep a look out for the next instalment.

Cleanup Failed Tasks in SDDC Manager

I was chatting with my colleague Paudie O’Riordan yesterday about PowerVCF, as he was doing some testing internally, and he mentioned that a great addition would be the ability to find and clean up failed tasks in SDDC Manager. Some use cases for this would be cleaning up an environment before handing it off to a customer, or before recording a demo.

Currently there isn’t a supported public API to delete a failed task, so you have to run a curl command on SDDC Manager with the task ID. Getting a list of failed tasks and then running a command to delete each one by hand takes time. See Martin Gustafson’s post on how to do it manually here.
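
For reference, the delete command itself (run on the SDDC Manager appliance, and exactly what the script below executes via Invoke-VMScript) looks like this:

curl -X DELETE 127.0.0.1/tasks/registrations/<task-id>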

I took a look at our existing code for retrieving tasks (and discovered a bug in the logic that is now fixed in PowerVCF 2.1.5!). Get-VCFTask supports a -status parameter, so retrieving every failed task is a one-liner:
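
# Returns all tasks currently in a failed state
Get-VCFTask -status "Failed"

With that list, I put the script below together to loop through the failed tasks and delete them. The script requires the following inputs: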

  • SDDC Manager FQDN. This is the target that is queried for failed tasks
  • SDDC Manager API User. This is the user that is used to query for failed tasks. Must have the SDDC Manager ADMIN role
  • Password for the above user
  • Password for the SDDC Manager appliance vcf user. This is used to run the task deletion. This is not tracked in the credentials DB so we need to pass it.

Once the above variables are populated the script does the following:

  • Checks for PowerVCF (minimum version 2.1.5) and installs if not present
  • Requests an API token from SDDC Manager
  • Queries SDDC Manager for the management domain vCenter Server details
  • Uses the management domain vCenter Server details to retrieve the SDDC Manager VM name
  • Queries SDDC Manager for a list of tasks in a failed state
  • Loops through the list of failed tasks and deletes them from SDDC Manager
  • Verifies the task is no longer present

Here is the script. It is also published here if you would like to enhance it.

# Script to cleanup failed tasks in SDDC Manager
# Written by Brian O'Connell - Staff Solutions Architect @ VMware

#User Variables
# SDDC Manager FQDN. This is the target that is queried for failed tasks
$sddcManagerFQDN = "lax-vcf01.lax.rainpole.io"
# SDDC Manager API User. This is the user that is used to query for failed tasks. Must have the SDDC Manager ADMIN role
$sddcManagerAPIUser = "administrator@vsphere.local"
$sddcManagerAPIPassword = "VMw@re1!"
# Password for the SDDC Manager appliance vcf user. This is used to run the task deletion
$sddcManagerVCFPassword = "VMw@re1!"



# DO NOT CHANGE ANYTHING BELOW THIS LINE
#########################################

# Set TLS to 1.2 to avoid certificate mismatch errors
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12

# Install PowerVCF if not already installed
if (!(Get-InstalledModule -name PowerVCF -MinimumVersion 2.1.5 -ErrorAction SilentlyContinue)) {
    Install-Module -Name PowerVCF -MinimumVersion 2.1.5 -Force
}

# Request a VCF Token using PowerVCF
Request-VCFToken -fqdn $sddcManagerFQDN -username $sddcManagerAPIUser -password $sddcManagerAPIPassword

# Disconnect all connected vCenters to ensure only the desired vCenter is available
if ($defaultviservers) {
    foreach ($server in $defaultviservers) {
        Disconnect-VIServer -Server $server -Confirm:$False
    }
}

# Retrieve the Management Domain vCenter Server FQDN
$vcenterFQDN = ((Get-VCFWorkloadDomain | where-object {$_.type -eq "MANAGEMENT"}).vcenters.fqdn)
$vcenterUser = (Get-VCFCredential -resourceType "PSC").username
$vcenterPassword = (Get-VCFCredential -resourceType "PSC").password

# Retrieve SDDC Manager VM Name
if ($vcenterFQDN) {
    Write-Output "Getting SDDC Manager VM Name"
    Connect-VIServer -server $vcenterFQDN -user $vcenterUser -password $vcenterPassword | Out-Null
    $sddcmVMName = ((Get-VM * | Where-Object {$_.Guest.Hostname -eq $sddcManagerFQDN}).Name)              
}

# Retrieve a list of failed tasks
$failedTaskIDs = @()
$ids = (Get-VCFTask -status "Failed").id
Foreach ($id in $ids) {
    $failedTaskIDs += ,$id
}
# Cleanup the failed tasks
Foreach ($taskID in $failedTaskIDs) {
    $scriptCommand = "curl -X DELETE 127.0.0.1/tasks/registrations/$taskID"
    Write-Output "Deleting Failed Task ID $taskID"
    $output = Invoke-VMScript -ScriptText $scriptCommand -vm $sddcmVMName -GuestUser "vcf" -GuestPassword $sddcManagerVCFPassword

    # Verify the task was deleted
    Try {
        $verifyTaskDeleted = (Get-VCFTask -id $taskID)
        if ($verifyTaskDeleted -eq "Task ID Not Found") {
            Write-Output "Task ID $taskID Deleted Successfully"
        }
    }
    catch {
        Write-Error "Something went wrong. Please check your SDDC Manager state"
    }
}
Disconnect-VIServer -server $vcenterFQDN -Confirm:$False

As always, comments/feedback welcome!

Part 2: Working With the SRM VAMI API : Replacing the Appliance Certificate

In Part 1 of this series we saw how to retrieve a sessionId from the Site Recovery Manager VAMI interface using Postman & PowerShell. In this post we will use that sessionId to replace the appliance SSL certificate using the API. To start, we again use the VAMI UI to inspect the endpoint URL being used for certificate replacement by doing a manual replacement. In this case the URL is:

https://sfo-m01-srm01.sfo.rainpole.io:5480/configure/requestHandlers/installPkcs12Certificate

Site Recovery Manager expects the certificate in P12 format, so I used CertGen to create the cert format needed. When using the UI you browse to the cert file and it uploads in the browser along with the certificate passphrase. Behind the scenes the file is base64 encoded, so when using the API you need to do the encoding yourself.

# Base64 encode the p12 file

$certFile = ".\sfo-m01-srm01.4.p12"

$base64string = [Convert]::ToBase64String([IO.File]::ReadAllBytes($certFile))

# The passphrase used when the p12 was created
$certPassword = "VMw@re1!"

$body = '{
"certificateContent": "'+$base64string+'",
"certificatePassword": "'+$certPassword+'"
}'

#Create the required headers using the sessionId

$headers = @{"Content-Type" = "application/json"}
$headers.Add("dr.config.service.sessionid", "$sessionId")


$uri = "https://sfo-m01-srm01.sfo.rainpole.io:5480/configure/requestHandlers/installPkcs12Certificate"


Invoke-RestMethod -Method POST -Uri $uri -Headers $headers -body $body

And there you have it: your appliance cert replaced via the API.

Part 1: Working With the SRM VAMI API : Retrieving a Session ID

I’ve recently been doing a lot of work with VMware Site Recovery Manager (SRM) and vSphere Replication (vSR) with VMware Cloud Foundation. Earlier this year we (Ken Gould & I) published an early access design for Site Protection & Recovery for VMware Cloud Foundation 4.2. We have been working to refresh & enhance this design for a new release. Part of this effort includes trying to add some automation to assist with the manual steps to speed up time to deploy. SRM & vSR do not have publicly documented VAMI APIs so we set about trying to automate the configuration with a little bit of reverse engineering.

As with most APIs, whether public or private, you must authenticate before you can run an API workflow, so the first task is figuring out how the authentication to perform a workflow works. Typically, if you hit F12 in your browser you will get a developer console that exposes what goes on behind the scenes in a browser session. So to inspect the process, use the browser to perform a manual login, and review the header & response tabs in the developer view. This exposes the Request URL to use, the method (POST) and the required headers (accept: application/json)

The Response tab shows a sessionId which can be used for further configuration API calls in the headers as dr.config.service.sessionid

So with the above information you can use an API client like Postman to retrieve a sessionId with the URL & headers like this

And your VAMI admin user and password in JSON format in the body payload

You can also use the information to retrieve a sessionId using PowerShell

$headers = @{"Content-Type" = "application/json"}
$uri = "https://sfo-m01-srm01.sfo.rainpole.io:5480/configure/requestHandlers/login"
$body = '{"username": "admin","password": "mypassword"}'

$request = Invoke-RestMethod -Method POST -Uri $uri -Headers $headers -body $body

$sessionId = $request.data.sessionId

$sessionId

Keep an eye out for additional posts where we will use the sessionId to perform API-based tasks.

Checking Password Expiry For VMware Cloud Foundation Management Components

Within a VMware Cloud Foundation instance, SDDC Manager is used to manage the lifecycle of passwords (or credentials). While we provide the ability to rotate passwords (either scheduled or manually), currently there is no easy way to check when a particular password is due to expire. This can lead to appliance root passwords expiring, which causes all sorts of issues. The ability to monitor expiry is something that is being worked on, but as a stop gap I put together the script below, which leverages PowerVCF and a currently undocumented API for validating credentials.

The script has a function called Get-VCFPasswordExpiry that accepts the following parameters

  • -fqdn (FQDN of the SDDC Manager)
  • -username (SDDC Manager Username – Must have the ADMIN role)
  • -password (SDDC Manager password)
  • -resourceType (Optional parameter to specify a resourceType. If not passed, all resources will be checked. If passed (e.g. VCENTER) then only that resourceType will be checked. Supported resource types are VCENTER, PSC, ESXI, BACKUP, NSXT_MANAGER, NSXT_EDGE, VRSLCM, WSA, VROPS, VRLI, VRA and VXRAIL_MANAGER)

PowerVCF is a requirement. If you don’t already have it, run the following:

Install-Module -Name PowerVCF

The code takes a while to run, as it needs to do the following to check password expiry:

  • Connect to SDDC Manager to retrieve an API token
  • Retrieve a list of all credentials
  • Using the resourceID of each credential
    • Perform a credential validation
    • Wait for the validation to complete
    • Parse the results for the expiry details
    • Add all the results to an array and present in a table (Kudos to Ken Gould for assistance with the presentation of this piece!)

In this example script I am returning all non-SERVICE user accounts regardless of expiry (SERVICE account passwords are system managed). You could get more granular by adding something like this to only display accounts with passwords due to expire in less than 14 days:

if ($validationTaskResponse.validationChecks.passwordDetails.numberOfDaysToExpiry -lt 14) {
    Write-Output "Password for username $($validationTaskResponse.validationChecks.username) expires in $($validationTaskResponse.validationChecks.passwordDetails.numberOfDaysToExpiry) days"
}

Here is the script content. As always, feedback is welcome. It is also posted on GitHub here if anyone wants to fork and improve it: https://github.com/LifeOfBrianOC/Get-VCFPasswordExpiry

# Script to check the password expiry of VMware Cloud Foundation Credentials
# Written by Brian O'Connell - VMware

#User Variables
$sddcManagerFQDN = "sfo-vcf01.sfo.rainpole.io"
$sddcManagerAdminUser = "administrator@vsphere.local"
$sddcManagerAdminPassword = "VMw@re1!"

# Requires PowerVCF Module
#Requires -Module PowerVCF

Function Get-VCFPasswordExpiry {
    Param (
        [Parameter (Mandatory = $true)] [ValidateNotNullOrEmpty()] [String]$fqdn,
        [Parameter (Mandatory = $true)] [ValidateNotNullOrEmpty()] [String]$username,
        [Parameter (Mandatory = $true)] [ValidateNotNullOrEmpty()] [String]$password,
        [Parameter (Mandatory = $false)] [ValidateSet("VCENTER", "PSC", "ESXI", "BACKUP", "NSXT_MANAGER", "NSXT_EDGE", "VRSLCM", "WSA", "VROPS", "VRLI", "VRA", "VXRAIL_MANAGER")] [ValidateNotNullOrEmpty()] [String]$resourceType
    )

    # Request an SDDC Manager token
    Request-VCFToken -fqdn $fqdn -username $username -password $password

    # Build the required headers
    $credentialheaders = @{"Content-Type" = "application/json"}
    $credentialheaders.Add("Authorization", "Bearer $accessToken")

    # Get all credential objects that are not type SERVICE
    if (!$PsBoundParameters.ContainsKey("resourceType")) {
        $credentials = Get-VCFCredential | Where-Object {$_.accountType -ne "SERVICE"}
    }
    else {
        $credentials = Get-VCFCredential -resourceType $resourceType | Where-Object {$_.accountType -ne "SERVICE"}
    }

    $validationArray = @()
    Foreach ($credential in $credentials) {
        $resourceType = $credential.resource.resourceType
        $resourceID = $credential.resource.resourceId
        $username = $credential.username
        $credentialType = $credential.credentialType
        $body = '[
        {
            "resourceType": "'+$resourceType+'",
            "resourceId": "'+$resourceID+'",
            "credentials": [
                {
                    "username": "'+$username+'",
                    "credentialType": "'+$credentialType+'"
                }
            ]
        }
        ]'
        # Submit a credential validation request
        $uri = "https://$fqdn/v1/credentials/validations"
        $response = Invoke-RestMethod -Method POST -URI $uri -headers $credentialheaders -body $body
        $validationTaskId = $response.id

        # Keep checking until executionStatus is not IN_PROGRESS
        Do {
            $validationTaskuri = "https://$fqdn/v1/credentials/validations/$validationTaskId"
            $validationTaskResponse = Invoke-RestMethod -Method GET -URI $validationTaskuri -headers $credentialheaders
            Start-Sleep -Seconds 2 # Brief pause so we do not hammer the API while the validation runs
        }
        While ($validationTaskResponse.executionStatus -eq "IN_PROGRESS")

        Write-Output "Checking Password Expiry for username $($validationTaskResponse.validationChecks.username) from resource $($validationTaskResponse.validationChecks.resourceName)"

        # Build the output and add each credential result to the array
        $validationObject = New-Object -TypeName psobject
        $validationObject | Add-Member -notepropertyname 'Resource Name' -notepropertyvalue $validationTaskResponse.validationChecks.resourceName
        $validationObject | Add-Member -notepropertyname 'Username' -notepropertyvalue $validationTaskResponse.validationChecks.username
        $validationObject | Add-Member -notepropertyname 'Number Of Days To Expiry' -notepropertyvalue $validationTaskResponse.validationChecks.passwordDetails.numberOfDaysToExpiry
        $validationArray += $validationObject
    }
    # Print the array
    $validationArray
}

# Run the function
Get-VCFPasswordExpiry -fqdn $sddcManagerFQDN -username $sddcManagerAdminUser -password $sddcManagerAdminPassword

# Run the function with resourceType VCENTER
# Get-VCFPasswordExpiry -fqdn $sddcManagerFQDN -username $sddcManagerAdminUser -password $sddcManagerAdminPassword -resourceType VCENTER

Here is a screenshot of the result

VMware Cloud Foundation Bringup With Signed Certs on ESXi Hosts

Traditionally VMware Cloud Foundation (VCF) has followed the hybrid approach when it comes to SSL certificate management. Hybrid mode essentially means using CA signed certs for the vCenter Server machine SSL cert and VMCA signed certs for the solution user certs. In this mode, ESXi host certs are VMCA managed also. You then have the option to integrate with an external Microsoft CA or continue to use VMCA for all certs. If you decide to integrate with a Microsoft CA, ESXi host certs remain VMCA managed. This is not always ideal, as some customers require all components on the network to be signed by a known & trusted CA. Up until the recent 4.1 VMware Cloud Foundation (VCF) release it was not possible to use custom CA signed certs on your ESXi hosts, as hybrid mode would overwrite your CA signed ESXi certs with VMCA signed certs. There is a great blog post on how to manually enable CA signed certs here, but with VCF 4.1 it is now supported to do this via the API during bringup. The procedure is as follows:

  1. Install the ESXi hosts that will be used for bringup with the ESXi version on the Bill Of Materials for 4.1
  2. Install your custom CA signed certs on each host that will be used for the management domain
    1. Log in to the ESXi Shell, either directly from the DCUI or from an SSH client, as a user with administrator privileges.
    2. In the directory /etc/vmware/ssl, rename the existing certificates using the following commands.
      mv rui.crt orig.rui.crt
      mv rui.key orig.rui.key
    3. Copy the certificates that you want to use to /etc/vmware/ssl.
    4. Rename the new certificate and key to rui.crt and rui.key.
    5. Restart the host management agents by running the following commands:
      /etc/init.d/hostd restart
      /etc/init.d/vpxa restart

Repeat the above steps for all management domain hosts

To ensure that SDDC Manager is aware that you are using custom certs, you need to add a flag in the bringup json, along with the PEM encoded signing chain certificate so that it is added to the SDDC Manager keystore. This ensures the certificates are trusted. The API guide for 4.1 provides an example json spec here. Pay particular attention to this section:

"securitySpec" : {
  "esxiCertsMode" : "One among: Custom, VMCA",
  "rootCaCerts" : [ {
    "alias" : "string",
    "certChain" : [ "string" ]
  } ]
}

So to enable support for signed certs you would set this section as follows (substituting your signing CA chain):

"securitySpec" : {
  "esxiCertsMode" : "Custom",
  "rootCaCerts" : [ {
    "alias" : "Rainpole-CA",
    "certChain" : [ "-----BEGIN CERTIFICATE-----
MIIDczCCAlugAwIBAgIQI9xwbTkI9J5GhMffcP5CHDANBgkqhkiG9w0BAQsFADBM
MRIwEAYKCZImiZPyLGQBGRYCaW8xGDAWBgoJkiaJk/IsZAEZFghyYWlucG9sZTEc
MBoGA1UEAxMTcmFpbnBvbGUtZGMwMXJwbC1DQTAeFw0yMDAzMzAxNDQ2MTNaFw0y
NTAzMzAxNDU2MTNaMEwxEjAQBgoJkiaJk/IsZAEZFgJpbzEYMBYGCgmSJomT8ixk
ARkWCHJhaW5wb2xlMRwwGgYDVQQDExNyYWlucG9sZS1kYzAxcnBsLUNBMIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzpwkz7aPlQcfevcCelHc9DPswHkd
kjY96Vh3GvYlesaVEcy/q/BOvvh3KgLMLy8r7cy2cNPO3FANKOfqVdVx3ghfEUyL
g61W9BskAlwryzJRmjhOJJVqvB8CWjy+eCp7MejHGdEud6WdEvK8CaBcPngEg0KM
eLRNLGe8OCw8yY4GTrjU+H7PYQZtyD0kxxy5f48ueaDXat4ENRGcAuHEfCoMGfaR
bDue1OO4diHd900bCym5ggBNX0jhRudNULXPTayZl2ksImV0+QkaVeptQImXfCgb
kgnHQJ5CxK26up7fB5eAsmGLAsJLBnHuM7P9xvV09EvWjFCgLX/oBBDYTQIDAQAB
o1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7oOq
QBK8yg8mHnAfb+u6/GO0ZUcwEAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEL
BQADggEBALYxZGj4vWjFDN1atOUsBx2jrmxbExgMAyRpNlSc2aj+7vzxHxUW5VbX
x9nc/BfkTiCK6c7Y9VYb+mgjb8z0kNv58sT4ar1yIl1n63VOCoyyLcaFB8HyEJpD
wUhz4RNPoSijZMpm+M5EuSLfWlhEJo7N8sLqHgvvk1dFpbK8fIHbPS5KJwJibbPe
w9UuNRdcxN9hFWKBC0SvfgX+1CJxVdvgfi65rSHPuWinJzrXXdH999DfpDESRzwH
0pqE3GtMCt1Nqalp2QJFdahbT+kxj7QWHTjUylSENDHjdln7a8WH8RGxvEy/97YZ
+crXmxvQ/bAgHk9vcRERbRjfyIs7v88=
-----END CERTIFICATE-----" ] } ] }

You can then follow the steps outlined in the API guide to deploy the management domain using the Cloud Builder API. Note that once custom mode is enabled, all future workload domains that you create must also use signed certs.
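
For illustration, here is a minimal PowerShell sketch of that submission. Treat it as an assumption-laden sketch: it uses the /v1/sddcs endpoint from the API guide and the admin credentials set when the Cloud Builder appliance was deployed, and the FQDN and file name are examples.

# Minimal sketch: submit a bringup spec (with securitySpec set) to the Cloud Builder API
$cloudBuilderFqdn = "sfo-cb01.sfo.rainpole.io" # example value
$pair = "admin:VMw@re1!" # Cloud Builder appliance admin credentials
$headers = @{"Content-Type" = "application/json"}
$headers.Add("Authorization", "Basic " + [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes($pair)))
$bringupJson = Get-Content -Raw .\bringup-spec.json # your bringup spec
Invoke-RestMethod -Method POST -Uri "https://$cloudBuilderFqdn/v1/sddcs" -Headers $headers -Body $bringupJson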

PowerVCF 2.0 Authentication Changes

One of the many major enhancements in VMware Cloud Foundation 4.0 is a switch from basic authentication to token based authentication for the VCF API.

Basic authentication is a header field in the form of Authorization: Basic <credentials>, where credentials is the base64 encoding of a username and password. The credentials are encoded, not encrypted, so basic authentication is not the industry standard for API authentication.

VCF 4.0 has moved to using token based authentication (JWT tokens, to be exact) for securing the API. The token implementation is as follows, with a minimal sketch of the initial token request after the list:

  1. An authorized user executes a POST API call to /v1/tokens
  2. The response contains an access token and a refresh token
    1. The access token is valid for 1 hour
      1. The access token is passed in every API call header in the form of Authorization: Bearer <access token>
    2. The refresh token is valid for 24 hours
      1. The refresh token is used to request a new access token once it has expired
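
To make the flow concrete, here is a minimal sketch of step 1 using Invoke-RestMethod directly (the POST to /v1/tokens is as described above; treat the exact response field names as an assumption that may vary by version):

# Minimal sketch: request the access & refresh tokens directly from the VCF API
$headers = @{"Content-Type" = "application/json"}
$body = '{"username": "svc-vcf-api@rainpole.io","password": "VMw@re1!"}'
$uri = "https://sfo-vcf01.sfo.rainpole.io/v1/tokens"
$response = Invoke-RestMethod -Method POST -Uri $uri -Headers $headers -Body $body
$accessToken = $response.accessToken # sent on subsequent calls as: Authorization: Bearer <access token>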

PowerVCF 2.0 abstracts all of this in the following way:

  • An authorized user connects to SDDC Manager to request the tokens by running:

Connect-VCFManager -fqdn sfo-vcf01.sfo.rainpole.io -username svc-vcf-api@rainpole.io -password VMw@re1!

  • The access & refresh tokens are stored in memory and used when running subsequent API calls. As each API call is executed PowerVCF checks the expiry of the access token. If the access token is about to expire, it uses the refresh token to request a new access token and proceeds with the API call. So the user does not need to worry about token management.

We have also introduced roles that can be assigned to users. Initially we have ADMIN & OPERATOR, with more roles planned for a future release.

ADMIN = Full Administrator Access to all APIs

OPERATOR = All Access except Password Management, User Management, Backup Management

To request an API token you must have a user account that is assigned either the ADMIN or OPERATOR role in SDDC Manager. The default administrator@vsphere.local user is assigned the ADMIN role during bringup but it is advisable to add additional users for performing day to day tasks.

Once you have a user added you can then authenticate with SDDC Manager to retrieve your access & refresh tokens.

Tip: You can connect as the administrator@vsphere.local user and use the New-VCFUser PowerVCF cmdlet to create a user and assign a role, like so:

Connect-VCFManager -fqdn sfo-vcf01.sfo.rainpole.io -username administrator@vsphere.local -password VMw@re1!

New-VCFUser -user vcf-admin@rainpole.io -role ADMIN

Once your user is configured, PowerVCF will do the rest when it comes to managing the API access tokens.

PowerShell Script to Configure an NSX-T Load Balancer for the vRealize Suite & Workspace ONE Access

As part of my role in the VMware Hyper-converged Business Unit (HCIBU) I spend a lot of time working with new product versions, testing integrations for next-gen VMware Validated Designs and Cloud Foundation. A lot of my focus is on Cloud Operations and Automation (vROps, vRLI, vRA etc.) and consequently I regularly need to deploy environments to perform integration testing. I will typically leverage existing automation where possible and tend to create my own when I find gaps. One such gap was the ability to use PowerShell to interact with the NSX-T API. For anyone who is familiar with setting up a load balancer for the vRealize Suite in NSX-T: there are a lot of manual clicks required. So I set about creating some PowerShell functions to make it a little less tedious and to speed up getting my environments set up, so I could get to the testing faster.

There is comprehensive NSX-T API documentation posted on code.vmware.com that I used to decipher the API endpoints required to complete the various tasks:

  • Create the Load Balancer
  • Create the Service Monitors
  • Create the Application Profiles
  • Create the Server Pools
  • Create the Virtual Servers

The result is a PowerShell module with a function for each of the above and a corresponding JSON file that is read in for the settings for each function. I have included a sample JSON file to get you started. Just substitute your values.
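
To give a flavour of the pattern, here is an illustrative sketch only: the function name and JSON keys below are examples rather than the module's actual ones, and it assumes the NSX-T Policy API endpoint /policy/api/v1/infra/lb-services/<id> with basic auth.

# Illustrative sketch: create/update an NSX-T load balancer service from a JSON settings file
Function New-NsxtLbService {
    Param (
        [Parameter (Mandatory = $true)] [String]$nsxtManagerFqdn,
        [Parameter (Mandatory = $true)] [PSCredential]$credential,
        [Parameter (Mandatory = $true)] [String]$settingsJsonPath
    )
    # Read the desired settings from the JSON file
    $settings = Get-Content -Raw $settingsJsonPath | ConvertFrom-Json
    # Build a basic auth header for the NSX-T Manager
    $pair = "$($credential.UserName):$($credential.GetNetworkCredential().Password)"
    $headers = @{"Authorization" = "Basic " + [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes($pair))}
    # Load balancer service body; connectivity_path points at the pre-created Tier-1
    $body = @{
        display_name      = $settings.lbName
        size              = $settings.lbSize
        connectivity_path = $settings.tier1Path
    } | ConvertTo-Json
    $uri = "https://$nsxtManagerFqdn/policy/api/v1/infra/lb-services/$($settings.lbName)"
    # -SkipCertificateCheck requires PowerShell 7; drop it if the NSX-T certificate is trusted
    Invoke-RestMethod -Method PATCH -Uri $uri -Headers $headers -ContentType "application/json" -Body $body -SkipCertificateCheck
}

The module follows this same read-JSON-then-call-the-API shape for the service monitors, application profiles, server pools & virtual servers.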

Note: You must have a Tier-1 & associated segments created. (I’ll add that functionality when I get a chance!)

PowerShell Module, Sample JSON & Script are posted to GitHub here

Create a multi pNIC VMware Cloud Foundation NSX-V Workload Domain with PowerVCF

Hopefully by now you’ve seen my earlier posts about the new PowerShell module for the VMware Cloud Foundation API. If not, I’d suggest reviewing these before reading on.

With the release of VMware Cloud Foundation 3.9.1 it is now supported, via the API only, to use more than two physical NICs (pNICs) per host. In fact the API now supports up to three vSphere Distributed Switches and six physical NICs, providing more flexibility to support high performance use cases and physical traffic separation.

There is a tech note that goes into more detail on the use cases for more than two pNICs, and it also shows how this works using Postman, but we can also achieve this using PowerVCF.

The workflow using PowerVCF is the same as my earlier example for creating a workload domain. The only difference is the content in the JSON file.

Note: There is a validation API to validate the JSON you are passing before making the submission. PowerVCF dynamically formats the validation JSON as the formatting is slightly different to what you submit to create the workload domain.

To get you started there is a sample JSON file with the required formatting. Here is a snapshot of what it looks like:

{
  "domainName": "PowerVCF",  
  "vcenterSpec": {  
    "name": "sfo01w01vc01",  
    "networkDetailsSpec": {  
       "ipAddress": "172.16.225.64",  
       "dnsName": "sfo01w01vc01.sfo01.rainpole.local",  
       "gateway": "172.16.225.1",  
       "subnetMask": "255.255.255.0"
     },  
     "rootPassword": "VMw@re1!",  
     "datacenterName": "PowerVCF-DC"  
   },  
   "computeSpec": {  
      "clusterSpecs": [ {  
          "name": "Cluster1",  
          "hostSpecs": [ {  
              "id": "d0693b58-4012-4387-92ed-721cfa709e44",
              "license":"AAAAA-AAAAA-AAAAA-AAAAA-AAAAA",
              "hostNetworkSpec": {  
                 "vmNics": [ {  
                     "id": "vmnic0",  
                     "vdsName": "SDDC-Dswitch-Private1"  
                  }, {  
                     "id": "vmnic1",  
                     "vdsName": "SDDC-Dswitch-Private1"  
                  }, { 
                     "id": "vmnic2",  
                     "vdsName": "SDDC-Dswitch-Private2" 
                  }, {  
                     "id": "vmnic3",  
                     "vdsName": "SDDC-Dswitch-Private2"  
                  } ]  
               }  
            }, {  
              "id": "7006bec4-fccb-49a0-bff6-fd56c807d26a",
              "license":"AAAAA-AAAAA-AAAAA-AAAAA-AAAAA",
              "hostNetworkSpec": {  
                 "vmNics": [ {  
                     "id": "vmnic0",  
                     "vdsName": "SDDC-Dswitch-Private1"  
                  }, {  
                     "id": "vmnic1",  
                     "vdsName": "SDDC-Dswitch-Private1"  
                  }, { 
                     "id": "vmnic2",  
                     "vdsName": "SDDC-Dswitch-Private2" 
                  }, {  
                     "id": "vmnic3",  
                     "vdsName": "SDDC-Dswitch-Private2"  
                  } ]  
               }  
            }, {  
              "id": "cc257a80-e179-4297-bf7e-179a0944bbab",
              "license":"AAAAA-AAAAA-AAAAA-AAAAA-AAAAA",
              "hostNetworkSpec": {  
                 "vmNics": [ {  
                     "id": "vmnic0",  
                     "vdsName": "SDDC-Dswitch-Private1"  
                  }, {  
                     "id": "vmnic1",  
                     "vdsName": "SDDC-Dswitch-Private1"  
                  }, { 
                     "id": "vmnic2",  
                     "vdsName": "SDDC-Dswitch-Private2" 
                  }, {  
                     "id": "vmnic3",  
                     "vdsName": "SDDC-Dswitch-Private2"  
                  } ] 
               } 
           } ],     
    "datastoreSpec": {  
        "vsanDatastoreSpec": {  
            "failuresToTolerate": 1,  
            "licenseKey": "BBBBB-BBBBB-BBBBB-BBBBB-BBBBB",
            "datastoreName": "vSanDatastore" 
         }  
     },  
     "networkSpec": { 
         "vdsSpecs": [ { 
             "name": "SDDC-Dswitch-Private1", 
             "portGroupSpecs": [ {  
                 "name": "SDDC-DPortGroup-Mgmt", 
                 "transportType": "MANAGEMENT" 
             }, { 
                 "name": "SDDC-DPortGroup-VSAN",  
                 "transportType": "VSAN" 
             }, {  
                 "name": "SDDC-DPortGroup-vMotion", 
                 "transportType": "VMOTION" 
             } ] 
          },  
          {  
             "name": "SDDC-Dswitch-Private2", 
             "portGroupSpecs": [ { 
                "name": "SDDC-DPortGroup-Public", 
                "transportType": "PUBLIC"  } ] 
           } 
        ],  
        "nsxClusterSpec": { 
           "nsxVClusterSpec": {  
              "vlanId": 2237,  
              "vdsNameForVxlanConfig": "SDDC-Dswitch-Private1"  
            }  
          }  
        }  
      } ] 
   }, 
  "nsxVSpec" : {
    "nsxManagerSpec" : {
      "name" : "sfo01w01nsx01",
      "networkDetailsSpec" : {
        "ipAddress" : "172.16.225.66",
        "dnsName" : "sfo01w01nsx01.sfo01.rainpole.local",
        "gateway" : "172.16.225.1",
        "subnetMask" : "255.255.255.0"
      }
    },
    "nsxVControllerSpec" : {
      "nsxControllerIps" : [ "172.16.225.121", "172.16.225.122", "172.16.225.123" ],
      "nsxControllerPassword" : "VMw@re123456!",
      "nsxControllerGateway" : "172.16.225.1",
      "nsxControllerSubnetMask" : "255.255.255.0"
    },
    "licenseKey" : "CCCCC-CCCCC-CCCCC-CCCCC-CCCCC",
    "nsxManagerAdminPassword" : "VMw@re1!",
    "nsxManagerEnablePassword" : "VMw@re1!"
  }
}

You can see that the magic happens in the hostNetworkSpec section, where you map each vmnic to a vdsName:

"hostNetworkSpec": {
  "vmNics": [
    { "id": "vmnic0", "vdsName": "SDDC-Dswitch-Private1" },
    { "id": "vmnic1", "vdsName": "SDDC-Dswitch-Private1" },
    { "id": "vmnic2", "vdsName": "SDDC-Dswitch-Private2" },
    { "id": "vmnic3", "vdsName": "SDDC-Dswitch-Private2" }
  ]
}
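
Deployment is then identical to my earlier workload domain post: pass the JSON to New-VCFWorkloadDomain (the sample file name here is illustrative):

New-VCFWorkloadDomain -json .\SampleJSON\WorkloadDomain\workloadDomainSpec-NSX-V-MultipNIC.json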

So please try it out and let us know how it goes!

Create a new VMware Cloud Foundation Workload Domain with PowerVCF

So now that we have a PowerShell module for the VMware Cloud Foundation API, just what can we do with it? Well in this example we will create an NSX-V backed VMware Cloud Foundation workload domain, all using PowerVCF to interact with the API. Now all of this could obviously be wrapped up in a single script but I’m going to show you each step, with some tips along the way.

I will be making the assumption that you are familiar with VMware Cloud Foundation Concepts. If not please review the documentation here.

So once you have the initial VCF bringup completed, you need to add one or more workload domains to service your workloads. In my example below I have a management domain only.

And I have only the 4 hosts that are part of the management domain in my inventory, so I need to add new hosts to my inventory before I can create a new workload domain.

The sequence of events is as follows:

  • Install the PowerVCF Module
  • Connect to SDDC Manager
  • Create a network pool
  • Commission hosts
  • Create Workload domain

Install the PowerVCF Module from the PowerShell Gallery

  • Open PowerShell
  • Run the following to install the module

Install-Module -Name PowerVCF

Connect to SDDC Manager

  • To establish a session with SDDC Manager run the following

Connect-VCFManager -fqdn sddc-manager.sfo01.rainpole.local -username admin -password VMw@re1!

Create a network pool

The first thing you need before you can commission new hosts is to create a new network pool, which will include the vSAN & vMotion network details for this workload domain cluster.

To create a new network pool do the following:

  • Before you can create a network pool you first need to create the json body that will be passed in.

TIP: The PowerVCF Module includes a folder of sample json files to get you started

Here is the json format required for creating a vSAN network pool (use the sample file shipped with the module rather than copying from here):


{
  "name": "sfo01w01-cl01",
  "networks": [
    {
      "type": "VSAN",
      "vlanId": 2240,
      "mtu": 9000,
      "subnet": "172.16.240.0",
      "mask": "255.255.255.0",
      "gateway": "172.16.240.253",
      "ipPools": [
        {
          "start": "172.16.240.5",
          "end": "172.16.240.100"
        }
      ]
    },
    {
      "type": "VMOTION",
      "vlanId": 2236,
      "mtu": 9000,
      "subnet": "172.16.236.0",
      "mask": "255.255.255.0",
      "gateway": "172.16.236.253",
      "ipPools": [
        {
          "start": "172.16.236.5",
          "end": "172.16.236.100"
        }
      ]
    }
  ]
}

So first off, let’s get a list of current network pools. To do this run the following cmdlet:

Get-VCFNetworkPool

As expected this returns a single network pool.

So to create a new network pool using the json you created earlier run the following:

New-VCFNetworkPool -json .\SampleJSON\NetworkPool\addNetworkPoolSpec.json

Now running Get-VCFNetworkPool should display 2 Network Pools

Commission Hosts

Now that you have a network pool you can commission hosts and associate them with the network pool. For this you need the following json

TIP: For this json you need the network pool name & ID. These were returned when the pool was created and also by Get-VCFNetworkPool


[
  {
    "fqdn": "sfo01w01esx01.sfo01.rainpole.local",
    "username": "root",
    "storageType": "VSAN",
    "password": "VMw@re1!",
    "networkPoolName": "sfo01w01-cl01",
    "networkPoolId": "afd314f6-f31d-4ad4-8943-0ecb35c044b9"
  },
  {
    "fqdn": "sfo01w01esx02.sfo01.rainpole.local",
    "username": "root",
    "storageType": "VSAN",
    "password": "VMw@re1!",
    "networkPoolName": "sfo01w01-cl01",
    "networkPoolId": "afd314f6-f31d-4ad4-8943-0ecb35c044b9"
  },
  {
    "fqdn": "sfo01w01esx03.sfo01.rainpole.local",
    "username": "root",
    "storageType": "VSAN",
    "password": "VMw@re1!",
    "networkPoolName": "sfo01w01-cl01",
    "networkPoolId": "afd314f6-f31d-4ad4-8943-0ecb35c044b9"
  },
  {
    "fqdn": "sfo01w01esx04.sfo01.rainpole.local",
    "username": "root",
    "storageType": "VSAN",
    "password": "VMw@re1!",
    "networkPoolName": "sfo01w01-cl01",
    "networkPoolId": "afd314f6-f31d-4ad4-8943-0ecb35c044b9"
  }
]

So to commission the 4 new hosts into my VCF inventory I simply run:

Commission-VCFHost -json .\SampleJSON\Host\commissionHosts.json

TIP: This returns a task id, which you can monitor by running the following until status=Successful:

Get-VCFTask -id b93e2bc7-627b-4f7c-980b-c12b3497c4ea
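
If you would rather the console wait for you, a simple polling loop works (a sketch; adjust the interval and substitute your own task ID):

# Poll the commissioning task until it reports Successful
Do {
    $task = Get-VCFTask -id b93e2bc7-627b-4f7c-980b-c12b3497c4ea
    Write-Output "Task status: $($task.status)"
    Start-Sleep -Seconds 30
} While ($task.status -ne "Successful")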

Create a Workload Domain

Once the commission hosts task is complete you can then create a workload domain using those hosts. Creating a workload domain also requires a json file. For this you need the IDs of the hosts that you want to use. In VCF, hosts that are available to be used in a workload domain have a status of UNASSIGNED_USEABLE, so to find the IDs of the hosts you want to add, run the following:

TIP: Filter the results by adding | select fqdn,id

Get-VCFHost -Status UNASSIGNED_USEABLE | select fqdn,id

This returns the IDs you need for creating the workload domain. Here is the workload domain json. (Replace the ESXi license (AAAAA), vSAN license (BBBBB) & NSX-V license (CCCCC) placeholders with your keys.)


{
  "domainName" : "PowerVCF",
  "vcenterSpec" : {
    "name" : "sfo01w01vc01",
    "networkDetailsSpec" : {
      "ipAddress" : "172.16.225.64",
      "dnsName" : "sfo01w01vc01.sfo01.rainpole.local",
      "gateway" : "172.16.225.1",
      "subnetMask" : "255.255.255.0"
    },
    "rootPassword" : "VMw@re1!",
    "datacenterName" : "PowerVCF-DC"
  },
  "computeSpec" : {
    "clusterSpecs" : [ {
      "name" : "Cluster1",
      "hostSpecs" : [ {
        "id" : "dd2ec05f-39e1-464e-83f1-1349a0dcf723",
        "license" : "AAAAA-AAAAA-AAAAA-AAAAA-AAAAA",
        "hostNetworkSpec" : {
          "vmNics" : [ {
            "id" : "vmnic0",
            "vdsName" : "sfo01w01vds01"
          }, {
            "id" : "vmnic1",
            "vdsName" : "sfo01w01vds01"
          } ]
        }
      }, {
        "id" : "809b25e8-1db6-464b-b310-97f581c56da5",
        "license" : "AAAAA-AAAAA-AAAAA-AAAAA-AAAAA",
        "hostNetworkSpec" : {
          "vmNics" : [ {
            "id" : "vmnic0",
            "vdsName" : "sfo01w01vds01"
          }, {
            "id" : "vmnic1",
            "vdsName" : "sfo01w01vds01"
          } ]
        }
      }, {
        "id" : "5d3eea32-6464-4ae6-9866-932fb926a5f1",
        "license" : "AAAAA-AAAAA-AAAAA-AAAAA-AAAAA",
        "hostNetworkSpec" : {
          "vmNics" : [ {
            "id" : "vmnic0",
            "vdsName" : "sfo01w01vds01"
          }, {
            "id" : "vmnic1",
            "vdsName" : "sfo01w01vds01"
          } ]
        }
      } ],
      "datastoreSpec" : {
        "vsanDatastoreSpec" : {
          "failuresToTolerate" : 1,
          "licenseKey" : "BBBBB-BBBBB-BBBBB-BBBBB-BBBBB",
          "datastoreName" : "sfo01w01vsan01"
        }
      },
      "networkSpec" : {
        "vdsSpecs" : [ {
          "name" : "sfo01w01vds01",
          "portGroupSpecs" : [ {
            "name" : "sfo01w01vds01-Mgmt",
            "transportType" : "MANAGEMENT"
          }, {
            "name" : "sfo01w01vds01-VSAN",
            "transportType" : "VSAN"
          }, {
            "name" : "sfo01w01vds01-vMotion",
            "transportType" : "VMOTION"
          } ]
        } ],
        "nsxClusterSpec" : {
          "nsxVClusterSpec" : {
            "vlanId" : 2237,
            "vdsNameForVxlanConfig" : "sfo01w01vds01"
          }
        }
      }
    } ]
  },
  "nsxVSpec" : {
    "nsxManagerSpec" : {
      "name" : "sfo01w01nsx01",
      "networkDetailsSpec" : {
        "ipAddress" : "172.16.225.66",
        "dnsName" : "sfo01w01nsx01.sfo01.rainpole.local",
        "gateway" : "172.16.225.1",
        "subnetMask" : "255.255.255.0"
      }
    },
    "nsxVControllerSpec" : {
      "nsxControllerIps" : [ "172.16.235.121", "172.16.235.122", "172.16.235.123" ],
      "nsxControllerPassword" : "VMw@re123456!",
      "nsxControllerGateway" : "172.16.235.1",
      "nsxControllerSubnetMask" : "255.255.255.0"
    },
    "licenseKey" : "CCCCC-CCCCC-CCCCC-CCCCC-CCCCC",
    "nsxManagerAdminPassword" : "VMw@re1!",
    "nsxManagerEnablePassword" : "VMw@re1!"
  }
}

To create the workload domain run the following:

New-VCFWorkloadDomain -json .\SampleJSON\WorkloadDomain\workloadDomainSpec-NSX-V.json

This will return a Task ID. Monitor the workload domain creation by running the following

Get-VCFTask -id b93e2bc7-627b-4f7c-980b-c12b3497c4ea

And that should be it. If you’ve gotten all your json details correct you should have a fully functioning NSX-V workload domain without using the UI!