How to pass VM NIC keys to Load Balancer module using output block in terraform

155 Views Asked by At

I am trying to associate specific Virtual Machine NIC IPs to Load Balancer Backend pool using azurerm_network_interface_backend_address_pool_association resource. Since I have specific requirement to map only specific VM NIC ips to backendpool pool. So, I have created a mapping in vm_nic_to_lb_bpool_map.

Below is my module folder structure

enter image description here

I am sharing complete code

loadbalancer module

resource "azurerm_lb" "lb" {
  for_each            = var.loadbalancers.tiers
  name                = each.value.lb_name
  location            = "eastus"
  resource_group_name = var.existing_resource_group
  sku                 = "Standard"

  frontend_ip_configuration {
    name                          = "fip-${each.value.fip_name}"
    subnet_id                     = each.value.subnet_id
    private_ip_address_allocation = each.value.private_ip_type
  }

}

resource "azurerm_lb_probe" "probe" {

  depends_on = [azurerm_lb.lb]
  for_each = {
    for lb in flatten([
      for lb_name, lb in var.loadbalancers.tiers : [
        for probe_name, probe in lb.lb_probes : {
          lb_name_value  = lb.lb_name
          lb_name        = lb_name
          probe_name     = probe_name
          protocol_value = probe.protocol
          port_value     = probe.port

        }
      ]
      ]
    ) : "${lb.lb_name}-${lb.probe_name}" => lb
  }
  loadbalancer_id = azurerm_lb.lb[each.value.lb_name].id
  name            = each.value.probe_name
  protocol        = each.value.protocol_value
  port            = each.value.port_value

}

resource "azurerm_lb_backend_address_pool" "bepool" {

  depends_on      = [azurerm_lb.lb]
  for_each        = var.loadbalancers.tiers
  name            = each.value.backend_address_pool_name
  loadbalancer_id = azurerm_lb.lb[each.key].id

}

resource "azurerm_lb_rule" "rule" {

  depends_on = [azurerm_lb.lb]
  for_each = {
    for lb in flatten([
      for lb_name, lb in var.loadbalancers.tiers : [
        for lb_rule, rule in lb.lb_rules : {
          lb_name                 = lb_name
          lb_rule                 = lb_rule
          frontend_port           = rule.frontend_port
          protocol                = rule.protocol
          backend_port            = rule.backend_port
          enable_floating_ip      = rule.enable_floating_ip
          frontend_ip_config_name = rule.frontend_ip_config_name
          probe_name              = rule.probe_name
        }
      ]
      ]
    ) : "${lb.lb_name}-${lb.lb_rule}" => lb
  }

  loadbalancer_id                = azurerm_lb.lb[each.value.lb_name].id
  name                           = each.value.lb_rule
  protocol                       = each.value.protocol
  frontend_port                  = each.value.frontend_port
  backend_port                   = each.value.backend_port
  frontend_ip_configuration_name = azurerm_lb.lb[each.value.lb_name].frontend_ip_configuration[0].name
  enable_floating_ip             = each.value.enable_floating_ip
  backend_address_pool_ids       = [azurerm_lb_backend_address_pool.bepool[each.value.lb_name].id]
  probe_id                       = azurerm_lb_probe.probe["${each.value.lb_name}-${each.value.probe_name}"].id

}

resource "azurerm_network_interface_backend_address_pool_association" "nic_lb_association" {
  for_each = var.vm_nic_to_lb_bpool_map

  network_interface_id    = var.virtual_machine_nic_keys[each.key].id
  ip_configuration_name   = var.virtual_machine_nic_keys[each.key].ip_configuration[0].name
  backend_address_pool_id = azurerm_lb_backend_address_pool.bepool[each.value].id
}

loadbalancer variables

variable "loadbalancers" {
}

variable "existing_resource_group" {
  type = string
}

variable "virtual_machine_nic_keys" {
}

variable "vm_nic_to_lb_bpool_map" {
}

Virtual machine module

data "azurerm_resource_group" "rg" {
  name = var.existing_resource_group
}

resource "azurerm_network_interface" "nic" {

  for_each = {
    for vm in flatten([
      for vm_name, vm in var.virtual_machines.nodes : [
        for nic_name, nic in vm.networks : {
          vm_number      = vm.vm_num,
          vm_name        = vm_name,
          nic_name       = nic_name,
          subnet_value   = nic.subnet
          nic_name_value = nic.nic_name

        }
      ]
      ]
    ) : "${vm.vm_name}-${vm.nic_name}" => vm
  }
  name                = "${var.vm_prefix}-${each.value.nic_name_value}-nic"
  location            = "eastus"
  resource_group_name = var.existing_resource_group

  ip_configuration {
    name                          = "${var.vm_prefix}-${each.value.nic_name_value}-ipconfig"
    subnet_id                     = each.value.subnet_value
    private_ip_address_allocation = "Dynamic"
  }
}

resource "azurerm_linux_virtual_machine" "vm" {

  depends_on                      = [azurerm_network_interface.nic]
  for_each                        = var.virtual_machines.nodes
  name                            = "${var.vm_prefix}-${each.value.vm_name}-${each.value.vm_num}"
  admin_username                  = "plutoadmin"
  admin_password                  = "pluto@1234522"
  disable_password_authentication = false
  location                        = "eastus"
  resource_group_name             = var.existing_resource_group
  network_interface_ids           = [for nic_key, nic in azurerm_network_interface.nic : nic.id if startswith(nic_key, "${each.key}-")]
  size                            = "Standard_B2ms"

  os_disk {
    name                 = "${var.vm_prefix}-${each.value.vm_name}-${each.value.vm_num}-OSdisk"
    caching              = "ReadWrite"
    storage_account_type = "Standard_LRS"
  }

  source_image_reference {
    publisher = "RedHat"
    offer     = "RHEL"
    sku       = "82gen2"
    version   = "latest"
  }
}

Virtual Machine output

output "vm_nics_ids" {

  value = [for nic_key, nic in azurerm_network_interface.nic : nic.id]
}

output "virtual_machine_nic_keys" {
  value = merge([
    for vm_name, vm in var.virtual_machines.nodes :
    {
      for nic_name, nic in vm.networks :
      "${vm_name}-${nic_name}" => azurerm_network_interface.nic["${vm_name}-${nic_name}-nic"].id
    }
  ]...)
}

virtual machine variables

variable "vm_prefix" {
  type    = string
  default = "pluto"
}

variable "virtual_machines" {
}

variable "existing_resource_group" {
  type = string
}

main.tf

module "virtualmachine" {
  source                  = "./virtualmachine"
  existing_resource_group = var.existing_resource_group
  virtual_machines        = var.virtual_machines
  vm_prefix               = var.vm_prefix
}

module "loadbalancer" {
  depends_on               = [module.virtualmachine]
  source                   = "./loadbalancer"
  existing_resource_group  = var.existing_resource_group
  loadbalancers            = var.loadbalancers
  vm_nic_to_lb_bpool_map   = var.vm_nic_to_lb_bpool_map
  virtual_machine_nic_keys = module.virtualmachine.virtual_machine_nic_keys

}

tfvars

    existing_resource_group = "pluto"
    existing_infra_rg       = "pluto-infra"
 
    virtual_machines = {
      nodes = {
        app1_node1 = {
          "vm_name" = "app"
          "vm_num"  = "1"
          networks = {
            nic1 = {
              "nic_name" = "app-1"
              "subnet"   = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
            },
          }
        },
        service1_node1 = {
          "vm_name" = "service"
          "vm_num"  = "1"
          networks = {
            nic1 = {
              "nic_name" = "service-1"
              "subnet"   = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
            },
          }
        },
        db1_node1 = {
          "vm_name" = "db"
          "vm_num"  = "1"
          networks = {
            nic1 = {
              "nic_name" = "db-1-1"
              "subnet"   = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
            },
            nic2 = {
              "nic_name" = "db-1-2"
              "subnet"   = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/db-subnet"
            }
          }
        },
        db2_node2 = {
          "vm_name" = "db"
          "vm_num"  = "2"
          networks = {
            nic1 = {
              "nic_name" = "db-2-1"
              "subnet"   = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
            },
            nic2 = {
              "nic_name" = "db-2-2"
              "subnet"   = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/db-subnet"
            },
          }
        },
      }   
    }
       
    loadbalancers = {
      tiers = {
        app-lb = {
          lb_name                   = "app-loadbalancer"
          fip_name                  = "app-fip"
          subnet_id                 = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
          private_ip_type           = "Dynamic"
          backend_address_pool_name = "app-bpool"
    
          lb_probes = {
            tomcat-probe = {
              protocol = "Tcp"
              port     = "8080"
            }
          }
          lb_rules = {
            tomcat-rule = {
              frontend_port           = "7070"
              protocol                = "Tcp"
              backend_port            = "8080"
              enable_floating_ip      = true
              frontend_ip_config_name = "app-loadbalancer"
              probe_name              = "tomcat-probe"
            }
          }
        },
        db-lb = {
          lb_name                   = "db-loadbalancer"
          fip_name                  = "db-fip"
          subnet_id                 = "/subscriptions/************/resourceGroups/pluto/providers/Microsoft.Network/virtualNetworks/pluto-infra/subnets/app-subnet"
          private_ip_type           = "Dynamic"
          backend_address_pool_name = "db-bpool"
    
          lb_probes = {
            db-probe = {
              protocol = "Tcp"
              port     = "8080"
            }
          }
          lb_rules = {
            db-rule = {
              frontend_port           = "7070"
              protocol                = "Tcp"
              backend_port            = "8080"
              enable_floating_ip      = true
              frontend_ip_config_name = "db-loadbalancer"
              probe_name              = "db-probe"
            }
          }
        }
      }
    }
    
    vm_nic_to_lb_bpool_map = {
      "app1_node1-nic1" = "app_lb"
      "db1_node1-nic1"  = "db_lb"
      "db2_node2-nic2"  = "db_lb"
    }


**variables**

variable "resource_group_location" {
  type    = string
  default = "eastus"
}

variable "vm_prefix" {
  type    = string
  default = "pluto"
}

variable "virtual_machines" {
}

variable "existing_resource_group" {
  type = string
}

variable "loadbalancers" {
}

variable "vm_nic_to_lb_bpool_map" {
}

ERROR :

│ Error: Invalid index
│
│   on virtualmachine\output.tf line 11, in output "virtual_machine_nic_keys":
│   11:       "${vm_name}-${nic_name}" => azurerm_network_interface.nic["${vm_name}-${nic_name}-nic"].id
│     ├────────────────
│     │ azurerm_network_interface.nic is object with 6 attributes
│
│ The given key does not identify an element in this collection value.

could someone help to find the error root cause or how to write output variable in virtual machine to output the virtual machine nic keys that matches the vm_nic_to_lb_bpool_map ?

Expecting the below association
"app1_node1-nic1" = "app_lb" "db1_node1-nic1" = "db_lb" "db2_node2-nic2" = "db_lb"

non-module code working as expected with below code

resource "azurerm_network_interface_backend_address_pool_association" "nic_lb_association" {
  for_each = local.vm_to_lb_map

  network_interface_id    = azurerm_network_interface.nic-poc[each.key].id
  ip_configuration_name   = azurerm_network_interface.nic-poc[each.key].ip_configuration[0].name
  backend_address_pool_id = azurerm_lb_backend_address_pool.bepool[each.value].id
}
1

There are 1 best solutions below

2
Vinay B On

I tried to pass VM NIC keys to Load Balancer module using output block using terraform and I was able to provision the requirment successfully.

You are getting this error because of how you defined the output variable virtual_machine_nic_keys in your virtualmachine module. The error means that the collection azurerm_network_interface.nic does not have the key you are using to access it. Therefore, the keys you are using and the keys in the azurerm_network_interface.nic map do not match.

To resolve this issue, let's focus on the relevant portion of your Terraform configuration:

`output "virtual_machine_nic_keys" {
  value = merge([
    for vm_name, vm in var.virtual_machines.nodes :
    {
      for nic_name, nic in vm.networks :
      "${vm_name}-${nic_name}" => azurerm_network_interface.nic["${vm_name}-${nic_name}-nic"].id
    }
  ]...)
}`

To overcome the issue double-check that the NIC names you're constructing keys with ("${vm_name}-${nic_name}-nic") match exactly how they are named when creating azurerm_network_interface.nic resources. The error suggests there's at least one key that doesn't match.

My terraform configuration:

main.tf:

provider "azurerm" {
  features {}
}

resource "azurerm_resource_group" "rg" {
  name     = "examplevk-rg"
  location = "East US"
}

resource "azurerm_virtual_network" "example" {
  name                = "testvk-vnet"
  address_space       = ["10.0.0.0/16"]
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name
}

resource "azurerm_subnet" "example" {
  name                 = "testvk-subnet"
  resource_group_name  = azurerm_resource_group.rg.name
  virtual_network_name = azurerm_virtual_network.example.name
  address_prefixes     = ["10.0.1.0/24"]
}

resource "azurerm_public_ip" "lb_pip" {
  name                = "testvk-public-ip"
  resource_group_name = azurerm_resource_group.rg.name
  location            = azurerm_resource_group.rg.location
  allocation_method   = "Static"
  sku                 = "Standard"
}

module "vm" {
  source              = "./modules/vm"
  resource_group_name = azurerm_resource_group.rg.name
  location            = azurerm_resource_group.rg.location
  vm_prefix           = "vktdestvm"
  subnet_id           = azurerm_subnet.example.id
  virtual_network_id  = azurerm_virtual_network.example.id
   vm_configurations   = {
    vm1 = {
      size           = "Standard_B2s",
      admin_username = "adminuser",
      admin_password = "SecurePassword123!",
      subnet_id      = azurerm_subnet.example.id
    }
  }
}

module "loadbalancer" {
  source              = "./modules/loadbalancer"
  resource_group_name = azurerm_resource_group.rg.name
  location            = azurerm_resource_group.rg.location
  public_ip_id        = azurerm_public_ip.lb_pip.id
    vm_nic_ids          = {
    "vm1" = {
      ip_address         = "10.0.0.4" // Example IP, replace with actual dynamic reference
      virtual_network_id = azurerm_virtual_network.example.id
    }
    // Add more VMs as needed
  }
  lb_config = {
    name = "myLoadBalancer"
    sku  = "Standard"
    frontend_ip_configs = [
      {
        name         = "myFrontendIPConfig"
        public_ip_id = azurerm_public_ip.lb_pip.id
      }
    ]
    backend_pools = [
      {
        name = "myBackendPool"
      }
    ]
    health_probes = [
      {
        name         = "myHealthProbe"
        protocol     = "Http"
        port         = 80
        request_path = "/"
      }
    ]
    lb_rules = [
      {
        name                       = "myLoadBalancingRule"
        protocol                   = "Tcp"
        frontend_port              = 80
        backend_port               = 80
        frontend_ip_config_name    = "myFrontendIPConfig"
        backend_address_pool_name  = "myBackendPool"
        probe_name                 = "myHealthProbe"
        enable_floating_ip         = false
      }
    ]
  }
}

modules/vm/main.tf:

variable "resource_group_name" { type = string }
variable "location" { type = string }
variable "subnet_id" { type = string }
variable "vm_prefix" { type = string }

variable "virtual_network_id" {
  description = "The ID of the virtual network where the VMs are connected."
  type        = string
}

variable "vm_configurations" {
  description = "Configurations for VMs."
  type = map(object({
    size           = string
    admin_username = string
    admin_password = string
    subnet_id      = string
  }))
}


resource "azurerm_network_interface" "main" {
  for_each           = var.vm_configurations
  name               = "${var.vm_prefix}-${each.key}-nic"
  location           = var.location
  resource_group_name= var.resource_group_name

  ip_configuration {
    name                          = "internal"
    subnet_id                     = var.subnet_id
    private_ip_address_allocation = "Dynamic"
  }
}

resource "azurerm_linux_virtual_machine" "main" {
  for_each            = var.vm_configurations

  name                          = "${var.vm_prefix}-${each.key}"
  resource_group_name           = var.resource_group_name
  location                      = var.location
  size                          = each.value.size
  admin_username                = each.value.admin_username
  admin_password                = each.value.admin_password
  network_interface_ids         = [azurerm_network_interface.main[each.key].id]
  disable_password_authentication = false

  os_disk {
    caching              = "ReadWrite"
    storage_account_type = "Standard_LRS"
  }

  source_image_reference {
    publisher = "Canonical"
    offer     = "UbuntuServer"
    sku       = "18.04-LTS"
    version   = "latest"
  }
}

output "vm_nic_details" {
  value = {for k, v in azurerm_network_interface.main : k => {
      ip_address = v.ip_configuration[0].private_ip_address
      virtual_network_id = var.virtual_network_id # Assuming you have this variable declared and passed correctly
  }}
}

modules/loadbalancer/main.tf:

variable "resource_group_name" { type = string }
variable "location" { type = string }
variable "public_ip_id" { type = string }
variable "vm_nic_ids" {
  description = "A map of VM NIC IDs to their properties."
  type = map(object({
    ip_address         = string
    virtual_network_id = string
  }))
}


variable "lb_config" {
  description = "Load balancer configuration."
  type = object({
    name                = string
    sku                 = string
    frontend_ip_configs = list(object({
      name           = string
      public_ip_id   = string
    }))
    backend_pools = list(object({
      name = string
    }))
    health_probes = list(object({
      name         = string
      protocol     = string
      port         = number
      request_path = string
    }))
    lb_rules = list(object({
      name                       = string
      protocol                   = string
      frontend_port              = number
      backend_port               = number
      enable_floating_ip         = bool
      frontend_ip_config_name    = string
    }))
  })
}
resource "azurerm_lb" "main" {
  name                = "myLoadBalancer"
  location            = var.location
  resource_group_name = var.resource_group_name
  sku                 = "Standard"

  frontend_ip_configuration {
    name                 = "myFrontendIPConfig"
    public_ip_address_id = var.public_ip_id
  }
}

resource "azurerm_lb_backend_address_pool" "example" {
  loadbalancer_id = azurerm_lb.main.id
  name            = "myBackendPool"
}

resource "azurerm_lb_probe" "main" {
  name            = "myHealthProbe"
  loadbalancer_id = azurerm_lb.main.id
  protocol        = "Http"
  port            = 80
  request_path    = "/"
}

resource "azurerm_lb_rule" "main" {
  name                           = "myLoadBalancingRule"
  loadbalancer_id                = azurerm_lb.main.id
  protocol                       = "Tcp"
  frontend_port                  = 80
  backend_port                   = 80
  frontend_ip_configuration_name = "myFrontendIPConfig"
  //backend_address_pool_id        = azurerm_lb_backend_address_pool.main.id
  probe_id                       = azurerm_lb_probe.main.id
  enable_floating_ip             = false
}

# Assuming VM NICs should be associated with the backend pool
resource "azurerm_lb_backend_address_pool_address" "main" {
  for_each              = var.vm_nic_ids

  name                  = "BAPAddress-${each.key}"
  backend_address_pool_id = azurerm_lb_backend_address_pool.example.id
  ip_address            = each.value.ip_address
  virtual_network_id    = each.value.virtual_network_id
}

Output:

enter image description here

enter image description here

enter image description here