When using Terraform to increase max_pods in the default_node_pool of AKS, the cluster itself must be recreated


Question

  1. Using Terraform, we adjusted max_pods of the default_node_pool of our AKS cluster from 20 to 30.
  2. network_policy and network_plugin are both set to "azure".

The code is as follows.

  • var.tf
variable "system_rg" {
  type        = string
  default     = "aks-test-resourcegroup"
}

variable "location" {
  type        = string
  default     = "Korea Central"
}

###################
# k8s cluster
###################
variable "cluster_name" {
  default     = "Test-AKS"
}

variable "aks_version" {
  type    = string
  default = "1.25.5"
}

variable "private_cluster_enabled" {
  type    = string
  default = "true"
}

variable "private_cluster_public_fqdn_enabled" {
  type    = string
  default = "true"
}

variable "private_dns_zone_id" {
  type    = string
  default = "None"
}

variable "sku_tier" {
  type    = string
  default = "Free"
}
################### 
# default_node_pool
###################
variable "only_critical_addons_enabled" {
  type        = string
  default     = "true"
}

variable "temporary_name_for_rotation" {
  type    = string
  default = "tempsys01"
}

variable "orchestrator_version" {
  type    = string
  default = "1.25.5"
}

variable "agents_count" {
  type    = number
  default = "3"
}

variable "agents_size" {
  type    = string
  default = "Standard_D4s_v5"
}

variable "os_disk_size_gb" {
  description = "The size of the OS Disk which should be used for each agent in the Node Pool. Changing this forces a new resource to be created."
  type        = number
  default     = 256
}

variable "max_pods" {
  description = "The maximum number of pods that can run on each agent. Changing this forces a new resource to be created."
  type        = number
  default     = "30" # 20 => 30
}
###################
# linux_profile
###################
variable "admin_username" {
  type    = string
  default = "azureuser"
}

variable "ssh_public_key" {
  type        = string
  default     = ""
}

################### 
# network_profile
###################
variable "service_cidr" {
  type    = string
  default = "10.254.0.0/24"
}

variable "dns_service_ip" {
  type    = string
  default = "10.254.0.10"
}

variable "docker_bridge_cidr" {
  type    = string
  default = "172.17.0.1/16"
}

# ###############################
# # user_node_pool
# ###############################
variable "usernodepoo_vm" {
  description = "VM of AKS Cluster"
  type        = map(any)
  default = {
    vm1 = {
      user_agents_name         = "upool01"
      user_agents_size         = "Standard_D4s_v5"
      user_agents_count        = "4"
      user_agents_os_disk_size = "256" 
      max_pods                 = "20"
      orchestrator_version     = "1.25.5"
    }
  }
}
  • cluster.tf
############################################################
# AKS Cluster
############################################################

resource "azurerm_kubernetes_cluster" "aks" {
  name                                = var.cluster_name
  location                            = var.location
  resource_group_name                 = data.azurerm_resource_group.aks-rg.name 
  node_resource_group                 = "${var.system_rg}-node"
  dns_prefix                          = var.cluster_name
  kubernetes_version                  = var.aks_version
  private_cluster_enabled             = var.private_cluster_enabled
  private_cluster_public_fqdn_enabled = var.private_cluster_public_fqdn_enabled 
  private_dns_zone_id                 = var.private_dns_zone_id
  sku_tier                            = var.sku_tier

  default_node_pool {
    name                         = "syspool01"
    vm_size                      = var.agents_size
    os_disk_size_gb              = var.os_disk_size_gb
    node_count                   = var.agents_count
    vnet_subnet_id               = data.azurerm_subnet.subnet.id
    zones                        = [1, 2, 3]
    kubelet_disk_type            = "OS"
    os_sku                       = "Ubuntu"
    os_disk_type                 = "Managed"
    ultra_ssd_enabled            = "false"
    max_pods                     = var.max_pods
    only_critical_addons_enabled = var.only_critical_addons_enabled 
    temporary_name_for_rotation  = var.temporary_name_for_rotation 
    orchestrator_version         = var.aks_version
  }

  linux_profile {
    admin_username = var.admin_username

    ssh_key {
      key_data = replace(coalesce(var.ssh_public_key, tls_private_key.ssh[0].public_key_openssh), "\n", "")
    }
  }

  network_profile {
    network_plugin    = "azure"
    network_policy    = "azure"
    load_balancer_sku = "standard"
    outbound_type     = "userDefinedRouting"
    service_cidr      = var.service_cidr
    dns_service_ip    = var.dns_service_ip
  }

  tags = {
    Environment = "${var.tag}"
  }

  identity {
    type = "SystemAssigned"
  }
}

## usernodepool
resource "azurerm_kubernetes_cluster_node_pool" "usernodepool" {
  for_each = var.usernodepoo_vm

  name                  = each.value.user_agents_name
  kubernetes_cluster_id = azurerm_kubernetes_cluster.aks.id
  vm_size               = each.value.user_agents_size
  os_disk_size_gb       = each.value.user_agents_os_disk_size 
  node_count            = each.value.user_agents_count
  vnet_subnet_id        = data.azurerm_subnet.subnet.id
  zones                 = [1, 2, 3]
  mode                  = "User"
  kubelet_disk_type     = "OS"
  os_sku                = "Ubuntu"
  os_disk_type          = "Managed"
  ultra_ssd_enabled     = "false"
  max_pods              = each.value.max_pods
  orchestrator_version  = each.value.orchestrator_version
}
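
The configuration above also references a few objects that are not shown in the question (var.tag, the aks-rg resource group and subnet data sources, and tls_private_key.ssh). A minimal sketch of what they might look like, purely so the excerpt is self-contained; all names and values here are placeholders, not taken from the original post:

# Hypothetical supporting definitions assumed by the code above.
# Names and values are placeholders only.
variable "tag" {
  type    = string
  default = "test"
}

data "azurerm_resource_group" "aks-rg" {
  name = var.system_rg
}

data "azurerm_subnet" "subnet" {
  name                 = "aks-subnet" # placeholder
  virtual_network_name = "aks-vnet"   # placeholder
  resource_group_name  = var.system_rg
}

resource "tls_private_key" "ssh" {
  # Only generate a key when no public key is supplied,
  # matching the coalesce() fallback in linux_profile.
  count     = var.ssh_public_key == "" ? 1 : 0
  algorithm = "RSA"
  rsa_bits  = 4096
}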

Applying this Terraform code will attempt to recreate the entire cluster. Is there a way to prevent this and just increase the number of max_pods?

I tried setting it up as below, but it was the same.

resource "azurerm_kubernetes_cluster" "aks" {
...
  lifecycle {
    prevent_destroy = true
  }
}
│ Error: Instance cannot be destroyed
│
│   on cluster.tf line 63:
│   63: resource "azurerm_kubernetes_cluster" "aks" {
│
│ Resource azurerm_kubernetes_cluster.aks has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the
│ scope of the plan using the -target flag.

Answer 1

Score: 2


> Applying this Terraform code will attempt to recreate the entire cluster. Is there a way to prevent this and just increase the number of max_pods?

To prevent recreating the entire cluster and only update the max_pods value, you can use the Terraform lifecycle configuration block to manage how the resource behaves during updates. What actually allows the default_node_pool to change without replacing the cluster is temporary_name_for_rotation: the provider cycles the system node pool through a temporary pool instead of recreating the whole cluster.

Here is sample code that updates max_pods without destroying the existing AKS cluster:

provider "azurerm" {
  features {}
}
resource "azurerm_resource_group" "aksdemo-rg" {
  name     = "demo-rg-aks-test"
  location = "West Europe"
}
resource "azurerm_kubernetes_cluster" "hellaks" {
  name                = "example-aks1"
  location            = azurerm_resource_group.aksdemo-rg.location
  resource_group_name = azurerm_resource_group.aksdemo-rg.name
  dns_prefix          = "exampleaks1"

  default_node_pool {
    name       = "default"
    node_count = 3
    max_pods   = 30
    vm_size    = "Standard_D2_v2"
    temporary_name_for_rotation = "exampleaks1temp"
  }
  identity {
    type = "SystemAssigned"
  }
  tags = {
    Environment = "Production"
  }
    lifecycle {
    prevent_destroy = true
  }
}
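
After changing max_pods, it is worth confirming in the terraform plan output that azurerm_kubernetes_cluster is reported as an in-place update (with the default node pool cycled through the temporary pool named by temporary_name_for_rotation) rather than "destroy and then create replacement" before running terraform apply.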

Terraform Plan: (screenshot)

Terraform Apply: (screenshot)

Output: (screenshot)
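
A side note: in-place rotation of the default_node_pool via temporary_name_for_rotation requires a sufficiently recent azurerm provider (it appears to have been introduced around v3.47.0), so if the plan still wants to replace the cluster, it is worth checking the provider version in use. A minimal sketch of pinning it, assuming no required_providers block exists yet:

terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      # Assumption: any release new enough to support
      # temporary_name_for_rotation on default_node_pool.
      version = ">= 3.47.0"
    }
  }
}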
