
what is wrong with my disk, is this because of qemu-img convert? #1213

@13567436138

Description


root@ubuntu:~# virsh start terraform-k8s-cluster-master-0
error: Failed to start domain 'terraform-k8s-cluster-master-0'
error: internal error: QEMU unexpectedly closed the monitor (vm='terraform-k8s-cluster-master-0'): 2025-11-19T08:14:52.785026Z qemu-system-x86_64: -blockdev {"driver":"file","filename":"/var/lib/libvirt/disks/terraform/terraform-k8s-cluster-master-0.qcow2","node-name":"libvirt-2-storage","auto-read-only":true,"discard":"unmap"}: Could not open '/var/lib/libvirt/disks/terraform/terraform-k8s-cluster-master-0.qcow2': Permission denied
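
The file QEMU is refused access to is the one written by the qemu-img convert local-exec further down, not a volume libvirt populated itself, so the first check is whether the user the QEMU driver runs as can read that file and traverse every directory above it. A few diagnostics to run on the host (libvirt-qemu:kvm is the Ubuntu default driver user/group, not something taken from this config; check /etc/libvirt/qemu.conf for overrides):

# Who owns the converted image, and is every directory on the path traversable?
ls -l /var/lib/libvirt/disks/terraform/terraform-k8s-cluster-master-0.qcow2
namei -l /var/lib/libvirt/disks/terraform/terraform-k8s-cluster-master-0.qcow2

# Which user/group/security settings the QEMU driver is configured with, if overridden
grep -E '^(user|group|dynamic_ownership|security_driver)' /etc/libvirt/qemu.conf

# On Ubuntu, AppArmor denials also surface to QEMU as 'Permission denied'
sudo dmesg | grep -i 'apparmor.*denied' | tail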

# storage-pool.tf
resource "libvirt_pool" "disks_pool" {
  name = "disks"
  type = "dir"
  target = {
    path = "/var/lib/libvirt/disks/terraform"
  }
}

resource "libvirt_pool" "default" {
  name = "default"
  type = "dir"
  target = {
    path = "/var/lib/libvirt/images/cloud-init"
  }
}

# Verify that the storage pools were created
resource "null_resource" "verify_pool" {
  depends_on = [libvirt_pool.disks_pool]
  
  provisioner "local-exec" {
    command = <<EOT
      echo "验证存储池创建..."
      sudo virsh pool-info disks
      sudo virsh pool-info default
    EOT
  }
}

# volume.tf
resource "libvirt_volume" "master_disk" {
  count  = var.master_nodes
  
  depends_on = [null_resource.verify_pool]
  
  name   = "${var.cluster_name}-master-${count.index}.qcow2"
  
  
  pool   = libvirt_pool.disks_pool.name
  format = "qcow2"
  
  # 设置磁盘大小
  capacity  = var.node_disk_size * 1024 * 1024 * 1024
  
  

}

resource "libvirt_volume" "worker_disk" {
  count  = var.worker_nodes
  
  depends_on = [null_resource.verify_pool]
  
  name   = "${var.cluster_name}-worker-${count.index}.qcow2"
  

  pool   = libvirt_pool.disks_pool.name
  format = "qcow2"
  capacity   = var.node_disk_size * 1024 * 1024 * 1024
 
}

resource "null_resource" "copy_master" {
  count      = var.master_nodes
  depends_on = [libvirt_volume.master_disk]

  provisioner "local-exec" {
    command = <<EOT
      qemu-img convert -f qcow2 -O qcow2 "${var.base_image_url}" "${libvirt_pool.disks_pool.target.path}/${libvirt_volume.master_disk[count.index].name}"
    EOT
  }
  
}

resource "null_resource" "copy_worker" {
  count      = var.worker_nodes
  depends_on = [libvirt_volume.worker_disk]
  
  provisioner "local-exec" {
    command = <<EOT
      qemu-img convert -f qcow2 -O qcow2 "${var.base_image_url}" "${libvirt_pool.disks_pool.target.path}/${libvirt_volume.worker_disk[count.index].name}"
    EOT
  }
}
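
One thing the copy steps above do not do is hand the converted files back to the user the QEMU driver runs as, or tell libvirt that the volume contents changed: qemu-img convert writes the files as whoever runs Terraform. A minimal follow-up sketch, assuming the Ubuntu default libvirt-qemu:kvm (adjust to whatever /etc/libvirt/qemu.conf actually uses), could be appended to the same local-exec commands:

# Give the converted images back to the QEMU driver user and re-scan the pool
sudo chown libvirt-qemu:kvm /var/lib/libvirt/disks/terraform/*.qcow2
sudo chmod 660 /var/lib/libvirt/disks/terraform/*.qcow2
sudo virsh pool-refresh disks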

# cloud-init.tf
# Master node cloud-init configuration - bridged networking
data "template_cloudinit_config" "master_config" {
  count = var.master_nodes

  gzip          = false
  base64_encode = false

  part {
    content_type = "text/cloud-config"
    content = templatefile("${path.module}/scripts/cloud-init/master.cfg", {
      hostname    = "${var.cluster_name}-master-${count.index}"
      ssh_key     = var.ssh_public_key
      static_ip   = var.master_ips[count.index]
      gateway     = var.gateway
      dns_servers = join(",", var.dns_servers)
      domain      = var.domain_name
    })
  }
}

# Worker node cloud-init configuration - bridged networking
data "template_cloudinit_config" "worker_config" {
  count = var.worker_nodes

  gzip          = false
  base64_encode = false

  part {
    content_type = "text/cloud-config"
    content = templatefile("${path.module}/scripts/cloud-init/worker.cfg", {
      hostname    = "${var.cluster_name}-worker-${count.index}"
      ssh_key     = var.ssh_public_key
      static_ip   = var.worker_ips[count.index]
      gateway     = var.gateway
      dns_servers = join(",", var.dns_servers)
      domain      = var.domain_name
    })
  }
}

# Create the cloud-init disks
resource "libvirt_cloudinit_disk" "master_cloudinit" {
  count     = var.master_nodes
  
  depends_on = [data.template_cloudinit_config.master_config]
  
  name      = "${var.cluster_name}-master-${count.index}-cloudinit.iso"

  user_data = data.template_cloudinit_config.master_config[count.index].rendered
  
  meta_data = <<-EOF
    instance-id: ${var.cluster_name}-master-${count.index}
    local-hostname: ${var.cluster_name}-master-${count.index}
  EOF
  
}

resource "libvirt_cloudinit_disk" "worker_cloudinit" {
  count     = var.worker_nodes
  
  depends_on = [data.template_cloudinit_config.worker_config]
  
  name      = "${var.cluster_name}-worker-${count.index}-cloudinit.iso"

  user_data = data.template_cloudinit_config.worker_config[count.index].rendered
  
  meta_data = <<-EOF
    instance-id: ${var.cluster_name}-worker-${count.index}
    local-hostname: ${var.cluster_name}-worker-${count.index}
  EOF
  
}

resource "libvirt_volume" "master_cloudinit" {
  count     = var.master_nodes
  
  depends_on = [libvirt_cloudinit_disk.master_cloudinit]
  
  name = "${var.cluster_name}-master-${count.index}-cloudinit.iso"
  pool = libvirt_pool.default.name

  create = {
    content = {
      url = libvirt_cloudinit_disk.master_cloudinit[count.index].path
    }
  }
}

resource "libvirt_volume" "worker_cloudinit" {
  count     = var.worker_nodes
  
  depends_on = [libvirt_cloudinit_disk.worker_cloudinit]
  
  name = "${var.cluster_name}-master-${count.index}-cloudinit.iso"
  pool = libvirt_pool.default.name

  create = {
    content = {
      url = libvirt_cloudinit_disk.worker_cloudinit[count.index].path
    }
  }
}

# master-nodes.tf
resource "libvirt_domain" "master" {
  count  = var.master_nodes
  
  depends_on = [libvirt_volume.master_cloudinit,null_resource.copy_master]
  
  name   = "${var.cluster_name}-master-${count.index}"
  memory = var.node_memory
  vcpu   = var.node_cpu

  devices = {
    disks = [
      {
        source = {
          pool   = libvirt_volume.master_disk[count.index].pool
          volume = libvirt_volume.master_disk[count.index].name
        }
        target = {
          dev = "vda"
          bus = "virtio"
        }
      },
      {
        device = "cdrom"
        source = {
          pool   = libvirt_volume.master_cloudinit[count.index].pool
          volume = libvirt_volume.master_cloudinit[count.index].name
        }
        target = {
          bus = "sata"
          dev = "sda"
        }
      }
    ]
    interfaces = [
      {
        type = "bridge"
        model = "virtio"
        source = {
          bridge = var.bridge_interface
        }
        hostname   = "${var.cluster_name}-master-${count.index}"
      }
    ]
  }

  os = {
    type         = "hvm"
    type_arch    = "x86_64"
    type_machine = "q35"
    boot_devices = ["hd", "network"]
  }

  # Verify the node booted
  provisioner "remote-exec" {
     inline = [
        "echo 'Node ${var.cluster_name}-master-${count.index} is ready'",
        "ip addr show || echo 'IP command not available yet'",
        "hostname",
        "ping -c 3 ${var.gateway} || echo 'Network not fully ready'"
      ]

      connection {
        type        = "ssh"
        user        = "root"
        private_key = file("~/.ssh/id_ed25519")
        host        = var.master_ips[count.index]
        timeout     = "300s"
      }

      # Continue with the remaining steps even if this fails
      on_failure = continue
  }
}
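
If the ownership and mode of the converted images look correct, it is worth confirming what libvirt itself reports for the volume the domain points at before blaming qemu-img convert, for example:

# Does the pool list the volume the domain refers to, and with which owner/mode?
sudo virsh vol-list disks
sudo virsh vol-dumpxml terraform-k8s-cluster-master-0.qcow2 --pool disks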
