@@ -287,14 +287,23 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
287287 // "sshfs" requires ESXi SSH and sshfs on PVE node
288288 // "sshfs_boot" always uses SSHFS
289289 // "https" uses VMware API (no SSHFS needed)
290+ // "auto" uses SSHFS when available (required for vSAN — HTTPS can't serve vSAN objects)
290291 const requestedTransferMode = config . transferMode || "sshfs"
292+ const hasVsanDisks = vmConfig . disks . some ( d => d . datastoreName . toLowerCase ( ) . includes ( 'vsan' ) )
291293 let useSSHFS = false
292- if ( isSshfsBoot || requestedTransferMode === "sshfs" ) {
294+ if ( isSshfsBoot || requestedTransferMode === "sshfs" || ( requestedTransferMode === "auto" && esxiSshAvailable ) ) {
293295 if ( ! esxiSshAvailable ) {
294296 throw new Error ( "SSHFS transfer mode requires SSH to be configured on the ESXi connection. Please enable SSH in the connection settings." )
295297 }
296298 useSSHFS = true
297299 }
300+ // vSAN requires SSHFS — HTTPS /folder/ endpoint can't serve vSAN object-backed disks reliably
301+ if ( hasVsanDisks && ! useSSHFS ) {
302+ throw new Error (
303+ `vSAN datastores require SSHFS transfer mode but SSH is not available. ` +
304+ `Please enable SSH on the ESXi connection and select "SSHFS" or "Auto" transfer mode.`
305+ )
306+ }
298307
299308 // Check sshfs binary on PVE node when SSHFS mode is active
300309 let sshfsMountPath = ''
@@ -1086,22 +1095,23 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
10861095 // Verify the disk file is accessible via SSHFS
10871096 const checkFile = await executeSSH ( config . targetConnectionId , nodeIp , `test -f "${ sshfsDiskPath } " && echo EXISTS || echo MISSING` )
10881097 if ( checkFile . output ?. trim ( ) !== "EXISTS" ) {
1089- // Try without -flat suffix (some storage types use different naming)
1098+ // -flat.vmdk not found (common on vSAN where data is object-backed)
1099+ // Fall back to VMDK descriptor - qemu-img -f vmdk can read it and follow references
10901100 const altPath = `${ sshfsMountPath } /${ disk . relativePath } `
10911101 const checkAlt = await executeSSH ( config . targetConnectionId , nodeIp , `test -f "${ altPath } " && echo EXISTS || echo MISSING` )
10921102 if ( checkAlt . output ?. trim ( ) === "EXISTS" ) {
1093- await appendLog ( jobId , `Using descriptor VMDK path (no -flat suffix): ${ altPath } ` , "info" )
1094- // qemu-img can read VMDK descriptors and resolve the flat file automatically
1095- return await sshfsConvertAndImport ( i , disk , altPath , tmpFile )
1103+ await appendLog ( jobId , `Using VMDK descriptor (vSAN/object storage): qemu-img will read via descriptor` , "info" )
1104+ return await sshfsConvertAndImport ( i , disk , altPath , tmpFile , "vmdk" )
10961105 }
1097- throw new Error ( `Disk file not found via SSHFS: ${ sshfsDiskPath } ` )
1106+ throw new Error ( `Disk file not found via SSHFS: ${ sshfsDiskPath } (also tried descriptor: ${ altPath } ) ` )
10981107 }
10991108
1100- await sshfsConvertAndImport ( i , disk , sshfsDiskPath , tmpFile )
1109+ await sshfsConvertAndImport ( i , disk , sshfsDiskPath , tmpFile , "raw" )
11011110 }
11021111
11031112 // Core convert+import from an SSHFS path for file-based storage
1104- async function sshfsConvertAndImport ( i : number , disk : EsxiDiskInfo , sourcePath : string , tmpFile : string ) {
1113+ // inputFormat: "raw" for flat VMDKs (direct raw data), "vmdk" for VMDK descriptors (vSAN/object storage)
1114+ async function sshfsConvertAndImport ( i : number , disk : EsxiDiskInfo , sourcePath : string , tmpFile : string , inputFormat : "raw" | "vmdk" = "raw" ) {
11051115 const diskSizeGB = ( disk . capacityBytes / 1073741824 ) . toFixed ( 1 )
11061116 const scsiSlot = `scsi${ i } `
11071117
@@ -1122,8 +1132,10 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
11221132 const outputFile = `${ tmpFile } .${ importFormat } `
11231133
11241134 // Use qemu-img convert with progress output
1135+ // inputFormat=vmdk: reads VMDK descriptor and follows references (required for vSAN)
1136+ // inputFormat=raw: reads flat VMDK as raw data (standard VMFS)
11251137 await executeSSH ( config . targetConnectionId , nodeIp ,
1126- `cat > "${ convertScript } " << 'CONVEOF'\nqemu-img convert -p -f raw -O ${ importFormat } "${ sourcePath } " "${ outputFile } " 2>"${ progressFile } "\necho $? > "${ exitFile } "\nCONVEOF`
1138+ `cat > "${ convertScript } " << 'CONVEOF'\nqemu-img convert -p -f ${ inputFormat } -O ${ importFormat } "${ sourcePath } " "${ outputFile } " 2>"${ progressFile } "\necho $? > "${ exitFile } "\nCONVEOF`
11271139 )
11281140
11291141 const startConvert = await executeSSH ( config . targetConnectionId , nodeIp ,
@@ -1223,24 +1235,28 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
12231235 }
12241236 }
12251237
1226- // Stream disk via SSHFS for block storage (dd from mounted flat VMDK to pre-allocated device)
1238+ // Stream disk via SSHFS for block storage (dd or qemu-img convert to pre-allocated device)
12271239 async function streamDiskViaSshfsToBlock ( i : number , disk : EsxiDiskInfo , devicePath : string ) {
12281240 const diskSizeGB = ( disk . capacityBytes / 1073741824 ) . toFixed ( 1 )
12291241 await appendLog ( jobId , `[Disk ${ i + 1 } /${ vmConfig . disks . length } ] Streaming "${ disk . label } " via SSHFS to block device (${ diskSizeGB } GB)...` )
12301242
12311243 const flatPath = disk . relativePath . replace ( /\.vmdk$/ , "-flat.vmdk" )
12321244 let sshfsDiskPath = `${ sshfsMountPath } /${ flatPath } `
1245+ let useVmdkDescriptor = false
12331246
12341247 // Verify file exists
12351248 const checkFile = await executeSSH ( config . targetConnectionId , nodeIp , `test -f "${ sshfsDiskPath } " && echo EXISTS || echo MISSING` )
12361249 if ( checkFile . output ?. trim ( ) !== "EXISTS" ) {
1237- // Try descriptor path as fallback
1250+ // -flat.vmdk not found (common on vSAN where data is object-backed)
1251+ // Fall back to VMDK descriptor - qemu-img can read it and follow references to actual data
12381252 const altPath = `${ sshfsMountPath } /${ disk . relativePath } `
12391253 const checkAlt = await executeSSH ( config . targetConnectionId , nodeIp , `test -f "${ altPath } " && echo EXISTS || echo MISSING` )
12401254 if ( checkAlt . output ?. trim ( ) === "EXISTS" ) {
12411255 sshfsDiskPath = altPath
1256+ useVmdkDescriptor = true
1257+ await appendLog ( jobId , `Using VMDK descriptor (vSAN/object storage): qemu-img convert will read disk data via descriptor` , "info" )
12421258 } else {
1243- throw new Error ( `Disk file not found via SSHFS: ${ sshfsDiskPath } ` )
1259+ throw new Error ( `Disk file not found via SSHFS: ${ sshfsDiskPath } (also tried descriptor: ${ altPath } ) ` )
12441260 }
12451261 }
12461262
@@ -1251,23 +1267,31 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
12511267 totalBytes : BigInt ( disk . capacityBytes ) ,
12521268 } )
12531269
1254- // dd from SSHFS mount directly to block device
12551270 const ctrlPrefix = `/tmp/proxcenter-mig-${ jobId } -sshfsblk${ i } `
12561271 const progressFile = `${ ctrlPrefix } .progress`
12571272 const pidFile = `${ ctrlPrefix } .pid`
12581273 const exitFile = `${ ctrlPrefix } .exit`
1259- const ddScript = `${ ctrlPrefix } .sh`
1274+ const transferScript = `${ ctrlPrefix } .sh`
12601275
1261- await executeSSH ( config . targetConnectionId , nodeIp ,
1262- `cat > "${ ddScript } " << 'DDEOF'\ndd if="${ sshfsDiskPath } " of="${ devicePath } " bs=4M status=progress 2>"${ progressFile } "\necho $? > "${ exitFile } "\nDDEOF`
1263- )
1276+ if ( useVmdkDescriptor ) {
1277+ // vSAN / object storage: use qemu-img convert to read VMDK descriptor and write raw to block device
1278+ // qemu-img understands VMDK format and follows descriptor references to the actual data objects
1279+ await executeSSH ( config . targetConnectionId , nodeIp ,
1280+ `cat > "${ transferScript } " << 'XFEREOF'\nqemu-img convert -p -f vmdk -O raw "${ sshfsDiskPath } " "${ devicePath } " 2>"${ progressFile } "\necho $? > "${ exitFile } "\nXFEREOF`
1281+ )
1282+ } else {
1283+ // VMFS / standard: flat VMDK is raw data, dd directly to block device (faster, no conversion overhead)
1284+ await executeSSH ( config . targetConnectionId , nodeIp ,
1285+ `cat > "${ transferScript } " << 'XFEREOF'\ndd if="${ sshfsDiskPath } " of="${ devicePath } " bs=4M status=progress 2>"${ progressFile } "\necho $? > "${ exitFile } "\nXFEREOF`
1286+ )
1287+ }
12641288
1265- const startDd = await executeSSH ( config . targetConnectionId , nodeIp ,
1266- `nohup bash "${ ddScript } " > /dev/null 2>&1 & echo $!` )
1267- if ( ! startDd . success || ! startDd . output ?. trim ( ) ) {
1268- throw new Error ( `Failed to start dd : ${ startDd . error } ` )
1289+ const startCmd = await executeSSH ( config . targetConnectionId , nodeIp ,
1290+ `nohup bash "${ transferScript } " > /dev/null 2>&1 & echo $!` )
1291+ if ( ! startCmd . success || ! startCmd . output ?. trim ( ) ) {
1292+ throw new Error ( `Failed to start ${ useVmdkDescriptor ? 'qemu-img convert' : 'dd' } : ${ startCmd . error } ` )
12691293 }
1270- const pid = startDd . output . trim ( )
1294+ const pid = startCmd . output . trim ( )
12711295 await executeSSH ( config . targetConnectionId , nodeIp , `echo ${ pid } > "${ pidFile } "` )
12721296
12731297 const totalBytes = disk . capacityBytes
@@ -1276,15 +1300,23 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
12761300
12771301 while ( true ) {
12781302 if ( isCancelled ( jobId ) ) {
1279- await executeSSH ( config . targetConnectionId , nodeIp , `kill ${ pid } 2>/dev/null; rm -f "${ ddScript } " "${ pidFile } " "${ exitFile } " "${ progressFile } "` )
1303+ await executeSSH ( config . targetConnectionId , nodeIp , `kill ${ pid } 2>/dev/null; rm -f "${ transferScript } " "${ pidFile } " "${ exitFile } " "${ progressFile } "` )
12801304 throw new Error ( "Migration cancelled" )
12811305 }
12821306 await new Promise ( r => setTimeout ( r , 3000 ) )
12831307
1284- // Parse dd progress: "123456789 bytes ..."
1285- const progressResult = await executeSSH ( config . targetConnectionId , nodeIp ,
1286- `tail -c 200 "${ progressFile } " 2>/dev/null | tr '\\r' '\\n' | grep -oP '^\\d+' | tail -1 || echo 0` )
1287- transferredBytes = Number . parseInt ( progressResult . output ?. trim ( ) || "0" , 10 ) || 0
1308+ if ( useVmdkDescriptor ) {
1309+ // Parse qemu-img progress: outputs lines like "(12.34/100%)"
1310+ const progressResult = await executeSSH ( config . targetConnectionId , nodeIp ,
1311+ `tail -c 100 "${ progressFile } " 2>/dev/null | tr '\\r' '\\n' | grep -oP '[\\d.]+(?=/100%)' | tail -1 || echo 0` )
1312+ const pct = Number . parseFloat ( progressResult . output ?. trim ( ) || "0" ) || 0
1313+ transferredBytes = Math . round ( ( pct / 100 ) * totalBytes )
1314+ } else {
1315+ // Parse dd progress: "123456789 bytes ..."
1316+ const progressResult = await executeSSH ( config . targetConnectionId , nodeIp ,
1317+ `tail -c 200 "${ progressFile } " 2>/dev/null | tr '\\r' '\\n' | grep -oP '^\\d+' | tail -1 || echo 0` )
1318+ transferredBytes = Number . parseInt ( progressResult . output ?. trim ( ) || "0" , 10 ) || 0
1319+ }
12881320
12891321 const elapsed = ( Date . now ( ) - startTime ) / 1000
12901322 const speedBps = elapsed > 0 ? transferredBytes / elapsed : 0
@@ -1302,9 +1334,9 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
13021334 const exitCheck = await executeSSH ( config . targetConnectionId , nodeIp , `cat "${ exitFile } " 2>/dev/null || echo RUNNING` )
13031335 if ( exitCheck . output ?. trim ( ) !== "RUNNING" ) {
13041336 const exitCode = Number . parseInt ( exitCheck . output ?. trim ( ) || "1" , 10 )
1305- await executeSSH ( config . targetConnectionId , nodeIp , `rm -f "${ ddScript } " "${ pidFile } " "${ exitFile } " "${ progressFile } "` )
1337+ await executeSSH ( config . targetConnectionId , nodeIp , `rm -f "${ transferScript } " "${ pidFile } " "${ exitFile } " "${ progressFile } "` )
13061338 if ( exitCode !== 0 ) {
1307- throw new Error ( `dd streaming failed (exit ${ exitCode } )` )
1339+ throw new Error ( `${ useVmdkDescriptor ? 'qemu-img convert' : 'dd' } failed (exit ${ exitCode } )` )
13081340 }
13091341 break
13101342 }
@@ -1729,8 +1761,32 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
17291761 let bootMethod : "qemu-ssh" | "sshfs" | "nbd" | null = null
17301762 const diskBus = vmConfig . disks [ 0 ] ?. controllerType ?. toLowerCase ( ) ?. includes ( "scsi" ) ? "scsi" : "sata"
17311763 const firstDisk = vmConfig . disks [ 0 ]
1764+
1765+ // Detect vSAN: -flat.vmdk doesn't exist as a separate POSIX file on vSAN
1766+ // We need to check via SSHFS whether the flat file or the descriptor should be used
17321767 const firstFlatPath = firstDisk . relativePath . replace ( /\.vmdk$/ , "-flat.vmdk" )
1733- const firstEsxiPath = `/vmfs/volumes/${ firstDisk . datastoreName } /${ firstFlatPath } `
1768+ const firstDescriptorPath = firstDisk . relativePath
1769+ let useVmdkFormat = false // true when we must use VMDK descriptor instead of flat raw
1770+
1771+ // Check if -flat.vmdk exists (won't on vSAN)
1772+ if ( useSSHFS ) {
1773+ const firstMountPath = sshfsMountedDatastores . get ( firstDisk . datastoreName ) || sshfsMountPath
1774+ const flatCheck = await executeSSH ( config . targetConnectionId , nodeIp ,
1775+ `test -f "${ firstMountPath } /${ firstFlatPath } " && echo EXISTS || echo MISSING` )
1776+ if ( flatCheck . output ?. trim ( ) !== "EXISTS" ) {
1777+ const descCheck = await executeSSH ( config . targetConnectionId , nodeIp ,
1778+ `test -f "${ firstMountPath } /${ firstDescriptorPath } " && echo EXISTS || echo MISSING` )
1779+ if ( descCheck . output ?. trim ( ) === "EXISTS" ) {
1780+ useVmdkFormat = true
1781+ await appendLog ( jobId , "vSAN detected: -flat.vmdk not found, using VMDK descriptor with format=vmdk" , "info" )
1782+ }
1783+ }
1784+ }
1785+
1786+ // Resolve disk path and format based on vSAN detection
1787+ const bootDiskFile = useVmdkFormat ? firstDescriptorPath : firstFlatPath
1788+ const bootDiskFormat = useVmdkFormat ? "vmdk" : "raw"
1789+ const firstEsxiPath = `/vmfs/volumes/${ firstDisk . datastoreName } /${ bootDiskFile } `
17341790
17351791 if ( qemuSshKeyPath ) {
17361792 await appendLog ( jobId , "Testing QEMU SSH driver connectivity..." , "info" )
@@ -1739,7 +1795,7 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
17391795
17401796 if ( qemuTestResult . output ?. includes ( "virtual size" ) || qemuTestResult . output ?. includes ( "file format" ) ) {
17411797 bootMethod = "qemu-ssh"
1742- await appendLog ( jobId , " QEMU SSH driver: connection OK" , "success" )
1798+ await appendLog ( jobId , ` QEMU SSH driver: connection OK (format= ${ bootDiskFormat } )` , "success" )
17431799 } else {
17441800 await appendLog ( jobId , `QEMU SSH driver test failed: ${ qemuTestResult . output ?. substring ( 0 , 200 ) } ` , "warn" )
17451801 }
@@ -1749,10 +1805,10 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
17491805 if ( ! bootMethod ) {
17501806 await appendLog ( jobId , "Trying SSHFS/FUSE boot (QEMU reads from SSHFS mount)..." , "info" )
17511807 const firstMountPath = sshfsMountedDatastores . get ( firstDisk . datastoreName ) || sshfsMountPath
1752- const firstFusePath = `${ firstMountPath } /${ firstFlatPath } `
1808+ const firstFusePath = `${ firstMountPath } /${ bootDiskFile } `
17531809
17541810 const fuseTestResult = await executeSSH ( config . targetConnectionId , nodeIp ,
1755- `timeout 10 qemu-img info '${ firstFusePath } ' 2>&1` )
1811+ `timeout 10 qemu-img info ${ useVmdkFormat ? "-f vmdk " : "" } '${ firstFusePath } ' 2>&1` )
17561812 if ( fuseTestResult . output ?. includes ( "virtual size" ) || fuseTestResult . output ?. includes ( "file format" ) ) {
17571813 useSshfsForBoot = true
17581814
@@ -1780,16 +1836,16 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
17801836 let nbdOk = true
17811837 for ( let di = 0 ; di < vmConfig . disks . length ; di ++ ) {
17821838 const disk = vmConfig . disks [ di ]
1783- const flatP = disk . relativePath . replace ( /\.vmdk$/ , "-flat.vmdk" )
1839+ const diskFile = useVmdkFormat ? disk . relativePath : disk . relativePath . replace ( /\.vmdk$/ , "-flat.vmdk" )
17841840 const mp = sshfsMountedDatastores . get ( disk . datastoreName ) || sshfsMountPath
1785- const fusePath = `${ mp } /${ flatP } `
1841+ const fusePath = `${ mp } /${ diskFile } `
17861842 const sockPath = `/tmp/proxcenter-nbd-${ jobId } -${ di } .sock`
17871843
17881844 await executeSSH ( config . targetConnectionId , nodeIp ,
17891845 `fuser -k "${ sockPath } " 2>/dev/null; rm -f "${ sockPath } "` )
17901846
17911847 const nbdStart = await executeSSH ( config . targetConnectionId , nodeIp ,
1792- `qemu-nbd --fork --persistent --socket="${ sockPath } " --format=raw --cache=writeback --aio=threads '${ fusePath } ' 2>&1` )
1848+ `qemu-nbd --fork --persistent --socket="${ sockPath } " --format=${ bootDiskFormat } --cache=writeback --aio=threads '${ fusePath } ' 2>&1` )
17931849
17941850 await new Promise ( r => setTimeout ( r , 1000 ) )
17951851 const sockCheck = await executeSSH ( config . targetConnectionId , nodeIp , `test -S "${ sockPath } " && echo EXISTS` )
@@ -1884,19 +1940,20 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
18841940 const argsParts : string [ ] = [ ]
18851941 for ( let di = 0 ; di < vmConfig . disks . length ; di ++ ) {
18861942 const disk = vmConfig . disks [ di ]
1887- const flatFile = disk . relativePath . replace ( /\.vmdk$/ , "-flat.vmdk" )
1943+ const diskFile = useVmdkFormat ? disk . relativePath : disk . relativePath . replace ( /\.vmdk$/ , "-flat.vmdk" )
18881944 const driveId = `sshfs-disk${ di } `
18891945
18901946 let driveSpec = ""
18911947 if ( bootMethod === "qemu-ssh" ) {
1892- const esxiPath = `/vmfs/volumes/${ disk . datastoreName } /${ flatFile } `
1893- driveSpec = `file.driver=ssh,file.host=${ esxiHost } ,file.port=${ esxiSshPort } ,file.path=${ esxiPath } ,file.user=${ esxiSshUser } ,file.host-key-check.mode=none${ sshKeyOpt } ,format=raw ,if=none,id=${ driveId } ,cache=writeback,aio=threads`
1948+ const esxiPath = `/vmfs/volumes/${ disk . datastoreName } /${ diskFile } `
1949+ driveSpec = `file.driver=ssh,file.host=${ esxiHost } ,file.port=${ esxiSshPort } ,file.path=${ esxiPath } ,file.user=${ esxiSshUser } ,file.host-key-check.mode=none${ sshKeyOpt } ,format=${ bootDiskFormat } ,if=none,id=${ driveId } ,cache=writeback,aio=threads`
18941950 } else if ( bootMethod === "sshfs" ) {
18951951 const mp = sshfsMountedDatastores . get ( disk . datastoreName ) || sshfsMountPath
1896- const fusePath = `${ mp } /${ flatFile } `
1897- driveSpec = `file=${ fusePath } ,format=raw ,if=none,id=${ driveId } ,cache=writeback,aio=threads,detect-zeroes=on`
1952+ const fusePath = `${ mp } /${ diskFile } `
1953+ driveSpec = `file=${ fusePath } ,format=${ bootDiskFormat } ,if=none,id=${ driveId } ,cache=writeback,aio=threads,detect-zeroes=on`
18981954 } else if ( bootMethod === "nbd" ) {
18991955 const sockPath = ndbSocketPaths [ di ]
1956+ // NBD exports raw blocks regardless of source format (qemu-nbd handles conversion)
19001957 driveSpec = `file.driver=nbd,file.path=${ sockPath } ,format=raw,if=none,id=${ driveId } ,cache=writeback,aio=threads`
19011958 }
19021959
@@ -1967,14 +2024,14 @@ export async function runMigrationPipeline(jobId: string, config: MigrationConfi
19672024 const nbdFallbackParts : string [ ] = [ ]
19682025 for ( let di = 0 ; di < vmConfig . disks . length ; di ++ ) {
19692026 const disk = vmConfig . disks [ di ]
1970- const flatP = disk . relativePath . replace ( /\.vmdk$/ , "-flat.vmdk" )
2027+ const diskP = useVmdkFormat ? disk . relativePath : disk . relativePath . replace ( /\.vmdk$/ , "-flat.vmdk" )
19712028 const mp = sshfsMountedDatastores . get ( disk . datastoreName ) || sshfsMountPath
1972- const fusePath = `${ mp } /${ flatP } `
2029+ const fusePath = `${ mp } /${ diskP } `
19732030 const sockPath = `/tmp/proxcenter-nbd-${ jobId } -${ di } .sock`
19742031
19752032 await executeSSH ( config . targetConnectionId , nodeIp , `fuser -k "${ sockPath } " 2>/dev/null; rm -f "${ sockPath } "` )
19762033 const nbdStartResult = await executeSSH ( config . targetConnectionId , nodeIp ,
1977- `qemu-nbd --fork --persistent --socket="${ sockPath } " --format=raw --cache=writeback --aio=threads '${ fusePath } ' 2>&1` )
2034+ `qemu-nbd --fork --persistent --socket="${ sockPath } " --format=${ bootDiskFormat } --cache=writeback --aio=threads '${ fusePath } ' 2>&1` )
19782035 await new Promise ( r => setTimeout ( r , 1000 ) )
19792036 const sockExists = await executeSSH ( config . targetConnectionId , nodeIp , `test -S "${ sockPath } " && echo EXISTS` )
19802037 if ( nbdStartResult . success && sockExists . output ?. includes ( "EXISTS" ) ) {
0 commit comments