Skip to content

Commit 34c6c4a

Browse files
committed
docs: trim verbose comments

Pass over the new doc/inline comments and tighten the ones whose payload is the function name plus a single non-obvious WHY. Net -34 lines, no substance lost.

- clone.go restorePatchStorageConfigs / buildStateReplacements: drop the re-explanation of how the function's body works.
- cloudhypervisor + firecracker preflight + lock comments: keep the failure mode (outage if we kill first; corruption if we don't lock every writable disk) and drop the procedural restating.
- validateSnapshotIntegrity / preflightRestore / hasMemoryRangeFile / snapshotResidentBasename / writeSnapshotMeta / snapshotMeta + Mount Spec: collapse multi-paragraph docs to one or two sentences.
1 parent 7f93188 commit 34c6c4a

11 files changed

Lines changed: 39 additions & 73 deletions

File tree

hypervisor/cloudhypervisor/clone.go

Lines changed: 7 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -267,14 +267,9 @@ func hasCidataRole(sc *types.StorageConfig) bool {
267267
return sc.Role == types.StorageRoleCidata
268268
}
269269

270-
// restorePatchStorageConfigs returns the disks to feed into patchCHConfig so
271-
// the disk count matches the snapshot's config.json. When the snapshot did
272-
// NOT carry cidata (cloudimg post-first-boot), ensureCloneCidata appended a
273-
// fresh cidata entry — patchCHConfig must not see it (snapshot config.json
274-
// has no slot for it; cidata is hot-plugged later).
275-
//
276-
// When the snapshot carried cidata, or for direct-boot/Windows VMs that
277-
// never have cidata, we pass everything through unchanged.
270+
// restorePatchStorageConfigs strips the cidata entry ensureCloneCidata
271+
// appended to a no-cidata snapshot, so patchCHConfig matches chCfg.Disks
272+
// count. Cidata is hot-plugged later.
278273
func restorePatchStorageConfigs(storageConfigs []*types.StorageConfig, directBoot, windows, hadCidataInSnapshot bool) []*types.StorageConfig {
279274
if directBoot || windows || hadCidataInSnapshot {
280275
return storageConfigs
@@ -329,14 +324,10 @@ func buildCmdline(storageConfigs []*types.StorageConfig, networkConfigs []*types
329324
return cmdline.String()
330325
}
331326

332-
// buildStateReplacements builds old→new string mappings for state.json patching.
333-
// Only disk paths need patching (snapshot paths → clone paths). When
334-
// ensureCloneCidata appended a fresh cidata entry, storageConfigs is one
335-
// longer than chCfg.Disks; the prefix-min iteration covers the entries the
336-
// snapshot already knew about, which is exactly what state.json references.
337-
//
338-
// MAC addresses are no longer patched here — hot-swap (vm.remove-device +
339-
// vm.add-net) replaces the entire virtio-net device with the correct MAC.
327+
// buildStateReplacements maps source disk paths to clone paths for
328+
// state.json patching. Iterates min(chCfg.Disks, storageConfigs) so an
329+
// appended cidata in storageConfigs doesn't desync the prefix.
330+
// MACs are not patched here; NIC hot-swap rewrites the virtio-net device.
340331
func buildStateReplacements(chCfg *chVMConfig, storageConfigs []*types.StorageConfig) map[string]string {
341332
n := min(len(chCfg.Disks), len(storageConfigs))
342333
m := make(map[string]string, n)

hypervisor/cloudhypervisor/direct.go

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,8 +21,7 @@ func (ch *CloudHypervisor) DirectClone(ctx context.Context, vmID string, vmCfg *
2121
// Files are handled per-type: hardlink for memory-range-*, reflink/copy for
2222
// the COW disk, plain copy for small metadata.
2323
func (ch *CloudHypervisor) DirectRestore(ctx context.Context, vmRef string, vmCfg *types.VMConfig, srcDir string) (*types.VM, error) {
24-
// Validate CPU and snapshot integrity BEFORE killing the running VM —
25-
// any failure post-kill costs an outage.
24+
// Preflight before kill — a malformed snapshot must not cost an outage.
2625
if err := hypervisor.ValidateHostCPU(vmCfg.CPU); err != nil {
2726
return nil, err
2827
}

hypervisor/cloudhypervisor/helper.go

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -48,11 +48,9 @@ func ReverseLayerSerials(storageConfigs []*types.StorageConfig) []string {
4848
return serials
4949
}
5050

51-
// validateSnapshotIntegrity is the CH-specific preflight wrapper around
52-
// hypervisor.ValidateSnapshotIntegrity: it runs the common checks, asserts
53-
// the sidecar mirrors snapshot config.json's disk shape, and verifies the
54-
// CH-specific runtime files (state.json + at least one memory-range-*) are
55-
// present so vm.restore won't fail post-kill.
51+
// validateSnapshotIntegrity is the CH preflight: common checks, sidecar/
52+
// config.json disk shape match, plus state.json + memory-range-* presence
53+
// so vm.restore won't fail post-kill.
5654
func validateSnapshotIntegrity(srcDir string, sidecar []*types.StorageConfig) error {
5755
if err := hypervisor.ValidateSnapshotIntegrity(srcDir, sidecar); err != nil {
5856
return err
@@ -89,9 +87,8 @@ func validateSnapshotIntegrity(srcDir string, sidecar []*types.StorageConfig) er
8987
return nil
9088
}
9189

92-
// hasMemoryRangeFile reports whether srcDir contains at least one CH
93-
// memory-range-* file. We can't enumerate the exact set without parsing
94-
// state.json, but a missing prefix is enough to fail vm.restore.
90+
// hasMemoryRangeFile reports whether srcDir has at least one CH
91+
// memory-range-* file. A missing prefix is enough to fail vm.restore.
9592
func hasMemoryRangeFile(srcDir string) (bool, error) {
9693
entries, err := os.ReadDir(srcDir)
9794
if err != nil {

hypervisor/cloudhypervisor/restore.go

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -31,9 +31,7 @@ func (ch *CloudHypervisor) Restore(ctx context.Context, vmRef string, vmCfg *typ
3131
}
3232
defer cleanupStaging()
3333

34-
// Pre-flight: validate the staged snapshot fully (sidecar shape, data
35-
// disk files present, role sequence vs live record) BEFORE killing the
36-
// running VM. A malformed snapshot must not cost an outage.
34+
// Preflight before kill — a malformed snapshot must not cost an outage.
3735
if err := ch.preflightRestore(stagingDir, rec); err != nil {
3836
return nil, fmt.Errorf("snapshot preflight: %w", err)
3937
}
@@ -94,10 +92,8 @@ func (ch *CloudHypervisor) restoreAfterExtract(ctx context.Context, vmID string,
9492
}()
9593

9694
chConfigPath := filepath.Join(rec.RunDir, "config.json")
97-
// Use the sidecar's length, which mirrors snapshot config.json's disk
98-
// count. rec.StorageConfigs may carry trailing cidata that the snapshot
99-
// (post-first-boot) doesn't — prefix-slicing trims it cleanly because
100-
// cidata is always at the tail of rec.
95+
// Use sidecar length; rec may have trailing cidata that the snapshot
96+
// lacks (cloudimg post-first-boot), prefix-slice trims it.
10197
meta, metaErr := loadSnapshotMeta(rec.RunDir)
10298
if metaErr != nil {
10399
return nil, fmt.Errorf("load snapshot meta: %w", metaErr)

hypervisor/cloudhypervisor/snapshot.go

Lines changed: 6 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,10 @@ import (
1515
"github.com/cocoonstack/cocoon/utils"
1616
)
1717

18-
// snapshotMetaFile is the cocoon-owned sidecar inside a CH snapshot.
19-
// It mirrors snapshot config.json's disk shape but carries Role/MountPoint/
20-
// FSType/DirectIO that the CH schema cannot.
18+
// snapshotMetaFile carries Role/MountPoint/FSType/DirectIO that CH's chDisk
19+
// schema can't hold; mirrors chCfg.Disks order and length.
2120
const snapshotMetaFile = "cocoon.json"
2221

23-
// snapshotMeta is persisted as cocoon.json alongside CH state.json/config.json.
24-
// StorageConfigs is in the same order and length as snapshot config.json's disks.
2522
type snapshotMeta struct {
2623
StorageConfigs []*types.StorageConfig `json:"storage_configs"`
2724
BootConfig *types.BootConfig `json:"boot_config,omitempty"`
@@ -134,11 +131,10 @@ func (ch *CloudHypervisor) Snapshot(ctx context.Context, ref string) (*types.Sna
134131
return ch.BuildSnapshotConfig(snapID, &rec), utils.TarDirStreamWithRemove(tmpDir), nil
135132
}
136133

137-
// writeSnapshotMeta builds the cocoon.json sidecar. Sidecar mirrors the
138-
// snapshot's own config.json shape (chCfg.Disks order/length), not
139-
// activeDisks(rec) — the latter would diverge whenever a cloudimg VM is
140-
// snapshotted between FirstBooted=true and the next stop, since CH still
141-
// holds cidata in that window.
134+
// writeSnapshotMeta builds the cocoon.json sidecar by mirroring the
135+
// snapshot's config.json shape. Using activeDisks(rec) would diverge for a
136+
// cloudimg VM snapshotted post-FirstBooted but pre-restart: CH still holds
137+
// cidata, but activeDisks would skip it.
142138
func writeSnapshotMeta(tmpDir string, rec *hypervisor.VMRecord) error {
143139
chCfg, _, err := parseCHConfig(filepath.Join(tmpDir, "config.json"))
144140
if err != nil {

hypervisor/firecracker/direct.go

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -41,11 +41,9 @@ func (fc *Firecracker) DirectRestore(ctx context.Context, vmRef string, vmCfg *t
4141
return nil, killErr
4242
}
4343

44-
// Lock every writable disk (COW + data disks) so recoverStaleBackup heals
45-
// stale clone backups before the restore overwrites the disks. Locking
46-
// only the COW would let a leftover data-<x>.raw.cocoon-clone-backup
47-
// survive, then a future clone would rename that backup back over the
48-
// freshly restored data disk.
44+
// Lock every writable disk so recoverStaleBackup heals stale
45+
// data-*.raw.cocoon-clone-backup before restore overwrites them;
46+
// otherwise a future clone would rename the backup over restored data.
4947
var result *types.VM
5048
if lockErr := withSourceWritableDisksLocked(rec.StorageConfigs, func() error {
5149
if cleanErr := cleanSnapshotFiles(rec.RunDir); cleanErr != nil {

hypervisor/firecracker/helper.go

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,9 @@ const pidFileName = "fc.pid"
1212

1313
var runtimeFiles = []string{hypervisor.APISocketName, pidFileName, hypervisor.ConsoleSockName}
1414

15-
// preflightRestore loads the FC sidecar, runs structural validation, asserts
16-
// the role sequence matches the live record's prefix, and confirms FC's
17-
// snapshot/load runtime files (vmstate + mem) are present. Called before
18-
// killing the running VM so a malformed snapshot is rejected without outage.
19-
//
20-
// FC has no config.json equivalent to compare against (vmstate is binary),
21-
// so the only cocoon-side disk-shape check is the sidecar itself.
15+
// preflightRestore is the FC preflight: common checks, role-sequence match,
16+
// vmstate + mem files. FC vmstate is binary so the sidecar is the only
17+
// cocoon-side disk-shape source — there's no chCfg counterpart.
2218
func (fc *Firecracker) preflightRestore(srcDir string, rec *hypervisor.VMRecord) error {
2319
meta, err := loadSnapshotMeta(srcDir, fc.conf.RootDir, fc.conf.Config.RunDir)
2420
if err != nil {

hypervisor/firecracker/restore.go

Lines changed: 4 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,7 @@ func (fc *Firecracker) Restore(ctx context.Context, vmRef string, vmCfg *types.V
3636
}
3737
defer cleanupStaging()
3838

39-
// Pre-flight: validate the staged snapshot fully BEFORE killing the
40-
// running VM. A malformed snapshot must not cost an outage.
39+
// Preflight before kill — a malformed snapshot must not cost an outage.
4140
if err := fc.preflightRestore(stagingDir, rec); err != nil {
4241
return nil, fmt.Errorf("snapshot preflight: %w", err)
4342
}
@@ -47,11 +46,9 @@ func (fc *Firecracker) Restore(ctx context.Context, vmRef string, vmCfg *types.V
4746
return nil, killErr
4847
}
4948

50-
// Lock every writable disk (COW + data disks). withCOWPathLocked alone
51-
// would leave a stale data-<x>.raw.cocoon-clone-backup unhealed; the next
52-
// concurrent clone/snapshot would acquire that path's lock,
53-
// recoverStaleBackup would rename the backup over the just-restored data
54-
// disk, silently corrupting it.
49+
// Lock every writable disk so recoverStaleBackup heals stale
50+
// data-*.raw.cocoon-clone-backup before restore overwrites them;
51+
// otherwise a future clone would rename the backup over restored data.
5552
var result *types.VM
5653
if lockErr := withSourceWritableDisksLocked(rec.StorageConfigs, func() error {
5754
_ = os.Remove(cowPath)

hypervisor/firecracker/snapshot.go

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -60,10 +60,9 @@ func (fc *Firecracker) Snapshot(ctx context.Context, ref string) (*types.Snapsho
6060
return nil, nil, fmt.Errorf("create temp dir: %w", err)
6161
}
6262

63-
// Serialize the COW + data-disk copy with concurrent clone redirects.
64-
// withSourceWritableDisksLocked locks every writable disk in dictionary
65-
// order so a concurrent clone seeing the same source can't race the
66-
// reflink against its rename+symlink redirect.
63+
// Lock every writable disk so a concurrent clone's rename+symlink
64+
// redirect can't race this snapshot's reflink. Dictionary order keeps
65+
// concurrent callers deadlock-free.
6766
if err := withSourceWritableDisksLocked(rec.StorageConfigs, func() error {
6867
return fc.WithRunningVM(ctx, &rec, func(_ int) error {
6968
if err := pauseVM(ctx, hc); err != nil {

hypervisor/utils.go

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -478,9 +478,8 @@ func createSparseFile(path string, size int64) error {
478478
}
479479

480480
// snapshotResidentBasename returns the basename a sidecar entry's file should
481-
// have inside the snapshot dir, or "" if the disk is not stored alongside the
482-
// snapshot (i.e. shared base layers). The sidecar's Path field still points at
483-
// the source VM's runDir, so we strip back to the basename.
481+
// have inside srcDir, or "" for shared base layers (not in the snapshot tar).
482+
// Sidecar Path still references the source runDir, so we strip to basename.
484483
func snapshotResidentBasename(sc *types.StorageConfig) string {
485484
switch sc.Role {
486485
case types.StorageRoleData:

0 commit comments

Comments (0)