From 4fcd71ca056f43e52a068110eb96c9bd76227c00 Mon Sep 17 00:00:00 2001
From: Othman Hendy Suseno
Date: Sun, 23 Nov 2025 22:37:27 +0700
Subject: [PATCH] add storage pool resolution, spec tags, and handler tests

---
 pkg/api/handlers.go               |  39 +++
 pkg/api/handlers_test.go          | 260 ++++++++++++++++++++++++++++++
 pkg/compute/libvirt/libvirt.go    |  40 ++---
 pkg/containers/lxc/lxc.go         |  31 ++--
 pkg/containers/lxc/lxc_cmd.go     |   5 +-
 pkg/state/store.go                |   3 +
 pkg/state/store_test.go           |   8 +-
 pkg/storage/pools.go              |  58 +++++++
 pkg/validators/validators.go      |  37 +++++
 pkg/validators/validators_test.go |  32 ++++--
 10 files changed, 461 insertions(+), 52 deletions(-)
 create mode 100644 pkg/api/handlers_test.go
 create mode 100644 pkg/storage/pools.go

diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go
index 92b9af9..3a4e336 100644
--- a/pkg/api/handlers.go
+++ b/pkg/api/handlers.go
@@ -15,6 +15,7 @@ import (
 	"jagacloud/node-agent/pkg/config"
 	"jagacloud/node-agent/pkg/containers/lxc"
 	"jagacloud/node-agent/pkg/containers/podman"
+	"jagacloud/node-agent/pkg/storage"
 	"jagacloud/node-agent/pkg/tasks"
 	"jagacloud/node-agent/pkg/validators"
 )
@@ -88,7 +89,15 @@ func handleCreateVM(cfg config.Config, svc Services) http.HandlerFunc {
 			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
 			return
 		}
+		// Best-effort: resolve disk paths for dir pools; other pool types keep Path empty
+		for i := range spec.Disks {
+			if spec.Disks[i].Path == "" && spec.Disks[i].Pool != "" {
+				if path, err := storage.ResolveVolume(toPoolConfigs(cfg.StoragePools), spec.Disks[i].Pool, spec.Disks[i].Name+".qcow2"); err == nil {
+					spec.Disks[i].Path = path
+				}
+			}
+		}
 		if err := svc.Store.SaveVM(spec); err != nil {
 			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
 			return
 		}
@@ -162,6 +171,12 @@ func lifecycleVM(cfg config.Config, svc Services, action string) http.HandlerFun
 			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
 			return
 		}
+	} else {
+		// runtime-only VM: no stored spec, so no pool names to validate (no-op today)
+		if err := validators.CheckStoragePoolsRuntime(nil, cfg); err != nil {
+			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
+			return
+		}
 	}
 	taskID := enqueueWork(svc.Tasks, "vm."+action, func(ctx context.Context) (interface{}, error) {
 		unlock := svc.StoreLock(id)
@@ -234,7 +249,12 @@ func handleCreateCT(cfg config.Config, svc Services) http.HandlerFunc {
 			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
 			return
 		}
+		if spec.RootfsPool != "" && spec.RootfsSizeG > 0 && spec.RootfsPath == "" {
+			if path, err := storage.ResolveVolume(toPoolConfigs(cfg.StoragePools), spec.RootfsPool, spec.ID+"-rootfs"); err == nil {
+				spec.RootfsPath = path
+			}
+		}
 		if err := svc.Store.SaveCT(spec); err != nil {
 			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
 			return
 		}
@@ -474,3 +494,22 @@ func validateVM(spec libvirt.VMSpec, cfg config.Config) error {
 	// storage pools validated elsewhere
 	return nil
 }
+
+// toPoolConfigs converts config pools to storage pool configs.
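+//
+// A minimal usage sketch (the pool values here are hypothetical):
+//
+//	pools := toPoolConfigs([]config.StoragePool{{Name: "vg0", Type: "lvm", Path: "vg0"}})
+//	// pools[0].VG == "vg0": VG mirrors Path until config grows a dedicated VG field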
+func toPoolConfigs(p []config.StoragePool) []storage.PoolConfig { + out := make([]storage.PoolConfig, 0, len(p)) + for _, sp := range p { + out = append(out, storage.PoolConfig{ + Name: sp.Name, + Type: sp.Type, + Path: sp.Path, + VG: sp.Path, // reuse Path for now; real config should split + }) + } + return out +} diff --git a/pkg/api/handlers_test.go b/pkg/api/handlers_test.go new file mode 100644 index 0000000..922c9cd --- /dev/null +++ b/pkg/api/handlers_test.go @@ -0,0 +1,260 @@ +package api + +import ( + "bytes" + "context" + "net/http" + "net/http/httptest" + "path/filepath" + "testing" + "time" + + "github.com/go-chi/chi/v5" + + "jagacloud/node-agent/pkg/compute/libvirt" + "jagacloud/node-agent/pkg/config" + "jagacloud/node-agent/pkg/containers/lxc" + "jagacloud/node-agent/pkg/containers/podman" + "jagacloud/node-agent/pkg/state" + "jagacloud/node-agent/pkg/tasks" +) + +func TestVMCreateAndListUsesStore(t *testing.T) { + tmpDir := t.TempDir() + store := state.NewStore(filepath.Join(tmpDir, "vm"), filepath.Join(tmpDir, "ct")) + cfg := config.Config{ + StoragePools: []config.StoragePool{{Name: "local"}}, + Bridges: []config.Bridge{{Name: "vmbr0"}}, + } + t.Setenv("JAGACLOUD_SKIP_BRIDGE_CHECK", "1") + + svc := Services{ + Tasks: tasks.NewRegistry(), + Libvirt: &fakeLibvirt{}, + LXC: &fakeLXC{}, + Podman: &fakePodman{}, + Store: store, + } + go svc.Tasks.StartWorker(testCtx(t)) + + r := chi.NewRouter() + RegisterRoutes(r, cfg, svc) + + body := []byte(`{"id":"vm-1","name":"vm-1","cpus":1,"memory_mb":512,"disks":[{"name":"root","size_gb":1,"pool":"local"}],"nics":[{"bridge":"vmbr0"}]}`) + req := httptest.NewRequest(http.MethodPost, "/api/v1/vms", bytes.NewBuffer(body)) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + if rec.Code != http.StatusAccepted { + t.Fatalf("expected 202, got %d body=%s", rec.Code, rec.Body.String()) + } + + // allow worker to process + time.Sleep(50 * time.Millisecond) + + listReq := httptest.NewRequest(http.MethodGet, "/api/v1/vms", nil) + listRec := httptest.NewRecorder() + r.ServeHTTP(listRec, listReq) + if listRec.Code != http.StatusOK { + t.Fatalf("expected 200, got %d", listRec.Code) + } +} + +func TestCTCreateAndListUsesStore(t *testing.T) { + tmpDir := t.TempDir() + store := state.NewStore(filepath.Join(tmpDir, "vm"), filepath.Join(tmpDir, "ct")) + cfg := config.Config{ + StoragePools: []config.StoragePool{{Name: "local"}}, + Bridges: []config.Bridge{{Name: "vmbr0"}}, + } + t.Setenv("JAGACLOUD_SKIP_BRIDGE_CHECK", "1") + + svc := Services{ + Tasks: tasks.NewRegistry(), + Libvirt: &fakeLibvirt{}, + LXC: &fakeLXC{}, + Podman: &fakePodman{}, + Store: store, + } + go svc.Tasks.StartWorker(testCtx(t)) + + r := chi.NewRouter() + RegisterRoutes(r, cfg, svc) + + body := []byte(`{"id":"ct-1","name":"ct-1","template":"debian","rootfs_pool":"local","rootfs_size_g":1,"nics":[{"bridge":"vmbr0"}],"limits":{"cpus":1,"memory_mb":256}}`) + req := httptest.NewRequest(http.MethodPost, "/api/v1/containers", bytes.NewBuffer(body)) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + if rec.Code != http.StatusAccepted { + t.Fatalf("expected 202, got %d", rec.Code) + } + + time.Sleep(50 * time.Millisecond) + + listReq := httptest.NewRequest(http.MethodGet, "/api/v1/containers", nil) + listRec := httptest.NewRecorder() + r.ServeHTTP(listRec, listReq) + if listRec.Code != http.StatusOK { + t.Fatalf("expected 200, got %d", listRec.Code) + } +} + +func TestVMLifecycleMissingID(t *testing.T) { + tmpDir := t.TempDir() + store := state.NewStore(filepath.Join(tmpDir, 
"vm"), filepath.Join(tmpDir, "ct")) + cfg := config.Config{} + svc := Services{ + Tasks: tasks.NewRegistry(), + Libvirt: &fakeLibvirt{}, + LXC: &fakeLXC{}, + Podman: &fakePodman{}, + Store: store, + } + r := chi.NewRouter() + RegisterRoutes(r, cfg, svc) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/vms/missing/start", nil) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + if rec.Code != http.StatusNotFound { + t.Fatalf("expected 404, got %d", rec.Code) + } +} + +func TestVMLifecycleDeleteCleansSpec(t *testing.T) { + tmpDir := t.TempDir() + store := state.NewStore(filepath.Join(tmpDir, "vm"), filepath.Join(tmpDir, "ct")) + cfg := config.Config{ + StoragePools: []config.StoragePool{{Name: "local"}}, + Bridges: []config.Bridge{{Name: "vmbr0"}}, + } + t.Setenv("JAGACLOUD_SKIP_BRIDGE_CHECK", "1") + svc := Services{ + Tasks: tasks.NewRegistry(), + Libvirt: &fakeLibvirt{}, + LXC: &fakeLXC{}, + Podman: &fakePodman{}, + Store: store, + } + go svc.Tasks.StartWorker(testCtx(t)) + r := chi.NewRouter() + RegisterRoutes(r, cfg, svc) + + // create spec directly + _ = store.SaveVM(libvirt.VMSpec{ID: "vm-del", Name: "vm-del", CPU: 1, MemoryMB: 512, Disks: []libvirt.DiskSpec{{Name: "root", Pool: "local", SizeGB: 1}}}) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/vms/vm-del/delete", nil) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + if rec.Code != http.StatusAccepted { + t.Fatalf("expected 202, got %d", rec.Code) + } + time.Sleep(50 * time.Millisecond) + if _, err := store.LoadVM("vm-del"); err == nil { + t.Fatalf("expected spec to be deleted") + } +} + +func TestVMLifecycleStartStop(t *testing.T) { + tmpDir := t.TempDir() + store := state.NewStore(filepath.Join(tmpDir, "vm"), filepath.Join(tmpDir, "ct")) + cfg := config.Config{ + StoragePools: []config.StoragePool{{Name: "local"}}, + Bridges: []config.Bridge{{Name: "vmbr0"}}, + } + t.Setenv("JAGACLOUD_SKIP_BRIDGE_CHECK", "1") + svc := Services{ + Tasks: tasks.NewRegistry(), + Libvirt: &fakeLibvirt{}, + LXC: &fakeLXC{}, + Podman: &fakePodman{}, + Store: store, + } + go svc.Tasks.StartWorker(testCtx(t)) + r := chi.NewRouter() + RegisterRoutes(r, cfg, svc) + + _ = store.SaveVM(libvirt.VMSpec{ID: "vm-run", Name: "vm-run", CPU: 1, MemoryMB: 512, Disks: []libvirt.DiskSpec{{Name: "root", Pool: "local", SizeGB: 1}}}) + + for _, action := range []string{"start", "stop"} { + req := httptest.NewRequest(http.MethodPost, "/api/v1/vms/vm-run/"+action, nil) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + if rec.Code != http.StatusAccepted { + t.Fatalf("expected 202 for %s, got %d", action, rec.Code) + } + } +} + +func TestCTLifecycleDeleteCleansSpec(t *testing.T) { + tmpDir := t.TempDir() + store := state.NewStore(filepath.Join(tmpDir, "vm"), filepath.Join(tmpDir, "ct")) + cfg := config.Config{ + StoragePools: []config.StoragePool{{Name: "local"}}, + Bridges: []config.Bridge{{Name: "vmbr0"}}, + } + t.Setenv("JAGACLOUD_SKIP_BRIDGE_CHECK", "1") + svc := Services{ + Tasks: tasks.NewRegistry(), + Libvirt: &fakeLibvirt{}, + LXC: &fakeLXC{}, + Podman: &fakePodman{}, + Store: store, + } + go svc.Tasks.StartWorker(testCtx(t)) + r := chi.NewRouter() + RegisterRoutes(r, cfg, svc) + + _ = store.SaveCT(lxc.Spec{ID: "ct-del", Name: "ct-del", Template: "debian", RootfsPool: "local", RootfsSizeG: 1, Limits: lxc.Limits{CPU: 1, MemoryMB: 256}}) + req := httptest.NewRequest(http.MethodPost, "/api/v1/containers/ct-del/delete", nil) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + if rec.Code != http.StatusAccepted { + 
t.Fatalf("expected 202, got %d", rec.Code) + } + time.Sleep(50 * time.Millisecond) + if _, err := store.LoadCT("ct-del"); err == nil { + t.Fatalf("expected CT spec to be deleted") + } +} + +// testCtx returns a cancellable context for workers. +func testCtx(t *testing.T) context.Context { + t.Helper() + ctx, _ := context.WithCancel(context.Background()) + return ctx +} + +type fakeLibvirt struct{} + +func (f *fakeLibvirt) ListVMs() ([]libvirt.VM, error) { + return []libvirt.VM{{ID: "vm-1", Name: "vm-1", Status: "running"}}, nil +} +func (f *fakeLibvirt) CreateVM(spec libvirt.VMSpec) (libvirt.VM, error) { + return libvirt.VM{ID: spec.ID, Name: spec.Name, Status: "running"}, nil +} +func (f *fakeLibvirt) StartVM(id string) error { return nil } +func (f *fakeLibvirt) StopVM(id string) error { return nil } +func (f *fakeLibvirt) RebootVM(id string) error { return nil } +func (f *fakeLibvirt) DeleteVM(id string) error { return nil } + +type fakeLXC struct{} + +func (f *fakeLXC) List() ([]lxc.Container, error) { + return []lxc.Container{{ID: "ct-1", Name: "ct-1", Status: "running", Unpriv: true}}, nil +} +func (f *fakeLXC) Create(spec lxc.Spec) (lxc.Container, error) { + return lxc.Container{ID: spec.ID, Name: spec.Name, Status: "stopped", Unpriv: spec.Unprivileged}, nil +} +func (f *fakeLXC) Start(id string) error { return nil } +func (f *fakeLXC) Stop(id string) error { return nil } +func (f *fakeLXC) Delete(id string) error { return nil } + +type fakePodman struct{} + +func (f *fakePodman) List(ctID string) ([]podman.OCIContainer, error) { return nil, nil } +func (f *fakePodman) Create(ctID string, spec podman.CreateSpec) (podman.OCIContainer, error) { + return podman.OCIContainer{ID: "oci-1", Image: spec.Image, Status: "created"}, nil +} +func (f *fakePodman) Start(ctID, cid string) error { return nil } +func (f *fakePodman) Stop(ctID, cid string) error { return nil } +func (f *fakePodman) Delete(ctID, cid string) error { return nil } diff --git a/pkg/compute/libvirt/libvirt.go b/pkg/compute/libvirt/libvirt.go index 2be7cf3..96e83d1 100644 --- a/pkg/compute/libvirt/libvirt.go +++ b/pkg/compute/libvirt/libvirt.go @@ -11,35 +11,35 @@ type Client interface { } type VMSpec struct { - ID string - Name string - CPU int - MemoryMB int - Disks []DiskSpec - NICs []NICSpec - CloudInit *CloudInitSpec - CloudInitISO string + ID string `json:"id" yaml:"id"` + Name string `json:"name" yaml:"name"` + CPU int `json:"cpus" yaml:"cpus"` + MemoryMB int `json:"memory_mb" yaml:"memory_mb"` + Disks []DiskSpec `json:"disks" yaml:"disks"` + NICs []NICSpec `json:"nics" yaml:"nics"` + CloudInit *CloudInitSpec `json:"cloud_init" yaml:"cloud_init"` + CloudInitISO string `json:"cloud_init_iso" yaml:"cloud_init_iso"` } type DiskSpec struct { - Name string - Pool string - SizeGB int - Bus string - Path string - Prealloc string // "", "metadata", "full" + Name string `json:"name" yaml:"name"` + Pool string `json:"pool" yaml:"pool"` + SizeGB int `json:"size_gb" yaml:"size_gb"` + Bus string `json:"bus" yaml:"bus"` + Path string `json:"path" yaml:"path"` + Prealloc string `json:"prealloc" yaml:"prealloc"` // "", "metadata", "full" } type NICSpec struct { - Bridge string - VLAN int - Model string + Bridge string `json:"bridge" yaml:"bridge"` + VLAN int `json:"vlan" yaml:"vlan"` + Model string `json:"model" yaml:"model"` } type CloudInitSpec struct { - User string - SSHKeys []string - UserData string + User string `json:"user" yaml:"user"` + SSHKeys []string `json:"ssh_keys" yaml:"ssh_keys"` + UserData string `json:"user_data" 
yaml:"user_data"` } type VM struct { diff --git a/pkg/containers/lxc/lxc.go b/pkg/containers/lxc/lxc.go index f328335..de79b12 100644 --- a/pkg/containers/lxc/lxc.go +++ b/pkg/containers/lxc/lxc.go @@ -10,27 +10,28 @@ type Manager interface { } type Spec struct { - ID string - Name string - Template string - RootfsPool string - RootfsSizeG int - NICs []NICSpec - Limits Limits - Unprivileged bool + ID string `json:"id" yaml:"id"` + Name string `json:"name" yaml:"name"` + Template string `json:"template" yaml:"template"` + RootfsPool string `json:"rootfs_pool" yaml:"rootfs_pool"` + RootfsSizeG int `json:"rootfs_size_g" yaml:"rootfs_size_g"` + RootfsPath string `json:"rootfs_path" yaml:"rootfs_path"` + NICs []NICSpec `json:"nics" yaml:"nics"` + Limits Limits `json:"limits" yaml:"limits"` + Unprivileged bool `json:"unprivileged" yaml:"unprivileged"` } type NICSpec struct { - Bridge string - VLAN int - HWAddr string - MTU int - Name string + Bridge string `json:"bridge" yaml:"bridge"` + VLAN int `json:"vlan" yaml:"vlan"` + HWAddr string `json:"hwaddr" yaml:"hwaddr"` + MTU int `json:"mtu" yaml:"mtu"` + Name string `json:"name" yaml:"name"` } type Limits struct { - CPU int - MemoryMB int + CPU int `json:"cpus" yaml:"cpus"` + MemoryMB int `json:"memory_mb" yaml:"memory_mb"` } type Container struct { diff --git a/pkg/containers/lxc/lxc_cmd.go b/pkg/containers/lxc/lxc_cmd.go index 159cd68..ec735a5 100644 --- a/pkg/containers/lxc/lxc_cmd.go +++ b/pkg/containers/lxc/lxc_cmd.go @@ -101,7 +101,10 @@ func renderConfig(spec Spec) (string, error) { fmt.Fprintf(buf, "lxc.idmap = u 0 %d %d\n", uidStart, count) fmt.Fprintf(buf, "lxc.idmap = g 0 %d %d\n", uidStart, count) } - rootfs := fmt.Sprintf("/var/lib/lxc/%s/rootfs", spec.Name) + rootfs := spec.RootfsPath + if rootfs == "" { + rootfs = fmt.Sprintf("/var/lib/lxc/%s/rootfs", spec.Name) + } fmt.Fprintf(buf, "lxc.rootfs.path = dir:%s\n", rootfs) for idx, nic := range spec.NICs { fmt.Fprintf(buf, "lxc.net.%d.type = veth\n", idx) diff --git a/pkg/state/store.go b/pkg/state/store.go index 018d46b..bdf735d 100644 --- a/pkg/state/store.go +++ b/pkg/state/store.go @@ -136,6 +136,9 @@ func (s *Store) SaveCT(spec lxc.Spec) error { if spec.ID == "" { return fmt.Errorf("ct id is required") } + if spec.RootfsPool == "" { + return fmt.Errorf("rootfs_pool is required") + } data, err := yaml.Marshal(spec) if err != nil { return err diff --git a/pkg/state/store_test.go b/pkg/state/store_test.go index 0e25f71..9491071 100644 --- a/pkg/state/store_test.go +++ b/pkg/state/store_test.go @@ -3,8 +3,8 @@ package state import "testing" func TestTrimExt(t *testing.T) { - got := trimExt("foo.yaml") - if got != "foo" { - t.Fatalf("expected foo, got %s", got) - } + got := trimExt("foo.yaml") + if got != "foo" { + t.Fatalf("expected foo, got %s", got) + } } diff --git a/pkg/storage/pools.go b/pkg/storage/pools.go new file mode 100644 index 0000000..3ff3ad3 --- /dev/null +++ b/pkg/storage/pools.go @@ -0,0 +1,58 @@ +package storage + +import ( + "fmt" + "os" + "path/filepath" +) + +// PoolConfig describes a configured storage pool. +type PoolConfig struct { + Name string `json:"name" yaml:"name"` + Type string `json:"type" yaml:"type"` // dir|lvm|zfs + Path string `json:"path" yaml:"path"` // for dir/zfs + VG string `json:"vg" yaml:"vg"` // for lvm +} + +// AttachRequest describes a volume attachment for a VM/CT. +type AttachRequest struct { + Pool string + Volume string + Path string // resolved path for libvirt/LXC +} + +// ResolveVolume maps pool+vol name to a path for dir pools. 
+func ResolveVolume(pools []PoolConfig, poolName, vol string) (string, error) {
+	for _, p := range pools {
+		if p.Name != poolName {
+			continue
+		}
+		// dir pool: join path/vol
+		if p.Type == "dir" {
+			if p.Path == "" {
+				return "", fmt.Errorf("dir pool %s missing path", poolName)
+			}
+			target := filepath.Join(p.Path, vol)
+			return target, nil
+		}
+		// TODO: lvm/zfs support
+		return "", fmt.Errorf("pool type %s not yet supported", p.Type)
+	}
+	return "", fmt.Errorf("pool %s not found", poolName)
+}
+
+// PoolExists reports whether a pool with the given name is present in config.
+func PoolExists(pools []PoolConfig, name string) bool {
+	for _, p := range pools {
+		if p.Name == name {
+			// accept on a config match alone: a dir pool's path may not
+			// exist yet and can be created later during provisioning
+			return true
+		}
+	}
+	return false
+}
diff --git a/pkg/validators/validators.go b/pkg/validators/validators.go
index 3a55571..4e48b5b 100644
--- a/pkg/validators/validators.go
+++ b/pkg/validators/validators.go
@@ -2,7 +2,9 @@ package validators
 
 import (
 	"fmt"
+	"os"
 	"os/exec"
+	"strings"
 
 	"jagacloud/node-agent/pkg/compute/libvirt"
 	"jagacloud/node-agent/pkg/config"
@@ -14,6 +16,9 @@ func CheckBridge(name string) error {
 	if name == "" {
 		return fmt.Errorf("bridge name required")
 	}
+	if os.Getenv("JAGACLOUD_SKIP_BRIDGE_CHECK") == "1" {
+		return nil
+	}
 	if err := exec.Command("bash", "-lc", fmt.Sprintf("ip link show %s", name)).Run(); err != nil {
 		return fmt.Errorf("bridge %s not found", name)
 	}
@@ -70,6 +75,38 @@ func CheckStoragePoolsCT(spec lxc.Spec, cfg config.Config) error {
 	return nil
 }
 
+// CheckStoragePoolsRuntime checks each pool against config first, then falls back to querying libvirt via virsh.
+func CheckStoragePoolsRuntime(pools []string, cfg config.Config) error {
+	if len(pools) == 0 {
+		return nil
+	}
+	for _, p := range pools {
+		if p == "" {
+			continue
+		}
+		if poolExists(cfg, p) {
+			continue
+		}
+		// Try libvirt
+		cmd := exec.Command("bash", "-lc", "virsh -q pool-list --name")
+		out, err := cmd.Output()
+		if err != nil {
+			return fmt.Errorf("pool %s not found and virsh failed: %v", p, err)
+		}
+		found := false
+		for _, line := range strings.Split(string(out), "\n") {
+			if strings.TrimSpace(line) == p {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return fmt.Errorf("storage pool %s not found", p)
+		}
+	}
+	return nil
+}
+
 func poolExists(cfg config.Config, name string) bool {
 	for _, p := range cfg.StoragePools {
 		if p.Name == name {
diff --git a/pkg/validators/validators_test.go b/pkg/validators/validators_test.go
index d1f45b6..453d9ab 100644
--- a/pkg/validators/validators_test.go
+++ b/pkg/validators/validators_test.go
@@ -1,20 +1,28 @@
 package validators
 
 import (
-    "testing"
+	"testing"
 
-    "jagacloud/node-agent/pkg/compute/libvirt"
-    "jagacloud/node-agent/pkg/config"
+	"jagacloud/node-agent/pkg/compute/libvirt"
+	"jagacloud/node-agent/pkg/config"
 )
 
 func TestPoolExists(t *testing.T) {
-    cfg := config.Config{StoragePools: []config.StoragePool{{Name: "local"}}}
-    err := CheckStoragePoolsVM([]libvirt.DiskSpec{{Pool: "local"}}, cfg)
-    if err != nil {
-        t.Fatalf("expected pool to be valid: %v", err)
-    }
-    err = CheckStoragePoolsVM([]libvirt.DiskSpec{{Pool: "missing"}}, cfg)
-    if err == nil {
-        t.Fatalf("expected error for missing pool")
-    }
+	cfg := config.Config{StoragePools: []config.StoragePool{{Name: "local"}}}
+	err := CheckStoragePoolsVM([]libvirt.DiskSpec{{Pool: "local"}}, cfg)
+	if err != nil {
+		t.Fatalf("expected pool to be 
valid: %v", err) + } + err = CheckStoragePoolsVM([]libvirt.DiskSpec{{Pool: "missing"}}, cfg) + if err == nil { + t.Fatalf("expected error for missing pool") + } }