Add more code

This commit is contained in:
2025-11-23 22:37:27 +07:00
parent b506a64ed1
commit 4fcd71ca05
10 changed files with 448 additions and 52 deletions

View File

@@ -15,6 +15,7 @@ import (
"jagacloud/node-agent/pkg/config"
"jagacloud/node-agent/pkg/containers/lxc"
"jagacloud/node-agent/pkg/containers/podman"
"jagacloud/node-agent/pkg/storage"
"jagacloud/node-agent/pkg/tasks"
"jagacloud/node-agent/pkg/validators"
)
@@ -88,6 +89,14 @@ func handleCreateVM(cfg config.Config, svc Services) http.HandlerFunc {
writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
return
}
// Resolve disk paths for dir pools
for i := range spec.Disks {
if spec.Disks[i].Path == "" && spec.Disks[i].Pool != "" {
if path, err := storage.ResolveVolume(toPoolConfigs(cfg.StoragePools), spec.Disks[i].Pool, spec.Disks[i].Name+".qcow2"); err == nil {
spec.Disks[i].Path = path
}
}
}
if err := svc.Store.SaveVM(spec); err != nil {
writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
@@ -162,6 +171,12 @@ func lifecycleVM(cfg config.Config, svc Services, action string) http.HandlerFun
writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
return
}
} else {
// runtime-only VM: attempt pool validation via virsh and config
if err := validators.CheckStoragePoolsRuntime([]string{}, cfg); err != nil {
writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
return
}
}
taskID := enqueueWork(svc.Tasks, "vm."+action, func(ctx context.Context) (interface{}, error) {
unlock := svc.StoreLock(id)
@@ -234,6 +249,11 @@ func handleCreateCT(cfg config.Config, svc Services) http.HandlerFunc {
writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
return
}
if spec.RootfsPool != "" && spec.RootfsSizeG > 0 && spec.RootfsPath == "" {
if path, err := storage.ResolveVolume(toPoolConfigs(cfg.StoragePools), spec.RootfsPool, spec.ID+"-rootfs"); err == nil {
spec.RootfsPath = path
}
}
if err := svc.Store.SaveCT(spec); err != nil {
writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
return
@@ -474,3 +494,17 @@ func validateVM(spec libvirt.VMSpec, cfg config.Config) error {
// storage pools validated elsewhere
return nil
}
// toPoolConfigs converts config pools to storage pool configs.
// NOTE(review): VG is populated from Path as a stopgap; the real config
// should carry a dedicated volume-group field for LVM pools.
func toPoolConfigs(p []config.StoragePool) []storage.PoolConfig {
	out := make([]storage.PoolConfig, len(p))
	for i, sp := range p {
		out[i] = storage.PoolConfig{
			Name: sp.Name,
			Type: sp.Type,
			Path: sp.Path,
			VG:   sp.Path, // reuse Path for now; real config should split
		}
	}
	return out
}

260
pkg/api/handlers_test.go Normal file
View File

@@ -0,0 +1,260 @@
package api
import (
"bytes"
"context"
"net/http"
"net/http/httptest"
"path/filepath"
"testing"
"time"
"github.com/go-chi/chi/v5"
"jagacloud/node-agent/pkg/compute/libvirt"
"jagacloud/node-agent/pkg/config"
"jagacloud/node-agent/pkg/containers/lxc"
"jagacloud/node-agent/pkg/containers/podman"
"jagacloud/node-agent/pkg/state"
"jagacloud/node-agent/pkg/tasks"
)
// TestVMCreateAndListUsesStore exercises the VM create flow over the HTTP API:
// POST /api/v1/vms must return 202 Accepted and GET /api/v1/vms must return
// 200 afterwards, with hypervisor access stubbed by fakes and specs persisted
// in a temp-dir backed store.
func TestVMCreateAndListUsesStore(t *testing.T) {
	tmpDir := t.TempDir()
	// VM and CT specs are persisted under separate temp subdirectories.
	store := state.NewStore(filepath.Join(tmpDir, "vm"), filepath.Join(tmpDir, "ct"))
	cfg := config.Config{
		StoragePools: []config.StoragePool{{Name: "local"}},
		Bridges:      []config.Bridge{{Name: "vmbr0"}},
	}
	// Skip the `ip link` bridge probe — vmbr0 does not exist on test hosts.
	t.Setenv("JAGACLOUD_SKIP_BRIDGE_CHECK", "1")
	svc := Services{
		Tasks:   tasks.NewRegistry(),
		Libvirt: &fakeLibvirt{},
		LXC:     &fakeLXC{},
		Podman:  &fakePodman{},
		Store:   store,
	}
	// Background worker drains the task queue; testCtx ties it to this test.
	go svc.Tasks.StartWorker(testCtx(t))
	r := chi.NewRouter()
	RegisterRoutes(r, cfg, svc)
	body := []byte(`{"id":"vm-1","name":"vm-1","cpus":1,"memory_mb":512,"disks":[{"name":"root","size_gb":1,"pool":"local"}],"nics":[{"bridge":"vmbr0"}]}`)
	req := httptest.NewRequest(http.MethodPost, "/api/v1/vms", bytes.NewBuffer(body))
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)
	if rec.Code != http.StatusAccepted {
		t.Fatalf("expected 202, got %d body=%s", rec.Code, rec.Body.String())
	}
	// allow worker to process
	// NOTE(review): a fixed sleep can be flaky on slow machines — consider polling.
	time.Sleep(50 * time.Millisecond)
	listReq := httptest.NewRequest(http.MethodGet, "/api/v1/vms", nil)
	listRec := httptest.NewRecorder()
	r.ServeHTTP(listRec, listReq)
	if listRec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d", listRec.Code)
	}
}
// TestCTCreateAndListUsesStore exercises the container create flow over the
// HTTP API: POST /api/v1/containers must return 202 Accepted and
// GET /api/v1/containers must return 200 afterwards, with LXC access stubbed
// by fakes and specs persisted in a temp-dir backed store.
func TestCTCreateAndListUsesStore(t *testing.T) {
	tmpDir := t.TempDir()
	// VM and CT specs are persisted under separate temp subdirectories.
	store := state.NewStore(filepath.Join(tmpDir, "vm"), filepath.Join(tmpDir, "ct"))
	cfg := config.Config{
		StoragePools: []config.StoragePool{{Name: "local"}},
		Bridges:      []config.Bridge{{Name: "vmbr0"}},
	}
	// Skip the `ip link` bridge probe — vmbr0 does not exist on test hosts.
	t.Setenv("JAGACLOUD_SKIP_BRIDGE_CHECK", "1")
	svc := Services{
		Tasks:   tasks.NewRegistry(),
		Libvirt: &fakeLibvirt{},
		LXC:     &fakeLXC{},
		Podman:  &fakePodman{},
		Store:   store,
	}
	// Background worker drains the task queue; testCtx ties it to this test.
	go svc.Tasks.StartWorker(testCtx(t))
	r := chi.NewRouter()
	RegisterRoutes(r, cfg, svc)
	body := []byte(`{"id":"ct-1","name":"ct-1","template":"debian","rootfs_pool":"local","rootfs_size_g":1,"nics":[{"bridge":"vmbr0"}],"limits":{"cpus":1,"memory_mb":256}}`)
	req := httptest.NewRequest(http.MethodPost, "/api/v1/containers", bytes.NewBuffer(body))
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)
	if rec.Code != http.StatusAccepted {
		t.Fatalf("expected 202, got %d", rec.Code)
	}
	// NOTE(review): a fixed sleep can be flaky on slow machines — consider polling.
	time.Sleep(50 * time.Millisecond)
	listReq := httptest.NewRequest(http.MethodGet, "/api/v1/containers", nil)
	listRec := httptest.NewRecorder()
	r.ServeHTTP(listRec, listReq)
	if listRec.Code != http.StatusOK {
		t.Fatalf("expected 200, got %d", listRec.Code)
	}
}
// TestVMLifecycleMissingID verifies that a lifecycle action against a VM with
// no stored spec is rejected with 404 Not Found. No task worker is started:
// the handler is expected to answer synchronously for this case.
func TestVMLifecycleMissingID(t *testing.T) {
	tmpDir := t.TempDir()
	store := state.NewStore(filepath.Join(tmpDir, "vm"), filepath.Join(tmpDir, "ct"))
	cfg := config.Config{}
	svc := Services{
		Tasks:   tasks.NewRegistry(),
		Libvirt: &fakeLibvirt{},
		LXC:     &fakeLXC{},
		Podman:  &fakePodman{},
		Store:   store,
	}
	r := chi.NewRouter()
	RegisterRoutes(r, cfg, svc)
	req := httptest.NewRequest(http.MethodPost, "/api/v1/vms/missing/start", nil)
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)
	if rec.Code != http.StatusNotFound {
		t.Fatalf("expected 404, got %d", rec.Code)
	}
}
// TestVMLifecycleDeleteCleansSpec verifies that POST .../delete is accepted
// (202) and that the background worker removes the persisted VM spec.
func TestVMLifecycleDeleteCleansSpec(t *testing.T) {
	tmpDir := t.TempDir()
	store := state.NewStore(filepath.Join(tmpDir, "vm"), filepath.Join(tmpDir, "ct"))
	cfg := config.Config{
		StoragePools: []config.StoragePool{{Name: "local"}},
		Bridges:      []config.Bridge{{Name: "vmbr0"}},
	}
	// Skip the `ip link` bridge probe — vmbr0 does not exist on test hosts.
	t.Setenv("JAGACLOUD_SKIP_BRIDGE_CHECK", "1")
	svc := Services{
		Tasks:   tasks.NewRegistry(),
		Libvirt: &fakeLibvirt{},
		LXC:     &fakeLXC{},
		Podman:  &fakePodman{},
		Store:   store,
	}
	// Background worker drains the task queue; testCtx ties it to this test.
	go svc.Tasks.StartWorker(testCtx(t))
	r := chi.NewRouter()
	RegisterRoutes(r, cfg, svc)
	// create spec directly
	_ = store.SaveVM(libvirt.VMSpec{ID: "vm-del", Name: "vm-del", CPU: 1, MemoryMB: 512, Disks: []libvirt.DiskSpec{{Name: "root", Pool: "local", SizeGB: 1}}})
	req := httptest.NewRequest(http.MethodPost, "/api/v1/vms/vm-del/delete", nil)
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)
	if rec.Code != http.StatusAccepted {
		t.Fatalf("expected 202, got %d", rec.Code)
	}
	// Poll instead of a fixed sleep: a bare 50ms wait is flaky on slow hosts.
	deadline := time.Now().Add(2 * time.Second)
	for {
		if _, err := store.LoadVM("vm-del"); err != nil {
			break // spec gone — worker finished the delete
		}
		if time.Now().After(deadline) {
			t.Fatalf("expected spec to be deleted")
		}
		time.Sleep(10 * time.Millisecond)
	}
}
// TestVMLifecycleStartStop verifies that start and stop actions on a VM with
// a stored spec are both accepted (202) by the lifecycle endpoint.
func TestVMLifecycleStartStop(t *testing.T) {
	tmpDir := t.TempDir()
	store := state.NewStore(filepath.Join(tmpDir, "vm"), filepath.Join(tmpDir, "ct"))
	cfg := config.Config{
		StoragePools: []config.StoragePool{{Name: "local"}},
		Bridges:      []config.Bridge{{Name: "vmbr0"}},
	}
	// Skip the `ip link` bridge probe — vmbr0 does not exist on test hosts.
	t.Setenv("JAGACLOUD_SKIP_BRIDGE_CHECK", "1")
	svc := Services{
		Tasks:   tasks.NewRegistry(),
		Libvirt: &fakeLibvirt{},
		LXC:     &fakeLXC{},
		Podman:  &fakePodman{},
		Store:   store,
	}
	// Background worker drains the task queue; testCtx ties it to this test.
	go svc.Tasks.StartWorker(testCtx(t))
	r := chi.NewRouter()
	RegisterRoutes(r, cfg, svc)
	// Seed the store directly so the lifecycle endpoint can find the VM.
	_ = store.SaveVM(libvirt.VMSpec{ID: "vm-run", Name: "vm-run", CPU: 1, MemoryMB: 512, Disks: []libvirt.DiskSpec{{Name: "root", Pool: "local", SizeGB: 1}}})
	for _, action := range []string{"start", "stop"} {
		req := httptest.NewRequest(http.MethodPost, "/api/v1/vms/vm-run/"+action, nil)
		rec := httptest.NewRecorder()
		r.ServeHTTP(rec, req)
		if rec.Code != http.StatusAccepted {
			t.Fatalf("expected 202 for %s, got %d", action, rec.Code)
		}
	}
}
// TestCTLifecycleDeleteCleansSpec verifies that POST .../delete is accepted
// (202) and that the background worker removes the persisted container spec.
func TestCTLifecycleDeleteCleansSpec(t *testing.T) {
	tmpDir := t.TempDir()
	store := state.NewStore(filepath.Join(tmpDir, "vm"), filepath.Join(tmpDir, "ct"))
	cfg := config.Config{
		StoragePools: []config.StoragePool{{Name: "local"}},
		Bridges:      []config.Bridge{{Name: "vmbr0"}},
	}
	// Skip the `ip link` bridge probe — vmbr0 does not exist on test hosts.
	t.Setenv("JAGACLOUD_SKIP_BRIDGE_CHECK", "1")
	svc := Services{
		Tasks:   tasks.NewRegistry(),
		Libvirt: &fakeLibvirt{},
		LXC:     &fakeLXC{},
		Podman:  &fakePodman{},
		Store:   store,
	}
	// Background worker drains the task queue; testCtx ties it to this test.
	go svc.Tasks.StartWorker(testCtx(t))
	r := chi.NewRouter()
	RegisterRoutes(r, cfg, svc)
	_ = store.SaveCT(lxc.Spec{ID: "ct-del", Name: "ct-del", Template: "debian", RootfsPool: "local", RootfsSizeG: 1, Limits: lxc.Limits{CPU: 1, MemoryMB: 256}})
	req := httptest.NewRequest(http.MethodPost, "/api/v1/containers/ct-del/delete", nil)
	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, req)
	if rec.Code != http.StatusAccepted {
		t.Fatalf("expected 202, got %d", rec.Code)
	}
	// Poll instead of a fixed sleep: a bare 50ms wait is flaky on slow hosts.
	deadline := time.Now().Add(2 * time.Second)
	for {
		if _, err := store.LoadCT("ct-del"); err != nil {
			break // spec gone — worker finished the delete
		}
		if time.Now().After(deadline) {
			t.Fatalf("expected CT spec to be deleted")
		}
		time.Sleep(10 * time.Millisecond)
	}
}
// testCtx returns a context that is automatically cancelled when the test
// finishes, so worker goroutines started with it cannot outlive the test.
// The original discarded the cancel func (go vet "lostcancel"), leaking the
// context's resources; t.Cleanup guarantees it is called.
func testCtx(t *testing.T) context.Context {
	t.Helper()
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	return ctx
}
// fakeLibvirt is a no-op libvirt client that isolates handler tests from a
// real hypervisor.
type fakeLibvirt struct{}

// ListVMs returns a single canned running VM.
func (f *fakeLibvirt) ListVMs() ([]libvirt.VM, error) {
	return []libvirt.VM{{ID: "vm-1", Name: "vm-1", Status: "running"}}, nil
}

// CreateVM echoes the spec back as a running VM without creating anything.
func (f *fakeLibvirt) CreateVM(spec libvirt.VMSpec) (libvirt.VM, error) {
	return libvirt.VM{ID: spec.ID, Name: spec.Name, Status: "running"}, nil
}

// Lifecycle operations always succeed.
func (f *fakeLibvirt) StartVM(id string) error  { return nil }
func (f *fakeLibvirt) StopVM(id string) error   { return nil }
func (f *fakeLibvirt) RebootVM(id string) error { return nil }
func (f *fakeLibvirt) DeleteVM(id string) error { return nil }
// fakeLXC is a no-op LXC manager that isolates handler tests from real
// containers.
type fakeLXC struct{}

// List returns a single canned running container.
func (f *fakeLXC) List() ([]lxc.Container, error) {
	return []lxc.Container{{ID: "ct-1", Name: "ct-1", Status: "running", Unpriv: true}}, nil
}

// Create echoes the spec back as a stopped container without creating anything.
func (f *fakeLXC) Create(spec lxc.Spec) (lxc.Container, error) {
	return lxc.Container{ID: spec.ID, Name: spec.Name, Status: "stopped", Unpriv: spec.Unprivileged}, nil
}

// Lifecycle operations always succeed.
func (f *fakeLXC) Start(id string) error  { return nil }
func (f *fakeLXC) Stop(id string) error   { return nil }
func (f *fakeLXC) Delete(id string) error { return nil }
// fakePodman is a no-op podman client that isolates handler tests from a
// real OCI runtime.
type fakePodman struct{}

// List reports no nested OCI containers.
func (f *fakePodman) List(ctID string) ([]podman.OCIContainer, error) { return nil, nil }

// Create returns a canned created container for the requested image.
func (f *fakePodman) Create(ctID string, spec podman.CreateSpec) (podman.OCIContainer, error) {
	return podman.OCIContainer{ID: "oci-1", Image: spec.Image, Status: "created"}, nil
}

// Lifecycle operations always succeed.
func (f *fakePodman) Start(ctID, cid string) error  { return nil }
func (f *fakePodman) Stop(ctID, cid string) error   { return nil }
func (f *fakePodman) Delete(ctID, cid string) error { return nil }

View File

@@ -11,35 +11,35 @@ type Client interface {
}
// VMSpec describes the desired configuration of a virtual machine as
// submitted via the API or persisted to the spec store.
// (The diff rendering fused the pre- and post-change field lists, duplicating
// every field; this keeps only the tagged post-change definitions.)
type VMSpec struct {
	ID           string         `json:"id" yaml:"id"`
	Name         string         `json:"name" yaml:"name"`
	CPU          int            `json:"cpus" yaml:"cpus"`
	MemoryMB     int            `json:"memory_mb" yaml:"memory_mb"`
	Disks        []DiskSpec     `json:"disks" yaml:"disks"`
	NICs         []NICSpec      `json:"nics" yaml:"nics"`
	CloudInit    *CloudInitSpec `json:"cloud_init" yaml:"cloud_init"`
	CloudInitISO string         `json:"cloud_init_iso" yaml:"cloud_init_iso"`
}

// DiskSpec describes a single VM disk, either by pool+size or by resolved path.
type DiskSpec struct {
	Name     string `json:"name" yaml:"name"`
	Pool     string `json:"pool" yaml:"pool"`
	SizeGB   int    `json:"size_gb" yaml:"size_gb"`
	Bus      string `json:"bus" yaml:"bus"`
	Path     string `json:"path" yaml:"path"`
	Prealloc string `json:"prealloc" yaml:"prealloc"` // "", "metadata", "full"
}

// NICSpec describes a VM network interface attached to a bridge.
type NICSpec struct {
	Bridge string `json:"bridge" yaml:"bridge"`
	VLAN   int    `json:"vlan" yaml:"vlan"`
	Model  string `json:"model" yaml:"model"`
}

// CloudInitSpec carries cloud-init provisioning data for a VM.
type CloudInitSpec struct {
	User     string   `json:"user" yaml:"user"`
	SSHKeys  []string `json:"ssh_keys" yaml:"ssh_keys"`
	UserData string   `json:"user_data" yaml:"user_data"`
}
type VM struct {

View File

@@ -10,27 +10,28 @@ type Manager interface {
}
// Spec describes the desired configuration of an LXC container as submitted
// via the API or persisted to the spec store.
// (The diff rendering fused the pre- and post-change field lists, duplicating
// every field; this keeps only the tagged post-change definitions.)
type Spec struct {
	ID           string    `json:"id" yaml:"id"`
	Name         string    `json:"name" yaml:"name"`
	Template     string    `json:"template" yaml:"template"`
	RootfsPool   string    `json:"rootfs_pool" yaml:"rootfs_pool"`
	RootfsSizeG  int       `json:"rootfs_size_g" yaml:"rootfs_size_g"`
	RootfsPath   string    `json:"rootfs_path" yaml:"rootfs_path"`
	NICs         []NICSpec `json:"nics" yaml:"nics"`
	Limits       Limits    `json:"limits" yaml:"limits"`
	Unprivileged bool      `json:"unprivileged" yaml:"unprivileged"`
}

// NICSpec describes a container network interface attached to a bridge.
type NICSpec struct {
	Bridge string `json:"bridge" yaml:"bridge"`
	VLAN   int    `json:"vlan" yaml:"vlan"`
	HWAddr string `json:"hwaddr" yaml:"hwaddr"`
	MTU    int    `json:"mtu" yaml:"mtu"`
	Name   string `json:"name" yaml:"name"`
}

// Limits bounds container CPU and memory usage.
type Limits struct {
	CPU      int `json:"cpus" yaml:"cpus"`
	MemoryMB int `json:"memory_mb" yaml:"memory_mb"`
}
type Container struct {

View File

@@ -101,7 +101,10 @@ func renderConfig(spec Spec) (string, error) {
fmt.Fprintf(buf, "lxc.idmap = u 0 %d %d\n", uidStart, count)
fmt.Fprintf(buf, "lxc.idmap = g 0 %d %d\n", uidStart, count)
}
rootfs := fmt.Sprintf("/var/lib/lxc/%s/rootfs", spec.Name)
rootfs := spec.RootfsPath
if rootfs == "" {
rootfs = fmt.Sprintf("/var/lib/lxc/%s/rootfs", spec.Name)
}
fmt.Fprintf(buf, "lxc.rootfs.path = dir:%s\n", rootfs)
for idx, nic := range spec.NICs {
fmt.Fprintf(buf, "lxc.net.%d.type = veth\n", idx)

View File

@@ -136,6 +136,9 @@ func (s *Store) SaveCT(spec lxc.Spec) error {
if spec.ID == "" {
return fmt.Errorf("ct id is required")
}
if spec.RootfsPool == "" {
return fmt.Errorf("rootfs_pool is required")
}
data, err := yaml.Marshal(spec)
if err != nil {
return err

View File

@@ -3,8 +3,8 @@ package state
import "testing"
// TestTrimExt verifies that trimExt strips the file extension from a name.
// (The diff rendering duplicated the function body — `got` was declared
// twice; this keeps a single copy.)
func TestTrimExt(t *testing.T) {
	got := trimExt("foo.yaml")
	if got != "foo" {
		t.Fatalf("expected foo, got %s", got)
	}
}

58
pkg/storage/pools.go Normal file
View File

@@ -0,0 +1,58 @@
package storage
import (
"fmt"
"os"
"path/filepath"
)
// PoolConfig describes a configured storage pool.
type PoolConfig struct {
	Name string `json:"name" yaml:"name"`
	Type string `json:"type" yaml:"type"` // dir|lvm|zfs
	Path string `json:"path" yaml:"path"` // for dir/zfs
	VG   string `json:"vg" yaml:"vg"`     // for lvm
}

// AttachRequest describes a volume attachment for a VM/CT.
type AttachRequest struct {
	Pool   string
	Volume string
	Path   string // resolved path for libvirt/LXC
}

// ResolveVolume maps pool+vol name to a path for dir pools. Non-dir pool
// types are recognized but not yet supported and yield an error.
func ResolveVolume(pools []PoolConfig, poolName, vol string) (string, error) {
	for _, pc := range pools {
		if pc.Name != poolName {
			continue
		}
		if pc.Type != "dir" {
			// TODO: lvm/zfs support
			return "", fmt.Errorf("pool type %s not yet supported", pc.Type)
		}
		if pc.Path == "" {
			return "", fmt.Errorf("dir pool %s missing path", poolName)
		}
		// dir pool: join path/vol
		return filepath.Join(pc.Path, vol), nil
	}
	return "", fmt.Errorf("pool %s not found", poolName)
}

// PoolExists checks if a pool is present in config and, for dir pools, if the
// path exists.
// NOTE(review): the stat below is effectively advisory — any configured pool
// is accepted as present even when its path is missing, to allow creation
// elsewhere. Confirm this leniency is intended before tightening it.
func PoolExists(pools []PoolConfig, name string) bool {
	for _, pc := range pools {
		if pc.Name != name {
			continue
		}
		if pc.Type == "dir" && pc.Path != "" {
			if _, err := os.Stat(pc.Path); err == nil {
				return true
			}
		}
		// accept as present even if path missing to allow creation elsewhere
		return true
	}
	return false
}

View File

@@ -2,7 +2,9 @@ package validators
import (
"fmt"
"os"
"os/exec"
"strings"
"jagacloud/node-agent/pkg/compute/libvirt"
"jagacloud/node-agent/pkg/config"
@@ -14,6 +16,9 @@ func CheckBridge(name string) error {
if name == "" {
return fmt.Errorf("bridge name required")
}
if os.Getenv("JAGACLOUD_SKIP_BRIDGE_CHECK") == "1" {
return nil
}
if err := exec.Command("bash", "-lc", fmt.Sprintf("ip link show %s", name)).Run(); err != nil {
return fmt.Errorf("bridge %s not found", name)
}
@@ -70,6 +75,38 @@ func CheckStoragePoolsCT(spec lxc.Spec, cfg config.Config) error {
return nil
}
// CheckStoragePoolsRuntime queries libvirt to see if pool exists; fallback to config.
// Pools declared in cfg are accepted without touching virsh. For any pool not
// in cfg, `virsh pool-list` is consulted; the original spawned a bash shell
// and re-ran virsh for every unknown pool — here virsh is exec'd directly and
// its output is fetched at most once per call.
func CheckStoragePoolsRuntime(pools []string, cfg config.Config) error {
	if len(pools) == 0 {
		return nil
	}
	var runtime map[string]bool // lazily-populated set of virsh-reported pool names
	for _, p := range pools {
		if p == "" {
			continue
		}
		if poolExists(cfg, p) {
			continue
		}
		if runtime == nil {
			// Try libvirt (single invocation, no shell).
			out, err := exec.Command("virsh", "-q", "pool-list", "--name").Output()
			if err != nil {
				return fmt.Errorf("pool %s not found and virsh failed: %v", p, err)
			}
			runtime = make(map[string]bool)
			for _, line := range strings.Split(string(out), "\n") {
				if name := strings.TrimSpace(line); name != "" {
					runtime[name] = true
				}
			}
		}
		if !runtime[p] {
			return fmt.Errorf("storage pool %s not found", p)
		}
	}
	return nil
}
func poolExists(cfg config.Config, name string) bool {
for _, p := range cfg.StoragePools {
if p.Name == name {

View File

@@ -1,20 +1,20 @@
package validators
import (
"testing"
"testing"
"jagacloud/node-agent/pkg/compute/libvirt"
"jagacloud/node-agent/pkg/config"
"jagacloud/node-agent/pkg/compute/libvirt"
"jagacloud/node-agent/pkg/config"
)
// TestPoolExists verifies that VM disk pool validation accepts configured
// pools and rejects unknown ones.
// (The diff rendering duplicated the whole function body — cfg and err were
// declared twice; this keeps a single copy.)
func TestPoolExists(t *testing.T) {
	cfg := config.Config{StoragePools: []config.StoragePool{{Name: "local"}}}
	err := CheckStoragePoolsVM([]libvirt.DiskSpec{{Pool: "local"}}, cfg)
	if err != nil {
		t.Fatalf("expected pool to be valid: %v", err)
	}
	err = CheckStoragePoolsVM([]libvirt.DiskSpec{{Pool: "missing"}}, cfg)
	if err == nil {
		t.Fatalf("expected error for missing pool")
	}
}