initial commit

2025-11-23 11:29:12 +07:00
commit 382b57ed83
33 changed files with 2360 additions and 0 deletions

README.md (new file, 12 lines)

@@ -0,0 +1,12 @@
# Jagacloud Hypervisor (v1 scaffolding)

A minimal, single-node-first hypervisor inspired by Proxmox patterns, built around a clean Go node-agent that drives libvirt, LXC, and Podman-in-LXC.

Contents:

- cmd/node-agent: main entrypoint
- pkg: API handlers and adapters for libvirt, LXC, Podman, storage, network, config, tasks
- templates: starter templates for libvirt, LXC, Podman, network, storage configs
- ansible: host preparation playbooks and roles
- docs: API and config reference (see api/openapi.yaml)

This is a skeleton; real implementations are TODO-marked.
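
As a quick smoke test once the agent is running, any HTTP client works; a minimal Go sketch (address and token are the defaults from the sample agent.yaml, so adjust for your install):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("GET", "http://127.0.0.1:8000/api/v1/node", nil)
	req.Header.Set("Authorization", "Bearer changeme") // auth_token from agent.yaml
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```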


@@ -0,0 +1,2 @@
[hypervisors]
node1 ansible_host=192.0.2.10


@@ -0,0 +1,9 @@
- hosts: hypervisors
  become: yes
  roles:
    - common
    - kvm-libvirt
    - lxc
    - podman
    - network-bridge
    - node-agent


@@ -0,0 +1,8 @@
- name: Install base packages
  ansible.builtin.apt:
    name:
      - ca-certificates
      - curl
      - gnupg
    state: present
    update_cache: yes


@@ -0,0 +1,15 @@
- name: Install KVM and libvirt
  ansible.builtin.apt:
    name:
      - qemu-kvm
      - libvirt-daemon-system
      - libvirt-clients
      - virtinst
    state: present
    update_cache: yes

- name: Enable and start libvirtd
  ansible.builtin.systemd:
    name: libvirtd
    state: started
    enabled: yes


@@ -0,0 +1,14 @@
- name: Install LXC
  ansible.builtin.apt:
    name:
      - lxc
      - lxc-templates
    state: present
    update_cache: yes

- name: Ensure unprivileged defaults
  ansible.builtin.copy:
    dest: /etc/lxc/default.conf
    content: |
      lxc.net.0.type = empty
      # Jagacloud will render per-CT configs under /etc/jagacloud/lxc


@@ -0,0 +1,25 @@
- name: Install bridge utilities
  ansible.builtin.apt:
    name:
      - bridge-utils
      - ifupdown2
    state: present
    update_cache: yes

- name: Configure vmbr0 bridge (ifupdown2)
  ansible.builtin.copy:
    dest: /etc/network/interfaces.d/vmbr0
    content: |
      auto vmbr0
      iface vmbr0 inet manual
          bridge-ports none
          bridge-stp off
          bridge-fd 0
          bridge-vlan-aware yes
  notify: restart networking

# The matching handler cannot live in a tasks file; in the role layout it
# belongs in handlers/main.yml:
#
# - name: restart networking
#   ansible.builtin.service:
#     name: networking
#     state: restarted


@@ -0,0 +1,48 @@
- name: Create config directory
  ansible.builtin.file:
    path: /etc/jagacloud
    state: directory
    mode: '0755'

- name: Place sample agent config
  ansible.builtin.copy:
    dest: /etc/jagacloud/agent.yaml
    content: |
      listen_addr: ":8000"
      libvirt_uri: "qemu:///system"
      lxc_path: "/etc/jagacloud/lxc"
      podman_socket: "/run/podman/podman.sock"
      auth_token: "changeme"

- name: Install node-agent binary (placeholder)
  ansible.builtin.copy:
    src: files/node-agent
    dest: /usr/local/bin/node-agent
    mode: '0755'
  ignore_errors: true

- name: Install systemd unit
  ansible.builtin.copy:
    dest: /etc/systemd/system/node-agent.service
    content: |
      [Unit]
      Description=Jagacloud Node Agent
      After=network-online.target

      [Service]
      ExecStart=/usr/local/bin/node-agent
      Restart=on-failure

      [Install]
      WantedBy=multi-user.target

- name: Reload systemd
  ansible.builtin.systemd:
    daemon_reload: yes

- name: Enable and start node-agent
  ansible.builtin.systemd:
    name: node-agent
    state: started
    enabled: yes
  ignore_errors: true


@@ -0,0 +1,12 @@
- name: Install Podman
  ansible.builtin.apt:
    name:
      - podman
    state: present
    update_cache: yes

- name: Enable podman socket
  ansible.builtin.systemd:
    name: podman.socket
    state: started
    enabled: yes

api/config-examples.md (new file, 78 lines)

@@ -0,0 +1,78 @@
# Jagacloud Config Examples

## Agent config `/etc/jagacloud/agent.yaml`

```yaml
listen_addr: ":8000"
libvirt_uri: "qemu:///system"
lxc_path: "/etc/jagacloud/lxc"
podman_socket: "/run/podman/podman.sock"
auth_token: "replace-me"
storage_pools:
  - name: local-dir
    type: dir
    path: /var/lib/jagacloud/images
  - name: local-lvm
    type: lvm
    path: /dev/vg0
bridges:
  - name: vmbr0
    vlan_aware: true
    mtu: 1500
```

## VM spec (persisted)

`/etc/jagacloud/vm/100.yaml`

```yaml
id: "100"
name: "vm-100"
cpus: 4
memory_mb: 8192
disks:
  - name: root
    pool: local-lvm
    size_gb: 40
    bus: virtio
nics:
  - bridge: vmbr0
    vlan: 10
    model: virtio
cloud_init:
  user: debian
  ssh_keys:
    - "ssh-ed25519 AAA... user@host"
```

## Container spec (persisted)

`/etc/jagacloud/ct/200.yaml`

```yaml
id: "200"
name: "ct-200"
unprivileged: true
limits:
  cpus: 2
  memory_mb: 2048
template: "debian-bookworm"
rootfs:
  pool: local-dir
  size_gb: 10
nics:
  - bridge: vmbr0
    vlan: 20
```

## Network bridge (systemd-networkd snippet)

`/etc/jagacloud/network/vmbr0.netdev`

```ini
[NetDev]
Name=vmbr0
Kind=bridge

[Bridge]
VLANFiltering=yes
```

`/etc/jagacloud/network/vmbr0.network`

```ini
[Match]
Name=vmbr0

[Network]
# VLAN filtering is a bridge property, so it lives in the .netdev
# [Bridge] section above, not here.
```
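
## Submitting a spec to the agent

The persisted specs above mirror the JSON accepted by the API. A hedged Go sketch (assuming the agent listens on 127.0.0.1:8000 with the sample token) that creates VM 100 and receives a task ID to poll:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	spec := []byte(`{
		"id": "100", "name": "vm-100", "cpus": 4, "memory_mb": 8192,
		"disks": [{"name": "root", "pool": "local-lvm", "size_gb": 40, "bus": "virtio"}],
		"nics":  [{"bridge": "vmbr0", "vlan": 10, "model": "virtio"}]
	}`)
	req, _ := http.NewRequest("POST", "http://127.0.0.1:8000/api/v1/vms", bytes.NewReader(spec))
	req.Header.Set("Authorization", "Bearer replace-me")
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // 202 with {"status":"queued","task":"..."}
}
```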

api/openapi.yaml (new file, 219 lines)

@@ -0,0 +1,219 @@
openapi: 3.0.3
info:
  title: Jagacloud Node Agent API
  version: 0.1.0
  description: Minimal single-node hypervisor API for VMs (libvirt), LXC containers, and Podman-in-LXC.
servers:
  - url: 'http://{host}:{port}'
    variables:
      host:
        default: 127.0.0.1
      port:
        default: "8000"
components:
  securitySchemes:
    bearerToken:
      type: http
      scheme: bearer
      # Static bearer token from agent.yaml; no JWT semantics.
  schemas:
    Error:
      type: object
      properties:
        error:
          type: string
    Task:
      type: object
      properties:
        id: { type: string }
        status: { type: string, enum: [queued, running, completed, error] }
        result: { type: object }
        error: { type: string }
    NodeInfo:
      type: object
      properties:
        hostname: { type: string }
        version: { type: string }
        cpu: { type: object, properties: { cores: { type: integer }, model: { type: string }, load: { type: number } } }
        memory: { type: object, properties: { total_mb: { type: integer }, used_mb: { type: integer } } }
        storage_pools:
          type: array
          items: { $ref: '#/components/schemas/StoragePool' }
        bridges:
          type: array
          items: { $ref: '#/components/schemas/Bridge' }
    StoragePool:
      type: object
      properties:
        name: { type: string }
        type: { type: string, enum: [dir, lvm, zfs] }
        path: { type: string }
        free_gb: { type: number }
        total_gb: { type: number }
    Bridge:
      type: object
      properties:
        name: { type: string }
        vlan_aware: { type: boolean }
        mtu: { type: integer }
    VM:
      type: object
      properties:
        id: { type: string }
        name: { type: string }
        status: { type: string, enum: [running, stopped, paused, error] }
        cpus: { type: integer }
        memory_mb: { type: integer }
        disks: { type: array, items: { $ref: '#/components/schemas/VMDisk' } }
        nics: { type: array, items: { $ref: '#/components/schemas/VMNic' } }
    VMDisk:
      type: object
      properties:
        name: { type: string }
        pool: { type: string }
        size_gb: { type: integer }
        bus: { type: string, enum: [virtio, sata] }
    VMNic:
      type: object
      properties:
        bridge: { type: string }
        model: { type: string }
        vlan: { type: integer }
    VMCreate:
      allOf:
        - $ref: '#/components/schemas/VM'
        - type: object
          required: [name, cpus, memory_mb]
          properties:
            cloud_init:
              type: object
              properties:
                user: { type: string }
                ssh_keys: { type: array, items: { type: string } }
                user_data: { type: string }
    Container:
      type: object
      properties:
        id: { type: string }
        name: { type: string }
        status: { type: string, enum: [running, stopped, error] }
        unprivileged: { type: boolean }
        nics: { type: array, items: { $ref: '#/components/schemas/ContainerNic' } }
        limits: { type: object, properties: { cpus: { type: integer }, memory_mb: { type: integer } } }
    ContainerCreate:
      allOf:
        - $ref: '#/components/schemas/Container'
        - type: object
          required: [name, template]
          properties:
            template: { type: string }
            rootfs: { type: object, properties: { pool: { type: string }, size_gb: { type: integer } }, required: [pool, size_gb] }
    ContainerNic:
      type: object
      properties:
        bridge: { type: string }
        vlan: { type: integer }
    OCIContainer:
      type: object
      properties:
        id: { type: string }
        image: { type: string }
        status: { type: string }
    OCICreate:
      type: object
      required: [image]
      properties:
        image: { type: string }
        cmd: { type: array, items: { type: string } }
        env: { type: object, additionalProperties: { type: string } }
        ports: { type: array, items: { type: object, properties: { host_port: { type: integer }, container_port: { type: integer } } } }
        volumes: { type: array, items: { type: string } }
        restart: { type: string }
security:
  - bearerToken: []
paths:
  /api/v1/node:
    get:
      summary: Node information
      responses:
        '200': { description: OK, content: { application/json: { schema: { $ref: '#/components/schemas/NodeInfo' } } } }
        default: { description: Error, content: { application/json: { schema: { $ref: '#/components/schemas/Error' } } } }
  /api/v1/vms:
    get:
      summary: List VMs
      responses:
        '200': { description: OK, content: { application/json: { schema: { type: array, items: { $ref: '#/components/schemas/VM' } } } } }
    post:
      summary: Create VM
      requestBody:
        required: true
        content: { application/json: { schema: { $ref: '#/components/schemas/VMCreate' } } }
      responses:
        '202': { description: Accepted, content: { application/json: { schema: { $ref: '#/components/schemas/VM' } } } }
  /api/v1/vms/{id}:
    get:
      summary: Get VM
      parameters: [{ name: id, in: path, required: true, schema: { type: string } }]
      responses:
        '200': { description: OK, content: { application/json: { schema: { $ref: '#/components/schemas/VM' } } } }
    post:
      summary: Update VM (reserved for future)
      parameters: [{ name: id, in: path, required: true, schema: { type: string } }]
      responses:
        '501': { description: Not implemented }
  /api/v1/vms/{id}/{action}:
    post:
      summary: VM lifecycle action
      parameters:
        - { name: id, in: path, required: true, schema: { type: string } }
        - { name: action, in: path, required: true, schema: { type: string, enum: [start, stop, reboot, delete] } }
      responses:
        '202': { description: Accepted }
  /api/v1/containers:
    get:
      summary: List LXC containers
      responses:
        '200': { description: OK, content: { application/json: { schema: { type: array, items: { $ref: '#/components/schemas/Container' } } } } }
    post:
      summary: Create container
      requestBody:
        required: true
        content: { application/json: { schema: { $ref: '#/components/schemas/ContainerCreate' } } }
      responses:
        '202': { description: Accepted, content: { application/json: { schema: { $ref: '#/components/schemas/Container' } } } }
  /api/v1/containers/{id}:
    get:
      summary: Get container
      parameters: [{ name: id, in: path, required: true, schema: { type: string } }]
      responses:
        '200': { description: OK, content: { application/json: { schema: { $ref: '#/components/schemas/Container' } } } }
  /api/v1/containers/{id}/{action}:
    post:
      summary: Container lifecycle action
      parameters:
        - { name: id, in: path, required: true, schema: { type: string } }
        - { name: action, in: path, required: true, schema: { type: string, enum: [start, stop, delete] } }
      responses:
        '202': { description: Accepted }
  /api/v1/containers/{id}/oci:
    get:
      summary: List OCI containers inside CT
      parameters: [{ name: id, in: path, required: true, schema: { type: string } }]
      responses:
        '200': { description: OK, content: { application/json: { schema: { type: array, items: { $ref: '#/components/schemas/OCIContainer' } } } } }
    post:
      summary: Create OCI container inside CT
      parameters: [{ name: id, in: path, required: true, schema: { type: string } }]
      requestBody:
        required: true
        content: { application/json: { schema: { $ref: '#/components/schemas/OCICreate' } } }
      responses:
        '202': { description: Accepted, content: { application/json: { schema: { $ref: '#/components/schemas/OCIContainer' } } } }
  /api/v1/containers/{id}/oci/{cid}:
    get:
      summary: Get OCI container
      parameters:
        - { name: id, in: path, required: true, schema: { type: string } }
        - { name: cid, in: path, required: true, schema: { type: string } }
      responses:
        '200': { description: OK, content: { application/json: { schema: { $ref: '#/components/schemas/OCIContainer' } } } }
  /api/v1/containers/{id}/oci/{cid}/{action}:
    post:
      summary: OCI lifecycle action
      parameters:
        - { name: id, in: path, required: true, schema: { type: string } }
        - { name: cid, in: path, required: true, schema: { type: string } }
        - { name: action, in: path, required: true, schema: { type: string, enum: [start, stop, delete] } }
      responses:
        '202': { description: Accepted }
  /api/v1/tasks/{id}:
    get:
      summary: Get task status (implemented by the agent; previously missing from this spec)
      parameters: [{ name: id, in: path, required: true, schema: { type: string } }]
      responses:
        '200': { description: OK, content: { application/json: { schema: { $ref: '#/components/schemas/Task' } } } }
        '404': { description: Not found, content: { application/json: { schema: { $ref: '#/components/schemas/Error' } } } }

cmd/node-agent/main.go (new file, 40 lines)

@@ -0,0 +1,40 @@
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/go-chi/chi/v5"

	"jagacloud/node-agent/pkg/api"
	"jagacloud/node-agent/pkg/compute/libvirt"
	"jagacloud/node-agent/pkg/config"
	"jagacloud/node-agent/pkg/containers/lxc"
	"jagacloud/node-agent/pkg/containers/podman"
	"jagacloud/node-agent/pkg/state"
	"jagacloud/node-agent/pkg/tasks"
)

func main() {
	cfg := config.LoadOrExit("/etc/jagacloud/agent.yaml")
	store := state.NewStore(cfg.VMPath, cfg.CTPath)
	svc := api.Services{
		Tasks:   tasks.NewRegistry(),
		Libvirt: libvirt.NewVirshClient(cfg.LibvirtURI, "templates/libvirt/domain.xml.tmpl", "/etc/jagacloud/libvirt"),
		LXC:     lxc.NewCmdManager(cfg.LXCPath),
		Podman:  podman.NewCmdClient(cfg.PodmanSocket),
		Store:   store,
	}
	go svc.Tasks.StartWorker(context.Background())

	r := chi.NewRouter()
	api.RegisterRoutes(r, cfg, svc)
	log.Printf("node-agent listening on %s", cfg.ListenAddr)
	if err := http.ListenAndServe(cfg.ListenAddr, r); err != nil {
		log.Fatalf("http server failed: %v", err)
	}
}
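
A possible follow-up, not implemented in this commit: wrap the router in an http.Server so SIGTERM drains in-flight requests before the agent exits. A minimal sketch replacing the tail of main() (additionally import os, os/signal, syscall, and time):

```go
// Sketch: graceful shutdown (hypothetical refinement, not part of this commit).
srv := &http.Server{Addr: cfg.ListenAddr, Handler: r}
go func() {
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
	<-sig
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	_ = srv.Shutdown(ctx) // stop accepting; drain in-flight requests
}()
if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
	log.Fatalf("http server failed: %v", err)
}
```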

go.mod (new file, 8 lines)

@@ -0,0 +1,8 @@
module jagacloud/node-agent

go 1.21

require (
	github.com/go-chi/chi/v5 v5.0.10
	gopkg.in/yaml.v3 v3.0.1
)

go.sum (new file, 6 lines)

@@ -0,0 +1,6 @@
github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk=
github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

pkg/api/handlers.go (new file, 412 lines)

@@ -0,0 +1,412 @@
package api

import (
	"context"
	"crypto/rand"
	"crypto/subtle"
	"encoding/hex"
	"encoding/json"
	"errors"
	"net/http"
	"strings"

	"github.com/go-chi/chi/v5"

	"jagacloud/node-agent/pkg/compute/libvirt"
	"jagacloud/node-agent/pkg/config"
	"jagacloud/node-agent/pkg/containers/lxc"
	"jagacloud/node-agent/pkg/containers/podman"
	"jagacloud/node-agent/pkg/tasks"
	"jagacloud/node-agent/pkg/validators"
)

// writeJSON serializes v as JSON with the given status code.
func writeJSON(w http.ResponseWriter, status int, v interface{}) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	_ = json.NewEncoder(w).Encode(v)
}

// Node info
func handleNodeInfo(cfg config.Config, svc Services) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// TODO: collect real stats (CPU, mem, storage, bridges) using cfg.
		writeJSON(w, http.StatusOK, map[string]interface{}{
			"hostname": "TODO",
			"version":  "0.1.0",
		})
	}
}

// VM handlers
func handleListVMs(cfg config.Config, svc Services) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		stored, _ := svc.Store.ListVMs()
		runtime, _ := svc.Libvirt.ListVMs()
		// Merge status from runtime into stored specs.
		statusMap := map[string]string{}
		for _, vm := range runtime {
			statusMap[vm.ID] = vm.Status
		}
		out := []interface{}{}
		for _, spec := range stored {
			status := statusMap[spec.ID]
			out = append(out, map[string]interface{}{
				"id":        spec.ID,
				"name":      spec.Name,
				"status":    status,
				"cpus":      spec.CPU,
				"memory_mb": spec.MemoryMB,
			})
		}
		writeJSON(w, http.StatusOK, out)
	}
}

func handleCreateVM(cfg config.Config, svc Services) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var spec libvirt.VMSpec
		if err := json.NewDecoder(r.Body).Decode(&spec); err != nil {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
			return
		}
		if spec.ID == "" && spec.Name != "" {
			spec.ID = spec.Name
		}
		if spec.ID == "" {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": "id or name required"})
			return
		}
		if err := validators.CheckBridgeSet(spec.NICs); err != nil {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
			return
		}
		// Optional pluggable validation; the original called an undefined
		// validateVM helper, so route through the injected Validator instead.
		if svc.Validator != nil {
			if err := svc.Validator.ValidateVM(spec); err != nil {
				writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
				return
			}
		}
		if err := svc.Store.SaveVM(spec); err != nil {
			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
			return
		}
		taskID := enqueueWork(svc.Tasks, "vm.create", func(ctx context.Context) (interface{}, error) {
			unlock := svc.StoreLock(spec.ID)
			defer unlock()
			return svc.Libvirt.CreateVM(spec)
		})
		writeJSON(w, http.StatusAccepted, map[string]string{"status": "queued", "task": taskID})
	}
}

func handleGetVM(cfg config.Config, svc Services) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		id := chi.URLParam(r, "id")
		spec, err := svc.Store.LoadVM(id)
		if err == nil {
			writeJSON(w, http.StatusOK, spec)
			return
		}
		vms, err := svc.Libvirt.ListVMs()
		if err != nil {
			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
			return
		}
		for _, vm := range vms {
			if vm.ID == id || vm.Name == id {
				writeJSON(w, http.StatusOK, vm)
				return
			}
		}
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "not found"})
	}
}

func handleStartVM(cfg config.Config, svc Services) http.HandlerFunc {
	return lifecycleVM(cfg, svc, "start")
}
func handleStopVM(cfg config.Config, svc Services) http.HandlerFunc {
	return lifecycleVM(cfg, svc, "stop")
}
func handleRebootVM(cfg config.Config, svc Services) http.HandlerFunc {
	return lifecycleVM(cfg, svc, "reboot")
}
func handleDeleteVM(cfg config.Config, svc Services) http.HandlerFunc {
	return lifecycleVM(cfg, svc, "delete")
}

func lifecycleVM(cfg config.Config, svc Services, action string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		id := chi.URLParam(r, "id")
		taskID := enqueueWork(svc.Tasks, "vm."+action, func(ctx context.Context) (interface{}, error) {
			unlock := svc.StoreLock(id)
			defer unlock()
			switch action {
			case "start":
				return nil, svc.Libvirt.StartVM(id)
			case "stop":
				return nil, svc.Libvirt.StopVM(id)
			case "reboot":
				return nil, svc.Libvirt.RebootVM(id)
			case "delete":
				if err := svc.Libvirt.DeleteVM(id); err != nil {
					return nil, err
				}
				_ = svc.Store.DeleteVM(id)
				return nil, nil
			default:
				return nil, errors.New("unsupported action")
			}
		})
		writeJSON(w, http.StatusAccepted, map[string]string{"id": id, "action": action, "status": "queued", "task": taskID})
	}
}

// Container (LXC) handlers
func handleListCT(cfg config.Config, svc Services) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		stored, _ := svc.Store.ListCTs()
		runtime, _ := svc.LXC.List()
		statusMap := map[string]string{}
		for _, ct := range runtime {
			statusMap[ct.ID] = ct.Status
		}
		out := []interface{}{}
		for _, spec := range stored {
			status := statusMap[spec.ID]
			out = append(out, map[string]interface{}{
				"id":       spec.ID,
				"name":     spec.Name,
				"status":   status,
				"limits":   spec.Limits,
				"nics":     spec.NICs,
				"template": spec.Template,
			})
		}
		writeJSON(w, http.StatusOK, out)
	}
}

func handleCreateCT(cfg config.Config, svc Services) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var spec lxc.Spec
		if err := json.NewDecoder(r.Body).Decode(&spec); err != nil {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
			return
		}
		if spec.ID == "" && spec.Name != "" {
			spec.ID = spec.Name
		}
		if spec.ID == "" {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": "id or name required"})
			return
		}
		if err := validators.CheckBridgeSetCT(spec.NICs); err != nil {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
			return
		}
		// Mirror the VM path: run the injected validator when present.
		if svc.Validator != nil {
			if err := svc.Validator.ValidateCT(spec); err != nil {
				writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
				return
			}
		}
		if err := svc.Store.SaveCT(spec); err != nil {
			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
			return
		}
		taskID := enqueueWork(svc.Tasks, "ct.create", func(ctx context.Context) (interface{}, error) {
			unlock := svc.StoreLock(spec.ID)
			defer unlock()
			return svc.LXC.Create(spec)
		})
		writeJSON(w, http.StatusAccepted, map[string]string{"status": "queued", "task": taskID})
	}
}

func handleGetCT(cfg config.Config, svc Services) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		id := chi.URLParam(r, "id")
		if spec, err := svc.Store.LoadCT(id); err == nil {
			writeJSON(w, http.StatusOK, spec)
			return
		}
		cts, err := svc.LXC.List()
		if err != nil {
			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
			return
		}
		for _, ct := range cts {
			if ct.ID == id || ct.Name == id {
				writeJSON(w, http.StatusOK, ct)
				return
			}
		}
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "not found"})
	}
}

func handleStartCT(cfg config.Config, svc Services) http.HandlerFunc {
	return lifecycleCT(cfg, svc, "start")
}
func handleStopCT(cfg config.Config, svc Services) http.HandlerFunc {
	return lifecycleCT(cfg, svc, "stop")
}
func handleDeleteCT(cfg config.Config, svc Services) http.HandlerFunc {
	return lifecycleCT(cfg, svc, "delete")
}

func lifecycleCT(cfg config.Config, svc Services, action string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		id := chi.URLParam(r, "id")
		taskID := enqueueWork(svc.Tasks, "ct."+action, func(ctx context.Context) (interface{}, error) {
			unlock := svc.StoreLock(id)
			defer unlock()
			switch action {
			case "start":
				return nil, svc.LXC.Start(id)
			case "stop":
				return nil, svc.LXC.Stop(id)
			case "delete":
				if err := svc.LXC.Delete(id); err != nil {
					return nil, err
				}
				_ = svc.Store.DeleteCT(id)
				return nil, nil
			default:
				return nil, errors.New("unsupported action")
			}
		})
		writeJSON(w, http.StatusAccepted, map[string]string{"id": id, "action": action, "status": "queued", "task": taskID})
	}
}

// OCI-in-LXC handlers
func handleListOCI(cfg config.Config, svc Services) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		id := chi.URLParam(r, "id")
		items, err := svc.Podman.List(id)
		if err != nil {
			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
			return
		}
		writeJSON(w, http.StatusOK, items)
	}
}

func handleCreateOCI(cfg config.Config, svc Services) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctID := chi.URLParam(r, "id")
		var spec podman.CreateSpec
		if err := json.NewDecoder(r.Body).Decode(&spec); err != nil {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
			return
		}
		taskID := enqueueWork(svc.Tasks, "oci.create", func(ctx context.Context) (interface{}, error) {
			return svc.Podman.Create(ctID, spec)
		})
		writeJSON(w, http.StatusAccepted, map[string]string{"status": "queued", "task": taskID})
	}
}

func handleGetOCI(cfg config.Config, svc Services) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		cid := chi.URLParam(r, "cid")
		ctID := chi.URLParam(r, "id")
		items, err := svc.Podman.List(ctID)
		if err != nil {
			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
			return
		}
		for _, item := range items {
			if item.ID == cid {
				writeJSON(w, http.StatusOK, item)
				return
			}
		}
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "not found"})
	}
}

func handleStartOCI(cfg config.Config, svc Services) http.HandlerFunc {
	return lifecycleOCI(cfg, svc, "start")
}
func handleStopOCI(cfg config.Config, svc Services) http.HandlerFunc {
	return lifecycleOCI(cfg, svc, "stop")
}
func handleDeleteOCI(cfg config.Config, svc Services) http.HandlerFunc {
	return lifecycleOCI(cfg, svc, "delete")
}

func lifecycleOCI(cfg config.Config, svc Services, action string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		cid := chi.URLParam(r, "cid")
		ctID := chi.URLParam(r, "id")
		taskID := enqueueWork(svc.Tasks, "oci."+action, func(ctx context.Context) (interface{}, error) {
			switch action {
			case "start":
				return nil, svc.Podman.Start(ctID, cid)
			case "stop":
				return nil, svc.Podman.Stop(ctID, cid)
			case "delete":
				return nil, svc.Podman.Delete(ctID, cid)
			default:
				return nil, errors.New("unsupported action")
			}
		})
		writeJSON(w, http.StatusAccepted, map[string]string{"id": cid, "action": action, "status": "queued", "task": taskID})
	}
}

// Task handlers
func handleGetTask(cfg config.Config, svc Services) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		id := chi.URLParam(r, "id")
		t, ok := svc.Tasks.Get(id)
		if !ok {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": "not found"})
			return
		}
		writeJSON(w, http.StatusOK, t)
	}
}

// enqueueWork registers a task entry with queued status and schedules fn.
func enqueueWork(reg *tasks.Registry, kind string, fn tasks.WorkFunc) string {
	id := randomID()
	reg.EnqueueWork(id, wrapWork(kind, fn))
	return id
}

func randomID() string {
	b := make([]byte, 8)
	if _, err := rand.Read(b); err != nil {
		return "task-unknown"
	}
	return hex.EncodeToString(b)
}

func wrapWork(kind string, fn tasks.WorkFunc) tasks.WorkFunc {
	return func(ctx context.Context) (interface{}, error) {
		if fn == nil {
			return nil, errors.New("no work function")
		}
		res, err := fn(ctx)
		if err != nil {
			return nil, err
		}
		return map[string]interface{}{"kind": kind, "result": res}, nil
	}
}

// ctxKey avoids collisions for context values (string keys trip go vet).
type ctxKey string

// authMiddleware enforces a static bearer token.
func authMiddleware(token string) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			header := r.Header.Get("Authorization")
			if !strings.HasPrefix(header, "Bearer ") {
				writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "missing bearer token"})
				return
			}
			supplied := strings.TrimPrefix(header, "Bearer ")
			// Constant-time compare so the token cannot be probed via timing.
			if subtle.ConstantTimeCompare([]byte(supplied), []byte(token)) != 1 {
				writeJSON(w, http.StatusForbidden, map[string]string{"error": "invalid token"})
				return
			}
			ctx := context.WithValue(r.Context(), ctxKey("auth"), "ok")
			next.ServeHTTP(w, r.WithContext(ctx))
		})
	}
}

pkg/api/routes.go (new file, 112 lines)

@@ -0,0 +1,112 @@
package api

import (
	"github.com/go-chi/chi/v5"
	"github.com/go-chi/chi/v5/middleware"

	"jagacloud/node-agent/pkg/compute/libvirt"
	"jagacloud/node-agent/pkg/config"
	"jagacloud/node-agent/pkg/containers/lxc"
	"jagacloud/node-agent/pkg/containers/podman"
	"jagacloud/node-agent/pkg/state"
	"jagacloud/node-agent/pkg/tasks"
)

// Services bundles runtime services passed to handlers.
type Services struct {
	Tasks     *tasks.Registry
	Libvirt   libvirtClient
	LXC       lxcManager
	Podman    podmanClient
	Store     *state.Store
	Validator ResourceValidator
}

// StoreLock returns an unlock function for the given ID; no-op if store is nil.
func (s Services) StoreLock(id string) func() {
	if s.Store == nil {
		return func() {}
	}
	return s.Store.Lock(id)
}

// ResourceValidator validates network/storage resources before actions.
type ResourceValidator interface {
	ValidateVM(libvirt.VMSpec) error
	ValidateCT(lxc.Spec) error
}

// Interfaces narrowed for injection/testing.
type libvirtClient interface {
	ListVMs() ([]libvirt.VM, error)
	CreateVM(libvirt.VMSpec) (libvirt.VM, error)
	StartVM(id string) error
	StopVM(id string) error
	RebootVM(id string) error
	DeleteVM(id string) error
}

type lxcManager interface {
	List() ([]lxc.Container, error)
	Create(lxc.Spec) (lxc.Container, error)
	Start(id string) error
	Stop(id string) error
	Delete(id string) error
}

type podmanClient interface {
	List(ctID string) ([]podman.OCIContainer, error)
	Create(ctID string, spec podman.CreateSpec) (podman.OCIContainer, error)
	Start(ctID, cid string) error
	Stop(ctID, cid string) error
	Delete(ctID, cid string) error
}

// RegisterRoutes wires HTTP routes.
func RegisterRoutes(r *chi.Mux, cfg config.Config, svc Services) {
	r.Use(middleware.RequestID)
	r.Use(middleware.Logger)
	if cfg.AuthToken != "" {
		r.Use(authMiddleware(cfg.AuthToken))
	}
	r.Get("/api/v1/node", handleNodeInfo(cfg, svc))
	r.Route("/api/v1/vms", func(r chi.Router) {
		r.Get("/", handleListVMs(cfg, svc))
		r.Post("/", handleCreateVM(cfg, svc))
		r.Route("/{id}", func(r chi.Router) {
			r.Get("/", handleGetVM(cfg, svc))
			r.Post("/start", handleStartVM(cfg, svc))
			r.Post("/stop", handleStopVM(cfg, svc))
			r.Post("/reboot", handleRebootVM(cfg, svc))
			r.Post("/delete", handleDeleteVM(cfg, svc))
		})
	})
	r.Route("/api/v1/containers", func(r chi.Router) {
		r.Get("/", handleListCT(cfg, svc))
		r.Post("/", handleCreateCT(cfg, svc))
		r.Route("/{id}", func(r chi.Router) {
			r.Get("/", handleGetCT(cfg, svc))
			r.Post("/start", handleStartCT(cfg, svc))
			r.Post("/stop", handleStopCT(cfg, svc))
			r.Post("/delete", handleDeleteCT(cfg, svc))
			r.Route("/oci", func(r chi.Router) {
				r.Get("/", handleListOCI(cfg, svc))
				r.Post("/", handleCreateOCI(cfg, svc))
				r.Route("/{cid}", func(r chi.Router) {
					r.Get("/", handleGetOCI(cfg, svc))
					r.Post("/start", handleStartOCI(cfg, svc))
					r.Post("/stop", handleStopOCI(cfg, svc))
					r.Post("/delete", handleDeleteOCI(cfg, svc))
				})
			})
		})
	})
	r.Route("/api/v1/tasks", func(r chi.Router) {
		r.Get("/{id}", handleGetTask(cfg, svc))
	})
}
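
Because the Services fields are narrow interfaces, routing can be exercised with httptest and zero-value services. A hedged sketch of such a test (hypothetical file pkg/api/routes_test.go; the node handler only returns placeholder data, so nil adapters are safe here):

```go
package api

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/go-chi/chi/v5"

	"jagacloud/node-agent/pkg/config"
)

func TestNodeRouteRegistered(t *testing.T) {
	r := chi.NewRouter()
	// An empty AuthToken disables authMiddleware.
	RegisterRoutes(r, config.Config{}, Services{})
	srv := httptest.NewServer(r)
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/api/v1/node")
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("want 200, got %d", resp.StatusCode)
	}
}
```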


@@ -0,0 +1,53 @@
package libvirt

// Client abstracts libvirt operations needed by the node-agent.
type Client interface {
	ListVMs() ([]VM, error)
	CreateVM(spec VMSpec) (VM, error)
	StartVM(id string) error
	StopVM(id string) error
	RebootVM(id string) error
	DeleteVM(id string) error
}

// Tags match the wire format in api/openapi.yaml and the persisted specs
// in api/config-examples.md.
type VMSpec struct {
	ID           string         `json:"id" yaml:"id"`
	Name         string         `json:"name" yaml:"name"`
	CPU          int            `json:"cpus" yaml:"cpus"`
	MemoryMB     int            `json:"memory_mb" yaml:"memory_mb"`
	Disks        []DiskSpec     `json:"disks" yaml:"disks"`
	NICs         []NICSpec      `json:"nics" yaml:"nics"`
	CloudInit    *CloudInitSpec `json:"cloud_init,omitempty" yaml:"cloud_init,omitempty"`
	CloudInitISO string         `json:"cloud_init_iso,omitempty" yaml:"cloud_init_iso,omitempty"`
}

type DiskSpec struct {
	Name     string `json:"name" yaml:"name"`
	Pool     string `json:"pool" yaml:"pool"`
	SizeGB   int    `json:"size_gb" yaml:"size_gb"`
	Bus      string `json:"bus" yaml:"bus"`
	Path     string `json:"path,omitempty" yaml:"path,omitempty"`
	Dev      string `json:"dev,omitempty" yaml:"dev,omitempty"`           // target device (vda, vdb, ...); filled in by CreateVM
	Prealloc string `json:"prealloc,omitempty" yaml:"prealloc,omitempty"` // "", "metadata", "full"
}

type NICSpec struct {
	Bridge string `json:"bridge" yaml:"bridge"`
	VLAN   int    `json:"vlan" yaml:"vlan"`
	Model  string `json:"model" yaml:"model"`
}

type CloudInitSpec struct {
	User     string   `json:"user" yaml:"user"`
	SSHKeys  []string `json:"ssh_keys" yaml:"ssh_keys"`
	UserData string   `json:"user_data" yaml:"user_data"`
}

type VM struct {
	ID       string `json:"id"`
	Name     string `json:"name"`
	Status   string `json:"status"`
	CPU      int    `json:"cpus"`
	MemoryMB int    `json:"memory_mb"`
}

// TODO: implement a real libvirt-go backed client that renders domain XML from templates.


@@ -0,0 +1,407 @@
package libvirt

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"text/template"
)

// VirshClient shells out to virsh; avoids cgo dependencies.
type VirshClient struct {
	URI          string
	TemplatePath string
	OutputDir    string
	CloudInitDir string
}

func NewVirshClient(uri, tmpl, outDir string) *VirshClient {
	return &VirshClient{URI: uri, TemplatePath: tmpl, OutputDir: outDir, CloudInitDir: "/var/lib/libvirt/cloud-init"}
}

func (c *VirshClient) ListVMs() ([]VM, error) {
	cmd := exec.Command("virsh", "--connect", c.URI, "list", "--all", "--name")
	out, err := cmd.Output()
	if err != nil {
		return nil, err
	}
	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
	vms := []VM{}
	for _, line := range lines {
		name := strings.TrimSpace(line)
		if name == "" {
			continue
		}
		status := "unknown"
		statOut, _ := exec.Command("virsh", "--connect", c.URI, "domstate", name).Output()
		if len(statOut) > 0 {
			status = strings.TrimSpace(string(statOut))
		}
		vms = append(vms, VM{ID: name, Name: name, Status: status})
	}
	return vms, nil
}

func (c *VirshClient) CreateVM(spec VMSpec) (VM, error) {
	if spec.Name == "" {
		spec.Name = spec.ID
	}
	// Ensure disks have paths, target devices, and backing files.
	for i := range spec.Disks {
		if spec.Disks[i].Path == "" {
			spec.Disks[i].Path = fmt.Sprintf("/var/lib/libvirt/images/%s-%s.qcow2", spec.Name, spec.Disks[i].Name)
		}
		if spec.Disks[i].Bus == "" {
			spec.Disks[i].Bus = "virtio"
		}
		if spec.Disks[i].Dev == "" {
			// vda, vdb, ... in declaration order; the domain template uses this.
			spec.Disks[i].Dev = fmt.Sprintf("vd%c", 'a'+i)
		}
		if err := ensureDisk(spec.Disks[i]); err != nil {
			return VM{}, err
		}
	}
	for i := range spec.NICs {
		if spec.NICs[i].Model == "" {
			spec.NICs[i].Model = "virtio"
		}
	}
	if spec.CloudInit != nil && spec.CloudInitISO == "" {
		iso, err := c.createCloudInit(spec)
		if err != nil {
			return VM{}, err
		}
		spec.CloudInitISO = iso
	}
	xml, err := c.render(spec)
	if err != nil {
		return VM{}, err
	}
	if err := os.MkdirAll(c.OutputDir, 0o755); err != nil {
		return VM{}, err
	}
	domPath := filepath.Join(c.OutputDir, fmt.Sprintf("%s.xml", spec.Name))
	if err := os.WriteFile(domPath, []byte(xml), 0o644); err != nil {
		return VM{}, err
	}
	if err := c.run("define", domPath); err != nil {
		return VM{}, err
	}
	if err := c.run("start", spec.Name); err != nil {
		return VM{}, err
	}
	return VM{ID: spec.Name, Name: spec.Name, Status: "running", CPU: spec.CPU, MemoryMB: spec.MemoryMB}, nil
}

func (c *VirshClient) StartVM(id string) error  { return c.run("start", id) }
func (c *VirshClient) StopVM(id string) error   { return c.run("shutdown", id) }
func (c *VirshClient) RebootVM(id string) error { return c.run("reboot", id) }

func (c *VirshClient) DeleteVM(id string) error {
	// Destroy (force off) then undefine.
	_ = c.run("destroy", id)
	return c.run("undefine", id)
}

func (c *VirshClient) render(spec VMSpec) (string, error) {
	tmpl, err := template.ParseFiles(c.TemplatePath)
	if err != nil {
		return "", err
	}
	buf := bytes.NewBuffer(nil)
	if err := tmpl.Execute(buf, spec); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func (c *VirshClient) run(args ...string) error {
	full := append([]string{"--connect", c.URI}, args...)
	cmd := exec.Command("virsh", full...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

// ensureDisk ensures a qcow2 file exists with the given size.
func ensureDisk(d DiskSpec) error {
	if d.SizeGB == 0 && fileExists(d.Path) {
		return nil
	}
	if d.SizeGB == 0 {
		return fmt.Errorf("disk %s missing size_gb", d.Name)
	}
	if err := os.MkdirAll(filepath.Dir(d.Path), 0o755); err != nil {
		return err
	}
	if fileExists(d.Path) {
		return nil
	}
	args := []string{"create", "-f", "qcow2"}
	switch d.Prealloc {
	case "metadata":
		args = append(args, "-o", "preallocation=metadata")
	case "full":
		args = append(args, "-o", "preallocation=full")
	}
	args = append(args, d.Path, fmt.Sprintf("%dG", d.SizeGB))
	cmd := exec.Command("qemu-img", args...)
	cmd.Stdout = io.Discard
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func fileExists(p string) bool {
	_, err := os.Stat(p)
	return err == nil
}

// createCloudInit generates a simple cloud-init ISO using cloud-localds if available.
func (c *VirshClient) createCloudInit(spec VMSpec) (string, error) {
	if err := os.MkdirAll(c.CloudInitDir, 0o755); err != nil {
		return "", err
	}
	isoPath := filepath.Join(c.CloudInitDir, fmt.Sprintf("%s-cloudinit.iso", spec.Name))
	userData := "#cloud-config\n"
	if spec.CloudInit != nil {
		if spec.CloudInit.User != "" {
			userData += fmt.Sprintf("users:\n  - name: %s\n    sudo: ALL=(ALL) NOPASSWD:ALL\n    shell: /bin/bash\n", spec.CloudInit.User)
		}
		if len(spec.CloudInit.SSHKeys) > 0 {
			userData += "ssh_authorized_keys:\n"
			for _, k := range spec.CloudInit.SSHKeys {
				userData += fmt.Sprintf("  - %s\n", k)
			}
		}
		if spec.CloudInit.UserData != "" {
			userData += spec.CloudInit.UserData + "\n"
		}
	}
	// Write temporary user-data / meta-data seed files.
	tmpUD := filepath.Join(c.CloudInitDir, fmt.Sprintf("%s-user-data", spec.Name))
	if err := os.WriteFile(tmpUD, []byte(userData), 0o644); err != nil {
		return "", err
	}
	metaData := fmt.Sprintf("instance-id: %s\nlocal-hostname: %s\n", spec.Name, spec.Name)
	tmpMD := filepath.Join(c.CloudInitDir, fmt.Sprintf("%s-meta-data", spec.Name))
	if err := os.WriteFile(tmpMD, []byte(metaData), 0o644); err != nil {
		return "", err
	}
	if hasBinary("cloud-localds") {
		cmd := exec.Command("cloud-localds", isoPath, tmpUD, tmpMD)
		cmd.Stdout = io.Discard
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			return "", err
		}
		return isoPath, nil
	}
	// Fallback: build the ISO via genisoimage/mkisofs.
	args := []string{"-output", isoPath, "-volid", "cidata", "-joliet", "-rock", tmpUD, tmpMD}
	switch {
	case hasBinary("genisoimage"):
		cmd := exec.Command("genisoimage", args...)
		cmd.Stdout = io.Discard
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			return "", err
		}
	case hasBinary("mkisofs"):
		cmd := exec.Command("mkisofs", args...)
		cmd.Stdout = io.Discard
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			return "", err
		}
	default:
		if err := buildIsoPureGo(isoPath, tmpUD, tmpMD); err != nil {
			return "", fmt.Errorf("no iso tools available: %w", err)
		}
	}
	return isoPath, nil
}

func hasBinary(name string) bool {
	_, err := exec.LookPath(name)
	return err == nil
}

// buildIsoPureGo writes a minimal ISO with two files using only stdlib (ISO9660 Level 1).
// This is deliberately simple and avoids Joliet/RockRidge; it is a best-effort seed for
// cloud-init's NoCloud datasource, used only when no ISO tool is installed.
func buildIsoPureGo(outPath, userDataPath, metaDataPath string) error {
	files := []struct {
		Path string
		Data []byte
	}{
		{"user-data", nil},
		{"meta-data", nil},
	}
	for i, src := range []string{userDataPath, metaDataPath} {
		data, err := os.ReadFile(src)
		if err != nil {
			return err
		}
		files[i].Data = data
	}
	// Layout: sector 16 = primary volume descriptor (PVD), 17 = descriptor set
	// terminator, 18 = root directory, 19 = L path table, file data from 20.
	// (The original draft overlapped the terminator, root dir, and file data
	// on the same sector.)
	const sectorSize = 2048
	f, err := os.OpenFile(outPath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()
	align := func(off int64) int64 {
		if off%sectorSize == 0 {
			return off
		}
		return off + sectorSize - off%sectorSize
	}
	type fileEntry struct {
		name  string
		start int64
		size  int64
	}
	var entries []fileEntry
	offset := int64(sectorSize * 20)
	for _, fdesc := range files {
		offset = align(offset)
		if _, err := f.WriteAt(fdesc.Data, offset); err != nil {
			return err
		}
		entries = append(entries, fileEntry{name: fdesc.Path, start: offset / sectorSize, size: int64(len(fdesc.Data))})
		offset += int64(len(fdesc.Data))
	}
	// Root directory at sector 18: "." and ".." (both the root itself), then one
	// record per file. Identifiers are written verbatim; strict ISO9660 Level 1
	// wants upper-case names, but the Linux iso9660 driver accepts these.
	rootSector := int64(18)
	dirRec := buildDirRecord(rootSector, sectorSize, true)                           // "."
	dirRec = append(dirRec, buildDirRecord(rootSector, sectorSize, true, "\x01")...) // ".."
	for _, e := range entries {
		dirRec = append(dirRec, buildDirRecord(e.start, e.size, false, e.name)...)
	}
	dirRec = padTo(dirRec, sectorSize)
	if _, err := f.WriteAt(dirRec, rootSector*sectorSize); err != nil {
		return err
	}
	// Minimal type-L path table holding only the root entry.
	pathTable := []byte{1, 0, 0, 0, 0, 0, 1, 0, 0, 0}
	putLE(pathTable[2:6], uint32(rootSector))
	if _, err := f.WriteAt(pathTable, 19*sectorSize); err != nil {
		return err
	}
	// Primary Volume Descriptor.
	pvd := make([]byte, sectorSize)
	pvd[0] = 1              // descriptor type
	copy(pvd[1:6], "CD001") // standard identifier
	pvd[6] = 1              // version
	copy(pvd[40:72], padString("CIDATA", 32))                   // volume id; NoCloud looks for "cidata"
	putBothEndian(pvd[80:88], uint32(align(offset)/sectorSize)) // volume space size
	pvd[120], pvd[123] = 1, 1                                   // volume set size, both-endian 1
	pvd[124], pvd[127] = 1, 1                                   // volume sequence number, both-endian 1
	pvd[129], pvd[130] = 0x08, 0x08                             // logical block size 2048, both-endian
	putBothEndian(pvd[132:140], uint32(len(pathTable)))         // path table size
	putLE(pvd[140:144], 19)                                     // type-L path table location
	copy(pvd[156:], buildDirRecord(rootSector, sectorSize, true)) // root directory record
	copy(pvd[190:], padString("JAGACLOUD", 32))
	if _, err := f.WriteAt(pvd, 16*sectorSize); err != nil {
		return err
	}
	// Volume descriptor set terminator.
	vdst := make([]byte, sectorSize)
	vdst[0] = 255
	copy(vdst[1:6], "CD001")
	vdst[6] = 1
	if _, err := f.WriteAt(vdst, 17*sectorSize); err != nil {
		return err
	}
	return nil
}

// buildDirRecord builds one ISO9660 directory record. The default identifier
// is "\x00" (the "current directory" entry); pass "\x01" for the parent entry.
func buildDirRecord(extent int64, size int64, isDir bool, name ...string) []byte {
	n := "\x00"
	if len(name) > 0 {
		n = name[0]
	}
	rec := []byte{
		0, // record length, patched below
		0, // extended attribute record length
	}
	ext := make([]byte, 8)
	putBothEndian(ext, uint32(extent))
	rec = append(rec, ext...)
	sz := make([]byte, 8)
	putBothEndian(sz, uint32(size))
	rec = append(rec, sz...)
	rec = append(rec, make([]byte, 7)...) // recording date/time (zeroed)
	flags := byte(0)
	if isDir {
		flags = 2
	}
	rec = append(rec, flags)
	rec = append(rec, 0, 0)       // file unit size, interleave gap
	rec = append(rec, 1, 0, 0, 1) // volume sequence number, both-endian 1
	rec = append(rec, byte(len(n)))
	rec = append(rec, []byte(n)...)
	if len(rec)%2 != 0 {
		rec = append(rec, 0) // records must be even-length
	}
	rec[0] = byte(len(rec))
	return rec
}

func padString(s string, l int) []byte {
	b := make([]byte, l)
	copy(b, s)
	return b
}

func padTo(b []byte, size int) []byte {
	if len(b)%size == 0 {
		return b
	}
	pad := size - len(b)%size
	return append(b, make([]byte, pad)...)
}

func putBothEndian(dst []byte, v uint32) {
	putLE(dst, v)
	putBE(dst[4:], v)
}

func putLE(dst []byte, v uint32) {
	dst[0] = byte(v)
	dst[1] = byte(v >> 8)
	dst[2] = byte(v >> 16)
	dst[3] = byte(v >> 24)
}

func putBE(dst []byte, v uint32) {
	dst[0] = byte(v >> 24)
	dst[1] = byte(v >> 16)
	dst[2] = byte(v >> 8)
	dst[3] = byte(v)
}
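
Driving this client directly looks roughly like the sketch below (values are illustrative; virsh and qemu-img must be on PATH, and the template path is the one main.go uses):

```go
package main

import (
	"fmt"
	"log"

	"jagacloud/node-agent/pkg/compute/libvirt"
)

func main() {
	c := libvirt.NewVirshClient("qemu:///system", "templates/libvirt/domain.xml.tmpl", "/etc/jagacloud/libvirt")
	vm, err := c.CreateVM(libvirt.VMSpec{
		ID:       "100",
		Name:     "vm-100",
		CPU:      2,
		MemoryMB: 2048,
		Disks:    []libvirt.DiskSpec{{Name: "root", SizeGB: 10}}, // qcow2 created via qemu-img
		NICs:     []libvirt.NICSpec{{Bridge: "vmbr0"}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(vm.Name, vm.Status)
}
```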

pkg/config/config.go (new file, 84 lines)

@@ -0,0 +1,84 @@
package config

import (
	"errors"
	"io"
	"os"
	"strings"

	"gopkg.in/yaml.v3"
)

// Config captures node-agent runtime configuration.
type Config struct {
	ListenAddr   string        `json:"listen_addr" yaml:"listen_addr"`
	LibvirtURI   string        `json:"libvirt_uri" yaml:"libvirt_uri"`
	LXCPath      string        `json:"lxc_path" yaml:"lxc_path"`
	PodmanSocket string        `json:"podman_socket" yaml:"podman_socket"`
	VMPath       string        `json:"vm_path" yaml:"vm_path"`
	CTPath       string        `json:"ct_path" yaml:"ct_path"`
	AuthToken    string        `json:"auth_token" yaml:"auth_token"`
	StoragePools []StoragePool `json:"storage_pools" yaml:"storage_pools"`
	Bridges      []Bridge      `json:"bridges" yaml:"bridges"`
}

type StoragePool struct {
	Name string `json:"name" yaml:"name"`
	Type string `json:"type" yaml:"type"`
	Path string `json:"path" yaml:"path"`
}

type Bridge struct {
	Name      string `json:"name" yaml:"name"`
	VlanAware bool   `json:"vlan_aware" yaml:"vlan_aware"`
	MTU       int    `json:"mtu" yaml:"mtu"`
}

// LoadOrExit loads YAML/JSON config and applies defaults. It panics on fatal
// errors to keep the agent simple for now.
func LoadOrExit(path string) Config {
	cfg := Config{
		ListenAddr: ":8000",
		LibvirtURI: "qemu:///system",
		LXCPath:    "/etc/jagacloud/lxc",
		VMPath:     "/etc/jagacloud/vm",
		CTPath:     "/etc/jagacloud/ct",
	}
	f, err := os.Open(path)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			// A missing file is allowed; defaults stay in place.
			return cfg
		}
		panic(err)
	}
	defer f.Close()
	raw, err := io.ReadAll(f)
	if err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	cfg.ListenAddr = strings.TrimSpace(cfg.ListenAddr)
	if cfg.ListenAddr == "" {
		cfg.ListenAddr = ":8000"
	}
	if cfg.LibvirtURI == "" {
		cfg.LibvirtURI = "qemu:///system"
	}
	if cfg.LXCPath == "" {
		cfg.LXCPath = "/etc/jagacloud/lxc"
	}
	if cfg.VMPath == "" {
		cfg.VMPath = "/etc/jagacloud/vm"
	}
	if cfg.CTPath == "" {
		cfg.CTPath = "/etc/jagacloud/ct"
	}
	return cfg
}

pkg/containers/lxc/lxc.go (new file, 43 lines)

@@ -0,0 +1,43 @@
package lxc

// Manager abstracts LXC lifecycle operations.
type Manager interface {
	List() ([]Container, error)
	Create(spec Spec) (Container, error)
	Start(id string) error
	Stop(id string) error
	Delete(id string) error
}

// Tags match the wire format in api/openapi.yaml and the persisted specs.
type Spec struct {
	ID           string    `json:"id" yaml:"id"`
	Name         string    `json:"name" yaml:"name"`
	Template     string    `json:"template" yaml:"template"`
	RootfsPool   string    `json:"rootfs_pool" yaml:"rootfs_pool"`
	RootfsSizeG  int       `json:"rootfs_size_gb" yaml:"rootfs_size_gb"`
	NICs         []NICSpec `json:"nics" yaml:"nics"`
	Limits       Limits    `json:"limits" yaml:"limits"`
	Unprivileged bool      `json:"unprivileged" yaml:"unprivileged"`
}

type NICSpec struct {
	Bridge string `json:"bridge" yaml:"bridge"`
	VLAN   int    `json:"vlan" yaml:"vlan"`
	HWAddr string `json:"hwaddr,omitempty" yaml:"hwaddr,omitempty"`
	MTU    int    `json:"mtu,omitempty" yaml:"mtu,omitempty"`
	Name   string `json:"name,omitempty" yaml:"name,omitempty"`
}

type Limits struct {
	CPU      int `json:"cpus" yaml:"cpus"`
	MemoryMB int `json:"memory_mb" yaml:"memory_mb"`
}

type Container struct {
	ID     string `json:"id"`
	Name   string `json:"name"`
	Status string `json:"status"`
	Unpriv bool   `json:"unprivileged"`
}

// TODO: shell out to lxc-* binaries with generated config under cfg path.


@@ -0,0 +1,157 @@
package lxc

import (
	"bytes"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
)

// CmdManager uses lxc-* commands.
type CmdManager struct {
	ConfigDir string
	Template  string // unused placeholder for future rendering
}

func NewCmdManager(cfgDir string) *CmdManager {
	return &CmdManager{ConfigDir: cfgDir}
}

func (m *CmdManager) List() ([]Container, error) {
	// lxc-ls --active reports running containers only; stopped CTs are known
	// from the persisted specs in the store.
	cmd := exec.Command("lxc-ls", "--active")
	out, err := cmd.Output()
	if err != nil {
		return nil, err
	}
	names := splitLines(string(out))
	var res []Container
	for _, n := range names {
		if n == "" {
			continue
		}
		res = append(res, Container{ID: n, Name: n, Status: "running", Unpriv: true})
	}
	return res, nil
}

func (m *CmdManager) Create(spec Spec) (Container, error) {
	if spec.Name == "" {
		spec.Name = spec.ID
	}
	if err := os.MkdirAll(m.ConfigDir, 0o755); err != nil {
		return Container{}, err
	}
	// Render the config first so lxc-create can apply it via -f.
	cfgPath := filepath.Join(m.ConfigDir, fmt.Sprintf("%s.conf", spec.Name))
	cfgContent, err := renderConfig(spec)
	if err != nil {
		return Container{}, err
	}
	if err := os.WriteFile(cfgPath, []byte(cfgContent), 0o644); err != nil {
		return Container{}, err
	}
	// For simplicity, use the download template; real code should build the
	// rootfs according to spec (template, pool, size).
	args := []string{"-n", spec.Name, "-f", cfgPath, "-t", "download", "--", "-d", "debian", "-r", "bookworm", "-a", "amd64"}
	if err := exec.Command("lxc-create", args...).Run(); err != nil {
		return Container{}, err
	}
	return Container{ID: spec.Name, Name: spec.Name, Status: "stopped", Unpriv: spec.Unprivileged}, nil
}

func (m *CmdManager) Start(id string) error {
	return exec.Command("lxc-start", "-n", id).Run()
}

func (m *CmdManager) Stop(id string) error {
	return exec.Command("lxc-stop", "-n", id).Run()
}

func (m *CmdManager) Delete(id string) error {
	return exec.Command("lxc-destroy", "-n", id).Run()
}

func splitLines(s string) []string {
	var out []string
	start := 0
	for i := 0; i < len(s); i++ {
		if s[i] == '\n' || s[i] == '\r' {
			if start < i {
				out = append(out, s[start:i])
			}
			start = i + 1
		}
	}
	if start < len(s) {
		out = append(out, s[start:])
	}
	return out
}

func renderConfig(spec Spec) (string, error) {
	buf := &bytes.Buffer{}
	fmt.Fprintf(buf, "lxc.include = /usr/share/lxc/config/common.conf\n")
	fmt.Fprintf(buf, "lxc.arch = linux64\n")
	if spec.Unprivileged {
		fmt.Fprintf(buf, "lxc.apparmor.profile = generated\n")
		fmt.Fprintf(buf, "lxc.apparmor.allow_nesting = 1\n")
		// UID and GID ranges come from /etc/subuid and /etc/subgid
		// respectively; the original reused the subuid start for both.
		uidStart, uidCount, gidStart, gidCount, ok := hostIDMap()
		if !ok {
			return "", fmt.Errorf("no subuid/subgid ranges found for root; configure /etc/subuid and /etc/subgid")
		}
		fmt.Fprintf(buf, "lxc.idmap = u 0 %d %d\n", uidStart, uidCount)
		fmt.Fprintf(buf, "lxc.idmap = g 0 %d %d\n", gidStart, gidCount)
	}
	rootfs := fmt.Sprintf("/var/lib/lxc/%s/rootfs", spec.Name)
	fmt.Fprintf(buf, "lxc.rootfs.path = dir:%s\n", rootfs)
	for idx, nic := range spec.NICs {
		fmt.Fprintf(buf, "lxc.net.%d.type = veth\n", idx)
		fmt.Fprintf(buf, "lxc.net.%d.link = %s\n", idx, nic.Bridge)
		if nic.VLAN > 0 {
			fmt.Fprintf(buf, "lxc.net.%d.vlan.id = %d\n", idx, nic.VLAN)
		}
		if nic.HWAddr != "" {
			fmt.Fprintf(buf, "lxc.net.%d.hwaddr = %s\n", idx, nic.HWAddr)
		}
		if nic.MTU > 0 {
			fmt.Fprintf(buf, "lxc.net.%d.mtu = %d\n", idx, nic.MTU)
		}
		if nic.Name != "" {
			fmt.Fprintf(buf, "lxc.net.%d.name = %s\n", idx, nic.Name)
		}
	}
	return buf.String(), nil
}

// hostIDMap finds the first subuid and subgid ranges for root.
func hostIDMap() (uidStart, uidCount, gidStart, gidCount int, ok bool) {
	contentUID, errUID := os.ReadFile("/etc/subuid")
	contentGID, errGID := os.ReadFile("/etc/subgid")
	if errUID != nil || errGID != nil {
		return 0, 0, 0, 0, false
	}
	s1, c1 := parseSubID(contentUID)
	s2, c2 := parseSubID(contentGID)
	if s1 > 0 && c1 > 0 && s2 > 0 && c2 > 0 {
		return s1, c1, s2, c2, true
	}
	return 0, 0, 0, 0, false
}

func parseSubID(b []byte) (int, int) {
	for _, line := range strings.Split(string(b), "\n") {
		fields := strings.Split(line, ":")
		if len(fields) < 3 || fields[0] != "root" {
			continue
		}
		s, err1 := strconv.Atoi(fields[1])
		c, err2 := strconv.Atoi(fields[2])
		if err1 == nil && err2 == nil {
			return s, c
		}
	}
	return 0, 0
}
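
End to end, creating and starting a CT through this manager looks roughly like the sketch below (values are illustrative; lxc-* tools and root subuid/subgid ranges are required):

```go
package main

import (
	"fmt"
	"log"

	"jagacloud/node-agent/pkg/containers/lxc"
)

func main() {
	m := lxc.NewCmdManager("/etc/jagacloud/lxc")
	ct, err := m.Create(lxc.Spec{
		ID:           "200",
		Unprivileged: true,
		NICs:         []lxc.NICSpec{{Bridge: "vmbr0", VLAN: 20}},
	})
	if err != nil {
		log.Fatal(err) // e.g. missing root entries in /etc/subuid and /etc/subgid
	}
	if err := m.Start(ct.ID); err != nil {
		log.Fatal(err)
	}
	fmt.Println("started", ct.Name)
}
```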


@@ -0,0 +1,32 @@
package podman

// Client talks to Podman inside a container (or via a host socket).
type Client interface {
	List(ctID string) ([]OCIContainer, error)
	Create(ctID string, spec CreateSpec) (OCIContainer, error)
	Start(ctID, cid string) error
	Stop(ctID, cid string) error
	Delete(ctID, cid string) error
}

// Tags match the wire format in api/openapi.yaml.
type CreateSpec struct {
	Image   string            `json:"image"`
	Cmd     []string          `json:"cmd"`
	Env     map[string]string `json:"env"`
	Ports   []PortMap         `json:"ports"`
	Volumes []string          `json:"volumes"`
	Restart string            `json:"restart"`
}

type PortMap struct {
	HostPort      int `json:"host_port"`
	ContainerPort int `json:"container_port"`
}

type OCIContainer struct {
	ID     string `json:"id"`
	Image  string `json:"image"`
	Status string `json:"status"`
}

// TODO: connect via nsenter into CT and talk to Podman socket.


@@ -0,0 +1,122 @@
package podman

import (
	"encoding/json"
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

// CmdClient drives podman via the CLI, either inside a CT (nsenter) or
// against a host socket.
type CmdClient struct {
	SocketPath string
}

func NewCmdClient(sock string) *CmdClient {
	return &CmdClient{SocketPath: sock}
}

func (c *CmdClient) List(ctID string) ([]OCIContainer, error) {
	// --all includes created/stopped containers, not just running ones.
	args := c.baseArgs(ctID, "ps", "--all", "--format", "json")
	out, err := exec.Command(args[0], args[1:]...).Output()
	if err != nil {
		return nil, err
	}
	var parsed []struct {
		ID     string `json:"Id"`
		Image  string `json:"Image"`
		Status string `json:"Status"`
	}
	if err := json.Unmarshal(out, &parsed); err != nil {
		return nil, err
	}
	res := make([]OCIContainer, 0, len(parsed))
	for _, p := range parsed {
		res = append(res, OCIContainer{ID: p.ID, Image: p.Image, Status: p.Status})
	}
	return res, nil
}

func (c *CmdClient) Create(ctID string, spec CreateSpec) (OCIContainer, error) {
	args := c.baseArgs(ctID, "create")
	for k, v := range spec.Env {
		args = append(args, "--env", k+"="+v)
	}
	for _, v := range spec.Volumes {
		args = append(args, "-v", v)
	}
	for _, p := range spec.Ports {
		args = append(args, "-p", formatPort(p))
	}
	if spec.Restart != "" {
		args = append(args, "--restart", spec.Restart)
	}
	args = append(args, spec.Image)
	args = append(args, spec.Cmd...)
	out, err := exec.Command(args[0], args[1:]...).CombinedOutput()
	if err != nil {
		return OCIContainer{}, err
	}
	id := strings.TrimSpace(string(out))
	return OCIContainer{ID: id, Image: spec.Image, Status: "created"}, nil
}

func (c *CmdClient) Start(ctID, cid string) error {
	args := c.baseArgs(ctID, "start", cid)
	return exec.Command(args[0], args[1:]...).Run()
}

func (c *CmdClient) Stop(ctID, cid string) error {
	args := c.baseArgs(ctID, "stop", cid)
	return exec.Command(args[0], args[1:]...).Run()
}

func (c *CmdClient) Delete(ctID, cid string) error {
	args := c.baseArgs(ctID, "rm", "-f", cid)
	return exec.Command(args[0], args[1:]...).Run()
}

// baseArgs chooses how to enter the CT: prefer nsenter into the CT's init PID
// (mount, UTS, IPC, net, and pid namespaces; the original omitted -p, leaving
// podman in the host pid namespace); otherwise fall back to a host socket.
func (c *CmdClient) baseArgs(ctID string, args ...string) []string {
	if ctID != "" {
		if pid := containerInitPID(ctID); pid > 0 {
			nsPrefix := []string{"nsenter", "-t", strconv.Itoa(pid), "-m", "-u", "-i", "-n", "-p", "--"}
			return append(nsPrefix, append([]string{"podman"}, args...)...)
		}
	}
	if c.SocketPath != "" {
		return append([]string{"podman", "--url", "unix://" + c.SocketPath}, args...)
	}
	return append([]string{"podman"}, args...)
}

func formatPort(p PortMap) string {
	return fmt.Sprintf("%d:%d", p.HostPort, p.ContainerPort)
}

// containerInitPID returns the PID of the CT's init using lxc-info.
func containerInitPID(ctID string) int {
	out, err := exec.Command("lxc-info", "-n", ctID, "-pH").Output()
	if err != nil {
		return 0
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(out)))
	if err != nil {
		return 0
	}
	return pid
}
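
Usage follows the same shape as the other adapters; a short sketch (illustrative CT ID and socket path):

```go
package main

import (
	"fmt"
	"log"

	"jagacloud/node-agent/pkg/containers/podman"
)

func main() {
	p := podman.NewCmdClient("/run/podman/podman.sock")
	// With a CT ID, the client nsenters into that container's namespaces
	// (lxc-info must report an init PID); with "" it uses the host socket.
	items, err := p.List("200")
	if err != nil {
		log.Fatal(err)
	}
	for _, it := range items {
		fmt.Println(it.ID, it.Image, it.Status)
	}
}
```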

pkg/network/network.go (new file, 15 lines)

@@ -0,0 +1,15 @@
package network

// BridgeManager handles host bridge/VLAN setup.
type BridgeManager interface {
	EnsureBridge(name string, vlanAware bool, mtu int) error
	List() ([]Bridge, error)
}

type Bridge struct {
	Name      string
	VlanAware bool
	MTU       int
}

// TODO: implement using systemd-networkd/ifupdown2 files in /etc/jagacloud/network.
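
One way to satisfy the TODO is to write systemd-networkd unit files, mirroring the .netdev/.network templates in this commit. A hedged sketch (directory, file names, and the networkctl reload step are assumptions, and List is omitted for brevity):

```go
package network

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// NetworkdManager is a sketch of a networkd-backed bridge manager.
type NetworkdManager struct {
	Dir string // e.g. /etc/systemd/network or /etc/jagacloud/network
}

func (m *NetworkdManager) EnsureBridge(name string, vlanAware bool, mtu int) error {
	netdev := fmt.Sprintf("[NetDev]\nName=%s\nKind=bridge\n", name)
	if vlanAware {
		netdev += "\n[Bridge]\nVLANFiltering=yes\n"
	}
	if err := os.WriteFile(filepath.Join(m.Dir, name+".netdev"), []byte(netdev), 0o644); err != nil {
		return err
	}
	network := fmt.Sprintf("[Match]\nName=%s\n", name)
	if mtu > 0 {
		network += fmt.Sprintf("\n[Link]\nMTUBytes=%d\n", mtu)
	}
	if err := os.WriteFile(filepath.Join(m.Dir, name+".network"), []byte(network), 0o644); err != nil {
		return err
	}
	// Ask networkd to pick up the new files.
	return exec.Command("networkctl", "reload").Run()
}
```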

pkg/state/store.go (new file, 202 lines)

@@ -0,0 +1,202 @@
package state

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"sync"

	"gopkg.in/yaml.v3"

	"jagacloud/node-agent/pkg/compute/libvirt"
	"jagacloud/node-agent/pkg/containers/lxc"
)

// Store persists VM/CT specs to disk and provides simple locks per ID.
type Store struct {
	vmDir string
	ctDir string
	mu    sync.Mutex
	locks map[string]*sync.Mutex
}

func NewStore(vmDir, ctDir string) *Store {
	return &Store{vmDir: vmDir, ctDir: ctDir, locks: make(map[string]*sync.Mutex)}
}

func (s *Store) ensureDirs() error {
	for _, d := range []string{s.vmDir, s.ctDir} {
		if err := os.MkdirAll(d, 0o755); err != nil {
			return err
		}
	}
	return nil
}

// ---- Locks ----

func (s *Store) lock(id string) func() {
	s.mu.Lock()
	m, ok := s.locks[id]
	if !ok {
		m = &sync.Mutex{}
		s.locks[id] = m
	}
	s.mu.Unlock()
	m.Lock()
	return m.Unlock
}

// Lock exposes a per-ID lock; caller must defer the returned unlock function.
func (s *Store) Lock(id string) func() {
	return s.lock(id)
}

// ---- VM ----

func (s *Store) SaveVM(spec libvirt.VMSpec) error {
	if err := s.ensureDirs(); err != nil {
		return err
	}
	if spec.ID == "" && spec.Name != "" {
		spec.ID = spec.Name
	}
	if spec.ID == "" {
		return fmt.Errorf("vm id is required")
	}
	data, err := yaml.Marshal(spec)
	if err != nil {
		return err
	}
	path := filepath.Join(s.vmDir, fmt.Sprintf("%s.yaml", spec.ID))
	return os.WriteFile(path, data, 0o644)
}

func (s *Store) LoadVM(id string) (libvirt.VMSpec, error) {
	var spec libvirt.VMSpec
	path := filepath.Join(s.vmDir, fmt.Sprintf("%s.yaml", id))
	data, err := os.ReadFile(path)
	if err != nil {
		return spec, err
	}
	if err := yaml.Unmarshal(data, &spec); err != nil {
		return spec, err
	}
	if spec.ID == "" {
		spec.ID = id
	}
	return spec, nil
}

func (s *Store) ListVMs() ([]libvirt.VMSpec, error) {
	specs := []libvirt.VMSpec{}
	err := filepath.WalkDir(s.vmDir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		if filepath.Ext(path) != ".yaml" {
			return nil
		}
		data, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		var spec libvirt.VMSpec
		if err := yaml.Unmarshal(data, &spec); err != nil {
			return err
		}
		if spec.ID == "" {
			spec.ID = trimExt(filepath.Base(path))
		}
		specs = append(specs, spec)
		return nil
	})
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return specs, nil
}

func (s *Store) DeleteVM(id string) error {
	path := filepath.Join(s.vmDir, fmt.Sprintf("%s.yaml", id))
	return os.Remove(path)
}

// ---- CT ----

func (s *Store) SaveCT(spec lxc.Spec) error {
	if err := s.ensureDirs(); err != nil {
		return err
	}
	if spec.ID == "" && spec.Name != "" {
		spec.ID = spec.Name
	}
	if spec.ID == "" {
		return fmt.Errorf("ct id is required")
	}
	data, err := yaml.Marshal(spec)
	if err != nil {
		return err
	}
	path := filepath.Join(s.ctDir, fmt.Sprintf("%s.yaml", spec.ID))
	return os.WriteFile(path, data, 0o644)
}

func (s *Store) LoadCT(id string) (lxc.Spec, error) {
	var spec lxc.Spec
	path := filepath.Join(s.ctDir, fmt.Sprintf("%s.yaml", id))
	data, err := os.ReadFile(path)
	if err != nil {
		return spec, err
	}
	if err := yaml.Unmarshal(data, &spec); err != nil {
		return spec, err
	}
	if spec.ID == "" {
		spec.ID = id
	}
	return spec, nil
}

func (s *Store) ListCTs() ([]lxc.Spec, error) {
	specs := []lxc.Spec{}
	err := filepath.WalkDir(s.ctDir, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		if filepath.Ext(path) != ".yaml" {
			return nil
		}
		data, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		var spec lxc.Spec
		if err := yaml.Unmarshal(data, &spec); err != nil {
			return err
		}
		if spec.ID == "" {
			spec.ID = trimExt(filepath.Base(path))
		}
		specs = append(specs, spec)
		return nil
	})
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return specs, nil
}

func (s *Store) DeleteCT(id string) error {
	path := filepath.Join(s.ctDir, fmt.Sprintf("%s.yaml", id))
	return os.Remove(path)
}

func trimExt(name string) string {
	return name[:len(name)-len(filepath.Ext(name))]
}
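
os.WriteFile is not atomic, so a crash mid-write can leave a truncated spec on disk. A common remedy, not applied in this commit, is write-to-temp-then-rename; a sketch of such a helper:

```go
package state

import (
	"os"
	"path/filepath"
)

// writeFileAtomic is a hypothetical crash-safe replacement for os.WriteFile:
// write to a temp file in the same directory, then rename over the target.
func writeFileAtomic(path string, data []byte, mode os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op once the rename succeeds
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(mode); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path) // atomic on POSIX filesystems
}
```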

pkg/storage/storage.go (new file, 25 lines)

@@ -0,0 +1,25 @@
package storage

// Pool abstracts storage operations (minimal set for v1).
type Pool interface {
	Name() string
	Type() string
	AllocateVolume(spec VolumeSpec) (Volume, error)
	DeleteVolume(id string) error
}

type VolumeSpec struct {
	Name   string
	SizeGB int
	// Pool-specific fields may be embedded later.
}

type Volume struct {
	ID     string
	Name   string
	Pool   string
	SizeGB int
	Path   string
}

// TODO: add concrete pool implementations for dir/lvm/zfs.
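
A dir-backed pool is the simplest concrete implementation; the sketch below mirrors what ensureDisk does on the libvirt side via qemu-img (field names and file layout are assumptions, not settled design):

```go
package storage

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// DirPool is a sketch of a dir-backed Pool using qemu-img.
type DirPool struct {
	PoolName string
	Root     string // e.g. /var/lib/jagacloud/images
}

func (p *DirPool) Name() string { return p.PoolName }
func (p *DirPool) Type() string { return "dir" }

func (p *DirPool) AllocateVolume(spec VolumeSpec) (Volume, error) {
	if err := os.MkdirAll(p.Root, 0o755); err != nil {
		return Volume{}, err
	}
	vol := filepath.Join(p.Root, spec.Name+".qcow2")
	if err := exec.Command("qemu-img", "create", "-f", "qcow2", vol, fmt.Sprintf("%dG", spec.SizeGB)).Run(); err != nil {
		return Volume{}, err
	}
	return Volume{ID: spec.Name, Name: spec.Name, Pool: p.PoolName, SizeGB: spec.SizeGB, Path: vol}, nil
}

func (p *DirPool) DeleteVolume(id string) error {
	return os.Remove(filepath.Join(p.Root, id+".qcow2"))
}
```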

pkg/tasks/tasks.go (new file, 100 lines)

@@ -0,0 +1,100 @@
package tasks

import (
	"context"
	"errors"
	"sync"
)

// Task is a simple in-memory task record; the registry is a placeholder to
// replace with a durable store if needed. Tags match api/openapi.yaml.
type Task struct {
	ID     string      `json:"id"`
	Status string      `json:"status"`
	Result interface{} `json:"result,omitempty"`
	Err    string      `json:"error,omitempty"`
}

type Registry struct {
	mu       sync.Mutex
	tasks    map[string]Task
	workChan chan workItem
}

type workItem struct {
	taskID string
	fn     WorkFunc
}

// WorkFunc performs the actual task work.
type WorkFunc func(ctx context.Context) (interface{}, error)

func NewRegistry() *Registry {
	return &Registry{
		tasks:    make(map[string]Task),
		workChan: make(chan workItem, 64),
	}
}

func (r *Registry) Add(t Task) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.tasks[t.ID] = t
}

// EnqueueWork registers a task and queues it for asynchronous processing.
func (r *Registry) EnqueueWork(id string, fn WorkFunc) {
	r.Add(Task{ID: id, Status: "queued"})
	r.workChan <- workItem{taskID: id, fn: fn}
}

func (r *Registry) Update(id string, status string, result interface{}) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if t, ok := r.tasks[id]; ok {
		t.Status = status
		t.Result = result
		r.tasks[id] = t
	}
}

func (r *Registry) Fail(id string, err error) {
	if err == nil {
		err = errors.New("unknown error")
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	if t, ok := r.tasks[id]; ok {
		t.Status = "error"
		t.Err = err.Error()
		r.tasks[id] = t
	}
}

func (r *Registry) Get(id string) (Task, bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	t, ok := r.tasks[id]
	return t, ok
}

// StartWorker processes queued work sequentially. Caller should run this in a goroutine.
func (r *Registry) StartWorker(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			return
		case item := <-r.workChan:
			r.runItem(ctx, item)
		}
	}
}

func (r *Registry) runItem(ctx context.Context, item workItem) {
	r.Update(item.taskID, "running", nil)
	res, err := item.fn(ctx)
	if err != nil {
		r.Fail(item.taskID, err)
		return
	}
	r.Update(item.taskID, "completed", res)
}
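
The queued/running/completed lifecycle is easiest to see in isolation; a small runnable sketch (the agent exposes the same state via GET /api/v1/tasks/{id}):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"jagacloud/node-agent/pkg/tasks"
)

func main() {
	reg := tasks.NewRegistry()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go reg.StartWorker(ctx)

	reg.EnqueueWork("demo-1", func(ctx context.Context) (interface{}, error) {
		return "done", nil
	})
	time.Sleep(100 * time.Millisecond) // give the worker a moment; clients would poll instead
	if t, ok := reg.Get("demo-1"); ok {
		fmt.Println(t.Status, t.Result) // "completed done" once the worker ran
	}
}
```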


@@ -0,0 +1,46 @@
package validators

import (
	"fmt"
	"os/exec"

	"jagacloud/node-agent/pkg/compute/libvirt"
	"jagacloud/node-agent/pkg/containers/lxc"
)

// CheckBridge returns nil if the bridge exists on the host.
func CheckBridge(name string) error {
	if name == "" {
		return fmt.Errorf("bridge name required")
	}
	// Invoke ip directly; routing a client-supplied name through a shell
	// (as the original bash -lc call did) would allow command injection.
	if err := exec.Command("ip", "link", "show", name).Run(); err != nil {
		return fmt.Errorf("bridge %s not found", name)
	}
	return nil
}

// CheckBridgeSet validates all bridges in VM NIC specs.
func CheckBridgeSet(nics []libvirt.NICSpec) error {
	for _, nic := range nics {
		if err := CheckBridge(nic.Bridge); err != nil {
			return err
		}
	}
	return nil
}

// CheckBridgeSetCT validates bridges for LXC NICs.
func CheckBridgeSetCT(nics []lxc.NICSpec) error {
	for _, nic := range nics {
		if err := CheckBridge(nic.Bridge); err != nil {
			return err
		}
	}
	return nil
}

// CheckStoragePool is a stub; implement LVM/ZFS/dir verification as needed.
func CheckStoragePool(name string) error {
	if name == "" {
		return fmt.Errorf("storage pool name required")
	}
	// TODO: query configured pools
	return nil
}


@@ -0,0 +1,41 @@
<!-- Minimal libvirt domain template rendered by jagacloud -->
<domain type='kvm'>
  <name>{{.Name}}</name>
  <memory unit='MiB'>{{.MemoryMB}}</memory>
  <vcpu>{{.CPU}}</vcpu>
  <os>
    <type arch='x86_64'>hvm</type>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/><apic/><pae/>
  </features>
  <cpu mode='host-model'/>
  <devices>
    <emulator>/usr/bin/qemu-system-x86_64</emulator>
{{range .Disks}}
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2'/>
      <source file='{{.Path}}'/>
      <!-- Dev is a valid device node (vda, vdb, ...) assigned by CreateVM;
           the disk name alone would render an invalid target like vdroot. -->
      <target dev='{{.Dev}}' bus='{{.Bus}}'/>
    </disk>
{{end}}
{{if .CloudInitISO}}
    <disk type='file' device='cdrom'>
      <driver name='qemu' type='raw'/>
      <source file='{{.CloudInitISO}}'/>
      <target dev='sda' bus='sata'/>
      <readonly/>
    </disk>
{{end}}
{{range .NICs}}
    <interface type='bridge'>
      <source bridge='{{.Bridge}}'/>
      <model type='{{.Model}}'/>
      <!-- NOTE: libvirt honors <vlan> for Open vSwitch bridges; plain Linux bridges ignore it. -->
      {{if gt .VLAN 0}}<vlan><tag id='{{.VLAN}}'/></vlan>{{end}}
    </interface>
{{end}}
    <console type='pty'/>
    <graphics type='vnc' autoport='yes'/>
  </devices>
</domain>


@@ -0,0 +1,4 @@
# Default LXC config for Jagacloud
lxc.include = /usr/share/lxc/config/common.conf
lxc.arch = linux64
# TODO: idmap, rootfs, network sections will be rendered per CT.


@@ -0,0 +1,3 @@
[NetDev]
Name={{.Name}}
Kind=bridge
{{- if .VlanAware}}

[Bridge]
VLANFiltering=yes
{{- end}}


@@ -0,0 +1,2 @@
# Podman config template for containers inside LXC
# TODO: adjust per container runtime needs.


@@ -0,0 +1,4 @@
# Example storage pool definition
type: {{.Type}} # dir|lvm|zfs
name: {{.Name}}
path: {{.Path}}