diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
index 9040fd0..4abf132 100644
--- a/.github/copilot-instructions.md
+++ b/.github/copilot-instructions.md
@@ -51,3 +51,27 @@ Monitoring:
 Deliverables:
 - Production-grade code: tests for domain/service layers, lintable, clear error handling.
 - Provide code in small cohesive modules, avoid giant files.
+
+Repo-specific quick notes
+- Skeleton is a Go + HTMX app; the server entrypoint is `cmd/appliance`, with a `Makefile` for dev tasks.
+- Key directories: `internal/http` (handlers, middleware), `internal/domain`, `internal/service`, `internal/infra` (stubs/adapters), `internal/job`, `migrations/`.
+- Run locally: `make run` or `go run ./cmd/appliance`.
+- DB: SQLite, with migrations under `migrations/` and helpers in `internal/infra/sqlite/db`.
+- Auth: temporary header-based dev-auth (`X-Auth-User`, `X-Auth-Role`) drives role checks in middleware.
+- HTMX: templates in `internal/templates` use HTMX `hx-get`/`hx-post` patterns.
+- Mock services for dev/test: `internal/service/mock`.
+- Example API calls:
+  - `curl -H "X-Auth-User: viewer" -H "X-Auth-Role: viewer" http://127.0.0.1:8080/api/pools`
+  - `curl -s -X POST -H "X-Auth-User: admin" -H "X-Auth-Role: admin" -H "Content-Type: application/json" -d '{"name":"tank","vdevs":["/dev/sda"]}' http://127.0.0.1:8080/api/pools`
+
+Tests and local dev
+- Use `go test ./...` for unit tests and `internal/service/mock` to emulate hardware adapters.
+- Tests may fall back to in-memory templates when `internal/templates` cannot be read.
+
+Files to extend for new features
+- For ZFS: add an `internal/infra/zfs` adapter implementing `internal/service.ZFSService`.
+- For SMB/NFS: add adapters implementing `NFSService` and `SMBService`.
+- For object storage: add a MinIO adapter in `internal/infra/minio`.
+- For iSCSI: add an adapter under `internal/infra/iscsi` interacting with `targetcli` or kernel LIO.
+
+Adapter scaffolding and further tests can be added iteratively as features land.
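The `X-Auth-User`/`X-Auth-Role` headers described above are read into request context so the RBAC middleware and handlers can check the caller's role. The repo's actual middleware is not part of this patch, so the following is only a minimal sketch of what it could look like; the `ContextKey` type and header names are taken from how the handlers below read context, everything else is an assumption:

```go
package http

import (
	"context"
	"net/http"
)

// ContextKey is assumed to be the string-based key type the handlers below
// read with r.Context().Value(ContextKey("user")).
type ContextKey string

// DevAuth copies the temporary X-Auth-* headers into the request context so
// RBAC middleware and handlers can check the caller's role. Dev-only: no
// verification of any kind is performed.
func DevAuth(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		if user := r.Header.Get("X-Auth-User"); user != "" {
			ctx = context.WithValue(ctx, ContextKey("user"), user)
		}
		if role := r.Header.Get("X-Auth-Role"); role != "" {
			ctx = context.WithValue(ctx, ContextKey("user.role"), role)
		}
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
```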
diff --git a/cmd/appliance/main.go b/cmd/appliance/main.go
index 0ef74ff..f81f538 100644
--- a/cmd/appliance/main.go
+++ b/cmd/appliance/main.go
@@ -10,9 +10,13 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/example/storage-appliance/internal/audit"
 	httpin "github.com/example/storage-appliance/internal/http"
+	"github.com/example/storage-appliance/internal/infra/osexec"
 	"github.com/example/storage-appliance/internal/infra/sqlite/db"
+	"github.com/example/storage-appliance/internal/infra/zfs"
 	"github.com/example/storage-appliance/internal/service/mock"
+	"github.com/example/storage-appliance/internal/service/storage"
 	_ "github.com/glebarez/sqlite"
 	"github.com/go-chi/chi/v5"
 	"github.com/google/uuid"
@@ -49,6 +53,10 @@ func main() {
 	diskSvc := &mock.MockDiskService{}
 	zfsSvc := &mock.MockZFSService{}
 	jobRunner := &mock.MockJobRunner{}
+	auditLogger := audit.NewSQLAuditLogger(sqldb)
+	zfsAdapter := zfs.NewAdapter(osexec.Default)
+	// Wire the storage service: real ZFS adapter, job runner, and audit logger.
+	storageSvc := storage.NewStorageService(zfsAdapter, jobRunner, auditLogger)
 
 	app := &httpin.App{
 		DB:         sqldb,
@@ -56,6 +64,7 @@ func main() {
 		ZFSSvc:     zfsSvc,
 		JobRunner:  jobRunner,
 		HTTPClient: &http.Client{},
+		StorageSvc: storageSvc,
 	}
 	r.Use(uuidMiddleware)
 	httpin.RegisterRoutes(r, app)
diff --git a/internal/audit/audit.go b/internal/audit/audit.go
new file mode 100644
index 0000000..3a92bd1
--- /dev/null
+++ b/internal/audit/audit.go
@@ -0,0 +1,56 @@
+package audit
+
+import (
+	"context"
+	"database/sql"
+	"encoding/json"
+	"log"
+	"time"
+
+	"github.com/google/uuid"
+)
+
+type Event struct {
+	ID           string
+	Timestamp    time.Time
+	UserID       string
+	Action       string
+	ResourceType string
+	ResourceID   string
+	Success      bool
+	Details      map[string]any
+}
+
+type AuditLogger interface {
+	Record(ctx context.Context, e Event) error
+}
+
+type SQLAuditLogger struct {
+	DB *sql.DB
+}
+
+func NewSQLAuditLogger(db *sql.DB) *SQLAuditLogger {
+	return &SQLAuditLogger{DB: db}
+}
+
+func (l *SQLAuditLogger) Record(ctx context.Context, e Event) error {
+	if e.ID == "" {
+		e.ID = uuid.New().String()
+	}
+	if e.Timestamp.IsZero() {
+		e.Timestamp = time.Now()
+	}
+	// Best-effort marshal: a nil Details map serializes to "null".
+	detailsJSON, _ := json.Marshal(e.Details)
+	_, err := l.DB.ExecContext(ctx, `INSERT INTO audit_events (id, ts, user_id, action, resource_type, resource_id, success, details) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, e.ID, e.Timestamp, e.UserID, e.Action, e.ResourceType, e.ResourceID, boolToInt(e.Success), string(detailsJSON))
+	if err != nil {
+		log.Printf("audit record failed: %v", err)
+	}
+	return err
+}
+
+func boolToInt(b bool) int {
+	if b {
+		return 1
+	}
+	return 0
+}
diff --git a/internal/domain/domain.go b/internal/domain/domain.go
index a4d8452..d90b28d 100644
--- a/internal/domain/domain.go
+++ b/internal/domain/domain.go
@@ -17,6 +17,17 @@ type Role struct {
 	Permissions []string
 }
 
+type PoolHealth struct {
+	Pool   string
+	Status string
+	Detail string
+}
+
+type Snapshot struct {
+	Name      string
+	Dataset   string
+	CreatedAt time.Time
+}
+
 type Disk struct {
 	ID   UUID
 	Path string
diff --git a/internal/http/app.go b/internal/http/app.go
index bfe86f1..05e73e8 100644
--- a/internal/http/app.go
+++ b/internal/http/app.go
@@ -1,9 +1,10 @@
 package http
 
 import (
 	"net/http"
 
 	"github.com/example/storage-appliance/internal/service"
+	"github.com/example/storage-appliance/internal/service/storage"
 )
@@ -14,4 +15,5 @@ type App struct {
 	ZFSSvc     service.ZFSService
 	JobRunner  service.JobRunner
 	HTTPClient *http.Client
+	StorageSvc *storage.StorageService
 }
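`SQLAuditLogger` is new in this patch and has no test yet. A sketch of the kind of test it admits, using the `glebarez/sqlite` driver already imported in `main.go` (the `audit_events` DDL is inlined from the migration at the end of this patch so the test does not depend on the migration runner):

```go
package audit_test

import (
	"context"
	"database/sql"
	"testing"

	"github.com/example/storage-appliance/internal/audit"
	_ "github.com/glebarez/sqlite"
)

func TestSQLAuditLoggerRecord(t *testing.T) {
	db, err := sql.Open("sqlite", ":memory:")
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()
	// Inline mirror of migrations/0002_audit_and_snapshots.sql.
	if _, err := db.Exec(`CREATE TABLE audit_events (
		id TEXT PRIMARY KEY, ts DATETIME, user_id TEXT, action TEXT,
		resource_type TEXT, resource_id TEXT, success INTEGER, details TEXT)`); err != nil {
		t.Fatal(err)
	}
	l := audit.NewSQLAuditLogger(db)
	e := audit.Event{UserID: "admin", Action: "pool.create.request", ResourceType: "pool", ResourceID: "tank", Success: true}
	if err := l.Record(context.Background(), e); err != nil {
		t.Fatalf("Record failed: %v", err)
	}
	var n int
	if err := db.QueryRow(`SELECT COUNT(*) FROM audit_events`).Scan(&n); err != nil || n != 1 {
		t.Fatalf("expected 1 audit row, got %d (err=%v)", n, err)
	}
}
```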
"net/http" "path/filepath" + "strings" "github.com/example/storage-appliance/internal/domain" + "github.com/go-chi/chi/v5" ) var templates *template.Template @@ -25,7 +27,7 @@ func (a *App) DashboardHandler(w http.ResponseWriter, r *http.Request) { data := map[string]interface{}{ "Title": "Storage Appliance Dashboard", } - if err := templates.ExecuteTemplate(w, "dashboard.html", data); err != nil { + if err := templates.ExecuteTemplate(w, "base", data); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } } @@ -78,3 +80,64 @@ func StaticHandler(w http.ResponseWriter, r *http.Request) { p := r.URL.Path[len("/static/"):] http.ServeFile(w, r, filepath.Join("static", p)) } + +// StorageHandler renders the main storage page +func (a *App) StorageHandler(w http.ResponseWriter, r *http.Request) { + data := map[string]interface{}{ + "Title": "Storage", + } + if err := templates.ExecuteTemplate(w, "base", data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// HXPoolsHandler renders a pools partial (HTMX) +func (a *App) HXPoolsHandler(w http.ResponseWriter, r *http.Request) { + pools, _ := a.StorageSvc.ListPools(r.Context()) + if err := templates.ExecuteTemplate(w, "hx_pools", pools); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// StorageCreatePoolHandler handles HTMX pool create POST; expects form values `name` and `vdevs` (comma-separated) +func (a *App) StorageCreatePoolHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.FormValue("name") + vdevsRaw := r.FormValue("vdevs") + vdevs := []string{} + if vdevsRaw != "" { + vdevs = append(vdevs, strings.Split(vdevsRaw, ",")...) 
diff --git a/internal/http/handlers_test.go b/internal/http/handlers_test.go
index 7d6430b..cc33e77 100644
--- a/internal/http/handlers_test.go
+++ b/internal/http/handlers_test.go
@@ -4,6 +4,7 @@ import (
 	"database/sql"
 	"net/http"
 	"net/http/httptest"
+	"strings"
 	"testing"
 
 	"github.com/example/storage-appliance/internal/service/mock"
@@ -23,3 +24,22 @@ func TestPoolsHandler(t *testing.T) {
 		t.Fatalf("expected non-empty body")
 	}
 }
+
+func TestCreatePoolHandler(t *testing.T) {
+	m := &mock.MockZFSService{}
+	j := &mock.MockJobRunner{}
+	app := &App{DB: &sql.DB{}, ZFSSvc: m, JobRunner: j}
+	body := `{"name":"tank","vdevs":["/dev/sda"]}`
+	req := httptest.NewRequest(http.MethodPost, "/api/pools", strings.NewReader(body))
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("X-Auth-User", "admin")
+	req.Header.Set("X-Auth-Role", "admin")
+	w := httptest.NewRecorder()
+	app.CreatePoolHandler(w, req)
+	if w.Code != http.StatusOK {
+		t.Fatalf("expected 200, got %d", w.Code)
+	}
+	if !strings.Contains(w.Body.String(), "job_id") {
+		t.Fatalf("expected job_id in response")
+	}
+}
diff --git a/internal/http/router.go b/internal/http/router.go
index 9415d70..1df215f 100644
--- a/internal/http/router.go
+++ b/internal/http/router.go
@@ -20,5 +20,9 @@ func RegisterRoutes(r *chi.Mux, app *App) {
 		r.With(RBAC("storage.pool.create")).Post("/pools", app.CreatePoolHandler) // create a pool -> creates a job
 		r.Get("/jobs", app.JobsHandler)
 	})
+	r.Get("/storage", app.StorageHandler)
+	r.Get("/hx/pools", app.HXPoolsHandler)
+	r.Post("/storage/pool/create", app.StorageCreatePoolHandler)
+	r.Get("/jobs/{id}", app.JobPartialHandler)
 	r.Get("/static/*", StaticHandler)
 }
diff --git a/internal/infra/osexec/fakerunner.go b/internal/infra/osexec/fakerunner.go
new file mode 100644
index 0000000..695ece8
--- /dev/null
+++ b/internal/infra/osexec/fakerunner.go
@@ -0,0 +1,27 @@
+package osexec
+
+import (
+	"context"
+	"time"
+)
+
+// FakeRunner is a test-friendly Runner that returns pre-determined values,
+// supports delays, and can simulate timeouts.
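+// A zero-value FakeRunner reports success: empty output, exit code 0, nil error.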
+type FakeRunner struct {
+	Stdout   string
+	Stderr   string
+	ExitCode int
+	Delay    time.Duration
+	Err      error
+}
+
+func (f *FakeRunner) Run(ctx context.Context, name string, args ...string) (string, string, int, error) {
+	if f.Delay > 0 {
+		select {
+		case <-time.After(f.Delay):
+		case <-ctx.Done():
+			// Pass the context error through.
+			return "", "", -1, ctx.Err()
+		}
+	}
+	return f.Stdout, f.Stderr, f.ExitCode, f.Err
+}
diff --git a/internal/infra/osexec/osexec.go b/internal/infra/osexec/osexec.go
new file mode 100644
index 0000000..f0fc5fc
--- /dev/null
+++ b/internal/infra/osexec/osexec.go
@@ -0,0 +1,142 @@
+package osexec
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os/exec"
+	"regexp"
+	"time"
+)
+
+// Typed errors.
+var (
+	ErrTimeout = errors.New("command timeout")
+)
+
+// ErrExitNonZero is returned when a command exited with a non-zero status.
+type ErrExitNonZero struct {
+	ExitCode int
+	Stdout   string
+	Stderr   string
+}
+
+func (e ErrExitNonZero) Error() string {
+	return fmt.Sprintf("command exit status %d", e.ExitCode)
+}
+
+// Runner defines the behavior for executing a command. Implementations can be
+// injected for tests.
+type Runner interface {
+	Run(ctx context.Context, name string, args ...string) (stdout string, stderr string, exitCode int, err error)
+}
+
+// DefaultMaxOutputBytes limits stdout/stderr returned by Exec.
+const DefaultMaxOutputBytes = 64 * 1024
+
+// DefaultRunner executes real OS commands.
+type DefaultRunner struct {
+	RedactPatterns []*regexp.Regexp
+	MaxOutputBytes int
+}
+
+func NewDefaultRunner(patterns []string) *DefaultRunner {
+	var compiled []*regexp.Regexp
+	for _, p := range patterns {
+		r, err := regexp.Compile(p)
+		if err == nil {
+			compiled = append(compiled, r)
+		}
+	}
+	return &DefaultRunner{RedactPatterns: compiled, MaxOutputBytes: DefaultMaxOutputBytes}
+}
+
+func (r *DefaultRunner) redact(s string) string {
+	if len(r.RedactPatterns) == 0 {
+		return s
+	}
+	out := s
+	for _, re := range r.RedactPatterns {
+		out = re.ReplaceAllString(out, "[REDACTED]")
+	}
+	return out
+}
+
+func (r *DefaultRunner) truncate(s string) (string, bool) {
+	max := r.MaxOutputBytes
+	if max <= 0 || len(s) <= max {
+		return s, false
+	}
+	return s[:max] + "... (truncated)", true
+}
+
+// Run executes a command using os/exec and returns captured output and exit code.
+func (r *DefaultRunner) Run(ctx context.Context, name string, args ...string) (string, string, int, error) {
+	start := time.Now()
+	// use exec.CommandContext to respect context deadlines
+	cmd := exec.CommandContext(ctx, name, args...)
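+	// Output captures stdout only; on failure, stderr is available via
+	// *exec.ExitError.Stderr and is unpacked below.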
+	outBytes, err := cmd.Output()
+	var stderr []byte
+	if err != nil {
+		if ee, ok := err.(*exec.ExitError); ok {
+			stderr = ee.Stderr
+		}
+	}
+	// If the context was cancelled or its deadline passed, report that rather
+	// than the raw exec error.
+	if ctx.Err() != nil {
+		return string(outBytes), string(stderr), -1, ErrTimeout
+	}
+
+	// Redact secrets, then truncate outputs if needed.
+	stdout := r.redact(string(outBytes))
+	sErrStr := r.redact(string(stderr))
+	stdout, _ = r.truncate(stdout)
+	sErrStr, _ = r.truncate(sErrStr)
+
+	// The command failed with a non-zero exit.
+	if err != nil {
+		if exitErr, ok := err.(*exec.ExitError); ok {
+			code := exitErr.ExitCode()
+			return stdout, sErrStr, code, ErrExitNonZero{ExitCode: code, Stdout: stdout, Stderr: sErrStr}
+		}
+		return stdout, sErrStr, -1, err
+	}
+
+	_ = time.Since(start) // duration is measured here so a caller-facing log hook can use it later
+	return stdout, sErrStr, 0, nil
+}
+
+// Default is a DefaultRunner with no redaction patterns configured.
+var Default = NewDefaultRunner(nil)
+
+// Exec is a convenience function using the Default runner.
+func Exec(ctx context.Context, name string, args ...string) (string, string, int, error) {
+	return Default.Run(ctx, name, args...)
+}
+
+// ExecWithRunner uses the provided Runner to execute and normalizes errors.
+func ExecWithRunner(r Runner, ctx context.Context, name string, args ...string) (string, string, int, error) {
+	out, errOut, code, err := r.Run(ctx, name, args...)
+	// Timeout or cancellation.
+	if ctx.Err() != nil {
+		return out, errOut, -1, ErrTimeout
+	}
+	// The runner returned an error.
+	if err != nil {
+		// Already an ErrExitNonZero constructed by the runner: return as is.
+		var exitErr ErrExitNonZero
+		if errors.As(err, &exitErr) {
+			return exitErr.Stdout, exitErr.Stderr, exitErr.ExitCode, exitErr
+		}
+		// A raw *exec.ExitError: wrap it in ErrExitNonZero.
+		var execExit *exec.ExitError
+		if errors.As(err, &execExit) {
+			return out, errOut, execExit.ExitCode(), ErrExitNonZero{ExitCode: execExit.ExitCode(), Stdout: out, Stderr: errOut}
+		}
+		return out, errOut, code, err
+	}
+	// No error, but a non-zero exit code.
+	if code != 0 {
+		return out, errOut, code, ErrExitNonZero{ExitCode: code, Stdout: out, Stderr: errOut}
+	}
+	return out, errOut, code, nil
+}
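The normalized errors above are meant to be matched with `errors.Is`/`errors.As`. A caller-side sketch, assuming some binary on PATH (here `zpool`, which a dev machine may well lack):

```go
package main

import (
	"context"
	"errors"
	"log"
	"time"

	"github.com/example/storage-appliance/internal/infra/osexec"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	stdout, stderr, code, err := osexec.Exec(ctx, "zpool", "list", "-H")
	switch {
	case errors.Is(err, osexec.ErrTimeout):
		log.Print("zpool did not finish within 5s (or the context was cancelled)")
	case err != nil:
		var exitErr osexec.ErrExitNonZero
		if errors.As(err, &exitErr) {
			// Non-zero exit: the error carries the code plus captured output.
			log.Printf("zpool failed (%d): %s", exitErr.ExitCode, exitErr.Stderr)
		} else {
			log.Printf("exec failed: %v (stderr: %s)", err, stderr)
		}
	default:
		log.Printf("zpool ok (code %d): %s", code, stdout)
	}
}
```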
diff --git a/internal/infra/osexec/osexec_test.go b/internal/infra/osexec/osexec_test.go
new file mode 100644
index 0000000..9f7e35d
--- /dev/null
+++ b/internal/infra/osexec/osexec_test.go
@@ -0,0 +1,71 @@
+package osexec
+
+import (
+	"context"
+	"errors"
+	"strings"
+	"testing"
+	"time"
+)
+
+func TestFakeRunnerSuccess(t *testing.T) {
+	fr := &FakeRunner{Stdout: "ok"}
+	ctx := context.Background()
+	out, errOut, code, err := fr.Run(ctx, "echo", "ok")
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+	if out != "ok" || errOut != "" || code != 0 {
+		t.Fatalf("expected ok, got out=%q err=%q code=%d", out, errOut, code)
+	}
+}
+
+func TestExecWithFakeRunnerNonZero(t *testing.T) {
+	fr := &FakeRunner{Stdout: "", Stderr: "fail", ExitCode: 2}
+	ctx := context.Background()
+	out, errOut, code, err := ExecWithRunner(fr, ctx, "false")
+	if err == nil {
+		t.Fatalf("expected ErrExitNonZero, got no error")
+	}
+	if e, ok := err.(ErrExitNonZero); !ok {
+		t.Fatalf("expected ErrExitNonZero, got %T %v", err, err)
+	} else if e.ExitCode != 2 {
+		t.Fatalf("expected exit 2, got %d", e.ExitCode)
+	}
+	if code != 2 || out != "" || errOut != "fail" {
+		t.Fatalf("mismatch output: code=%d out=%q err=%q", code, out, errOut)
+	}
+}
+
+func TestExecWithFakeRunnerTimeout(t *testing.T) {
+	fr := &FakeRunner{Stdout: "", Stderr: "", ExitCode: 0, Delay: 200 * time.Millisecond}
+	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
+	defer cancel()
+	_, _, _, err := ExecWithRunner(fr, ctx, "sleep")
+	if !errors.Is(err, ErrTimeout) && err != context.DeadlineExceeded && err != context.Canceled {
+		t.Fatalf("expected timeout error, got: %v", err)
+	}
+}
+
+func TestRedact(t *testing.T) {
+	r := NewDefaultRunner([]string{"s3_secret_[a-z0-9]+"})
+	// redact is unexported but reachable here because the test shares the package.
+	out := "User: s3_secret_abc123 is set"
+	redacted := r.redact(out)
+	if strings.Contains(redacted, "s3_secret_") {
+		t.Fatalf("expected redaction in %q", redacted)
+	}
+}
+
+func TestTruncate(t *testing.T) {
+	r := NewDefaultRunner(nil)
+	r.MaxOutputBytes = 10
+	long := strings.Repeat("x", 50)
+	truncated, truncatedFlag := r.truncate(long)
+	if !truncatedFlag {
+		t.Fatalf("expected truncated flag true")
+	}
+	if len(truncated) > r.MaxOutputBytes+20 { // includes "... (truncated)"
+		t.Fatalf("unexpected truncated length")
+	}
+}
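The repo notes say the ZFS adapter below should implement `internal/service.ZFSService`, but that interface's method set is not shown in this patch, so whether `*zfs.Adapter` already satisfies it is unverified. A cheap way to pin that down is a compile-time interface assertion; a hypothetical one-liner that could live in zfs.go:

```go
package zfs

import "github.com/example/storage-appliance/internal/service"

// Hypothetical compile-time check: the build breaks if *Adapter ever stops
// satisfying service.ZFSService.
var _ service.ZFSService = (*Adapter)(nil)
```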
diff --git a/internal/infra/zfs/zfs.go b/internal/infra/zfs/zfs.go
new file mode 100644
index 0000000..34a8091
--- /dev/null
+++ b/internal/infra/zfs/zfs.go
@@ -0,0 +1,138 @@
+package zfs
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/example/storage-appliance/internal/domain"
+	"github.com/example/storage-appliance/internal/infra/osexec"
+)
+
+type Adapter struct {
+	Runner osexec.Runner
+}
+
+func NewAdapter(runner osexec.Runner) *Adapter { return &Adapter{Runner: runner} }
+
+func (a *Adapter) ListPools(ctx context.Context) ([]domain.Pool, error) {
+	out, errOut, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zpool", "list", "-H", "-o", "name,health,size")
+	_ = errOut
+	if err != nil {
+		return nil, err
+	}
+	if code != 0 {
+		return nil, fmt.Errorf("zpool list returned %d: %s", code, out)
+	}
+	var pools []domain.Pool
+	lines := strings.Split(strings.TrimSpace(out), "\n")
+	for _, l := range lines {
+		parts := strings.Split(l, "\t")
+		if len(parts) >= 3 {
+			pools = append(pools, domain.Pool{Name: parts[0], Health: parts[1], Capacity: parts[2]})
+		}
+	}
+	return pools, nil
+}
+
+func (a *Adapter) GetPoolStatus(ctx context.Context, pool string) (domain.PoolHealth, error) {
+	out, _, _, err := osexec.ExecWithRunner(a.Runner, ctx, "zpool", "status", pool)
+	if err != nil {
+		return domain.PoolHealth{}, err
+	}
+	// Heuristic: collect state:/health: lines and derive an overall status.
+	status := "UNKNOWN"
+	detail := ""
+	for _, line := range strings.Split(out, "\n") {
+		if strings.Contains(line, "state:") || strings.Contains(line, "health:") {
+			detail = detail + line + "\n"
+			if strings.Contains(line, "ONLINE") {
+				status = "ONLINE"
+			}
+			if strings.Contains(line, "DEGRADED") {
+				status = "DEGRADED"
+			}
+		}
+	}
+	return domain.PoolHealth{Pool: pool, Status: status, Detail: detail}, nil
+}
+
+func (a *Adapter) CreatePoolSync(ctx context.Context, name string, vdevs []string) error {
+	args := append([]string{"create", name}, vdevs...)
+	_, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zpool", args...)
+	if err != nil {
+		return err
+	}
+	if code != 0 {
+		return fmt.Errorf("zpool create failed: %s", stderr)
+	}
+	return nil
+}
+
+func (a *Adapter) ListDatasets(ctx context.Context, pool string) ([]domain.Dataset, error) {
+	out, _, _, err := osexec.ExecWithRunner(a.Runner, ctx, "zfs", "list", "-H", "-o", "name,type", "-r", pool)
+	if err != nil {
+		return nil, err
+	}
+	var res []domain.Dataset
+	for _, l := range strings.Split(strings.TrimSpace(out), "\n") {
+		parts := strings.Split(l, "\t")
+		if len(parts) >= 2 {
+			res = append(res, domain.Dataset{Name: parts[0], Pool: pool, Type: parts[1]})
+		}
+	}
+	return res, nil
+}
+
+func (a *Adapter) CreateDataset(ctx context.Context, name string, props map[string]string) error {
+	args := []string{"create", name}
+	for k, v := range props {
+		args = append(args, "-o", fmt.Sprintf("%s=%s", k, v))
+	}
+	_, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zfs", args...)
+	if err != nil {
+		return err
+	}
+	if code != 0 {
+		return fmt.Errorf("zfs create failed: %s", stderr)
+	}
+	return nil
+}
+
+func (a *Adapter) Snapshot(ctx context.Context, dataset, snapName string) error {
+	name := fmt.Sprintf("%s@%s", dataset, snapName)
+	_, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zfs", "snapshot", name)
+	if err != nil {
+		return err
+	}
+	if code != 0 {
+		return fmt.Errorf("zfs snapshot failed: %s", stderr)
+	}
+	return nil
+}
+
+func (a *Adapter) ScrubStart(ctx context.Context, pool string) error {
+	_, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zpool", "scrub", pool)
+	if err != nil {
+		return err
+	}
+	if code != 0 {
+		return fmt.Errorf("zpool scrub failed: %s", stderr)
+	}
+	return nil
+}
+
+func (a *Adapter) ScrubStatus(ctx context.Context, pool string) (string, error) {
+	out, _, _, err := osexec.ExecWithRunner(a.Runner, ctx, "zpool", "status", pool)
+	if err != nil {
+		return "", err
+	}
+	// Find the scan line.
+	for _, line := range strings.Split(out, "\n") {
+		if strings.Contains(line, "scan: ") {
+			return strings.TrimSpace(line), nil
+		}
+	}
+	return "no-scan-status", nil
+}
diff --git a/internal/infra/zfs/zfs_test.go b/internal/infra/zfs/zfs_test.go
new file mode 100644
index 0000000..e93733f
--- /dev/null
+++ b/internal/infra/zfs/zfs_test.go
@@ -0,0 +1,58 @@
+package zfs
+
+import (
+	"context"
+	"testing"
+
+	"github.com/example/storage-appliance/internal/infra/osexec"
+)
+
+func TestListPoolsParse(t *testing.T) {
+	out := "tank\tONLINE\t1.00T\n"
+	fr := &osexec.FakeRunner{Stdout: out, ExitCode: 0}
+	a := NewAdapter(fr)
+	ctx := context.Background()
+	pools, err := a.ListPools(ctx)
+	if err != nil {
+		t.Fatalf("ListPools failed: %v", err)
+	}
+	if len(pools) != 1 || pools[0].Name != "tank" || pools[0].Health != "ONLINE" {
+		t.Fatalf("unexpected pools result: %+v", pools)
+	}
+}
+
+func TestListDatasetsParse(t *testing.T) {
+	out := "tank\tdataset\ntank/ds\tdataset\n"
+	fr := &osexec.FakeRunner{Stdout: out, ExitCode: 0}
+	a := NewAdapter(fr)
+	pool := "tank"
+	ds, err := a.ListDatasets(context.Background(), pool)
+	if err != nil {
+		t.Fatalf("ListDatasets failed: %v", err)
+	}
+	if len(ds) != 2 {
+		t.Fatalf("expected 2 datasets, got %d", len(ds))
+	}
+}
+
+func TestScrubStatusParse(t *testing.T) {
+	out := "  scan: scrub repaired 0 in 0h0m with 0 errors on Tue Jul  1 12:34:56 2025\n"
+	fr := &osexec.FakeRunner{Stdout: out, ExitCode: 0}
+	a := NewAdapter(fr)
+	s, err := a.ScrubStatus(context.Background(), "tank")
+	if err != nil {
+		t.Fatalf("ScrubStatus failed: %v", err)
+	}
+	if s == "" {
+		t.Fatalf("expected scan line, got empty")
+	}
+}
+
+func TestCreatePoolSync(t *testing.T) {
+	fr := &osexec.FakeRunner{Stdout: "", ExitCode: 0}
+	a := NewAdapter(fr)
+	if err := a.CreatePoolSync(context.Background(), "tank", []string{"/dev/sda"}); err != nil {
+		t.Fatalf("CreatePoolSync failed: %v", err)
+	}
+}
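Nothing in this patch actually executes the queued jobs: the storage service below only enqueues them. A hedged sketch of the missing worker step, under stated assumptions (the `jobs` table columns come from the SELECT in `JobPartialHandler`; the function name, placement in `internal/job`, and the job-to-arguments mapping are all hypothetical):

```go
package job

import (
	"context"
	"database/sql"
	"log"

	"github.com/example/storage-appliance/internal/infra/zfs"
)

// RunCreatePool is a hypothetical worker step: it executes a queued
// create-pool job via the ZFS adapter and records the outcome in the jobs
// table that JobPartialHandler polls (columns per its SELECT statement).
func RunCreatePool(ctx context.Context, db *sql.DB, a *zfs.Adapter, jobID, name string, vdevs []string) {
	set := func(status string, progress int) {
		if _, err := db.ExecContext(ctx,
			`UPDATE jobs SET status = ?, progress = ? WHERE id = ?`,
			status, progress, jobID); err != nil {
			log.Printf("job %s: status update failed: %v", jobID, err)
		}
	}
	set("running", 10)
	if err := a.CreatePoolSync(ctx, name, vdevs); err != nil {
		log.Printf("job %s: create pool failed: %v", jobID, err)
		set("failed", 100)
		return
	}
	set("done", 100)
}
```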
t.Fatalf("expected scan line, got empty") + } +} + +func TestCreatePoolSync(t *testing.T) { + fr := &osexec.FakeRunner{Stdout: "", ExitCode: 0} + a := NewAdapter(fr) + if err := a.CreatePoolSync(context.Background(), "tank", []string{"/dev/sda"}); err != nil { + t.Fatalf("CreatePoolSync failed: %v", err) + } +} diff --git a/internal/service/storage/storage.go b/internal/service/storage/storage.go new file mode 100644 index 0000000..208bb36 --- /dev/null +++ b/internal/service/storage/storage.go @@ -0,0 +1,102 @@ +package storage + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/example/storage-appliance/internal/audit" + "github.com/example/storage-appliance/internal/domain" + "github.com/example/storage-appliance/internal/infra/zfs" + "github.com/example/storage-appliance/internal/service" +) + +var ( + ErrForbidden = errors.New("forbidden") + ErrDuplicatePool = errors.New("duplicate pool name") +) + +type StorageService struct { + ZFS zfs.Adapter + JobRunner service.JobRunner + Audit audit.AuditLogger +} + +func NewStorageService(z *zfs.Adapter, jr service.JobRunner, al audit.AuditLogger) *StorageService { + return &StorageService{ZFS: *z, JobRunner: jr, Audit: al} +} + +// ListPools returns pools via zfs adapter +func (s *StorageService) ListPools(ctx context.Context) ([]domain.Pool, error) { + return s.ZFS.ListPools(ctx) +} + +// CreatePool validates and enqueues a create pool job; user must be admin +func (s *StorageService) CreatePool(ctx context.Context, user string, role string, name string, vdevs []string) (string, error) { + if role != "admin" { + return "", ErrForbidden + } + // Simple validation: new name not in existing pools + pools, err := s.ZFS.ListPools(ctx) + if err != nil { + return "", err + } + for _, p := range pools { + if p.Name == name { + return "", ErrDuplicatePool + } + } + // Create a job to build a pool. 
+
+// Snapshot enqueues a snapshot job for a dataset.
+func (s *StorageService) Snapshot(ctx context.Context, user, role, dataset, snapName string) (string, error) {
+	if role != "admin" && role != "operator" {
+		return "", ErrForbidden
+	}
+	// The zfs snapshot itself runs as a job; here we only enqueue it.
+	j := domain.Job{Type: "snapshot", Status: "queued", Owner: domain.UUID(user)}
+	id, err := s.JobRunner.Enqueue(ctx, j)
+	if s.Audit != nil {
+		s.Audit.Record(ctx, audit.Event{UserID: user, Action: "dataset.snapshot.request", ResourceType: "snapshot", ResourceID: fmt.Sprintf("%s@%s", dataset, snapName), Success: err == nil, Details: map[string]any{"dataset": dataset}})
+	}
+	return id, err
+}
+
+func (s *StorageService) ScrubStart(ctx context.Context, user, role, pool string) (string, error) {
+	if role != "admin" && role != "operator" {
+		return "", ErrForbidden
+	}
+	j := domain.Job{Type: "scrub", Status: "queued", Owner: domain.UUID(user)}
+	id, err := s.JobRunner.Enqueue(ctx, j)
+	if s.Audit != nil {
+		s.Audit.Record(ctx, audit.Event{UserID: user, Action: "pool.scrub.request", ResourceType: "pool", ResourceID: pool, Success: err == nil})
+	}
+	return id, err
+}
+
+// ListDatasets returns datasets for a pool.
+func (s *StorageService) ListDatasets(ctx context.Context, pool string) ([]domain.Dataset, error) {
+	return s.ZFS.ListDatasets(ctx, pool)
+}
+
+// CreateDataset creates a dataset synchronously; in the skeleton this is not a job.
+func (s *StorageService) CreateDataset(ctx context.Context, user, role, name string, props map[string]string) error {
+	if role != "admin" && role != "operator" {
+		return ErrForbidden
+	}
+	return s.ZFS.CreateDataset(ctx, name, props)
+}
+
+// GetPoolStatus delegates to the adapter.
+func (s *StorageService) GetPoolStatus(ctx context.Context, pool string) (domain.PoolHealth, error) {
+	return s.ZFS.GetPoolStatus(ctx, pool)
+}
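Because the role checks live in the service rather than the HTTP layer, they are easy to exercise without a router. A sketch using `FakeRunner` and the repo's existing `mock.MockJobRunner` (assuming its zero value Enqueues successfully, which this patch does not show):

```go
package storage_test

import (
	"context"
	"errors"
	"testing"

	"github.com/example/storage-appliance/internal/infra/osexec"
	"github.com/example/storage-appliance/internal/infra/zfs"
	"github.com/example/storage-appliance/internal/service/mock"
	"github.com/example/storage-appliance/internal/service/storage"
)

func TestCreatePoolRequiresAdmin(t *testing.T) {
	// The role check fires before any zpool call, so an empty FakeRunner
	// suffices and a nil audit logger is tolerated by the service.
	svc := storage.NewStorageService(zfs.NewAdapter(&osexec.FakeRunner{}), &mock.MockJobRunner{}, nil)
	_, err := svc.CreatePool(context.Background(), "viewer", "viewer", "tank", []string{"/dev/sda"})
	if !errors.Is(err, storage.ErrForbidden) {
		t.Fatalf("expected ErrForbidden, got %v", err)
	}
}
```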
diff --git a/internal/templates/hx_pools.html b/internal/templates/hx_pools.html
new file mode 100644
index 0000000..7ada337
--- /dev/null
+++ b/internal/templates/hx_pools.html
@@ -0,0 +1,16 @@
+{{define "hx_pools"}}
+<table>
+  <thead>
+    <tr>
+      <th>Name</th><th>Health</th><th>Capacity</th>
+    </tr>
+  </thead>
+  <tbody>
+    {{range .}}
+    <tr><td>{{.Name}}</td><td>{{.Health}}</td><td>{{.Capacity}}</td></tr>
+    {{else}}
+    <tr><td colspan="3">No pools</td></tr>
+    {{end}}
+  </tbody>
+</table>
+{{end}}
diff --git a/internal/templates/job_row.html b/internal/templates/job_row.html
new file mode 100644
index 0000000..b8c4598
--- /dev/null
+++ b/internal/templates/job_row.html
@@ -0,0 +1,6 @@
+{{define "job_row"}}
+<div class="job-row" id="job-{{.JobID}}">
+  <div>Job: {{.JobID}} {{if .Name}}— {{.Name}}{{end}}</div>
+  <div>Status: {{.Status}}{{if .Progress}} — {{.Progress}}%{{end}}</div>
+</div>
+{{end}}
diff --git a/internal/templates/storage.html b/internal/templates/storage.html
new file mode 100644
index 0000000..8fd676e
--- /dev/null
+++ b/internal/templates/storage.html
@@ -0,0 +1,21 @@
+{{define "content"}}
+<div class="storage-page">
+  <section>
+    <h2>Storage</h2>
+    <div id="pools" hx-get="/hx/pools" hx-trigger="load">
+      {{template "hx_pools" .}}
+    </div>
+  </section>
+  <section>
+    <h2>Create Pool</h2>
+    <form hx-post="/storage/pool/create" hx-target="#jobs" hx-swap="beforeend">
+      <input type="text" name="name" placeholder="pool name">
+      <input type="text" name="vdevs" placeholder="vdevs (comma-separated)">
+      <button type="submit">Create</button>
+    </form>
+    <div id="jobs"></div>
+  </section>
+</div>
+{{end}}
diff --git a/migrations/0002_audit_and_snapshots.sql b/migrations/0002_audit_and_snapshots.sql
new file mode 100644
index 0000000..20eae49
--- /dev/null
+++ b/migrations/0002_audit_and_snapshots.sql
@@ -0,0 +1,25 @@
+-- 0002_audit_and_snapshots.sql
+CREATE TABLE IF NOT EXISTS audit_events (
+    id TEXT PRIMARY KEY,
+    ts DATETIME DEFAULT CURRENT_TIMESTAMP,
+    user_id TEXT,
+    action TEXT,
+    resource_type TEXT,
+    resource_id TEXT,
+    success INTEGER DEFAULT 1,
+    details TEXT
+);
+
+CREATE TABLE IF NOT EXISTS datasets (
+    name TEXT PRIMARY KEY,
+    pool TEXT,
+    type TEXT,
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE TABLE IF NOT EXISTS snapshots (
+    id TEXT PRIMARY KEY,
+    dataset TEXT,
+    name TEXT,
+    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
+);
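The `datasets` and `snapshots` tables are created here, but nothing in this patch writes to them yet. A hedged sketch of the glue a snapshot job executor might add once jobs are dequeued (the function name and its placement in `internal/job` are hypothetical; `created_at` is filled by the column default):

```go
package job

import (
	"context"
	"database/sql"

	"github.com/google/uuid"
)

// RecordSnapshot is hypothetical glue between the zfs adapter and the
// snapshots table above: call it after Adapter.Snapshot succeeds.
func RecordSnapshot(ctx context.Context, db *sql.DB, dataset, snapName string) error {
	_, err := db.ExecContext(ctx,
		`INSERT INTO snapshots (id, dataset, name) VALUES (?, ?, ?)`,
		uuid.New().String(), dataset, snapName)
	return err
}
```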