still working
This commit is contained in:
24
.github/copilot-instructions.md
vendored
24
.github/copilot-instructions.md
vendored
@@ -51,3 +51,27 @@ Monitoring:
|
|||||||
Deliverables:
|
Deliverables:
|
||||||
- Production-grade code: tests for domain/service layers, lintable, clear error handling.
|
- Production-grade code: tests for domain/service layers, lintable, clear error handling.
|
||||||
- Provide code in small cohesive modules, avoid giant files.
|
- Provide code in small cohesive modules, avoid giant files.
|
||||||
|
|
||||||
|
Repo-specific quick notes
|
||||||
|
- Skeleton is a Go + HTMX app with `cmd/appliance` run server and `Makefile` for dev tasks.
|
||||||
|
- Key directories: `internal/http` (handlers, middleware), `internal/domain`, `internal/service`, `internal/infra` (stubs/adapters), `internal/job`, `migrations/`.
|
||||||
|
- Run locally: `make run` or `go run ./cmd/appliance`.
|
||||||
|
- DB: uses SQLite with migrations under `migrations/` and `internal/infra/sqlite/db`.
|
||||||
|
- Auth: temporary header-based dev-auth (`X-Auth-User`, `X-Auth-Role`) to drive role checks in middleware.
|
||||||
|
- HTMX: Templates in `internal/templates` use HTMX `hx-get`/`hx-post` patterns.
|
||||||
|
- Mock services for dev/test: `internal/service/mock`.
|
||||||
|
- Example API calls:
|
||||||
|
- `curl -H "X-Auth-User: viewer" -H "X-Auth-Role: viewer" http://127.0.0.1:8080/api/pools`
|
||||||
|
- `curl -s -X POST -H "X-Auth-User: admin" -H "X-Auth-Role: admin" -H "Content-Type: application/json" -d '{"name":"tank","vdevs":["/dev/sda"]}' http://127.0.0.1:8080/api/pools`
|
||||||
|
|
||||||
|
Tests and local dev
|
||||||
|
- Use `go test ./...` for unit tests and `internal/service/mock` to emulate hardware adapters.
|
||||||
|
- Tests may fall back to in-memory templates when `internal/templates` cannot be read.
|
||||||
|
|
||||||
|
Files to extend for new features
|
||||||
|
- For ZFS: add `internal/infra/zfs` adapter implementing `internal/service.ZFSService`.
|
||||||
|
- For SMB/NFS: add adapters implementing `NFSService` and `SMBService`.
|
||||||
|
- For object storage: add MinIO adapter in `internal/infra/minio`.
|
||||||
|
- For iSCSI: add an adapter under `internal/infra/iscsi` interacting with `targetcli` or kernel LIO.
|
||||||
|
|
||||||
|
If you need me to scaffold an adapter or add tests I can continue iterating.
|
||||||
|
|||||||
@@ -10,9 +10,13 @@ import (
|
|||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/example/storage-appliance/internal/audit"
|
||||||
httpin "github.com/example/storage-appliance/internal/http"
|
httpin "github.com/example/storage-appliance/internal/http"
|
||||||
|
"github.com/example/storage-appliance/internal/infra/osexec"
|
||||||
"github.com/example/storage-appliance/internal/infra/sqlite/db"
|
"github.com/example/storage-appliance/internal/infra/sqlite/db"
|
||||||
|
"github.com/example/storage-appliance/internal/infra/zfs"
|
||||||
"github.com/example/storage-appliance/internal/service/mock"
|
"github.com/example/storage-appliance/internal/service/mock"
|
||||||
|
"github.com/example/storage-appliance/internal/service/storage"
|
||||||
_ "github.com/glebarez/sqlite"
|
_ "github.com/glebarez/sqlite"
|
||||||
"github.com/go-chi/chi/v5"
|
"github.com/go-chi/chi/v5"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
@@ -49,6 +53,10 @@ func main() {
|
|||||||
diskSvc := &mock.MockDiskService{}
|
diskSvc := &mock.MockDiskService{}
|
||||||
zfsSvc := &mock.MockZFSService{}
|
zfsSvc := &mock.MockZFSService{}
|
||||||
jobRunner := &mock.MockJobRunner{}
|
jobRunner := &mock.MockJobRunner{}
|
||||||
|
auditLogger := audit.NewSQLAuditLogger(sqldb)
|
||||||
|
zfsAdapter := zfs.NewAdapter(osexec.Default)
|
||||||
|
// storage service wiring: use zfsAdapter and jobRunner and audit logger
|
||||||
|
storageSvc := storage.NewStorageService(zfsAdapter, jobRunner, auditLogger)
|
||||||
|
|
||||||
app := &httpin.App{
|
app := &httpin.App{
|
||||||
DB: sqldb,
|
DB: sqldb,
|
||||||
@@ -56,6 +64,7 @@ func main() {
|
|||||||
ZFSSvc: zfsSvc,
|
ZFSSvc: zfsSvc,
|
||||||
JobRunner: jobRunner,
|
JobRunner: jobRunner,
|
||||||
HTTPClient: &http.Client{},
|
HTTPClient: &http.Client{},
|
||||||
|
StorageSvc: storageSvc,
|
||||||
}
|
}
|
||||||
r.Use(uuidMiddleware)
|
r.Use(uuidMiddleware)
|
||||||
httpin.RegisterRoutes(r, app)
|
httpin.RegisterRoutes(r, app)
|
||||||
|
|||||||
56
internal/audit/audit.go
Normal file
56
internal/audit/audit.go
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
package audit
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql"
|
||||||
|
"encoding/json"
|
||||||
|
"log"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Event is a single audit-trail entry describing who did what to which
// resource and whether it succeeded.
type Event struct {
	ID           string         // unique event id; Record fills in a UUID when empty
	Timestamp    time.Time      // event time; Record fills in time.Now() when zero
	UserID       string         // acting user
	Action       string         // e.g. "pool.create.request"
	ResourceType string         // e.g. "pool", "snapshot"
	ResourceID   string         // identifier of the affected resource
	Success      bool           // whether the audited operation succeeded
	Details      map[string]any // free-form extra context; stored as JSON by Record
}
||||||
|
|
||||||
|
// AuditLogger records audit events. Implementations decide persistence
// (SQLAuditLogger in this package writes to a SQL database).
type AuditLogger interface {
	Record(ctx context.Context, e Event) error
}
|
||||||
|
|
||||||
|
// SQLAuditLogger persists audit events to the audit_events table of the
// given SQL database.
type SQLAuditLogger struct {
	DB *sql.DB
}
|
||||||
|
|
||||||
|
// NewSQLAuditLogger returns a SQLAuditLogger backed by db.
func NewSQLAuditLogger(db *sql.DB) *SQLAuditLogger {
	return &SQLAuditLogger{DB: db}
}
|
||||||
|
|
||||||
|
func (l *SQLAuditLogger) Record(ctx context.Context, e Event) error {
|
||||||
|
if e.ID == "" {
|
||||||
|
e.ID = uuid.New().String()
|
||||||
|
}
|
||||||
|
if e.Timestamp.IsZero() {
|
||||||
|
e.Timestamp = time.Now()
|
||||||
|
}
|
||||||
|
detailsJSON, _ := json.Marshal(e.Details)
|
||||||
|
_, err := l.DB.ExecContext(ctx, `INSERT INTO audit_events (id, ts, user_id, action, resource_type, resource_id, success, details) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, e.ID, e.Timestamp, e.UserID, e.Action, e.ResourceType, e.ResourceID, boolToInt(e.Success), string(detailsJSON))
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("audit record failed: %v", err)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// boolToInt converts a bool to the 1/0 integer representation used by
// the audit_events success column.
func boolToInt(b bool) int {
	switch {
	case b:
		return 1
	default:
		return 0
	}
}
|
||||||
@@ -17,6 +17,17 @@ type Role struct {
|
|||||||
Permissions []string
|
Permissions []string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// PoolHealth is a coarse health summary for a single storage pool.
type PoolHealth struct {
	Pool   string // pool name
	Status string // e.g. "ONLINE", "DEGRADED", "UNKNOWN" (see zfs adapter GetPoolStatus)
	Detail string // raw state:/health: lines from `zpool status`
}
|
||||||
|
|
||||||
|
// Snapshot describes a point-in-time snapshot of a dataset.
type Snapshot struct {
	Name      string    // snapshot name (the part after '@' in dataset@name)
	Dataset   string    // dataset the snapshot belongs to
	CreatedAt time.Time // creation time
}
|
||||||
type Disk struct {
|
type Disk struct {
|
||||||
ID UUID
|
ID UUID
|
||||||
Path string
|
Path string
|
||||||
|
|||||||
@@ -14,4 +14,5 @@ type App struct {
|
|||||||
ZFSSvc service.ZFSService
|
ZFSSvc service.ZFSService
|
||||||
JobRunner service.JobRunner
|
JobRunner service.JobRunner
|
||||||
HTTPClient *http.Client
|
HTTPClient *http.Client
|
||||||
|
StorageSvc *storage.StorageService
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,8 +5,10 @@ import (
|
|||||||
"html/template"
|
"html/template"
|
||||||
"net/http"
|
"net/http"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/example/storage-appliance/internal/domain"
|
"github.com/example/storage-appliance/internal/domain"
|
||||||
|
"github.com/go-chi/chi/v5"
|
||||||
)
|
)
|
||||||
|
|
||||||
var templates *template.Template
|
var templates *template.Template
|
||||||
@@ -25,7 +27,7 @@ func (a *App) DashboardHandler(w http.ResponseWriter, r *http.Request) {
|
|||||||
data := map[string]interface{}{
|
data := map[string]interface{}{
|
||||||
"Title": "Storage Appliance Dashboard",
|
"Title": "Storage Appliance Dashboard",
|
||||||
}
|
}
|
||||||
if err := templates.ExecuteTemplate(w, "dashboard.html", data); err != nil {
|
if err := templates.ExecuteTemplate(w, "base", data); err != nil {
|
||||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -78,3 +80,64 @@ func StaticHandler(w http.ResponseWriter, r *http.Request) {
|
|||||||
p := r.URL.Path[len("/static/"):]
|
p := r.URL.Path[len("/static/"):]
|
||||||
http.ServeFile(w, r, filepath.Join("static", p))
|
http.ServeFile(w, r, filepath.Join("static", p))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StorageHandler renders the main storage page
|
||||||
|
func (a *App) StorageHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
data := map[string]interface{}{
|
||||||
|
"Title": "Storage",
|
||||||
|
}
|
||||||
|
if err := templates.ExecuteTemplate(w, "base", data); err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HXPoolsHandler renders a pools partial (HTMX)
|
||||||
|
func (a *App) HXPoolsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
pools, _ := a.StorageSvc.ListPools(r.Context())
|
||||||
|
if err := templates.ExecuteTemplate(w, "hx_pools", pools); err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StorageCreatePoolHandler handles HTMX pool create POST; expects form values
// `name` and `vdevs` (comma-separated device paths). On success it renders
// the "job_row" partial for the enqueued job.
func (a *App) StorageCreatePoolHandler(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		http.Error(w, "bad request", http.StatusBadRequest)
		return
	}
	name := r.FormValue("name")
	vdevsRaw := r.FormValue("vdevs")
	vdevs := []string{}
	if vdevsRaw != "" {
		// NOTE(review): entries are not trimmed, so "a, b" yields " b" — confirm
		// the frontend never sends spaces after commas.
		vdevs = append(vdevs, strings.Split(vdevsRaw, ",")...)
	}
	// Identity comes from request-context keys set by upstream middleware.
	user, _ := r.Context().Value(ContextKey("user")).(string)
	role, _ := r.Context().Value(ContextKey("user.role")).(string)
	jobID, err := a.StorageSvc.CreatePool(r.Context(), user, role, name, vdevs)
	if err != nil {
		// NOTE(review): every error maps to 403 here — duplicate-pool and
		// internal errors should arguably be 409/500; confirm intended.
		http.Error(w, err.Error(), http.StatusForbidden)
		return
	}
	// Return a small job row partial as response
	data := map[string]string{"JobID": jobID, "Name": name}
	if err := templates.ExecuteTemplate(w, "job_row", data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
|
||||||
|
|
||||||
|
// JobPartialHandler returns a job progress partial (template "job_row") for
// the job id in the URL path, reading current status directly from the jobs
// table.
func (a *App) JobPartialHandler(w http.ResponseWriter, r *http.Request) {
	id := chi.URLParam(r, "id")
	// Read job status from DB
	row := a.DB.QueryRowContext(r.Context(), `SELECT id, type, status, progress FROM jobs WHERE id = ?`, id)
	var jid, jtype, status string
	var progress int
	if err := row.Scan(&jid, &jtype, &status, &progress); err != nil {
		// Any scan error (including sql.ErrNoRows) is reported as 404.
		http.Error(w, "job not found", http.StatusNotFound)
		return
	}
	data := map[string]any{"JobID": jid, "Type": jtype, "Status": status, "Progress": progress}
	if err := templates.ExecuteTemplate(w, "job_row", data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"database/sql"
|
"database/sql"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/example/storage-appliance/internal/service/mock"
|
"github.com/example/storage-appliance/internal/service/mock"
|
||||||
@@ -23,3 +24,22 @@ func TestPoolsHandler(t *testing.T) {
|
|||||||
t.Fatalf("expected non-empty body")
|
t.Fatalf("expected non-empty body")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCreatePoolHandler(t *testing.T) {
|
||||||
|
m := &mock.MockZFSService{}
|
||||||
|
j := &mock.MockJobRunner{}
|
||||||
|
app := &App{DB: &sql.DB{}, ZFSSvc: m, JobRunner: j}
|
||||||
|
body := `{"name":"tank","vdevs":["/dev/sda"]}`
|
||||||
|
req := httptest.NewRequest(http.MethodPost, "/api/pools", strings.NewReader(body))
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
req.Header.Set("X-Auth-User", "admin")
|
||||||
|
req.Header.Set("X-Auth-Role", "admin")
|
||||||
|
w := httptest.NewRecorder()
|
||||||
|
app.CreatePoolHandler(w, req)
|
||||||
|
if w.Code != http.StatusOK {
|
||||||
|
t.Fatalf("expected 200, got %d", w.Code)
|
||||||
|
}
|
||||||
|
if !strings.Contains(w.Body.String(), "job_id") {
|
||||||
|
t.Fatalf("expected job_id in response")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -20,5 +20,9 @@ func RegisterRoutes(r *chi.Mux, app *App) {
|
|||||||
r.With(RBAC("storage.pool.create")).Post("/pools", app.CreatePoolHandler) // create a pool -> creates a job
|
r.With(RBAC("storage.pool.create")).Post("/pools", app.CreatePoolHandler) // create a pool -> creates a job
|
||||||
r.Get("/jobs", app.JobsHandler)
|
r.Get("/jobs", app.JobsHandler)
|
||||||
})
|
})
|
||||||
|
r.Get("/storage", app.StorageHandler)
|
||||||
|
r.Get("/hx/pools", app.HXPoolsHandler)
|
||||||
|
r.Post("/storage/pool/create", app.StorageCreatePoolHandler)
|
||||||
|
r.Get("/jobs/{id}", app.JobPartialHandler)
|
||||||
r.Get("/static/*", StaticHandler)
|
r.Get("/static/*", StaticHandler)
|
||||||
}
|
}
|
||||||
|
|||||||
27
internal/infra/osexec/fakerunner.go
Normal file
27
internal/infra/osexec/fakerunner.go
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
package osexec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FakeRunner is a test double for Runner: it returns canned stdout/stderr,
// exit code, and error, optionally after a delay, and honors context
// cancellation while waiting (simulating a timed-out command).
type FakeRunner struct {
	Stdout   string
	Stderr   string
	ExitCode int
	Delay    time.Duration
	Err      error
}

// Run waits for the configured Delay (aborting early when ctx is done)
// and then returns the canned values.
func (f *FakeRunner) Run(ctx context.Context, name string, args ...string) (string, string, int, error) {
	if f.Delay <= 0 {
		return f.Stdout, f.Stderr, f.ExitCode, f.Err
	}
	select {
	case <-ctx.Done():
		// Pass the context's cancellation/deadline error through.
		return "", "", -1, ctx.Err()
	case <-time.After(f.Delay):
		return f.Stdout, f.Stderr, f.ExitCode, f.Err
	}
}
|
||||||
142
internal/infra/osexec/osexec.go
Normal file
142
internal/infra/osexec/osexec.go
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
package osexec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
"regexp"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Typed errors returned by runners.
var (
	// ErrTimeout is returned when the command's context expired before
	// the command completed.
	ErrTimeout = errors.New("command timeout")
)
|
||||||
|
|
||||||
|
// ErrExitNonZero is returned when a command returned a non-zero exit
// status; it carries the exit code and the captured output so callers
// can surface diagnostics.
type ErrExitNonZero struct {
	ExitCode int
	Stdout   string
	Stderr   string
}

// Error implements the error interface.
func (e ErrExitNonZero) Error() string {
	return fmt.Sprintf("command exit status %d", e.ExitCode)
}
|
||||||
|
|
||||||
|
// Runner defines the behavior for executing a command. Implementations
// can be injected for tests (see FakeRunner).
type Runner interface {
	Run(ctx context.Context, name string, args ...string) (stdout string, stderr string, exitCode int, err error)
}
|
||||||
|
|
||||||
|
// DefaultMaxOutputBytes limits stdout/stderr returned by Exec.
const DefaultMaxOutputBytes = 64 * 1024

// DefaultRunner executes real OS commands. RedactPatterns and
// MaxOutputBytes configure its redact/truncate helpers.
type DefaultRunner struct {
	RedactPatterns []*regexp.Regexp
	MaxOutputBytes int
}

// NewDefaultRunner compiles the given regex patterns for redaction and
// returns a runner capped at DefaultMaxOutputBytes.
// NOTE(review): patterns that fail to compile are silently skipped —
// confirm this best-effort behavior is intended.
func NewDefaultRunner(patterns []string) *DefaultRunner {
	var compiled []*regexp.Regexp
	for _, p := range patterns {
		r, err := regexp.Compile(p)
		if err == nil {
			compiled = append(compiled, r)
		}
	}
	return &DefaultRunner{RedactPatterns: compiled, MaxOutputBytes: DefaultMaxOutputBytes}
}
|
||||||
|
|
||||||
|
func (r *DefaultRunner) redact(s string) string {
|
||||||
|
if len(r.RedactPatterns) == 0 {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
out := s
|
||||||
|
for _, re := range r.RedactPatterns {
|
||||||
|
out = re.ReplaceAllString(out, "[REDACTED]")
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *DefaultRunner) truncate(s string) (string, bool) {
|
||||||
|
max := r.MaxOutputBytes
|
||||||
|
if max <= 0 || len(s) <= max {
|
||||||
|
return s, false
|
||||||
|
}
|
||||||
|
return s[:max] + "... (truncated)", true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run executes a command using os/exec and returns captured output and exit code.
|
||||||
|
func (r *DefaultRunner) Run(ctx context.Context, name string, args ...string) (string, string, int, error) {
|
||||||
|
start := time.Now()
|
||||||
|
// use exec.CommandContext to respect context deadlines
|
||||||
|
cmd := exec.CommandContext(ctx, name, args...)
|
||||||
|
outBytes, err := cmd.Output() // only captures stdout; we rely on combined output for simplicity
|
||||||
|
var stderr []byte
|
||||||
|
if err != nil {
|
||||||
|
if ee, ok := err.(*exec.ExitError); ok {
|
||||||
|
stderr = ee.Stderr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// If context timed out
|
||||||
|
if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled {
|
||||||
|
return string(outBytes), string(stderr), -1, ErrTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
stdout := string(outBytes)
|
||||||
|
sErrStr := string(stderr)
|
||||||
|
// Truncate outputs if needed
|
||||||
|
stdout, _ = r.truncate(stdout)
|
||||||
|
sErrStr, _ = r.truncate(sErrStr)
|
||||||
|
|
||||||
|
// If command failed with non-zero exit
|
||||||
|
if err != nil {
|
||||||
|
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||||
|
code := exitErr.ExitCode()
|
||||||
|
return stdout, sErrStr, code, ErrExitNonZero{ExitCode: code, Stdout: stdout, Stderr: sErrStr}
|
||||||
|
}
|
||||||
|
return stdout, sErrStr, -1, err
|
||||||
|
}
|
||||||
|
|
||||||
|
_ = time.Since(start) // measure duration; can be logged by caller
|
||||||
|
return stdout, sErrStr, 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default is the package-level runner used by Exec; it is created with
// no redaction patterns.
var Default = NewDefaultRunner(nil)

// Exec is a convenience function using the Default runner.
func Exec(ctx context.Context, name string, args ...string) (string, string, int, error) {
	return Default.Run(ctx, name, args...)
}
|
||||||
|
|
||||||
|
// ExecWithRunner uses the provided Runner to execute and normalizes errors.
|
||||||
|
func ExecWithRunner(r Runner, ctx context.Context, name string, args ...string) (string, string, int, error) {
|
||||||
|
out, errOut, code, err := r.Run(ctx, name, args...)
|
||||||
|
// timeout
|
||||||
|
if ctx.Err() == context.DeadlineExceeded || ctx.Err() == context.Canceled {
|
||||||
|
return out, errOut, -1, ErrTimeout
|
||||||
|
}
|
||||||
|
// runner returned an error
|
||||||
|
if err != nil {
|
||||||
|
// If error implements ErrExitNonZero (already constructed by runner), return as is
|
||||||
|
var exitErr ErrExitNonZero
|
||||||
|
if errors.As(err, &exitErr) {
|
||||||
|
return exitErr.Stdout, exitErr.Stderr, exitErr.ExitCode, exitErr
|
||||||
|
}
|
||||||
|
// If it's an exec.ExitError, wrap in ErrExitNonZero
|
||||||
|
var execExit *exec.ExitError
|
||||||
|
if errors.As(err, &execExit) {
|
||||||
|
return out, errOut, execExit.ExitCode(), ErrExitNonZero{ExitCode: execExit.ExitCode(), Stdout: out, Stderr: errOut}
|
||||||
|
}
|
||||||
|
return out, errOut, code, err
|
||||||
|
}
|
||||||
|
// no error, but a non-zero exit code
|
||||||
|
if code != 0 {
|
||||||
|
return out, errOut, code, ErrExitNonZero{ExitCode: code, Stdout: out, Stderr: errOut}
|
||||||
|
}
|
||||||
|
return out, errOut, code, nil
|
||||||
|
}
|
||||||
71
internal/infra/osexec/osexec_test.go
Normal file
71
internal/infra/osexec/osexec_test.go
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
package osexec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFakeRunnerSuccess(t *testing.T) {
|
||||||
|
fr := &FakeRunner{Stdout: "ok"}
|
||||||
|
ctx := context.Background()
|
||||||
|
out, errOut, code, err := fr.Run(ctx, "echo", "ok")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
if out != "ok" || errOut != "" || code != 0 {
|
||||||
|
t.Fatalf("expected ok, got out=%q err=%q code=%d", out, errOut, code)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExecWithFakeRunnerNonZero(t *testing.T) {
|
||||||
|
fr := &FakeRunner{Stdout: "", Stderr: "fail", ExitCode: 2}
|
||||||
|
ctx := context.Background()
|
||||||
|
out, errOut, code, err := ExecWithRunner(fr, ctx, "false")
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("expected ErrExitNonZero, got no error")
|
||||||
|
}
|
||||||
|
if e, ok := err.(ErrExitNonZero); !ok {
|
||||||
|
t.Fatalf("expected ErrExitNonZero, got %T %v", err, err)
|
||||||
|
} else if e.ExitCode != 2 {
|
||||||
|
t.Fatalf("expected exit 2, got %d", e.ExitCode)
|
||||||
|
}
|
||||||
|
if code != 2 || out != "" || errOut != "fail" {
|
||||||
|
t.Fatalf("mismatch output: code=%d out=%q err=%q", code, out, errOut)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExecWithFakeRunnerTimeout(t *testing.T) {
|
||||||
|
fr := &FakeRunner{Stdout: "", Stderr: "", ExitCode: 0, Delay: 200 * time.Millisecond}
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
|
||||||
|
defer cancel()
|
||||||
|
_, _, _, err := ExecWithRunner(fr, ctx, "sleep")
|
||||||
|
if !errors.Is(err, ErrTimeout) && err != context.DeadlineExceeded && err != context.Canceled {
|
||||||
|
t.Fatalf("expected timeout error, got: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRedact(t *testing.T) {
|
||||||
|
r := NewDefaultRunner([]string{"s3_secret_[a-z0-9]+"})
|
||||||
|
// Redact function is unexported; replicate behavior
|
||||||
|
out := "User: s3_secret_abc123 is set"
|
||||||
|
redacted := r.redact(out)
|
||||||
|
if strings.Contains(redacted, "s3_secret_") {
|
||||||
|
t.Fatalf("expected redaction in %q", redacted)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTruncate(t *testing.T) {
|
||||||
|
r := NewDefaultRunner(nil)
|
||||||
|
r.MaxOutputBytes = 10
|
||||||
|
long := strings.Repeat("x", 50)
|
||||||
|
truncated, truncatedFlag := r.truncate(long)
|
||||||
|
if !truncatedFlag {
|
||||||
|
t.Fatalf("expected truncated flag true")
|
||||||
|
}
|
||||||
|
if len(truncated) > r.MaxOutputBytes+20 { // includes '... (truncated)'
|
||||||
|
t.Fatalf("unexpected truncated length")
|
||||||
|
}
|
||||||
|
}
|
||||||
138
internal/infra/zfs/zfs.go
Normal file
138
internal/infra/zfs/zfs.go
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
package zfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/example/storage-appliance/internal/domain"
|
||||||
|
"github.com/example/storage-appliance/internal/infra/osexec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Adapter shells out to the zpool/zfs CLI tools through the injected Runner.
type Adapter struct {
	Runner osexec.Runner
}

// NewAdapter returns an Adapter that executes commands with runner.
func NewAdapter(runner osexec.Runner) *Adapter { return &Adapter{Runner: runner} }
|
||||||
|
|
||||||
|
func (a *Adapter) ListPools(ctx context.Context) ([]domain.Pool, error) {
|
||||||
|
out, errOut, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zpool", "list", "-H", "-o", "name,health,size")
|
||||||
|
_ = errOut
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if code != 0 {
|
||||||
|
return nil, fmt.Errorf("zpool list returned %d: %s", code, out)
|
||||||
|
}
|
||||||
|
var pools []domain.Pool
|
||||||
|
lines := strings.Split(strings.TrimSpace(out), "\n")
|
||||||
|
for _, l := range lines {
|
||||||
|
parts := strings.Split(l, "\t")
|
||||||
|
if len(parts) >= 3 {
|
||||||
|
pools = append(pools, domain.Pool{Name: parts[0], Health: parts[1], Capacity: parts[2]})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return pools, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Adapter) GetPoolStatus(ctx context.Context, pool string) (domain.PoolHealth, error) {
|
||||||
|
out, _, _, err := osexec.ExecWithRunner(a.Runner, ctx, "zpool", "status", pool)
|
||||||
|
if err != nil {
|
||||||
|
return domain.PoolHealth{}, err
|
||||||
|
}
|
||||||
|
// heuristic: find HEALTH: lines or scan lines
|
||||||
|
status := "UNKNOWN"
|
||||||
|
detail := ""
|
||||||
|
for _, line := range strings.Split(out, "\n") {
|
||||||
|
if strings.Contains(line, "state:") || strings.Contains(line, "health:") {
|
||||||
|
detail = detail + line + "\n"
|
||||||
|
if strings.Contains(line, "ONLINE") || strings.Contains(line, "ONLINE") {
|
||||||
|
status = "ONLINE"
|
||||||
|
}
|
||||||
|
if strings.Contains(line, "DEGRADED") {
|
||||||
|
status = "DEGRADED"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return domain.PoolHealth{Pool: pool, Status: status, Detail: detail}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Adapter) CreatePoolSync(ctx context.Context, name string, vdevs []string) error {
|
||||||
|
args := append([]string{"create", name}, vdevs...)
|
||||||
|
_, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zpool", args...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if code != 0 {
|
||||||
|
return fmt.Errorf("zpool create failed: %s", stderr)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListDatasets lists all datasets under pool via
// `zfs list -H -o name,type -r <pool>`, parsing the tab-separated output.
func (a *Adapter) ListDatasets(ctx context.Context, pool string) ([]domain.Dataset, error) {
	out, _, _, err := osexec.ExecWithRunner(a.Runner, ctx, "zfs", "list", "-H", "-o", "name,type", "-r", pool)
	if err != nil {
		return nil, err
	}
	var res []domain.Dataset
	for _, l := range strings.Split(strings.TrimSpace(out), "\n") {
		parts := strings.Split(l, "\t")
		// Skip blank or malformed lines.
		if len(parts) >= 2 {
			res = append(res, domain.Dataset{Name: parts[0], Pool: pool, Type: parts[1]})
		}
	}
	return res, nil
}
|
||||||
|
|
||||||
|
func (a *Adapter) CreateDataset(ctx context.Context, name string, props map[string]string) error {
|
||||||
|
args := []string{"create", name}
|
||||||
|
for k, v := range props {
|
||||||
|
args = append(args, "-o", fmt.Sprintf("%s=%s", k, v))
|
||||||
|
}
|
||||||
|
_, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zfs", args...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if code != 0 {
|
||||||
|
return fmt.Errorf("zfs create failed: %s", stderr)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot creates the snapshot <dataset>@<snapName> via `zfs snapshot`.
func (a *Adapter) Snapshot(ctx context.Context, dataset, snapName string) error {
	name := fmt.Sprintf("%s@%s", dataset, snapName)
	_, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zfs", "snapshot", name)
	if err != nil {
		return err
	}
	if code != 0 {
		return fmt.Errorf("zfs snapshot failed: %s", stderr)
	}
	return nil
}
|
||||||
|
|
||||||
|
// ScrubStart kicks off a scrub of pool via `zpool scrub <pool>`.
// The scrub runs asynchronously in ZFS; use ScrubStatus to poll progress.
func (a *Adapter) ScrubStart(ctx context.Context, pool string) error {
	_, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zpool", "scrub", pool)
	if err != nil {
		return err
	}
	if code != 0 {
		return fmt.Errorf("zpool scrub failed: %s", stderr)
	}
	return nil
}
|
||||||
|
|
||||||
|
func (a *Adapter) ScrubStatus(ctx context.Context, pool string) (string, error) {
|
||||||
|
out, _, _, err := osexec.ExecWithRunner(a.Runner, ctx, "zpool", "status", pool)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
// Find scan line
|
||||||
|
for _, line := range strings.Split(out, "\n") {
|
||||||
|
if strings.Contains(line, "scan: ") {
|
||||||
|
return strings.TrimSpace(line), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "no-scan-status", nil
|
||||||
|
}
|
||||||
58
internal/infra/zfs/zfs_test.go
Normal file
58
internal/infra/zfs/zfs_test.go
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
package zfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/example/storage-appliance/internal/infra/osexec"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestListPoolsParse(t *testing.T) {
|
||||||
|
out := "tank\tONLINE\t1.00T\n"
|
||||||
|
fr := &osexec.FakeRunner{Stdout: out, ExitCode: 0}
|
||||||
|
a := NewAdapter(fr)
|
||||||
|
ctx := context.Background()
|
||||||
|
pools, err := a.ListPools(ctx)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("ListPools failed: %v", err)
|
||||||
|
}
|
||||||
|
if len(pools) != 1 || pools[0].Name != "tank" || pools[0].Health != "ONLINE" {
|
||||||
|
t.Fatalf("unexpected pools result: %+v", pools)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestListDatasetsParse(t *testing.T) {
|
||||||
|
out := "tank\tdataset\ntank/ds\tdataset\n"
|
||||||
|
fr := &osexec.FakeRunner{Stdout: out, ExitCode: 0}
|
||||||
|
a := NewAdapter(fr)
|
||||||
|
pool := "tank"
|
||||||
|
ds, err := a.ListDatasets(context.Background(), pool)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("ListDatasets failed: %v", err)
|
||||||
|
}
|
||||||
|
if len(ds) != 2 {
|
||||||
|
t.Fatalf("expected 2 datasets, got %d", len(ds))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestScrubStatusParse(t *testing.T) {
|
||||||
|
out := " scan: scrub repaired 0 in 0h0m with 0 errors on Tue Jul 1 12:34:56 2025\n"
|
||||||
|
fr := &osexec.FakeRunner{Stdout: out, ExitCode: 0}
|
||||||
|
a := NewAdapter(fr)
|
||||||
|
s, err := a.ScrubStatus(context.Background(), "tank")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("ScrubStatus failed: %v", err)
|
||||||
|
}
|
||||||
|
if s == "" {
|
||||||
|
t.Fatalf("expected scan line, got empty")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCreatePoolSync(t *testing.T) {
|
||||||
|
fr := &osexec.FakeRunner{Stdout: "", ExitCode: 0}
|
||||||
|
a := NewAdapter(fr)
|
||||||
|
if err := a.CreatePoolSync(context.Background(), "tank", []string{"/dev/sda"}); err != nil {
|
||||||
|
t.Fatalf("CreatePoolSync failed: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
102
internal/service/storage/storage.go
Normal file
102
internal/service/storage/storage.go
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
package storage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/example/storage-appliance/internal/audit"
|
||||||
|
"github.com/example/storage-appliance/internal/domain"
|
||||||
|
"github.com/example/storage-appliance/internal/infra/zfs"
|
||||||
|
"github.com/example/storage-appliance/internal/service"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sentinel errors returned by StorageService; compare with errors.Is.
var (
	// ErrForbidden indicates the caller's role does not permit the operation.
	ErrForbidden = errors.New("forbidden")
	// ErrDuplicatePool indicates a pool with the requested name already exists.
	ErrDuplicatePool = errors.New("duplicate pool name")
)
||||||
|
|
||||||
|
// StorageService coordinates pool/dataset operations: it enforces role
// checks, enqueues jobs via JobRunner, and records audit events.
type StorageService struct {
	ZFS       zfs.Adapter       // ZFS CLI adapter (held by value; see NewStorageService)
	JobRunner service.JobRunner // job queue for asynchronous operations
	Audit     audit.AuditLogger // optional; nil disables auditing
}
|
||||||
|
|
||||||
|
// NewStorageService wires a StorageService from its dependencies.
// NOTE(review): z is dereferenced and stored by value, so later mutations
// of the caller's adapter are not seen and a nil z panics — confirm the
// copy is intentional.
func NewStorageService(z *zfs.Adapter, jr service.JobRunner, al audit.AuditLogger) *StorageService {
	return &StorageService{ZFS: *z, JobRunner: jr, Audit: al}
}
|
||||||
|
|
||||||
|
// ListPools returns pools via the zfs adapter.
func (s *StorageService) ListPools(ctx context.Context) ([]domain.Pool, error) {
	return s.ZFS.ListPools(ctx)
}
|
||||||
|
|
||||||
|
// CreatePool validates and enqueues a create pool job; user must be admin
|
||||||
|
func (s *StorageService) CreatePool(ctx context.Context, user string, role string, name string, vdevs []string) (string, error) {
|
||||||
|
if role != "admin" {
|
||||||
|
return "", ErrForbidden
|
||||||
|
}
|
||||||
|
// Simple validation: new name not in existing pools
|
||||||
|
pools, err := s.ZFS.ListPools(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
for _, p := range pools {
|
||||||
|
if p.Name == name {
|
||||||
|
return "", ErrDuplicatePool
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Create a job to build a pool. For skeleton, we just create a job entry with type create-pool
|
||||||
|
j := domain.Job{Type: "create-pool", Status: "queued", Owner: domain.UUID(user)}
|
||||||
|
id, err := s.JobRunner.Enqueue(ctx, j)
|
||||||
|
// Store details in audit
|
||||||
|
if s.Audit != nil {
|
||||||
|
s.Audit.Record(ctx, audit.Event{UserID: user, Action: "pool.create.request", ResourceType: "pool", ResourceID: name, Success: err == nil, Details: map[string]any{"vdevs": vdevs}})
|
||||||
|
}
|
||||||
|
return id, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Snapshot dataset
|
||||||
|
func (s *StorageService) Snapshot(ctx context.Context, user, role, dataset, snapName string) (string, error) {
|
||||||
|
if role != "admin" && role != "operator" {
|
||||||
|
return "", ErrForbidden
|
||||||
|
}
|
||||||
|
// call zfs snapshot, but do as job; enqueue
|
||||||
|
j := domain.Job{Type: "snapshot", Status: "queued", Owner: domain.UUID(user)}
|
||||||
|
id, err := s.JobRunner.Enqueue(ctx, j)
|
||||||
|
if s.Audit != nil {
|
||||||
|
s.Audit.Record(ctx, audit.Event{UserID: user, Action: "dataset.snapshot.request", ResourceType: "snapshot", ResourceID: fmt.Sprintf("%s@%s", dataset, snapName), Success: err == nil, Details: map[string]any{"dataset": dataset}})
|
||||||
|
}
|
||||||
|
return id, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *StorageService) ScrubStart(ctx context.Context, user, role, pool string) (string, error) {
|
||||||
|
if role != "admin" && role != "operator" {
|
||||||
|
return "", ErrForbidden
|
||||||
|
}
|
||||||
|
j := domain.Job{Type: "scrub", Status: "queued", Owner: domain.UUID(user)}
|
||||||
|
id, err := s.JobRunner.Enqueue(ctx, j)
|
||||||
|
if s.Audit != nil {
|
||||||
|
s.Audit.Record(ctx, audit.Event{UserID: user, Action: "pool.scrub.request", ResourceType: "pool", ResourceID: pool, Success: err == nil})
|
||||||
|
}
|
||||||
|
return id, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListDatasets returns datasets for a pool
|
||||||
|
func (s *StorageService) ListDatasets(ctx context.Context, pool string) ([]domain.Dataset, error) {
|
||||||
|
return s.ZFS.ListDatasets(ctx, pool)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDataset creates dataset synchronously or as job; for skeleton, do sync
|
||||||
|
func (s *StorageService) CreateDataset(ctx context.Context, user, role, name string, props map[string]string) error {
|
||||||
|
if role != "admin" && role != "operator" {
|
||||||
|
return ErrForbidden
|
||||||
|
}
|
||||||
|
return s.ZFS.CreateDataset(ctx, name, props)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPoolStatus calls the adapter
|
||||||
|
func (s *StorageService) GetPoolStatus(ctx context.Context, pool string) (domain.PoolHealth, error) {
|
||||||
|
return s.ZFS.GetPoolStatus(ctx, pool)
|
||||||
|
}
|
||||||
16
internal/templates/hx_pools.html
Normal file
16
internal/templates/hx_pools.html
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
{{define "hx_pools"}}
{{/* Partial rendered for HTMX pool refreshes. Expects a slice of
     pools, each with Name, Health and Capacity fields. */}}
<div>
  <table class="min-w-full bg-white">
    <thead>
      <tr><th>Name</th><th>Health</th><th>Capacity</th></tr>
    </thead>
    <tbody>
      {{range .}}
      <tr class="border-t"><td>{{.Name}}</td><td>{{.Health}}</td><td>{{.Capacity}}</td></tr>
      {{else}}
      {{/* Empty-state row when no pools exist. */}}
      <tr><td colspan="3">No pools</td></tr>
      {{end}}
    </tbody>
  </table>
</div>
{{end}}
|
||||||
6
internal/templates/job_row.html
Normal file
6
internal/templates/job_row.html
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{{define "job_row"}}
{{/* One job entry. Expects JobID and Status; Name and Progress are
     optional and rendered only when set. */}}
<div class="p-2 bg-gray-50 rounded border mb-2">
  <div class="text-sm"><strong>Job:</strong> {{.JobID}} {{if .Name}}— {{.Name}}{{end}}</div>
  <div class="text-xs text-gray-500">Status: {{.Status}}{{if .Progress}} — {{.Progress}}%{{end}}</div>
</div>
{{end}}
|
||||||
21
internal/templates/storage.html
Normal file
21
internal/templates/storage.html
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
{{define "content"}}
{{/* Storage page: pool list (HTMX-refreshable) plus a create-pool form.
     Fix: the pools partial is defined as "hx_pools" (see hx_pools.html),
     not "hx_pools.html" — invoking the file-level template name would
     render nothing, since the file only contains a define. */}}
<div class="bg-white rounded shadow p-4">
  <h1 class="text-2xl font-bold">Storage</h1>
  <div class="mt-4">
    <button class="px-3 py-2 bg-blue-500 text-white rounded" hx-get="/hx/pools" hx-swap="outerHTML" hx-target="#pools">Refresh pools</button>
  </div>
  <div id="pools" class="mt-4">
    {{template "hx_pools" .}}
  </div>
  <div class="mt-6">
    <h2 class="text-lg font-semibold">Create Pool</h2>
    <form hx-post="/storage/pool/create" hx-swap="afterbegin" class="mt-2">
      <div class="flex space-x-2">
        <input name="name" placeholder="pool name" class="border rounded p-1" />
        <input name="vdevs" placeholder="/dev/sda,/dev/sdb" class="border rounded p-1" />
        <button class="px-3 py-1 bg-green-500 text-white rounded" type="submit">Create</button>
      </div>
    </form>
  </div>
</div>
{{end}}
|
||||||
25
migrations/0002_audit_and_snapshots.sql
Normal file
25
migrations/0002_audit_and_snapshots.sql
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
-- 0002_audit_and_snapshots.sql
-- Adds the audit trail plus dataset/snapshot bookkeeping tables.

-- One row per recorded audit event (see internal/audit).
CREATE TABLE IF NOT EXISTS audit_events (
    id TEXT PRIMARY KEY,
    ts DATETIME DEFAULT CURRENT_TIMESTAMP,   -- event time, defaults to insert time
    user_id TEXT,
    action TEXT,                             -- e.g. "pool.create.request"
    resource_type TEXT,                      -- e.g. "pool", "snapshot"
    resource_id TEXT,
    success INTEGER DEFAULT 1,               -- boolean: 1 = succeeded
    details TEXT                             -- free-form payload; presumably JSON — TODO confirm writer
);

-- Known datasets, keyed by full dataset name.
CREATE TABLE IF NOT EXISTS datasets (
    name TEXT PRIMARY KEY,
    pool TEXT,                               -- owning pool
    type TEXT,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);

-- Snapshots taken of datasets.
CREATE TABLE IF NOT EXISTS snapshots (
    id TEXT PRIMARY KEY,
    dataset TEXT,                            -- references datasets.name (no FK enforced)
    name TEXT,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
|
||||||
Reference in New Issue
Block a user