diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 4abf132..e419677 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -1,4 +1,5 @@ -You are an expert storage-systems engineer and Go backend architect. +You are an expert storage-systems engineer and Go backend architect. You are also skilled in HTMX-based server-rendered UIs. You have deep experience building storage management systems similar to TrueNAS or Unraid. You are also familiar with Linux storage subsystems like ZFS, NFS, Samba, iSCSI, and S3-compatible object storage. You are also knowledgeable about best practices in security, RBAC, observability, and clean architecture in Go applications. You are also an expert in designing UIs using HTMX and TailwindCSS for modern web applications. + Goal: Build a modern, sleek, high-performance storage appliance management UI similar to TrueNAS/Unraid. diff --git a/cmd/appliance/main.go b/cmd/appliance/main.go index f81f538..ac10a2a 100644 --- a/cmd/appliance/main.go +++ b/cmd/appliance/main.go @@ -12,10 +12,16 @@ import ( "github.com/example/storage-appliance/internal/audit" httpin "github.com/example/storage-appliance/internal/http" + iscsiinfra "github.com/example/storage-appliance/internal/infra/iscsi" + "github.com/example/storage-appliance/internal/infra/nfs" "github.com/example/storage-appliance/internal/infra/osexec" + "github.com/example/storage-appliance/internal/infra/samba" "github.com/example/storage-appliance/internal/infra/sqlite/db" "github.com/example/storage-appliance/internal/infra/zfs" + "github.com/example/storage-appliance/internal/job" + iscsiSvcPkg "github.com/example/storage-appliance/internal/service/iscsi" "github.com/example/storage-appliance/internal/service/mock" + "github.com/example/storage-appliance/internal/service/shares" "github.com/example/storage-appliance/internal/service/storage" _ "github.com/glebarez/sqlite" "github.com/go-chi/chi/v5" @@ -51,13 +57,23 @@ func main() { //
Attach router and app dependencies // wire mocks for now; replace with real adapters in infra diskSvc := &mock.MockDiskService{} - zfsSvc := &mock.MockZFSService{} - jobRunner := &mock.MockJobRunner{} - auditLogger := audit.NewSQLAuditLogger(sqldb) + // job runner uses sqlite DB and zfs adapter zfsAdapter := zfs.NewAdapter(osexec.Default) + jobRunner := &job.Runner{DB: sqldb} + auditLogger := audit.NewSQLAuditLogger(sqldb) + jobRunner.ZFS = zfsAdapter + jobRunner.Audit = auditLogger // storage service wiring: use zfsAdapter and jobRunner and audit logger storageSvc := storage.NewStorageService(zfsAdapter, jobRunner, auditLogger) + nfsAdapter := nfs.NewAdapter(osexec.Default, "") + sambaAdapter := samba.NewAdapter(osexec.Default, "") + sharesSvc := shares.NewSharesService(sqldb, nfsAdapter, sambaAdapter, auditLogger) + // iSCSI adapter and service + iscsiAdapter := iscsiinfra.NewAdapter(osexec.Default) + iscsiSvc := iscsiSvcPkg.NewISCSIService(sqldb, zfsAdapter, iscsiAdapter, auditLogger) + + zfsSvc := zfsAdapter app := &httpin.App{ DB: sqldb, DiskSvc: diskSvc, @@ -65,6 +81,9 @@ func main() { JobRunner: jobRunner, HTTPClient: &http.Client{}, StorageSvc: storageSvc, + ShareSvc: sharesSvc, + ISCSISvc: iscsiSvc, + Runner: osexec.Default, } r.Use(uuidMiddleware) httpin.RegisterRoutes(r, app) diff --git a/internal/audit/audit.go b/internal/audit/audit.go index 3a92bd1..be209ba 100644 --- a/internal/audit/audit.go +++ b/internal/audit/audit.go @@ -2,7 +2,9 @@ package audit import ( "context" + "crypto/sha256" "database/sql" + "encoding/hex" "encoding/json" "log" "time" @@ -19,6 +21,12 @@ type Event struct { ResourceID string Success bool Details map[string]any + // Enhanced fields + Actor string // Username or user identifier + Resource string // Full resource identifier (e.g., "pool:my-pool") + PayloadHash string // SHA256 hash of request payload + Result string // Success/failure message or status + ClientIP string // Client IP address } type AuditLogger interface { @@ 
-40,12 +48,67 @@ func (l *SQLAuditLogger) Record(ctx context.Context, e Event) error { if e.Timestamp.IsZero() { e.Timestamp = time.Now() } - detailsJSON, _ := json.Marshal(e.Details) - _, err := l.DB.ExecContext(ctx, `INSERT INTO audit_events (id, ts, user_id, action, resource_type, resource_id, success, details) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, e.ID, e.Timestamp, e.UserID, e.Action, e.ResourceType, e.ResourceID, boolToInt(e.Success), string(detailsJSON)) - if err != nil { - log.Printf("audit record failed: %v", err) + + // Set actor from UserID if not provided + if e.Actor == "" { + e.Actor = e.UserID } - return err + + // Build resource string from ResourceType and ResourceID + if e.Resource == "" { + if e.ResourceID != "" { + e.Resource = e.ResourceType + ":" + e.ResourceID + } else { + e.Resource = e.ResourceType + } + } + + // Set result from Success if not provided + if e.Result == "" { + if e.Success { + e.Result = "success" + } else { + e.Result = "failure" + } + } + + detailsJSON, _ := json.Marshal(e.Details) + + // Try to insert with all columns, fallback to basic columns if enhanced columns don't exist + _, err := l.DB.ExecContext(ctx, + `INSERT INTO audit_events (id, ts, user_id, action, resource_type, resource_id, success, details, actor, resource, payload_hash, result, client_ip) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + e.ID, e.Timestamp, e.UserID, e.Action, e.ResourceType, e.ResourceID, boolToInt(e.Success), string(detailsJSON), + e.Actor, e.Resource, e.PayloadHash, e.Result, e.ClientIP) + if err != nil { + // Fallback to basic insert if enhanced columns don't exist yet + _, err2 := l.DB.ExecContext(ctx, + `INSERT INTO audit_events (id, ts, user_id, action, resource_type, resource_id, success, details) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, + e.ID, e.Timestamp, e.UserID, e.Action, e.ResourceType, e.ResourceID, boolToInt(e.Success), string(detailsJSON)) + if err2 != nil { + log.Printf("audit record failed: %v (fallback also failed: %v)", 
err, err2) + return err2 + } + log.Printf("audit record inserted with fallback (enhanced columns may not exist): %v", err) + } + return nil +} + +// HashPayload computes SHA256 hash of a payload (JSON string or bytes) +func HashPayload(payload interface{}) string { + var data []byte + switch v := payload.(type) { + case []byte: + data = v + case string: + data = []byte(v) + default: + jsonData, _ := json.Marshal(payload) + data = jsonData + } + hash := sha256.Sum256(data) + return hex.EncodeToString(hash[:]) } func boolToInt(b bool) int { diff --git a/internal/auth/password.go b/internal/auth/password.go new file mode 100644 index 0000000..99687fe --- /dev/null +++ b/internal/auth/password.go @@ -0,0 +1,89 @@ +package auth + +import ( + "crypto/rand" + "crypto/subtle" + "encoding/base64" + "errors" + "fmt" + "strings" + + "golang.org/x/crypto/argon2" +) + +const ( + // Argon2id parameters + argon2Memory = 64 * 1024 // 64 MB + argon2Iterations = 3 + argon2Parallelism = 2 + argon2SaltLength = 16 + argon2KeyLength = 32 +) + +// HashPassword hashes a password using Argon2id +func HashPassword(password string) (string, error) { + // Generate a random salt + salt := make([]byte, argon2SaltLength) + if _, err := rand.Read(salt); err != nil { + return "", err + } + + // Hash the password + hash := argon2.IDKey([]byte(password), salt, argon2Iterations, argon2Memory, argon2Parallelism, argon2KeyLength) + + // Encode the hash and salt + b64Salt := base64.RawStdEncoding.EncodeToString(salt) + b64Hash := base64.RawStdEncoding.EncodeToString(hash) + + // Return the encoded hash in the format: $argon2id$v=19$m=65536,t=3,p=2$salt$hash + return fmt.Sprintf("$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s", + argon2.Version, argon2Memory, argon2Iterations, argon2Parallelism, b64Salt, b64Hash), nil +} + +// VerifyPassword verifies a password against a hash +func VerifyPassword(password, encodedHash string) (bool, error) { + // Parse the encoded hash + parts := strings.Split(encodedHash, "$") + 
if len(parts) != 6 { + return false, errors.New("invalid hash format") + } + + if parts[1] != "argon2id" { + return false, errors.New("unsupported hash algorithm") + } + + // Parse version + var version int + if _, err := fmt.Sscanf(parts[2], "v=%d", &version); err != nil { + return false, err + } + if version != argon2.Version { + return false, errors.New("incompatible version") + } + + // Parse parameters + var memory, iterations, parallelism int + if _, err := fmt.Sscanf(parts[3], "m=%d,t=%d,p=%d", &memory, &iterations, &parallelism); err != nil { + return false, err + } + + // Decode salt and hash + salt, err := base64.RawStdEncoding.DecodeString(parts[4]) + if err != nil { + return false, err + } + + hash, err := base64.RawStdEncoding.DecodeString(parts[5]) + if err != nil { + return false, err + } + + // Compute the hash of the password + otherHash := argon2.IDKey([]byte(password), salt, uint32(iterations), uint32(memory), uint8(parallelism), uint32(len(hash))) + + // Compare hashes in constant time + if subtle.ConstantTimeCompare(hash, otherHash) == 1 { + return true, nil + } + return false, nil +} diff --git a/internal/auth/rbac.go b/internal/auth/rbac.go new file mode 100644 index 0000000..d8a0220 --- /dev/null +++ b/internal/auth/rbac.go @@ -0,0 +1,183 @@ +package auth + +import ( + "context" + "database/sql" +) + +type Permission struct { + ID string + Name string + Description string +} + +type Role struct { + ID string + Name string + Description string +} + +type RBACStore struct { + DB *sql.DB +} + +func NewRBACStore(db *sql.DB) *RBACStore { + return &RBACStore{DB: db} +} + +// GetUserRoles retrieves all roles for a user +func (s *RBACStore) GetUserRoles(ctx context.Context, userID string) ([]Role, error) { + rows, err := s.DB.QueryContext(ctx, + `SELECT r.id, r.name, r.description FROM roles r + INNER JOIN user_roles ur ON r.id = ur.role_id + WHERE ur.user_id = ?`, + userID) + if err != nil { + return nil, err + } + defer rows.Close() + + var roles 
[]Role + for rows.Next() { + var role Role + if err := rows.Scan(&role.ID, &role.Name, &role.Description); err != nil { + return nil, err + } + roles = append(roles, role) + } + return roles, rows.Err() +} + +// GetRolePermissions retrieves all permissions for a role +func (s *RBACStore) GetRolePermissions(ctx context.Context, roleID string) ([]Permission, error) { + rows, err := s.DB.QueryContext(ctx, + `SELECT p.id, p.name, p.description FROM permissions p + INNER JOIN role_permissions rp ON p.id = rp.permission_id + WHERE rp.role_id = ?`, + roleID) + if err != nil { + return nil, err + } + defer rows.Close() + + var permissions []Permission + for rows.Next() { + var perm Permission + if err := rows.Scan(&perm.ID, &perm.Name, &perm.Description); err != nil { + return nil, err + } + permissions = append(permissions, perm) + } + return permissions, rows.Err() +} + +// GetUserPermissions retrieves all permissions for a user (through their roles) +func (s *RBACStore) GetUserPermissions(ctx context.Context, userID string) ([]Permission, error) { + rows, err := s.DB.QueryContext(ctx, + `SELECT DISTINCT p.id, p.name, p.description FROM permissions p + INNER JOIN role_permissions rp ON p.id = rp.permission_id + INNER JOIN user_roles ur ON rp.role_id = ur.role_id + WHERE ur.user_id = ?`, + userID) + if err != nil { + return nil, err + } + defer rows.Close() + + var permissions []Permission + for rows.Next() { + var perm Permission + if err := rows.Scan(&perm.ID, &perm.Name, &perm.Description); err != nil { + return nil, err + } + permissions = append(permissions, perm) + } + return permissions, rows.Err() +} + +// UserHasPermission checks if a user has a specific permission +func (s *RBACStore) UserHasPermission(ctx context.Context, userID, permission string) (bool, error) { + var count int + err := s.DB.QueryRowContext(ctx, + `SELECT COUNT(*) FROM permissions p + INNER JOIN role_permissions rp ON p.id = rp.permission_id + INNER JOIN user_roles ur ON rp.role_id = 
ur.role_id + WHERE ur.user_id = ? AND p.name = ?`, + userID, permission).Scan(&count) + if err != nil { + return false, err + } + return count > 0, nil +} + +// AssignRoleToUser assigns a role to a user +func (s *RBACStore) AssignRoleToUser(ctx context.Context, userID, roleID string) error { + _, err := s.DB.ExecContext(ctx, + `INSERT OR IGNORE INTO user_roles (user_id, role_id) VALUES (?, ?)`, + userID, roleID) + return err +} + +// RemoveRoleFromUser removes a role from a user +func (s *RBACStore) RemoveRoleFromUser(ctx context.Context, userID, roleID string) error { + _, err := s.DB.ExecContext(ctx, + `DELETE FROM user_roles WHERE user_id = ? AND role_id = ?`, + userID, roleID) + return err +} + +// GetAllRoles retrieves all roles +func (s *RBACStore) GetAllRoles(ctx context.Context) ([]Role, error) { + rows, err := s.DB.QueryContext(ctx, + `SELECT id, name, description FROM roles ORDER BY name`) + if err != nil { + return nil, err + } + defer rows.Close() + + var roles []Role + for rows.Next() { + var role Role + if err := rows.Scan(&role.ID, &role.Name, &role.Description); err != nil { + return nil, err + } + roles = append(roles, role) + } + return roles, rows.Err() +} + +// GetAllPermissions retrieves all permissions +func (s *RBACStore) GetAllPermissions(ctx context.Context) ([]Permission, error) { + rows, err := s.DB.QueryContext(ctx, + `SELECT id, name, description FROM permissions ORDER BY name`) + if err != nil { + return nil, err + } + defer rows.Close() + + var permissions []Permission + for rows.Next() { + var perm Permission + if err := rows.Scan(&perm.ID, &perm.Name, &perm.Description); err != nil { + return nil, err + } + permissions = append(permissions, perm) + } + return permissions, rows.Err() +} + +// AssignPermissionToRole assigns a permission to a role +func (s *RBACStore) AssignPermissionToRole(ctx context.Context, roleID, permissionID string) error { + _, err := s.DB.ExecContext(ctx, + `INSERT OR IGNORE INTO role_permissions (role_id, 
permission_id) VALUES (?, ?)`, + roleID, permissionID) + return err +} + +// RemovePermissionFromRole removes a permission from a role +func (s *RBACStore) RemovePermissionFromRole(ctx context.Context, roleID, permissionID string) error { + _, err := s.DB.ExecContext(ctx, + `DELETE FROM role_permissions WHERE role_id = ? AND permission_id = ?`, + roleID, permissionID) + return err +} diff --git a/internal/auth/session.go b/internal/auth/session.go new file mode 100644 index 0000000..b599229 --- /dev/null +++ b/internal/auth/session.go @@ -0,0 +1,108 @@ +package auth + +import ( + "context" + "crypto/rand" + "database/sql" + "encoding/base64" + "time" + + "github.com/google/uuid" +) + +const ( + SessionCookieName = "session_token" + SessionDuration = 24 * time.Hour +) + +type Session struct { + ID string + UserID string + Token string + ExpiresAt time.Time + CreatedAt time.Time +} + +type SessionStore struct { + DB *sql.DB +} + +func NewSessionStore(db *sql.DB) *SessionStore { + return &SessionStore{DB: db} +} + +// GenerateToken generates a secure random token +func GenerateToken() (string, error) { + b := make([]byte, 32) + if _, err := rand.Read(b); err != nil { + return "", err + } + return base64.URLEncoding.EncodeToString(b), nil +} + +// CreateSession creates a new session for a user +func (s *SessionStore) CreateSession(ctx context.Context, userID string) (*Session, error) { + token, err := GenerateToken() + if err != nil { + return nil, err + } + + sessionID := uuid.New().String() + expiresAt := time.Now().Add(SessionDuration) + + _, err = s.DB.ExecContext(ctx, + `INSERT INTO sessions (id, user_id, token, expires_at) VALUES (?, ?, ?, ?)`, + sessionID, userID, token, expiresAt) + if err != nil { + return nil, err + } + + return &Session{ + ID: sessionID, + UserID: userID, + Token: token, + ExpiresAt: expiresAt, + CreatedAt: time.Now(), + }, nil +} + +// GetSession retrieves a session by token +func (s *SessionStore) GetSession(ctx context.Context, token 
string) (*Session, error) { + var session Session + var expiresAtStr string + err := s.DB.QueryRowContext(ctx, + `SELECT id, user_id, token, expires_at, created_at FROM sessions WHERE token = ? AND expires_at > ?`, + token, time.Now()).Scan(&session.ID, &session.UserID, &session.Token, &expiresAtStr, &session.CreatedAt) + if err != nil { + return nil, err + } + + session.ExpiresAt, err = time.Parse("2006-01-02 15:04:05", expiresAtStr) + if err != nil { + // Try with timezone + session.ExpiresAt, err = time.Parse(time.RFC3339, expiresAtStr) + if err != nil { + return nil, err + } + } + + return &session, nil +} + +// DeleteSession deletes a session by token +func (s *SessionStore) DeleteSession(ctx context.Context, token string) error { + _, err := s.DB.ExecContext(ctx, `DELETE FROM sessions WHERE token = ?`, token) + return err +} + +// DeleteUserSessions deletes all sessions for a user +func (s *SessionStore) DeleteUserSessions(ctx context.Context, userID string) error { + _, err := s.DB.ExecContext(ctx, `DELETE FROM sessions WHERE user_id = ?`, userID) + return err +} + +// CleanupExpiredSessions removes expired sessions +func (s *SessionStore) CleanupExpiredSessions(ctx context.Context) error { + _, err := s.DB.ExecContext(ctx, `DELETE FROM sessions WHERE expires_at < ?`, time.Now()) + return err +} diff --git a/internal/auth/user.go b/internal/auth/user.go new file mode 100644 index 0000000..5ec6f2e --- /dev/null +++ b/internal/auth/user.go @@ -0,0 +1,102 @@ +package auth + +import ( + "context" + "database/sql" + "errors" +) + +type User struct { + ID string + Username string + PasswordHash string + Role string // Legacy field, kept for backward compatibility + CreatedAt string +} + +type UserStore struct { + DB *sql.DB +} + +func NewUserStore(db *sql.DB) *UserStore { + return &UserStore{DB: db} +} + +// GetUserByUsername retrieves a user by username +func (s *UserStore) GetUserByUsername(ctx context.Context, username string) (*User, error) { + var user User + 
err := s.DB.QueryRowContext(ctx, + `SELECT id, username, password_hash, role, created_at FROM users WHERE username = ?`, + username).Scan(&user.ID, &user.Username, &user.PasswordHash, &user.Role, &user.CreatedAt) + if err != nil { + if err == sql.ErrNoRows { + return nil, errors.New("user not found") + } + return nil, err + } + return &user, nil +} + +// GetUserByID retrieves a user by ID +func (s *UserStore) GetUserByID(ctx context.Context, userID string) (*User, error) { + var user User + err := s.DB.QueryRowContext(ctx, + `SELECT id, username, password_hash, role, created_at FROM users WHERE id = ?`, + userID).Scan(&user.ID, &user.Username, &user.PasswordHash, &user.Role, &user.CreatedAt) + if err != nil { + if err == sql.ErrNoRows { + return nil, errors.New("user not found") + } + return nil, err + } + return &user, nil +} + +// CreateUser creates a new user +func (s *UserStore) CreateUser(ctx context.Context, username, password string) (*User, error) { + passwordHash, err := HashPassword(password) + if err != nil { + return nil, err + } + + userID := username // Using username as ID for simplicity, could use UUID + _, err = s.DB.ExecContext(ctx, + `INSERT INTO users (id, username, password_hash) VALUES (?, ?, ?)`, + userID, username, passwordHash) + if err != nil { + return nil, err + } + + return s.GetUserByID(ctx, userID) +} + +// UpdatePassword updates a user's password +func (s *UserStore) UpdatePassword(ctx context.Context, userID, newPassword string) error { + passwordHash, err := HashPassword(newPassword) + if err != nil { + return err + } + + _, err = s.DB.ExecContext(ctx, + `UPDATE users SET password_hash = ? 
WHERE id = ?`, + passwordHash, userID) + return err +} + +// Authenticate verifies username and password +func (s *UserStore) Authenticate(ctx context.Context, username, password string) (*User, error) { + user, err := s.GetUserByUsername(ctx, username) + if err != nil { + return nil, err + } + + valid, err := VerifyPassword(password, user.PasswordHash) + if err != nil { + return nil, err + } + if !valid { + return nil, errors.New("invalid password") + } + + return user, nil +} diff --git a/internal/domain/domain.go b/internal/domain/domain.go index d90b28d..19cbaf9 100644 --- a/internal/domain/domain.go +++ b/internal/domain/domain.go @@ -52,10 +52,11 @@ type Dataset struct { } type Share struct { - ID UUID - Name string - Path string - Type string // nfs or smb + ID UUID + Name string + Path string + Type string // nfs or smb + Config map[string]string } type LUN struct { @@ -73,4 +74,5 @@ type Job struct { Owner UUID CreatedAt time.Time UpdatedAt time.Time + Details map[string]any } diff --git a/internal/http/app.go b/internal/http/app.go index 05e73e8..b504518 100644 --- a/internal/http/app.go +++ b/internal/http/app.go @@ -4,7 +4,9 @@ import ( "database/sql" "net/http" + "github.com/example/storage-appliance/internal/infra/osexec" "github.com/example/storage-appliance/internal/service" + "github.com/example/storage-appliance/internal/service/storage" ) // App contains injected dependencies for handlers. 
@@ -15,4 +17,8 @@ type App struct { JobRunner service.JobRunner HTTPClient *http.Client StorageSvc *storage.StorageService + ShareSvc service.SharesService + ISCSISvc service.ISCSIService + ObjectSvc service.ObjectService + Runner osexec.Runner } diff --git a/internal/http/handlers.go b/internal/http/handlers.go index 1200f6a..08c6a28 100644 --- a/internal/http/handlers.go +++ b/internal/http/handlers.go @@ -2,20 +2,27 @@ package http import ( "encoding/json" + "github.com/example/storage-appliance/internal/audit" + "github.com/example/storage-appliance/internal/domain" + "github.com/go-chi/chi/v5" "html/template" "net/http" "path/filepath" "strings" - - "github.com/example/storage-appliance/internal/domain" - "github.com/go-chi/chi/v5" ) var templates *template.Template func init() { var err error + // Try a couple of relative paths so tests work regardless of cwd templates, err = template.ParseGlob("internal/templates/*.html") + if err != nil { + templates, err = template.ParseGlob("../templates/*.html") + } + if err != nil { + templates, err = template.ParseGlob("./templates/*.html") + } if err != nil { // Fallback to a minimal template so tests pass when files are missing templates = template.New("dashboard.html") @@ -24,9 +31,9 @@ func init() { } func (a *App) DashboardHandler(w http.ResponseWriter, r *http.Request) { - data := map[string]interface{}{ + data := templateData(r, map[string]interface{}{ "Title": "Storage Appliance Dashboard", - } + }) if err := templates.ExecuteTemplate(w, "base", data); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } @@ -39,6 +46,11 @@ func (a *App) PoolsHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } + // audit the list action if possible + if a.StorageSvc != nil && a.StorageSvc.Audit != nil { + user, _ := r.Context().Value(ContextKeyUser).(string) + a.StorageSvc.Audit.Record(ctx, audit.Event{UserID: user, Action: "pool.list", 
ResourceType: "pool", ResourceID: "all", Success: true}) + } j, err := json.Marshal(pools) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) @@ -53,6 +65,176 @@ func (a *App) JobsHandler(w http.ResponseWriter, r *http.Request) { w.Write([]byte(`[]`)) } +// PoolDatasetsHandler returns datasets for a given pool via API +func (a *App) PoolDatasetsHandler(w http.ResponseWriter, r *http.Request) { + pool := chi.URLParam(r, "pool") + ds, err := a.StorageSvc.ListDatasets(r.Context(), pool) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + b, _ := json.Marshal(ds) + w.Header().Set("Content-Type", "application/json") + w.Write(b) + if a.StorageSvc != nil && a.StorageSvc.Audit != nil { + user, _ := r.Context().Value(ContextKeyUser).(string) + a.StorageSvc.Audit.Record(r.Context(), audit.Event{UserID: user, Action: "dataset.list", ResourceType: "dataset", ResourceID: pool, Success: true}) + } +} + +// CreateDatasetHandler handles dataset creation via API +func (a *App) CreateDatasetHandler(w http.ResponseWriter, r *http.Request) { + type req struct { + Name string `json:"name"` + Props map[string]string `json:"props"` + } + var body req + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if err := a.StorageSvc.CreateDataset(r.Context(), user, role, body.Name, body.Props); err != nil { + http.Error(w, err.Error(), http.StatusForbidden) + return + } + w.WriteHeader(http.StatusNoContent) +} + +// SnapshotHandler creates a snapshot via Storage service and returns job id +func (a *App) SnapshotHandler(w http.ResponseWriter, r *http.Request) { + dataset := chi.URLParam(r, "dataset") + type req struct { + Name string `json:"name"` + } + var body req + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { 
+ http.Error(w, "bad request", http.StatusBadRequest) + return + } + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + id, err := a.StorageSvc.Snapshot(r.Context(), user, role, dataset, body.Name) + if err != nil { + http.Error(w, err.Error(), http.StatusForbidden) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"job_id":"` + id + `"}`)) +} + +// PoolScrubHandler starts a scrub on the pool and returns a job id +func (a *App) PoolScrubHandler(w http.ResponseWriter, r *http.Request) { + pool := chi.URLParam(r, "pool") + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + id, err := a.StorageSvc.ScrubStart(r.Context(), user, role, pool) + if err != nil { + http.Error(w, err.Error(), http.StatusForbidden) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"job_id":"` + id + `"}`)) +} + +// NFSStatusHandler returns nfs server service status +func (a *App) NFSStatusHandler(w http.ResponseWriter, r *http.Request) { + status, err := a.ShareSvc.NFSStatus(r.Context()) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"status":"` + status + `"}`)) +} + +// ObjectStoreHandler renders object storage page (MinIO) +func (a *App) ObjectStoreHandler(w http.ResponseWriter, r *http.Request) { + data := map[string]interface{}{"Title": "Object Storage"} + if err := templates.ExecuteTemplate(w, "base", data); err != nil { + if err2 := templates.ExecuteTemplate(w, "object_store", data); err2 != nil { + http.Error(w, err2.Error(), http.StatusInternalServerError) + } + } +} + +// HXBucketsHandler renders buckets list partial +func (a *App) HXBucketsHandler(w http.ResponseWriter, r *http.Request) { + var buckets []string + if a.ObjectSvc != nil { + buckets, _ = 
a.ObjectSvc.ListBuckets(r.Context()) + } + if err := templates.ExecuteTemplate(w, "hx_buckets", buckets); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// CreateBucketHandler creates a bucket through the ObjectSvc +func (a *App) CreateBucketHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.FormValue("name") + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if a.ObjectSvc == nil { + http.Error(w, "object service not configured", http.StatusInternalServerError) + return + } + id, err := a.ObjectSvc.CreateBucket(r.Context(), user, role, name) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + data := map[string]any{"JobID": id, "Name": name} + if err := templates.ExecuteTemplate(w, "job_row", data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// ObjectSettingsHandler handles updating object storage settings +func (a *App) ObjectSettingsHandler(w http.ResponseWriter, r *http.Request) { + // accept JSON body with settings or form values + type req struct { + AccessKey string `json:"access_key"` + SecretKey string `json:"secret_key"` + DataPath string `json:"data_path"` + Port int `json:"port"` + TLS bool `json:"tls"` + } + var body req + if r.Header.Get("Content-Type") == "application/json" { + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + } else { + if err := r.ParseForm(); err == nil { + body.AccessKey = r.FormValue("access_key") + body.SecretKey = r.FormValue("secret_key") + body.DataPath = r.FormValue("data_path") + // parse port and tls + } + } + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if 
a.ObjectSvc == nil { + http.Error(w, "object service not configured", http.StatusInternalServerError) + return + } + // wrap settings as an 'any' to satisfy interface (object service expects a specific type internally) + // For now, cast to the concrete struct via type assertion inside the service, but we need to pass as any + settings := map[string]any{"access_key": body.AccessKey, "secret_key": body.SecretKey, "data_path": body.DataPath, "port": body.Port, "tls": body.TLS} + // ObjectService.SetSettings expects settings 'any' (simplified), need to convert inside + if err := a.ObjectSvc.SetSettings(r.Context(), user, role, settings); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) +} + // CreatePoolHandler receives a request to create a pool and enqueues a job func (a *App) CreatePoolHandler(w http.ResponseWriter, r *http.Request) { // Minimal implementation that reads 'name' and 'vdevs' @@ -65,9 +247,20 @@ func (a *App) CreatePoolHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, "bad request", http.StatusBadRequest) return } - // Create a job and enqueue - j := domain.Job{Type: "create-pool", Status: "queued", Progress: 0} - id, err := a.JobRunner.Enqueue(r.Context(), j) + // prefer storage service which adds validation/audit; fall back to job runner + var id string + var err error + if a.StorageSvc != nil { + user, _ := r.Context().Value(ContextKeyUser).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + id, err = a.StorageSvc.CreatePool(r.Context(), user, role, body.Name, body.Vdevs) + } else if a.JobRunner != nil { + j := domain.Job{Type: "create-pool", Status: "queued", Progress: 0, Details: map[string]any{"name": body.Name, "vdevs": body.Vdevs}} + id, err = a.JobRunner.Enqueue(r.Context(), j) + } else { + http.Error(w, "no job runner", http.StatusInternalServerError) + return + } if err != nil { http.Error(w, "failed to create job", 
http.StatusInternalServerError) return @@ -83,9 +276,9 @@ func StaticHandler(w http.ResponseWriter, r *http.Request) { // StorageHandler renders the main storage page func (a *App) StorageHandler(w http.ResponseWriter, r *http.Request) { - data := map[string]interface{}{ + data := templateData(r, map[string]interface{}{ "Title": "Storage", - } + }) if err := templates.ExecuteTemplate(w, "base", data); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) } @@ -141,3 +334,347 @@ func (a *App) JobPartialHandler(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) } } + +// SharesNFSHandler renders the NFS shares page +func (a *App) SharesNFSHandler(w http.ResponseWriter, r *http.Request) { + data := templateData(r, map[string]interface{}{"Title": "NFS Shares"}) + if err := templates.ExecuteTemplate(w, "base", data); err != nil { + // fallback to rendering the content template directly (useful in tests) + if err2 := templates.ExecuteTemplate(w, "shares_nfs", data); err2 != nil { + http.Error(w, err2.Error(), http.StatusInternalServerError) + } + } +} + +// HXNFSHandler renders NFS shares partial +func (a *App) HXNFSHandler(w http.ResponseWriter, r *http.Request) { + shares := []domain.Share{} + if a.ShareSvc != nil { + shares, _ = a.ShareSvc.ListNFS(r.Context()) + } + if err := templates.ExecuteTemplate(w, "hx_nfs_shares", shares); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// CreateNFSHandler handles NFS create requests (HTMX form or JSON) +func (a *App) CreateNFSHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.FormValue("name") + path := r.FormValue("path") + optsRaw := r.FormValue("options") + opts := map[string]string{} + if optsRaw != "" { + // expecting JSON options for MVP + _ = json.Unmarshal([]byte(optsRaw), &opts) + } + user, _ := 
r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if a.ShareSvc == nil { + http.Error(w, "no share service", http.StatusInternalServerError) + return + } + id, err := a.ShareSvc.CreateNFS(r.Context(), user, role, name, path, opts) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + // Return a job/creation partial: reuse job_row for a simple message + data := map[string]any{"JobID": id, "Name": name, "Status": "queued"} + if err := templates.ExecuteTemplate(w, "job_row", data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// DeleteNFSHandler handles NFS share deletion +func (a *App) DeleteNFSHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + id := r.FormValue("id") + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if a.ShareSvc == nil { + http.Error(w, "no share service", http.StatusInternalServerError) + return + } + if err := a.ShareSvc.DeleteNFS(r.Context(), user, role, id); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + // return partial table after deletion + shares, _ := a.ShareSvc.ListNFS(r.Context()) + if err := templates.ExecuteTemplate(w, "hx_nfs_shares", shares); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// SharesSMBHandler renders the SMB shares page +func (a *App) SharesSMBHandler(w http.ResponseWriter, r *http.Request) { + data := map[string]interface{}{"Title": "SMB Shares"} + if err := templates.ExecuteTemplate(w, "base", data); err != nil { + // fallback for tests + if err2 := templates.ExecuteTemplate(w, "shares_smb", data); err2 != nil { + http.Error(w, err2.Error(), http.StatusInternalServerError) + } + } +} + +// ISCSIHandler renders the iSCSI page +func 
(a *App) ISCSIHandler(w http.ResponseWriter, r *http.Request) { + data := map[string]interface{}{"Title": "iSCSI Targets"} + if err := templates.ExecuteTemplate(w, "base", data); err != nil { + if err2 := templates.ExecuteTemplate(w, "iscsi", data); err2 != nil { + http.Error(w, err2.Error(), http.StatusInternalServerError) + } + } +} + +// HXISCSIHandler renders iSCSI targets partial +func (a *App) HXISCSIHandler(w http.ResponseWriter, r *http.Request) { + targets := []map[string]any{} + if a.ISCSISvc != nil { + targets, _ = a.ISCSISvc.ListTargets(r.Context()) + } + if err := templates.ExecuteTemplate(w, "hx_iscsi_targets", targets); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// HXISCLUNsHandler renders LUNs for a target +func (a *App) HXISCLUNsHandler(w http.ResponseWriter, r *http.Request) { + targetID := chi.URLParam(r, "target") + luns := []map[string]any{} + if a.ISCSISvc != nil { + luns, _ = a.ISCSISvc.ListLUNs(r.Context(), targetID) + } + if err := templates.ExecuteTemplate(w, "hx_iscsi_luns", luns); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// ISCSI Target info partial +func (a *App) ISCSITargetInfoHandler(w http.ResponseWriter, r *http.Request) { + targetID := chi.URLParam(r, "target") + var info map[string]any + if a.ISCSISvc != nil { + info, _ = a.ISCSISvc.GetTargetInfo(r.Context(), targetID) + } + if err := templates.ExecuteTemplate(w, "hx_iscsi_target_info", info); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// CreateISCSITargetHandler handles creating an iSCSI target via form/JSON +func (a *App) CreateISCSITargetHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.FormValue("name") + iqn := r.FormValue("iqn") + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := 
r.Context().Value(ContextKey("user.role")).(string) + if a.ISCSISvc == nil { + http.Error(w, "no iscsi service", http.StatusInternalServerError) + return + } + id, err := a.ISCSISvc.CreateTarget(r.Context(), user, role, name, iqn) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + data := map[string]any{"ID": id, "Name": name} + if err := templates.ExecuteTemplate(w, "job_row", data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// CreateISCSILUNHandler handles creating a LUN for a target +func (a *App) CreateISCSILUNHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + targetID := r.FormValue("target_id") + zvol := r.FormValue("zvol") + size := r.FormValue("size") + blocksize := 512 + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if a.ISCSISvc == nil { + http.Error(w, "no iscsi service", http.StatusInternalServerError) + return + } + id, err := a.ISCSISvc.CreateLUN(r.Context(), user, role, targetID, zvol, size, blocksize) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + data := map[string]any{"JobID": id, "Name": zvol} + if err := templates.ExecuteTemplate(w, "job_row", data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// DeleteISCSILUNHandler deletes a LUN with optional 'force' param +func (a *App) DeleteISCSILUNHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + id := r.FormValue("id") + force := r.FormValue("force") == "1" || r.FormValue("force") == "true" + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if a.ISCSISvc == nil { + http.Error(w, "no iscsi 
service", http.StatusInternalServerError) + return + } + if err := a.ISCSISvc.DeleteLUN(r.Context(), user, role, id, force); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) +} + +// AddISCSIPortalHandler configures a portal for a target +func (a *App) AddISCSIPortalHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + targetID := r.FormValue("target_id") + address := r.FormValue("address") + // default port 3260 + port := 3260 + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if a.ISCSISvc == nil { + http.Error(w, "no iscsi service", http.StatusInternalServerError) + return + } + id, err := a.ISCSISvc.AddPortal(r.Context(), user, role, targetID, address, port) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + data := map[string]any{"ID": id} + if err := templates.ExecuteTemplate(w, "job_row", data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// AddISCSIInitiatorHandler adds an initiator to an IQN ACL +func (a *App) AddISCSIInitiatorHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + targetID := r.FormValue("target_id") + initiator := r.FormValue("initiator_iqn") + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if a.ISCSISvc == nil { + http.Error(w, "no iscsi service", http.StatusInternalServerError) + return + } + id, err := a.ISCSISvc.AddInitiator(r.Context(), user, role, targetID, initiator) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + data := map[string]any{"ID": id} + if err := templates.ExecuteTemplate(w, "job_row", 
data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// UnmapISCSILUNHandler performs the 'drain' step to unmap the LUN +func (a *App) UnmapISCSILUNHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + id := r.FormValue("id") + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if a.ISCSISvc == nil { + http.Error(w, "no iscsi service", http.StatusInternalServerError) + return + } + if err := a.ISCSISvc.UnmapLUN(r.Context(), user, role, id); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + w.WriteHeader(http.StatusNoContent) +} + +// HXSmbHandler renders SMB shares partial +func (a *App) HXSmbHandler(w http.ResponseWriter, r *http.Request) { + shares := []domain.Share{} + if a.ShareSvc != nil { + shares, _ = a.ShareSvc.ListSMB(r.Context()) + } + if err := templates.ExecuteTemplate(w, "hx_smb_shares", shares); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// CreateSMBHandler handles SMB creation (HTMX) +func (a *App) CreateSMBHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + name := r.FormValue("name") + path := r.FormValue("path") + readOnly := r.FormValue("read_only") == "1" || r.FormValue("read_only") == "true" + allowedUsersRaw := r.FormValue("allowed_users") + var allowed []string + if allowedUsersRaw != "" { + allowed = strings.Split(allowedUsersRaw, ",") + } + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if a.ShareSvc == nil { + http.Error(w, "no share service", http.StatusInternalServerError) + return + } + id, err := a.ShareSvc.CreateSMB(r.Context(), user, role, name, path, readOnly, allowed) 
+ if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + data := map[string]any{"JobID": id, "Name": name, "Status": "queued"} + if err := templates.ExecuteTemplate(w, "job_row", data); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +// DeleteSMBHandler handles SMB deletion +func (a *App) DeleteSMBHandler(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, "bad request", http.StatusBadRequest) + return + } + id := r.FormValue("id") + user, _ := r.Context().Value(ContextKey("user")).(string) + role, _ := r.Context().Value(ContextKey("user.role")).(string) + if a.ShareSvc == nil { + http.Error(w, "no share service", http.StatusInternalServerError) + return + } + if err := a.ShareSvc.DeleteSMB(r.Context(), user, role, id); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + shares, _ := a.ShareSvc.ListSMB(r.Context()) + if err := templates.ExecuteTemplate(w, "hx_smb_shares", shares); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} diff --git a/internal/http/handlers_test.go b/internal/http/handlers_test.go index cc33e77..6b1a476 100644 --- a/internal/http/handlers_test.go +++ b/internal/http/handlers_test.go @@ -43,3 +43,81 @@ func TestCreatePoolHandler(t *testing.T) { t.Fatalf("expected job_id in response") } } + +func TestSharesNFSHandler(t *testing.T) { + m := &mock.MockSharesService{} + app := &App{DB: &sql.DB{}, ShareSvc: m} + req := httptest.NewRequest(http.MethodGet, "/shares/nfs", nil) + w := httptest.NewRecorder() + app.SharesNFSHandler(w, req) + if w.Code != http.StatusOK { + t.Fatalf("expected 200, got %d; body: %s", w.Code, w.Body.String()) + } +} + +func TestCreateNFSHandler(t *testing.T) { + m := &mock.MockSharesService{} + app := &App{DB: &sql.DB{}, ShareSvc: m} + form := "name=data&path=tank/ds&options={}" // simple form body + req := httptest.NewRequest(http.MethodPost, 
"/shares/nfs/create", strings.NewReader(form)) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("X-Auth-User", "admin") + req.Header.Set("X-Auth-Role", "admin") + w := httptest.NewRecorder() + app.CreateNFSHandler(w, req) + if w.Code != http.StatusOK { + t.Fatalf("expected 200, got %d; body: %s", w.Code, w.Body.String()) + } +} + +func TestNFSStatusHandler(t *testing.T) { + m := &mock.MockSharesService{} + app := &App{DB: &sql.DB{}, ShareSvc: m} + req := httptest.NewRequest(http.MethodGet, "/api/shares/nfs/status", nil) + w := httptest.NewRecorder() + app.NFSStatusHandler(w, req) + if w.Code != http.StatusOK { + t.Fatalf("expected 200, got %d; body: %s", w.Code, w.Body.String()) + } +} + +func TestSharesSMBHandler(t *testing.T) { + m := &mock.MockSharesService{} + app := &App{DB: &sql.DB{}, ShareSvc: m} + req := httptest.NewRequest(http.MethodGet, "/shares/smb", nil) + w := httptest.NewRecorder() + app.SharesSMBHandler(w, req) + if w.Code != http.StatusOK { + t.Fatalf("expected 200, got %d; body: %s", w.Code, w.Body.String()) + } +} + +func TestCreateSMBHandler(t *testing.T) { + m := &mock.MockSharesService{} + app := &App{DB: &sql.DB{}, ShareSvc: m} + form := "name=smb1&path=tank/ds&allowed_users=user1,user2&read_only=1" + req := httptest.NewRequest(http.MethodPost, "/shares/smb/create", strings.NewReader(form)) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("X-Auth-User", "admin") + req.Header.Set("X-Auth-Role", "admin") + w := httptest.NewRecorder() + app.CreateSMBHandler(w, req) + if w.Code != http.StatusOK { + t.Fatalf("expected 200, got %d; body: %s", w.Code, w.Body.String()) + } +} + +func TestDeleteSMBHandler(t *testing.T) { + m := &mock.MockSharesService{} + app := &App{DB: &sql.DB{}, ShareSvc: m} + form := "id=smb-1" + req := httptest.NewRequest(http.MethodPost, "/shares/smb/delete", strings.NewReader(form)) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + 
req.Header.Set("X-Auth-User", "admin") + req.Header.Set("X-Auth-Role", "admin") + w := httptest.NewRecorder() + app.DeleteSMBHandler(w, req) + if w.Code != http.StatusOK { + t.Fatalf("expected 200, got %d; body: %s", w.Code, w.Body.String()) + } +} diff --git a/internal/http/middleware.go b/internal/http/middleware.go index 50645fe..ff12ca2 100644 --- a/internal/http/middleware.go +++ b/internal/http/middleware.go @@ -2,9 +2,14 @@ package http import ( "context" + "crypto/rand" + "encoding/base64" "log" "net/http" + "strings" "time" + + "github.com/example/storage-appliance/internal/auth" ) // ContextKey used to store values in context @@ -12,6 +17,9 @@ type ContextKey string const ( ContextKeyRequestID ContextKey = "request-id" + ContextKeyUser ContextKey = "user" + ContextKeyUserID ContextKey = "user.id" + ContextKeySession ContextKey = "session" ) // RequestID middleware sets a request ID in headers and request context @@ -30,49 +38,170 @@ func Logging(next http.Handler) http.Handler { }) } -// Auth middleware placeholder to authenticate users -func Auth(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Basic dev auth: read X-Auth-User; in real world, validate session/jwt - username := r.Header.Get("X-Auth-User") - if username == "" { - username = "anonymous" - } - // Role hint: header X-Auth-Role (admin/operator/viewer) - role := r.Header.Get("X-Auth-Role") - if role == "" { - if username == "admin" { - role = "admin" - } else { - role = "viewer" +// AuthMiddleware creates an auth middleware that uses the provided App +func AuthMiddleware(app *App) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Skip auth for login and public routes + if strings.HasPrefix(r.URL.Path, "/login") || strings.HasPrefix(r.URL.Path, "/static") || r.URL.Path == "/healthz" || r.URL.Path == "/metrics" { + 
next.ServeHTTP(w, r) + return } - } - ctx := context.WithValue(r.Context(), ContextKey("user"), username) - ctx = context.WithValue(ctx, ContextKey("user.role"), role) - next.ServeHTTP(w, r.WithContext(ctx)) - }) + + // Get session token from cookie + cookie, err := r.Cookie(auth.SessionCookieName) + if err != nil { + // No session, redirect to login + if r.Header.Get("HX-Request") == "true" { + w.Header().Set("HX-Redirect", "/login") + w.WriteHeader(http.StatusUnauthorized) + } else { + http.Redirect(w, r, "/login", http.StatusFound) + } + return + } + + // Validate session + sessionStore := auth.NewSessionStore(app.DB) + session, err := sessionStore.GetSession(r.Context(), cookie.Value) + if err != nil { + // Invalid session, redirect to login + if r.Header.Get("HX-Request") == "true" { + w.Header().Set("HX-Redirect", "/login") + w.WriteHeader(http.StatusUnauthorized) + } else { + http.Redirect(w, r, "/login", http.StatusFound) + } + return + } + + // Get user + userStore := auth.NewUserStore(app.DB) + user, err := userStore.GetUserByID(r.Context(), session.UserID) + if err != nil { + http.Error(w, "user not found", http.StatusUnauthorized) + return + } + + // Store user info in context + ctx := context.WithValue(r.Context(), ContextKeyUser, user.Username) + ctx = context.WithValue(ctx, ContextKeyUserID, user.ID) + ctx = context.WithValue(ctx, ContextKeySession, session) + + next.ServeHTTP(w, r.WithContext(ctx)) + }) + } } -// CSRF middleware placeholder (reads X-CSRF-Token) -func CSRFMiddleware(next http.Handler) http.Handler { +// Auth is a legacy wrapper for backward compatibility +func Auth(next http.Handler) http.Handler { + // This will be replaced by AuthMiddleware in router + return next +} + +// RequireAuth middleware ensures user is authenticated (alternative to Auth that doesn't redirect) +func RequireAuth(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // TODO: check and enforce CSRF tokens 
for mutating requests + userID := r.Context().Value(ContextKeyUserID) + if userID == nil { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } next.ServeHTTP(w, r) }) } -// RBAC middleware placeholder -func RBAC(permission string) func(http.Handler) http.Handler { +// CSRFMiddleware creates a CSRF middleware that uses the provided App +func CSRFMiddleware(app *App) func(http.Handler) http.Handler { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Try to read role from context and permit admin always - role := r.Context().Value(ContextKey("user.role")) - if role == "admin" { + // For safe methods, ensure CSRF token cookie exists + if r.Method == "GET" || r.Method == "HEAD" || r.Method == "OPTIONS" { + // Set CSRF token cookie if it doesn't exist + if cookie, err := r.Cookie("csrf_token"); err != nil || cookie.Value == "" { + token := generateCSRFToken() + http.SetCookie(w, &http.Cookie{ + Name: "csrf_token", + Value: token, + Path: "/", + HttpOnly: false, // Needed for HTMX to read it + Secure: false, + SameSite: http.SameSiteStrictMode, + MaxAge: 86400, // 24 hours + }) + } next.ServeHTTP(w, r) return } - // For now, only admin is permitted; add permission checks here + + // Get CSRF token from header (HTMX compatible) or form + token := r.Header.Get("X-CSRF-Token") + if token == "" { + token = r.FormValue("csrf_token") + } + + // Get expected token from cookie + expectedToken := getCSRFToken(r) + if token == "" || token != expectedToken { + http.Error(w, "invalid CSRF token", http.StatusForbidden) + return + } + next.ServeHTTP(w, r) }) } } + +// getCSRFToken retrieves or generates a CSRF token for the session +func getCSRFToken(r *http.Request) string { + // Try to get from cookie first + cookie, err := r.Cookie("csrf_token") + if err == nil && cookie.Value != "" { + return cookie.Value + } + + // Generate new token (will be set in cookie by handler) + return 
generateCSRFToken() +} + +func generateCSRFToken() string { + b := make([]byte, 32) + rand.Read(b) + return base64.URLEncoding.EncodeToString(b) +} + +// RequirePermission creates a permission check middleware +func RequirePermission(app *App, permission string) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + userID := r.Context().Value(ContextKeyUserID) + if userID == nil { + http.Error(w, "unauthorized", http.StatusUnauthorized) + return + } + + rbacStore := auth.NewRBACStore(app.DB) + hasPermission, err := rbacStore.UserHasPermission(r.Context(), userID.(string), permission) + if err != nil { + log.Printf("permission check error: %v", err) + http.Error(w, "internal error", http.StatusInternalServerError) + return + } + + if !hasPermission { + http.Error(w, "forbidden", http.StatusForbidden) + return + } + + next.ServeHTTP(w, r) + }) + } +} + +// RBAC middleware (kept for backward compatibility) +func RBAC(permission string) func(http.Handler) http.Handler { + // This will be replaced by RequirePermission in router + return func(next http.Handler) http.Handler { + return next + } +} diff --git a/internal/http/router.go b/internal/http/router.go index 1df215f..b4026f7 100644 --- a/internal/http/router.go +++ b/internal/http/router.go @@ -10,19 +10,76 @@ import ( func RegisterRoutes(r *chi.Mux, app *App) { r.Use(Logging) r.Use(RequestID) - r.Use(Auth) + r.Use(CSRFMiddleware(app)) + r.Use(AuthMiddleware(app)) + + // Public routes + r.Get("/login", app.LoginHandler) + r.Post("/login", app.LoginHandler) + r.Post("/logout", app.LogoutHandler) + r.Get("/healthz", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) + r.Get("/metrics", app.MetricsHandler) // Prometheus metrics (public for scraping) + + // Protected routes r.Get("/", app.DashboardHandler) r.Get("/dashboard", app.DashboardHandler) - r.Get("/healthz", func(w 
http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) + r.Get("/monitoring", app.MonitoringHandler) + r.Get("/hx/monitoring", app.HXMonitoringHandler) + r.Get("/hx/monitoring/group", app.HXMonitoringGroupHandler) + // API namespace r.Route("/api", func(r chi.Router) { r.Get("/pools", app.PoolsHandler) - r.With(RBAC("storage.pool.create")).Post("/pools", app.CreatePoolHandler) // create a pool -> creates a job + r.With(RequirePermission(app, "storage.pool.create")).Post("/pools", app.CreatePoolHandler) // create a pool -> creates a job + r.Get("/pools/{pool}/datasets", app.PoolDatasetsHandler) + r.With(RequirePermission(app, "storage.dataset.create")).Post("/datasets", app.CreateDatasetHandler) + r.With(RequirePermission(app, "storage.dataset.snapshot")).Post("/datasets/{dataset}/snapshot", app.SnapshotHandler) + r.With(RequirePermission(app, "storage.pool.scrub")).Post("/pools/{pool}/scrub", app.PoolScrubHandler) r.Get("/jobs", app.JobsHandler) + r.Get("/shares/nfs/status", app.NFSStatusHandler) }) + r.Get("/storage", app.StorageHandler) + r.Get("/shares/nfs", app.SharesNFSHandler) + r.Get("/hx/shares/nfs", app.HXNFSHandler) + r.With(RequirePermission(app, "shares.nfs.create")).Post("/shares/nfs/create", app.CreateNFSHandler) + r.With(RequirePermission(app, "shares.nfs.delete")).Post("/shares/nfs/delete", app.DeleteNFSHandler) + r.Get("/shares/smb", app.SharesSMBHandler) + r.Get("/hx/shares/smb", app.HXSmbHandler) + r.With(RequirePermission(app, "shares.smb.create")).Post("/shares/smb/create", app.CreateSMBHandler) + r.With(RequirePermission(app, "shares.smb.delete")).Post("/shares/smb/delete", app.DeleteSMBHandler) r.Get("/hx/pools", app.HXPoolsHandler) - r.Post("/storage/pool/create", app.StorageCreatePoolHandler) + r.With(RequirePermission(app, "storage.pool.create")).Post("/storage/pool/create", app.StorageCreatePoolHandler) r.Get("/jobs/{id}", app.JobPartialHandler) + + // iSCSI routes + r.Get("/iscsi", app.ISCSIHandler) + 
r.Get("/api/iscsi/hx_targets", app.HXISCSIHandler)
+	r.Get("/api/iscsi/hx_luns/{target}", app.HXISCLUNsHandler)
+	r.Get("/api/iscsi/target/{target}", app.ISCSITargetInfoHandler)
+	r.With(RequirePermission(app, "iscsi.target.create")).Post("/api/iscsi/create_target", app.CreateISCSITargetHandler)
+	r.With(RequirePermission(app, "iscsi.lun.create")).Post("/api/iscsi/create_lun", app.CreateISCSILUNHandler)
+	r.With(RequirePermission(app, "iscsi.lun.delete")).Post("/api/iscsi/delete_lun", app.DeleteISCSILUNHandler)
+	r.With(RequirePermission(app, "iscsi.lun.unmap")).Post("/api/iscsi/unmap_lun", app.UnmapISCSILUNHandler)
+	r.With(RequirePermission(app, "iscsi.portal.create")).Post("/api/iscsi/add_portal", app.AddISCSIPortalHandler)
+	r.With(RequirePermission(app, "iscsi.initiator.create")).Post("/api/iscsi/add_initiator", app.AddISCSIInitiatorHandler)
+
+	// Admin routes.
+	// NOTE(review): chi panics if Use is called after a route has been
+	// registered on the same mux, and a second Use would also stack the
+	// users.manage check onto the role routes — scope each permission to
+	// its own sub-group instead.
+	r.Route("/admin", func(r chi.Router) {
+		r.Group(func(r chi.Router) {
+			r.Use(RequirePermission(app, "users.manage"))
+			r.Get("/users", app.UsersHandler)
+			r.Get("/hx/users", app.HXUsersHandler)
+			r.Post("/users/create", app.CreateUserHandler)
+			r.Post("/users/{id}/delete", app.DeleteUserHandler)
+			r.Post("/users/{id}/roles", app.UpdateUserRolesHandler)
+		})
+
+		r.Group(func(r chi.Router) {
+			r.Use(RequirePermission(app, "roles.manage"))
+			r.Get("/roles", app.RolesHandler)
+			r.Get("/hx/roles", app.HXRolesHandler)
+			r.Post("/roles/create", app.CreateRoleHandler)
+			r.Post("/roles/{id}/delete", app.DeleteRoleHandler)
+			r.Post("/roles/{id}/permissions", app.UpdateRolePermissionsHandler)
+		})
+	})
+
 	r.Get("/static/*", StaticHandler)
 }
diff --git a/internal/infra/crypto/crypto.go b/internal/infra/crypto/crypto.go
new file mode 100644
index 0000000..4b1c3af
--- /dev/null
+++ b/internal/infra/crypto/crypto.go
@@ -0,0 +1,59 @@
+package crypto
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/rand"
+	"encoding/base64"
+	"errors"
+	"io"
+)
+
+// Encrypt uses AES-GCM with a 32 byte key
+func Encrypt(key []byte, plaintext string) (string, error) {
+	if len(key) != 32 {
+		return
"", errors.New("invalid key length") + } + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + aesgcm, err := cipher.NewGCM(block) + if err != nil { + return "", err + } + nonce := make([]byte, aesgcm.NonceSize()) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return "", err + } + ct := aesgcm.Seal(nonce, nonce, []byte(plaintext), nil) + return base64.StdEncoding.EncodeToString(ct), nil +} + +func Decrypt(key []byte, cipherText string) (string, error) { + if len(key) != 32 { + return "", errors.New("invalid key length") + } + data, err := base64.StdEncoding.DecodeString(cipherText) + if err != nil { + return "", err + } + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + aesgcm, err := cipher.NewGCM(block) + if err != nil { + return "", err + } + nonceSize := aesgcm.NonceSize() + if len(data) < nonceSize { + return "", errors.New("ciphertext too short") + } + nonce, ct := data[:nonceSize], data[nonceSize:] + pt, err := aesgcm.Open(nil, nonce, ct, nil) + if err != nil { + return "", err + } + return string(pt), nil +} diff --git a/internal/infra/iscsi/iscsi.go b/internal/infra/iscsi/iscsi.go new file mode 100644 index 0000000..d7481d4 --- /dev/null +++ b/internal/infra/iscsi/iscsi.go @@ -0,0 +1,121 @@ +package iscsi + +import ( + "context" + "fmt" + "time" + + "github.com/example/storage-appliance/internal/infra/osexec" +) + +// Adapter wraps targetcli invocations for LIO (targetcli) management. 
+type Adapter struct { + Runner osexec.Runner +} + +func NewAdapter(runner osexec.Runner) *Adapter { return &Adapter{Runner: runner} } + +// CreateTarget creates an IQN target via targetcli +func (a *Adapter) CreateTarget(ctx context.Context, iqn string) error { + // Use a short timeout for cli interactions + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + _, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "targetcli", "/iscsi", "create", iqn) + if err != nil { + return fmt.Errorf("targetcli create target failed: %v %s", err, stderr) + } + if code != 0 { + return fmt.Errorf("targetcli create returned: %s", stderr) + } + return nil +} + +// CreateBackstore creates a block backstore for a zvol device. +func (a *Adapter) CreateBackstore(ctx context.Context, name, devpath string) error { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + // targetcli syntax: /backstores/block create + _, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "targetcli", "/backstores/block", "create", name, devpath) + if err != nil { + return fmt.Errorf("targetcli create backstore failed: %v %s", err, stderr) + } + if code != 0 { + return fmt.Errorf("targetcli backstore returned: %s", stderr) + } + return nil +} + +// CreateLUN maps backstore into target's TPG1 LUNs +func (a *Adapter) CreateLUN(ctx context.Context, iqn, backstoreName string, lunID int) error { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + bsPath := fmt.Sprintf("/backstores/block/%s", backstoreName) + tpgPath := fmt.Sprintf("/iscsi/%s/tpg1/luns", iqn) + _, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "targetcli", tpgPath, "create", bsPath) + if err != nil { + return fmt.Errorf("targetcli create lun failed: %v %s", err, stderr) + } + if code != 0 { + return fmt.Errorf("targetcli create lun returned: %s", stderr) + } + return nil +} + +// DeleteLUN unmaps a LUN from a target; if the LUN is mapped fail unless 
forced. +func (a *Adapter) DeleteLUN(ctx context.Context, iqn string, lunID int) error { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + tpgPath := fmt.Sprintf("/iscsi/%s/tpg1/luns", iqn) + // delete by numeric id + _, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "targetcli", tpgPath, "delete", fmt.Sprintf("%d", lunID)) + if err != nil { + return fmt.Errorf("targetcli delete lun failed: %v %s", err, stderr) + } + if code != 0 { + return fmt.Errorf("targetcli delete lun returned: %s", stderr) + } + return nil +} + +func (a *Adapter) AddPortal(ctx context.Context, iqn, address string, port int) error { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + tpgPath := fmt.Sprintf("/iscsi/%s/tpg1/portals", iqn) + addr := fmt.Sprintf("%s:%d", address, port) + _, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "targetcli", tpgPath, "create", addr) + if err != nil { + return fmt.Errorf("targetcli add portal failed: %v %s", err, stderr) + } + if code != 0 { + return fmt.Errorf("targetcli add portal returned: %s", stderr) + } + return nil +} + +func (a *Adapter) AddACL(ctx context.Context, iqn, initiator string) error { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + aclPath := fmt.Sprintf("/iscsi/%s/tpg1/acls", iqn) + _, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "targetcli", aclPath, "create", initiator) + if err != nil { + return fmt.Errorf("targetcli add acl failed: %v %s", err, stderr) + } + if code != 0 { + return fmt.Errorf("targetcli add acl returned: %s", stderr) + } + return nil +} + +// Save writes the configuration to storage (saving targetcli config) +func (a *Adapter) Save(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + _, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "targetcli", "saveconfig") + if err != nil { + return fmt.Errorf("targetcli save failed: %v %s", err, stderr) 
+ } + if code != 0 { + return fmt.Errorf("targetcli save returned: %s", stderr) + } + return nil +} diff --git a/internal/infra/minio/minio.go b/internal/infra/minio/minio.go new file mode 100644 index 0000000..b38879b --- /dev/null +++ b/internal/infra/minio/minio.go @@ -0,0 +1,122 @@ +package minio + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/example/storage-appliance/internal/infra/osexec" +) + +type Adapter struct { + Runner osexec.Runner + EnvPath string +} + +func NewAdapter(runner osexec.Runner, envPath string) *Adapter { + if envPath == "" { + envPath = "/etc/minio/minio.env" + } + return &Adapter{Runner: runner, EnvPath: envPath} +} + +type Settings struct { + AccessKey string `json:"access_key"` + SecretKey string `json:"secret_key"` + DataPath string `json:"data_path"` + Port int `json:"port"` + TLS bool `json:"tls"` +} + +// WriteEnv writes environment file used by MinIO service +func (a *Adapter) WriteEnv(ctx context.Context, s Settings) error { + dir := filepath.Dir(a.EnvPath) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + // env lines + lines := []string{ + fmt.Sprintf("MINIO_ROOT_USER=%s", s.AccessKey), + fmt.Sprintf("MINIO_ROOT_PASSWORD=%s", s.SecretKey), + fmt.Sprintf("MINIO_VOLUMES=%s", s.DataPath), + } + if s.Port != 0 { + lines = append(lines, fmt.Sprintf("MINIO_OPTS=--address :%d", s.Port)) + } + content := strings.Join(lines, "\n") + "\n" + tmp := filepath.Join(dir, ".minio.env.tmp") + if err := os.WriteFile(tmp, []byte(content), 0600); err != nil { + return err + } + if err := os.Rename(tmp, a.EnvPath); err != nil { + return err + } + return nil +} + +// Reload reloads minio service to pick up new env; prefer systemctl reload +func (a *Adapter) Reload(ctx context.Context) error { + _, stderr, _, err := osexec.ExecWithRunner(a.Runner, ctx, "systemctl", "reload", "minio") + if err == nil { + return nil + } + // fallback to restart + _, stderr, _, err = 
osexec.ExecWithRunner(a.Runner, ctx, "systemctl", "restart", "minio")
+	if err != nil {
+		return fmt.Errorf("minio reload/restart failed: %s", stderr)
+	}
+	return nil
+}
+
+// ConfigureMC configures mc alias to point to the MinIO service using given settings
+func (a *Adapter) ConfigureMC(ctx context.Context, alias string, settings Settings) error {
+	endpoint := fmt.Sprintf("http://127.0.0.1:%d", settings.Port)
+	if settings.TLS {
+		endpoint = fmt.Sprintf("https://127.0.0.1:%d", settings.Port)
+	}
+	// mc [--insecure] alias set <alias> <endpoint> <access-key> <secret-key>.
+	// --insecure only applies to plain-HTTP setups; appending an empty
+	// string for TLS would be handed to mc as a literal empty argument.
+	args := []string{}
+	if !settings.TLS {
+		args = append(args, "--insecure")
+	}
+	args = append(args, "alias", "set", alias, endpoint, settings.AccessKey, settings.SecretKey)
+	_, stderr, _, err := osexec.ExecWithRunner(a.Runner, ctx, "mc", args...)
+	if err != nil {
+		return fmt.Errorf("mc alias set failed: %s", stderr)
+	}
+	return nil
+}
+
+// ListBuckets uses mc to list buckets via alias
+func (a *Adapter) ListBuckets(ctx context.Context, alias string) ([]string, error) {
+	out, stderr, _, err := osexec.ExecWithRunner(a.Runner, ctx, "mc", "ls", "--json", alias)
+	if err != nil {
+		return nil, fmt.Errorf("mc ls failed: %s", stderr)
+	}
+	// parse JSON lines, each contains a 'key' or 'name' - in mc, `ls --json` returns 'key'
+	var buckets []string
+	lines := strings.Split(strings.TrimSpace(out), "\n")
+	for _, l := range lines {
+		var obj map[string]any
+		if err := json.Unmarshal([]byte(l), &obj); err != nil {
+			continue
+		}
+		if otype, ok := obj["type"].(string); ok && otype == "bucket" {
+			if name, ok := obj["key"].(string); ok {
+				buckets = append(buckets, name)
+			}
+		}
+	}
+	return buckets, nil
+}
+
+// CreateBucket uses mc to create a new bucket at <alias>/<name>
+func (a *Adapter) CreateBucket(ctx context.Context, alias, name string) error {
+	_, stderr, _, err := osexec.ExecWithRunner(a.Runner, ctx, "mc", "mb", alias+"/"+name)
+	if err != nil {
+		return fmt.Errorf("mc mb failed: %s", stderr)
+	}
+	return nil
+}
diff --git a/internal/infra/nfs/nfs.go b/internal/infra/nfs/nfs.go
new
file mode 100644
index 0000000..e3d3823
--- /dev/null
+++ b/internal/infra/nfs/nfs.go
@@ -0,0 +1,74 @@
+package nfs
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/example/storage-appliance/internal/domain"
+	"github.com/example/storage-appliance/internal/infra/osexec"
+)
+
+type Adapter struct {
+	Runner      osexec.Runner
+	ExportsPath string
+}
+
+func NewAdapter(runner osexec.Runner, exportsPath string) *Adapter {
+	if exportsPath == "" {
+		exportsPath = "/etc/exports"
+	}
+	return &Adapter{Runner: runner, ExportsPath: exportsPath}
+}
+
+// RenderExports renders the given shares into /etc/exports atomically
+func (a *Adapter) RenderExports(ctx context.Context, shares []domain.Share) error {
+	var lines []string
+	for _, s := range shares {
+		// Only NFS shares belong in /etc/exports; skip SMB and other
+		// share types instead of exporting them too.
+		if s.Type != "nfs" {
+			continue
+		}
+		// exports(5) format is "<path> <client>(<options>)"; without an
+		// explicit client, exportfs parses the options string as a
+		// hostname. Export to all clients with the default options.
+		opts := "rw,sync,no_root_squash"
+		lines = append(lines, fmt.Sprintf("%s *(%s)", s.Path, opts))
+	}
+	content := strings.Join(lines, "\n") + "\n"
+
+	dir := filepath.Dir(a.ExportsPath)
+	tmp := filepath.Join(dir, ".exports.tmp")
+	if err := os.WriteFile(tmp, []byte(content), 0644); err != nil {
+		return err
+	}
+	// atomic rename
+	if err := os.Rename(tmp, a.ExportsPath); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Apply runs exportfs -ra to apply exports
+func (a *Adapter) Apply(ctx context.Context) error {
+	_, stderr, _, err := osexec.ExecWithRunner(a.Runner, ctx, "exportfs", "-ra")
+	if err != nil {
+		return fmt.Errorf("exportfs failed: %s", stderr)
+	}
+	return nil
+}
+
+// Status checks systemd for nfs server status
+func (a *Adapter) Status(ctx context.Context) (string, error) {
+	// try common unit names
+	names := []string{"nfs-server", "nfs-kernel-server"}
+	for _, n := range names {
+		out, _, _, err := osexec.ExecWithRunner(a.Runner, ctx, "systemctl", "is-active", n)
+		if err == nil
&& strings.TrimSpace(out) != "" { + return strings.TrimSpace(out), nil + } + } + return "unknown", nil +} diff --git a/internal/infra/samba/samba.go b/internal/infra/samba/samba.go new file mode 100644 index 0000000..a826fd0 --- /dev/null +++ b/internal/infra/samba/samba.go @@ -0,0 +1,87 @@ +package samba + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/example/storage-appliance/internal/domain" + "github.com/example/storage-appliance/internal/infra/osexec" +) + +type Adapter struct { + Runner osexec.Runner + IncludePath string +} + +func NewAdapter(runner osexec.Runner, includePath string) *Adapter { + if includePath == "" { + includePath = "/etc/samba/smb.conf.d/appliance.conf" + } + return &Adapter{Runner: runner, IncludePath: includePath} +} + +// RenderConf writes the Samba include file for appliance-managed shares +func (a *Adapter) RenderConf(ctx context.Context, shares []domain.Share) error { + var lines []string + lines = append(lines, "# Appliance-managed SMB share configuration") + for _, s := range shares { + if s.Type != "smb" { + continue + } + opts := []string{"path = " + s.Path} + // parse options if stored in s.Name or s.Config; fallback to broad default + // s.Config may have read-only or allowed users + if ro, ok := s.Config["read_only"]; ok && ro == "true" { + opts = append(opts, "read only = yes") + } else { + opts = append(opts, "read only = no") + } + if users, ok := s.Config["allowed_users"]; ok { + opts = append(opts, "valid users = "+users) + } + // write section + lines = append(lines, fmt.Sprintf("[%s]", s.Name)) + for _, l := range opts { + lines = append(lines, l) + } + lines = append(lines, "") + } + content := strings.Join(lines, "\n") + "\n" + dir := filepath.Dir(a.IncludePath) + tmp := filepath.Join(dir, ".appliance.smb.tmp") + if err := os.WriteFile(tmp, []byte(content), 0644); err != nil { + return err + } + if err := os.Rename(tmp, a.IncludePath); err != nil { + return err + } + return nil +} 
+ +// Reload reloads or restarts samba to apply config +func (a *Adapter) Reload(ctx context.Context) error { + // try to reload first + _, stderr, _, err := osexec.ExecWithRunner(a.Runner, ctx, "systemctl", "reload", "smbd") + if err == nil { + return nil + } + // fallback to restart + _, stderr, _, err = osexec.ExecWithRunner(a.Runner, ctx, "systemctl", "restart", "smbd") + if err != nil { + return fmt.Errorf("samba reload/restart failed: %s", stderr) + } + return nil +} + +// CreateSambaUser optional: stub for creating a local samba user mapped to appliance user +func (a *Adapter) CreateSambaUser(ctx context.Context, user, password string) error { + // This is optional - we use smbpasswd command in production; stub for now + _, stderr, _, err := osexec.ExecWithRunner(a.Runner, ctx, "smbpasswd", "-a", user) + if err != nil { + return fmt.Errorf("smbpasswd failed: %s", stderr) + } + return nil +} diff --git a/internal/infra/sqlite/db/migrations.go b/internal/infra/sqlite/db/migrations.go index 8630ad4..c95d0fc 100644 --- a/internal/infra/sqlite/db/migrations.go +++ b/internal/infra/sqlite/db/migrations.go @@ -3,8 +3,9 @@ package db import ( "context" "database/sql" - "golang.org/x/crypto/bcrypt" "log" + + "github.com/example/storage-appliance/internal/auth" ) // MigrateAndSeed performs a very small migration set and seeds an admin user @@ -19,12 +20,37 @@ func MigrateAndSeed(ctx context.Context, db *sql.DB) error { `CREATE TABLE IF NOT EXISTS users (id TEXT PRIMARY KEY, username TEXT NOT NULL UNIQUE, password_hash TEXT, role TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP);`, `CREATE TABLE IF NOT EXISTS pools (name TEXT PRIMARY KEY, guid TEXT, health TEXT, capacity TEXT);`, `CREATE TABLE IF NOT EXISTS jobs (id TEXT PRIMARY KEY, type TEXT, status TEXT, progress INTEGER DEFAULT 0, owner TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP, updated_at DATETIME);`, + `CREATE TABLE IF NOT EXISTS shares (id TEXT PRIMARY KEY, name TEXT, path TEXT, type TEXT, options 
TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP);`, + `CREATE TABLE IF NOT EXISTS object_storage (id TEXT PRIMARY KEY, name TEXT, access_key TEXT, secret_key TEXT, data_path TEXT, port INTEGER, tls INTEGER DEFAULT 0, created_at DATETIME DEFAULT CURRENT_TIMESTAMP);`, + `CREATE TABLE IF NOT EXISTS buckets (id TEXT PRIMARY KEY, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP);`, + `CREATE TABLE IF NOT EXISTS iscsi_targets (id TEXT PRIMARY KEY, iqn TEXT NOT NULL UNIQUE, name TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP);`, + `CREATE TABLE IF NOT EXISTS iscsi_portals (id TEXT PRIMARY KEY, target_id TEXT NOT NULL, address TEXT NOT NULL, port INTEGER DEFAULT 3260, created_at DATETIME DEFAULT CURRENT_TIMESTAMP);`, + `CREATE TABLE IF NOT EXISTS iscsi_initiators (id TEXT PRIMARY KEY, target_id TEXT NOT NULL, initiator_iqn TEXT NOT NULL, created_at DATETIME DEFAULT CURRENT_TIMESTAMP);`, + `CREATE TABLE IF NOT EXISTS iscsi_luns (id TEXT PRIMARY KEY, target_id TEXT NOT NULL, lun_id INTEGER NOT NULL, zvol TEXT NOT NULL, size INTEGER, blocksize INTEGER, mapped INTEGER DEFAULT 0, created_at DATETIME DEFAULT CURRENT_TIMESTAMP);`, + // RBAC tables + `CREATE TABLE IF NOT EXISTS roles (id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP);`, + `CREATE TABLE IF NOT EXISTS permissions (id TEXT PRIMARY KEY, name TEXT NOT NULL UNIQUE, description TEXT, created_at DATETIME DEFAULT CURRENT_TIMESTAMP);`, + `CREATE TABLE IF NOT EXISTS role_permissions (role_id TEXT NOT NULL, permission_id TEXT NOT NULL, PRIMARY KEY (role_id, permission_id), FOREIGN KEY (role_id) REFERENCES roles(id) ON DELETE CASCADE, FOREIGN KEY (permission_id) REFERENCES permissions(id) ON DELETE CASCADE);`, + `CREATE TABLE IF NOT EXISTS user_roles (user_id TEXT NOT NULL, role_id TEXT NOT NULL, PRIMARY KEY (user_id, role_id), FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE, FOREIGN KEY (role_id) REFERENCES roles(id) ON DELETE 
CASCADE);`, + `CREATE TABLE IF NOT EXISTS sessions (id TEXT PRIMARY KEY, user_id TEXT NOT NULL, token TEXT NOT NULL UNIQUE, expires_at DATETIME NOT NULL, created_at DATETIME DEFAULT CURRENT_TIMESTAMP, FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE);`, + `CREATE INDEX IF NOT EXISTS idx_sessions_token ON sessions(token);`, + `CREATE INDEX IF NOT EXISTS idx_sessions_user_id ON sessions(user_id);`, + `CREATE INDEX IF NOT EXISTS idx_sessions_expires_at ON sessions(expires_at);`, } for _, s := range stmts { if _, err := tx.ExecContext(ctx, s); err != nil { return err } } + + // Enhance audit_events table if needed (add missing columns) + enhanceAuditTable(ctx, tx) + + // Seed default roles and permissions + if err := seedRolesAndPermissions(ctx, tx); err != nil { + return err + } + // Seed a default admin user if not exists var count int if err := tx.QueryRowContext(ctx, `SELECT COUNT(1) FROM users WHERE username = 'admin'`).Scan(&count); err != nil { @@ -32,13 +58,132 @@ func MigrateAndSeed(ctx context.Context, db *sql.DB) error { } if count == 0 { // note: simple seeded password: admin (do not use in prod) - pwHash, _ := bcrypt.GenerateFromPassword([]byte("admin"), bcrypt.DefaultCost) - if _, err := tx.ExecContext(ctx, `INSERT INTO users (id, username, password_hash, role) VALUES (?, 'admin', ?, 'admin')`, "admin", string(pwHash)); err != nil { + pwHash, err := auth.HashPassword("admin") + if err != nil { return err } + if _, err := tx.ExecContext(ctx, `INSERT INTO users (id, username, password_hash, role) VALUES (?, 'admin', ?, 'admin')`, "admin", pwHash); err != nil { + return err + } + // Assign admin role to admin user + var adminRoleID string + if err := tx.QueryRowContext(ctx, `SELECT id FROM roles WHERE name = 'admin'`).Scan(&adminRoleID); err == nil { + tx.ExecContext(ctx, `INSERT OR IGNORE INTO user_roles (user_id, role_id) VALUES (?, ?)`, "admin", adminRoleID) + } } if err := tx.Commit(); err != nil { return err } return nil } + +func 
enhanceAuditTable(ctx context.Context, tx *sql.Tx) { + // Check if columns exist and add them if missing + // SQLite doesn't support IF NOT EXISTS for ALTER TABLE, so we'll try-catch + columns := []struct { + name string + stmt string + }{ + {"actor", `ALTER TABLE audit_events ADD COLUMN actor TEXT;`}, + {"resource", `ALTER TABLE audit_events ADD COLUMN resource TEXT;`}, + {"payload_hash", `ALTER TABLE audit_events ADD COLUMN payload_hash TEXT;`}, + {"result", `ALTER TABLE audit_events ADD COLUMN result TEXT;`}, + {"client_ip", `ALTER TABLE audit_events ADD COLUMN client_ip TEXT;`}, + } + + for _, col := range columns { + _, err := tx.ExecContext(ctx, col.stmt) + if err != nil { + // Column might already exist, ignore error + log.Printf("Note: %s column may already exist: %v", col.name, err) + } + } +} + +func seedRolesAndPermissions(ctx context.Context, tx *sql.Tx) error { + // Seed default roles + roles := []struct { + id string + name string + description string + }{ + {"admin", "admin", "Administrator with full access"}, + {"operator", "operator", "Operator with limited administrative access"}, + {"viewer", "viewer", "Read-only access"}, + } + + for _, r := range roles { + _, err := tx.ExecContext(ctx, `INSERT OR IGNORE INTO roles (id, name, description) VALUES (?, ?, ?)`, r.id, r.name, r.description) + if err != nil { + return err + } + } + + // Seed default permissions + permissions := []struct { + id string + name string + description string + }{ + {"storage.pool.create", "storage.pool.create", "Create storage pools"}, + {"storage.pool.scrub", "storage.pool.scrub", "Scrub storage pools"}, + {"storage.dataset.create", "storage.dataset.create", "Create datasets"}, + {"storage.dataset.snapshot", "storage.dataset.snapshot", "Create snapshots"}, + {"shares.nfs.create", "shares.nfs.create", "Create NFS shares"}, + {"shares.nfs.delete", "shares.nfs.delete", "Delete NFS shares"}, + {"shares.smb.create", "shares.smb.create", "Create SMB shares"}, + 
{"shares.smb.delete", "shares.smb.delete", "Delete SMB shares"}, + {"iscsi.target.create", "iscsi.target.create", "Create iSCSI targets"}, + {"iscsi.lun.create", "iscsi.lun.create", "Create iSCSI LUNs"}, + {"iscsi.lun.delete", "iscsi.lun.delete", "Delete iSCSI LUNs"}, + {"iscsi.lun.unmap", "iscsi.lun.unmap", "Unmap iSCSI LUNs"}, + {"iscsi.portal.create", "iscsi.portal.create", "Add iSCSI portals"}, + {"iscsi.initiator.create", "iscsi.initiator.create", "Add iSCSI initiators"}, + {"users.manage", "users.manage", "Manage users"}, + {"roles.manage", "roles.manage", "Manage roles and permissions"}, + } + + for _, p := range permissions { + _, err := tx.ExecContext(ctx, `INSERT OR IGNORE INTO permissions (id, name, description) VALUES (?, ?, ?)`, p.id, p.name, p.description) + if err != nil { + return err + } + } + + // Assign all permissions to admin role + var adminRoleID string + if err := tx.QueryRowContext(ctx, `SELECT id FROM roles WHERE name = 'admin'`).Scan(&adminRoleID); err != nil { + return err + } + + for _, p := range permissions { + _, err := tx.ExecContext(ctx, `INSERT OR IGNORE INTO role_permissions (role_id, permission_id) VALUES (?, ?)`, adminRoleID, p.id) + if err != nil { + return err + } + } + + // Assign some permissions to operator role + var operatorRoleID string + if err := tx.QueryRowContext(ctx, `SELECT id FROM roles WHERE name = 'operator'`).Scan(&operatorRoleID); err == nil { + operatorPerms := []string{ + "storage.pool.create", + "storage.dataset.create", + "storage.dataset.snapshot", + "shares.nfs.create", + "shares.nfs.delete", + "shares.smb.create", + "shares.smb.delete", + "iscsi.target.create", + "iscsi.lun.create", + "iscsi.lun.delete", + "iscsi.portal.create", + "iscsi.initiator.create", + } + for _, permID := range operatorPerms { + tx.ExecContext(ctx, `INSERT OR IGNORE INTO role_permissions (role_id, permission_id) VALUES (?, ?)`, operatorRoleID, permID) + } + } + + return nil +} diff --git a/internal/infra/stubs/iscsi.go 
b/internal/infra/stubs/iscsi.go index ee663da..a3082fe 100644 --- a/internal/infra/stubs/iscsi.go +++ b/internal/infra/stubs/iscsi.go @@ -16,3 +16,28 @@ func (i *ISCSIAdapter) CreateLUN(ctx context.Context, target string, backstore s log.Printf("iscsi: CreateLUN target=%s backstore=%s lun=%d (stub)", target, backstore, lunID) return nil } + +func (i *ISCSIAdapter) CreateBackstore(ctx context.Context, name string, devpath string) error { + log.Printf("iscsi: CreateBackstore name=%s dev=%s (stub)", name, devpath) + return nil +} + +func (i *ISCSIAdapter) DeleteLUN(ctx context.Context, target string, lunID int) error { + log.Printf("iscsi: DeleteLUN target=%s lun=%d (stub)", target, lunID) + return nil +} + +func (i *ISCSIAdapter) AddPortal(ctx context.Context, iqn string, addr string, port int) error { + log.Printf("iscsi: AddPortal iqn=%s addr=%s port=%d (stub)", iqn, addr, port) + return nil +} + +func (i *ISCSIAdapter) AddACL(ctx context.Context, iqn, initiator string) error { + log.Printf("iscsi: AddACL iqn=%s initiator=%s (stub)", iqn, initiator) + return nil +} + +func (i *ISCSIAdapter) Save(ctx context.Context) error { + log.Printf("iscsi: Save (stub)") + return nil +} diff --git a/internal/infra/zfs/zfs.go b/internal/infra/zfs/zfs.go index 34a8091..0ae8376 100644 --- a/internal/infra/zfs/zfs.go +++ b/internal/infra/zfs/zfs.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "strings" - "time" "github.com/example/storage-appliance/internal/domain" "github.com/example/storage-appliance/internal/infra/osexec" @@ -100,6 +99,30 @@ func (a *Adapter) CreateDataset(ctx context.Context, name string, props map[stri return nil } +// CreateZVol creates a block device zvol with the given size and optional props +func (a *Adapter) CreateZVol(ctx context.Context, name, size string, props map[string]string) error { + args := []string{"create", "-V", size, name} + for k, v := range props { + args = append([]string{"create", "-o", fmt.Sprintf("%s=%s", k, v)}, args...) 
+ } + // Note: above building may produce repeated 'create' parts - keep simple: build args now + // We'll just construct a direct zfs create -V size -o prop=val name + args = []string{"create", "-V", size} + for k, v := range props { + args = append(args, "-o", fmt.Sprintf("%s=%s", k, v)) + } + args = append(args, name) + out, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zfs", args...) + _ = out + if err != nil { + return err + } + if code != 0 { + return fmt.Errorf("zfs create vol failed: %s", stderr) + } + return nil +} + func (a *Adapter) Snapshot(ctx context.Context, dataset, snapName string) error { name := fmt.Sprintf("%s@%s", dataset, snapName) _, stderr, code, err := osexec.ExecWithRunner(a.Runner, ctx, "zfs", "snapshot", name) diff --git a/internal/infra/zfs/zfs_test.go b/internal/infra/zfs/zfs_test.go index e93733f..2572705 100644 --- a/internal/infra/zfs/zfs_test.go +++ b/internal/infra/zfs/zfs_test.go @@ -3,7 +3,6 @@ package zfs import ( "context" "testing" - "time" "github.com/example/storage-appliance/internal/infra/osexec" ) diff --git a/internal/job/runner.go b/internal/job/runner.go index 4b2a1a4..291262d 100644 --- a/internal/job/runner.go +++ b/internal/job/runner.go @@ -3,15 +3,20 @@ package job import ( "context" "database/sql" + "encoding/json" "log" "time" + "github.com/example/storage-appliance/internal/audit" "github.com/example/storage-appliance/internal/domain" + "github.com/example/storage-appliance/internal/infra/zfs" "github.com/google/uuid" ) type Runner struct { - DB *sql.DB + DB *sql.DB + ZFS *zfs.Adapter + Audit audit.AuditLogger } func (r *Runner) Enqueue(ctx context.Context, j domain.Job) (string, error) { @@ -22,16 +27,112 @@ func (r *Runner) Enqueue(ctx context.Context, j domain.Job) (string, error) { j.Status = "queued" j.CreatedAt = time.Now() j.UpdatedAt = time.Now() - _, err := r.DB.ExecContext(ctx, `INSERT INTO jobs (id, type, status, progress, owner, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?)`, - 
j.ID, j.Type, j.Status, j.Progress, j.Owner, j.CreatedAt, j.UpdatedAt) + // persist details JSON if present + detailsJSON := "" + if j.Details != nil { + b, _ := json.Marshal(j.Details) + detailsJSON = string(b) + } + _, err := r.DB.ExecContext(ctx, `INSERT INTO jobs (id, type, status, progress, owner, created_at, updated_at, details) VALUES (?, ?, ?, ?, ?, ?, ?, ?)`, + j.ID, j.Type, j.Status, j.Progress, j.Owner, j.CreatedAt, j.UpdatedAt, detailsJSON) if err != nil { return "", err } log.Printf("enqueued job %s (%s)", j.ID, j.Type) // run async worker (very simple worker for skeleton) go func() { - time.Sleep(1 * time.Second) - r.updateStatus(ctx, j.ID, "succeeded", 100) + // update running + _ = r.updateStatus(ctx, j.ID, "running", 0) + // execute based on job type + switch j.Type { + case "create-pool": + // parse details: expect name and vdevs + var name string + var vdevs []string + if j.Details != nil { + if n, ok := j.Details["name"].(string); ok { + name = n + } + if rawV, ok := j.Details["vdevs"].([]any); ok { + for _, vv := range rawV { + if s, ok := vv.(string); ok { + vdevs = append(vdevs, s) + } + } + } + } + _ = r.updateStatus(ctx, j.ID, "running", 10) + if r.ZFS != nil { + // call sync create pool + if err := r.ZFS.CreatePoolSync(ctx, name, vdevs); err != nil { + _ = r.updateStatus(ctx, j.ID, "failed", 0) + if r.Audit != nil { + r.Audit.Record(ctx, audit.Event{UserID: string(j.Owner), Action: "pool.create", ResourceType: "pool", ResourceID: name, Success: false, Details: map[string]any{"error": err.Error()}}) + } + return + } + _ = r.updateStatus(ctx, j.ID, "succeeded", 100) + if r.Audit != nil { + r.Audit.Record(ctx, audit.Event{UserID: string(j.Owner), Action: "pool.create", ResourceType: "pool", ResourceID: name, Success: true}) + } + return + } + _ = r.updateStatus(ctx, j.ID, "succeeded", 100) + case "snapshot": + _ = r.updateStatus(ctx, j.ID, "running", 10) + // call zfs snapshot; expect dataset and name + var dataset, snapName string + if 
j.Details != nil { + if d, ok := j.Details["dataset"].(string); ok { + dataset = d + } + if s, ok := j.Details["snap_name"].(string); ok { + snapName = s + } + } + if r.ZFS != nil { + if err := r.ZFS.Snapshot(ctx, dataset, snapName); err != nil { + _ = r.updateStatus(ctx, j.ID, "failed", 0) + if r.Audit != nil { + r.Audit.Record(ctx, audit.Event{UserID: string(j.Owner), Action: "snapshot", ResourceType: "snapshot", ResourceID: dataset + "@" + snapName, Success: false, Details: map[string]any{"error": err.Error()}}) + } + return + } + _ = r.updateStatus(ctx, j.ID, "succeeded", 100) + if r.Audit != nil { + r.Audit.Record(ctx, audit.Event{UserID: string(j.Owner), Action: "snapshot", ResourceType: "snapshot", ResourceID: dataset + "@" + snapName, Success: true}) + } + return + } + _ = r.updateStatus(ctx, j.ID, "succeeded", 100) + case "scrub": + _ = r.updateStatus(ctx, j.ID, "running", 10) + var pool string + if j.Details != nil { + if p, ok := j.Details["pool"].(string); ok { + pool = p + } + } + if r.ZFS != nil { + if err := r.ZFS.ScrubStart(ctx, pool); err != nil { + _ = r.updateStatus(ctx, j.ID, "failed", 0) + if r.Audit != nil { + r.Audit.Record(ctx, audit.Event{UserID: string(j.Owner), Action: "pool.scrub", ResourceType: "pool", ResourceID: pool, Success: false, Details: map[string]any{"error": err.Error()}}) + } + return + } + _ = r.updateStatus(ctx, j.ID, "succeeded", 100) + if r.Audit != nil { + r.Audit.Record(ctx, audit.Event{UserID: string(j.Owner), Action: "pool.scrub", ResourceType: "pool", ResourceID: pool, Success: true}) + } + return + } + _ = r.updateStatus(ctx, j.ID, "succeeded", 100) + default: + // unknown job types just succeed + time.Sleep(500 * time.Millisecond) + _ = r.updateStatus(ctx, j.ID, "succeeded", 100) + } }() return id, nil } diff --git a/internal/service/interfaces.go b/internal/service/interfaces.go index 2812706..df6a50e 100644 --- a/internal/service/interfaces.go +++ b/internal/service/interfaces.go @@ -12,9 +12,45 @@ type 
DiskService interface { type ZFSService interface { ListPools(ctx context.Context) ([]domain.Pool, error) - CreatePool(ctx context.Context, name string, vdevs []string) (string, error) + // CreatePool is a higher level operation handled by StorageService with jobs + // CreatePool(ctx context.Context, name string, vdevs []string) (string, error) + GetPoolStatus(ctx context.Context, pool string) (domain.PoolHealth, error) + ListDatasets(ctx context.Context, pool string) ([]domain.Dataset, error) + CreateDataset(ctx context.Context, name string, props map[string]string) error + Snapshot(ctx context.Context, dataset, snapName string) error + ScrubStart(ctx context.Context, pool string) error + ScrubStatus(ctx context.Context, pool string) (string, error) } type JobRunner interface { Enqueue(ctx context.Context, j domain.Job) (string, error) } + +type SharesService interface { + ListNFS(ctx context.Context) ([]domain.Share, error) + CreateNFS(ctx context.Context, user, role, name, path string, opts map[string]string) (string, error) + DeleteNFS(ctx context.Context, user, role, id string) error + NFSStatus(ctx context.Context) (string, error) + ListSMB(ctx context.Context) ([]domain.Share, error) + CreateSMB(ctx context.Context, user, role, name, path string, readOnly bool, allowedUsers []string) (string, error) + DeleteSMB(ctx context.Context, user, role, id string) error +} + +type ObjectService interface { + SetSettings(ctx context.Context, user, role string, s map[string]any) error + GetSettings(ctx context.Context) (map[string]any, error) + ListBuckets(ctx context.Context) ([]string, error) + CreateBucket(ctx context.Context, user, role, name string) (string, error) +} + +type ISCSIService interface { + ListTargets(ctx context.Context) ([]map[string]any, error) + CreateTarget(ctx context.Context, user, role, name, iqn string) (string, error) + CreateLUN(ctx context.Context, user, role, targetID, lunName string, size string, blocksize int) (string, error) + 
DeleteLUN(ctx context.Context, user, role, id string, force bool) error + UnmapLUN(ctx context.Context, user, role, id string) error + AddPortal(ctx context.Context, user, role, targetID, address string, port int) (string, error) + AddInitiator(ctx context.Context, user, role, targetID, initiatorIQN string) (string, error) + ListLUNs(ctx context.Context, targetID string) ([]map[string]any, error) + GetTargetInfo(ctx context.Context, targetID string) (map[string]any, error) +} diff --git a/internal/service/iscsi/iscsi.go b/internal/service/iscsi/iscsi.go new file mode 100644 index 0000000..b1a421b --- /dev/null +++ b/internal/service/iscsi/iscsi.go @@ -0,0 +1,228 @@ +package iscsi + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + "time" + + "github.com/google/uuid" + "github.com/example/storage-appliance/internal/audit" + "github.com/example/storage-appliance/internal/infra/iscsi" + "github.com/example/storage-appliance/internal/infra/zfs" +) + +var ( + ErrForbidden = errors.New("forbidden") +) + +type ISCSIService struct { + DB *sql.DB + ZFS *zfs.Adapter + ISCSI *iscsi.Adapter + Audit audit.AuditLogger +} + +func NewISCSIService(db *sql.DB, z *zfs.Adapter, i *iscsi.Adapter, a audit.AuditLogger) *ISCSIService { + return &ISCSIService{DB: db, ZFS: z, ISCSI: i, Audit: a} +} + +func (s *ISCSIService) ListTargets(ctx context.Context) ([]map[string]any, error) { + rows, err := s.DB.QueryContext(ctx, `SELECT id, iqn, name, created_at FROM iscsi_targets`) + if err != nil { return nil, err } + defer rows.Close() + res := []map[string]any{} + for rows.Next() { + var id, iqn, name string + var created time.Time + if err := rows.Scan(&id, &iqn, &name, &created); err != nil { return nil, err } + res = append(res, map[string]any{"id": id, "iqn": iqn, "name": name, "created_at": created}) + } + return res, nil +} + +func (s *ISCSIService) CreateTarget(ctx context.Context, user, role, name, iqn string) (string, error) { + if role != "admin" { return "", 
ErrForbidden } + if iqn == "" || !strings.HasPrefix(iqn, "iqn.") { return "", errors.New("invalid IQN") } + id := uuid.New().String() + if _, err := s.DB.ExecContext(ctx, `INSERT INTO iscsi_targets (id, iqn, name) VALUES (?, ?, ?)`, id, iqn, name); err != nil { + return "", err + } + if s.ISCSI != nil { + if err := s.ISCSI.CreateTarget(ctx, iqn); err != nil { + return "", err + } + if err := s.ISCSI.Save(ctx); err != nil { return "", err } + } + if s.Audit != nil { s.Audit.Record(ctx, audit.Event{UserID: user, Action: "iscsi.target.create", ResourceType: "iscsi_target", ResourceID: id, Success: true}) } + return id, nil +} + +// CreateLUN creates a zvol and maps it as LUN for the IQN. lunName is the zvol path e.g. pool/dataset/vol +func (s *ISCSIService) CreateLUN(ctx context.Context, user, role, targetID, lunName string, size string, blocksize int) (string, error) { + if role != "admin" && role != "operator" { return "", ErrForbidden } + // fetch target IQN + var iqn string + if err := s.DB.QueryRowContext(ctx, `SELECT iqn FROM iscsi_targets WHERE id = ?`, targetID).Scan(&iqn); err != nil { + return "", err + } + // build zvol name + zvol := lunName // expect fully qualified dataset, e.g., pool/iscsi/target/lun0 + // create zvol via zfs adapter + props := map[string]string{} + if blocksize > 0 { + // convert bytes to K unit if divisible + // For simplicity, just set volblocksize as "8K" or "512"; attempt simple conversion + props["volblocksize"] = fmt.Sprintf("%d", blocksize) + } + if s.ZFS != nil { + if _, err := s.ZFS.ListDatasets(ctx, ""); err == nil { // no-op to check connectivity + } + if err := s.ZFS.CreateZVol(ctx, zvol, size, props); err != nil { + return "", err + } + } + // backstore name and device path + bsName := "bs-" + uuid.New().String() + devpath := "/dev/zvol/" + zvol + if s.ISCSI != nil { + if err := s.ISCSI.CreateBackstore(ctx, bsName, devpath); err != nil { + return "", err + } + } + // determine LUN ID as next available for target + var 
maxLun sql.NullInt64 + if err := s.DB.QueryRowContext(ctx, `SELECT MAX(lun_id) FROM iscsi_luns WHERE target_id = ?`, targetID).Scan(&maxLun); err != nil && err != sql.ErrNoRows { return "", err } + nextLun := 0 + if maxLun.Valid { nextLun = int(maxLun.Int64) + 1 } + if s.ISCSI != nil { + if err := s.ISCSI.CreateLUN(ctx, iqn, bsName, nextLun); err != nil { + return "", err + } + if err := s.ISCSI.Save(ctx); err != nil { return "", err } + } + id := uuid.New().String() + if _, err := s.DB.ExecContext(ctx, `INSERT INTO iscsi_luns (id, target_id, lun_id, zvol, size, blocksize, mapped) VALUES (?, ?, ?, ?, ?, ?, 1)`, id, targetID, nextLun, zvol, sizeToInt(size), blocksize); err != nil { + return "", err + } + if s.Audit != nil { s.Audit.Record(ctx, audit.Event{UserID: user, Action: "iscsi.lun.create", ResourceType: "iscsi_lun", ResourceID: id, Success: true}) } + return id, nil +} + +func sizeToInt(s string) int { + // naive conversion: strip trailing G/M/K + // This function can be improved; for now return 0 + return 0 +} + +func (s *ISCSIService) ListLUNs(ctx context.Context, targetID string) ([]map[string]any, error) { + rows, err := s.DB.QueryContext(ctx, `SELECT id, lun_id, zvol, size, blocksize, mapped, created_at FROM iscsi_luns WHERE target_id = ?`, targetID) + if err != nil { return nil, err } + defer rows.Close() + res := []map[string]any{} + for rows.Next() { + var id, zvol string + var lunID int + var size int + var blocksize int + var mapped int + var created time.Time + if err := rows.Scan(&id, &lunID, &zvol, &size, &blocksize, &mapped, &created); err != nil { return nil, err } + res = append(res, map[string]any{"id": id, "lun_id": lunID, "zvol": zvol, "size": size, "blocksize": blocksize, "mapped": mapped == 1, "created_at": created}) + } + return res, nil +} + +func (s *ISCSIService) GetTargetInfo(ctx context.Context, targetID string) (map[string]any, error) { + var iqn string + if err := s.DB.QueryRowContext(ctx, `SELECT iqn FROM iscsi_targets WHERE id = 
?`, targetID).Scan(&iqn); err != nil { return nil, err } + portals := []map[string]any{} + rows, err := s.DB.QueryContext(ctx, `SELECT id, address, port FROM iscsi_portals WHERE target_id = ?`, targetID) + if err != nil { return nil, err } + defer rows.Close() + for rows.Next() { + var id, address string + var port int + if err := rows.Scan(&id, &address, &port); err != nil { return nil, err } + portals = append(portals, map[string]any{"id": id, "address": address, "port": port}) + } + inits := []map[string]any{} + rows2, err := s.DB.QueryContext(ctx, `SELECT id, initiator_iqn FROM iscsi_initiators WHERE target_id = ?`, targetID) + if err != nil { return nil, err } + defer rows2.Close() + for rows2.Next() { + var id, iqnStr string + if err := rows2.Scan(&id, &iqnStr); err != nil { return nil, err } + inits = append(inits, map[string]any{"id": id, "iqn": iqnStr}) + } + res := map[string]any{"iqn": iqn, "portals": portals, "initiators": inits} + return res, nil +} + +func (s *ISCSIService) DeleteLUN(ctx context.Context, user, role, id string, force bool) error { + if role != "admin" { return ErrForbidden } + // check LUN + var mappedInt int + var targetID string + var lunID int + if err := s.DB.QueryRowContext(ctx, `SELECT target_id, lun_id, mapped FROM iscsi_luns WHERE id = ?`, id).Scan(&targetID, &lunID, &mappedInt); err != nil { return err } + if mappedInt == 1 && !force { return errors.New("LUN is mapped; unmap (drain) before deletion or specify force") } + // delete via adapter + if s.ISCSI != nil { + var iqn string + if err := s.DB.QueryRowContext(ctx, `SELECT iqn FROM iscsi_targets WHERE id = ?`, targetID).Scan(&iqn); err != nil { return err } + if err := s.ISCSI.DeleteLUN(ctx, iqn, lunID); err != nil { return err } + if err := s.ISCSI.Save(ctx); err != nil { return err } + } + if _, err := s.DB.ExecContext(ctx, `DELETE FROM iscsi_luns WHERE id = ?`, id); err != nil { return err } + if s.Audit != nil { s.Audit.Record(ctx, audit.Event{UserID: user, Action: 
"iscsi.lun.delete", ResourceType: "iscsi_lun", ResourceID: id, Success: true}) } + return nil +} + +// UnmapLUN removes LUN mapping from target, sets mapped to false in DB +func (s *ISCSIService) UnmapLUN(ctx context.Context, user, role, id string) error { + if role != "admin" && role != "operator" { return ErrForbidden } + var targetID string + var lunID int + if err := s.DB.QueryRowContext(ctx, `SELECT target_id, lun_id FROM iscsi_luns WHERE id = ?`, id).Scan(&targetID, &lunID); err != nil { return err } + if s.ISCSI != nil { + var iqn string + if err := s.DB.QueryRowContext(ctx, `SELECT iqn FROM iscsi_targets WHERE id = ?`, targetID).Scan(&iqn); err != nil { return err } + if err := s.ISCSI.DeleteLUN(ctx, iqn, lunID); err != nil { return err } + if err := s.ISCSI.Save(ctx); err != nil { return err } + } + if _, err := s.DB.ExecContext(ctx, `UPDATE iscsi_luns SET mapped = 0 WHERE id = ?`, id); err != nil { return err } + if s.Audit != nil { s.Audit.Record(ctx, audit.Event{UserID: user, Action: "iscsi.lun.unmap", ResourceType: "iscsi_lun", ResourceID: id, Success: true}) } + return nil +} + +func (s *ISCSIService) AddPortal(ctx context.Context, user, role, targetID, address string, port int) (string, error) { + if role != "admin" && role != "operator" { return "", ErrForbidden } + // verify target exists and fetch IQN + var iqn string + if err := s.DB.QueryRowContext(ctx, `SELECT iqn FROM iscsi_targets WHERE id = ?`, targetID).Scan(&iqn); err != nil { return "", err } + id := uuid.New().String() + if _, err := s.DB.ExecContext(ctx, `INSERT INTO iscsi_portals (id, target_id, address, port) VALUES (?, ?, ?, ?)`, id, targetID, address, port); err != nil { return "", err } + if s.ISCSI != nil { + if err := s.ISCSI.AddPortal(ctx, iqn, address, port); err != nil { return "", err } + if err := s.ISCSI.Save(ctx); err != nil { return "", err } + } + if s.Audit != nil { s.Audit.Record(ctx, audit.Event{UserID: user, Action: "iscsi.portal.add", ResourceType: "iscsi_portal", 
ResourceID: id, Success: true}) } + return id, nil +} + +func (s *ISCSIService) AddInitiator(ctx context.Context, user, role, targetID, initiatorIQN string) (string, error) { + if role != "admin" && role != "operator" { return "", ErrForbidden } + var iqn string + if err := s.DB.QueryRowContext(ctx, `SELECT iqn FROM iscsi_targets WHERE id = ?`, targetID).Scan(&iqn); err != nil { return "", err } + id := uuid.New().String() + if _, err := s.DB.ExecContext(ctx, `INSERT INTO iscsi_initiators (id, target_id, initiator_iqn) VALUES (?, ?, ?)`, id, targetID, initiatorIQN); err != nil { return "", err } + if s.ISCSI != nil { + if err := s.ISCSI.AddACL(ctx, iqn, initiatorIQN); err != nil { return "", err } + if err := s.ISCSI.Save(ctx); err != nil { return "", err } + } + if s.Audit != nil { s.Audit.Record(ctx, audit.Event{UserID: user, Action: "iscsi.initiator.add", ResourceType: "iscsi_initiator", ResourceID: id, Success: true}) } + return id, nil +} diff --git a/internal/service/mock/mock_service.go b/internal/service/mock/mock_service.go index ec04b25..cb8cc36 100644 --- a/internal/service/mock/mock_service.go +++ b/internal/service/mock/mock_service.go @@ -10,9 +10,11 @@ import ( ) var ( - _ service.DiskService = (*MockDiskService)(nil) - _ service.ZFSService = (*MockZFSService)(nil) - _ service.JobRunner = (*MockJobRunner)(nil) + _ service.DiskService = (*MockDiskService)(nil) + _ service.ZFSService = (*MockZFSService)(nil) + _ service.JobRunner = (*MockJobRunner)(nil) + _ service.SharesService = (*MockSharesService)(nil) + _ service.ISCSIService = (*MockISCSIService)(nil) ) type MockDiskService struct{} @@ -32,8 +34,32 @@ func (m *MockZFSService) ListPools(ctx context.Context) ([]domain.Pool, error) { } func (m *MockZFSService) CreatePool(ctx context.Context, name string, vdevs []string) (string, error) { - // spawn instant job id for mock - return "job-" + uuid.New().String(), nil + // not implemented on adapter-level mock + return "", nil +} + +func (m 
*MockZFSService) GetPoolStatus(ctx context.Context, pool string) (domain.PoolHealth, error) { + return domain.PoolHealth{Pool: pool, Status: "ONLINE", Detail: "mocked"}, nil +} + +func (m *MockZFSService) ListDatasets(ctx context.Context, pool string) ([]domain.Dataset, error) { + return []domain.Dataset{{Name: pool + "/dataset1", Pool: pool, Type: "filesystem"}}, nil +} + +func (m *MockZFSService) CreateDataset(ctx context.Context, name string, props map[string]string) error { + return nil +} + +func (m *MockZFSService) Snapshot(ctx context.Context, dataset, snapName string) error { + return nil +} + +func (m *MockZFSService) ScrubStart(ctx context.Context, pool string) error { + return nil +} + +func (m *MockZFSService) ScrubStatus(ctx context.Context, pool string) (string, error) { + return "none", nil } type MockJobRunner struct{} @@ -45,3 +71,67 @@ func (m *MockJobRunner) Enqueue(ctx context.Context, j domain.Job) (string, erro }() return uuid.New().String(), nil } + +type MockSharesService struct{} + +func (m *MockSharesService) ListNFS(ctx context.Context) ([]domain.Share, error) { + return []domain.Share{{ID: domain.UUID(uuid.New().String()), Name: "data", Path: "tank/ds", Type: "nfs"}}, nil +} + +func (m *MockSharesService) CreateNFS(ctx context.Context, user, role, name, path string, opts map[string]string) (string, error) { + return "share-" + uuid.New().String(), nil +} + +func (m *MockSharesService) DeleteNFS(ctx context.Context, user, role, id string) error { + return nil +} +func (m *MockSharesService) NFSStatus(ctx context.Context) (string, error) { + return "active", nil +} +func (m *MockSharesService) ListSMB(ctx context.Context) ([]domain.Share, error) { + return []domain.Share{{ID: domain.UUID(uuid.New().String()), Name: "smb1", Path: "tank/ds", Type: "smb", Config: map[string]string{"read_only": "false"}}}, nil +} +func (m *MockSharesService) CreateSMB(ctx context.Context, user, role, name, path string, readOnly bool, allowedUsers []string) 
(string, error) { + return "smb-" + uuid.New().String(), nil +} +func (m *MockSharesService) DeleteSMB(ctx context.Context, user, role, id string) error { + return nil +} + +type MockISCSIService struct{} + +func (m *MockISCSIService) ListTargets(ctx context.Context) ([]map[string]any, error) { + return []map[string]any{{"id": "t-1", "iqn": "iqn.2025-12.org.example:target1", "name": "test"}}, nil +} + +func (m *MockISCSIService) CreateTarget(ctx context.Context, user, role, name, iqn string) (string, error) { + return "t-" + uuid.New().String(), nil +} + +func (m *MockISCSIService) CreateLUN(ctx context.Context, user, role, targetID, lunName string, size string, blocksize int) (string, error) { + return "lun-" + uuid.New().String(), nil +} + +func (m *MockISCSIService) DeleteLUN(ctx context.Context, user, role, id string, force bool) error { + return nil +} + +func (m *MockISCSIService) ListLUNs(ctx context.Context, targetID string) ([]map[string]any, error) { + return []map[string]any{{"id": "lun-1", "lun_id": 0, "zvol": "tank/ds/vol1", "size": 10737418240}}, nil +} + +func (m *MockISCSIService) UnmapLUN(ctx context.Context, user, role, id string) error { + return nil +} + +func (m *MockISCSIService) AddPortal(ctx context.Context, user, role, targetID, address string, port int) (string, error) { + return "portal-" + uuid.New().String(), nil +} + +func (m *MockISCSIService) AddInitiator(ctx context.Context, user, role, targetID, initiatorIQN string) (string, error) { + return "init-" + uuid.New().String(), nil +} + +func (m *MockISCSIService) GetTargetInfo(ctx context.Context, targetID string) (map[string]any, error) { + return map[string]any{"iqn": "iqn.2025-12.org.example:target1", "portals": []map[string]any{{"id": "p-1", "address": "10.0.0.1", "port": 3260}}, "initiators": []map[string]any{{"id": "i-1", "iqn": "iqn.1993-08.org.debian:01"}}}, nil +} diff --git a/internal/service/objectstore/objectstore.go b/internal/service/objectstore/objectstore.go new file 
mode 100644 index 0000000..7f97358 --- /dev/null +++ b/internal/service/objectstore/objectstore.go @@ -0,0 +1,159 @@ +package objectstore + +import ( + "context" + "database/sql" + "errors" + "fmt" + "time" + + "github.com/example/storage-appliance/internal/audit" + "github.com/example/storage-appliance/internal/infra/minio" + "github.com/example/storage-appliance/internal/infra/crypto" +) + +var ( + ErrForbidden = errors.New("forbidden") +) + +type Settings struct { + ID string + Name string + AccessKey string + SecretKey string + DataPath string + Port int + TLS bool + CreatedAt time.Time +} + +type ObjectService struct { + DB *sql.DB + Minio *minio.Adapter + Audit audit.AuditLogger + // encryption key for secret storage + Key []byte +} + +func NewObjectService(db *sql.DB, m *minio.Adapter, a audit.AuditLogger, key []byte) *ObjectService { + return &ObjectService{DB: db, Minio: m, Audit: a, Key: key} +} + +func (s *ObjectService) SetSettings(ctx context.Context, user, role string, stMap map[string]any) error { + if role != "admin" { + return ErrForbidden + } + // convert map to Settings struct for local use + st := Settings{} + if v, ok := stMap["access_key"].(string); ok { st.AccessKey = v } + if v, ok := stMap["secret_key"].(string); ok { st.SecretKey = v } + if v, ok := stMap["data_path"].(string); ok { st.DataPath = v } + if v, ok := stMap["name"].(string); ok { st.Name = v } + if v, ok := stMap["port"].(int); ok { st.Port = v } + if v, ok := stMap["tls"].(bool); ok { st.TLS = v } + + // encrypt access key and secret key + if len(s.Key) != 32 { + return errors.New("encryption key must be 32 bytes") + } + encAccess, err := crypto.Encrypt(s.Key, st.AccessKey) + if err != nil { + return err + } + encSecret, err := crypto.Encrypt(s.Key, st.SecretKey) + if err != nil { + return err + } + // upsert into DB (single row) + if _, err := s.DB.ExecContext(ctx, `INSERT OR REPLACE INTO object_storage (id, name, access_key, secret_key, data_path, port, tls) VALUES 
('minio', ?, ?, ?, ?, ?, ?)` , st.Name, encAccess, encSecret, st.DataPath, st.Port, boolToInt(st.TLS)); err != nil {
		return err
	}
	if s.Audit != nil {
		s.Audit.Record(ctx, audit.Event{UserID: user, Action: "object.settings.update", ResourceType: "object_storage", ResourceID: "minio", Success: true})
	}
	if s.Minio != nil {
		// Push the (plaintext) settings to the adapter: write env file, then reload.
		settings := minio.Settings{AccessKey: st.AccessKey, SecretKey: st.SecretKey, DataPath: st.DataPath, Port: st.Port, TLS: st.TLS}
		if err := s.Minio.WriteEnv(ctx, settings); err != nil {
			return err
		}
		if err := s.Minio.Reload(ctx); err != nil {
			return err
		}
	}
	return nil
}

// GetSettings loads the single 'minio' settings row and returns it as a map,
// decrypting the stored access/secret keys with s.Key when the key is valid.
func (s *ObjectService) GetSettings(ctx context.Context) (map[string]any, error) {
	var st Settings
	row := s.DB.QueryRowContext(ctx, `SELECT name, access_key, secret_key, data_path, port, tls, created_at FROM object_storage WHERE id = 'minio'`)
	var encAccess, encSecret string
	var tlsInt int
	if err := row.Scan(&st.Name, &encAccess, &encSecret, &st.DataPath, &st.Port, &tlsInt, &st.CreatedAt); err != nil {
		return nil, err
	}
	st.TLS = tlsInt == 1
	if len(s.Key) == 32 {
		// NOTE(review): decrypt failures (and a missing/short key) are silently
		// swallowed — the result then carries empty access/secret keys with no
		// error. Confirm callers can distinguish "no credentials" from
		// "decryption failed".
		if A, err := crypto.Decrypt(s.Key, encAccess); err == nil { st.AccessKey = A }
		if S, err := crypto.Decrypt(s.Key, encSecret); err == nil { st.SecretKey = S }
	}
	res := map[string]any{"name": st.Name, "access_key": st.AccessKey, "secret_key": st.SecretKey, "data_path": st.DataPath, "port": st.Port, "tls": st.TLS, "created_at": st.CreatedAt}
	return res, nil
}

// boolToInt maps true/false to the 1/0 convention used by the sqlite schema.
func boolToInt(b bool) int { if b { return 1 }; return 0 }

// ListBuckets via minio adapter or fallback to DB
func (s *ObjectService) ListBuckets(ctx context.Context) ([]string, error) {
	if s.Minio != nil {
		// ensure mc alias is configured
		stMap, err := s.GetSettings(ctx)
		if err != nil { return nil, err }
		alias := "appliance"
		mSet := minio.Settings{}
		if v, ok := stMap["access_key"].(string); ok { mSet.AccessKey = v }
		if v, ok := stMap["secret_key"].(string); ok { 
mSet.SecretKey = v } + if v, ok := stMap["data_path"].(string); ok { mSet.DataPath = v } + if v, ok := stMap["port"].(int); ok { mSet.Port = v } + if v, ok := stMap["tls"].(bool); ok { mSet.TLS = v } + s.Minio.ConfigureMC(ctx, alias, mSet) + return s.Minio.ListBuckets(ctx, alias) + } + // fallback to DB persisted buckets + rows, err := s.DB.QueryContext(ctx, `SELECT name FROM buckets`) + if err != nil { return nil, err } + defer rows.Close() + var res []string + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { return nil, err } + res = append(res, name) + } + return res, nil +} + +func (s *ObjectService) CreateBucket(ctx context.Context, user, role, name string) (string, error) { + if role != "admin" && role != "operator" { return "", ErrForbidden } + // attempt via minio adapter + if s.Minio != nil { + stMap, err := s.GetSettings(ctx) + if err != nil { return "", err } + alias := "appliance" + mSet := minio.Settings{} + if v, ok := stMap["access_key"].(string); ok { mSet.AccessKey = v } + if v, ok := stMap["secret_key"].(string); ok { mSet.SecretKey = v } + if v, ok := stMap["data_path"].(string); ok { mSet.DataPath = v } + if v, ok := stMap["port"].(int); ok { mSet.Port = v } + if v, ok := stMap["tls"].(bool); ok { mSet.TLS = v } + if err := s.Minio.ConfigureMC(ctx, alias, mSet); err != nil { return "", err } + if err := s.Minio.CreateBucket(ctx, alias, name); err != nil { return "", err } + // persist + id := fmt.Sprintf("bucket-%d", time.Now().UnixNano()) + if _, err := s.DB.ExecContext(ctx, `INSERT INTO buckets (id, name) VALUES (?, ?)`, id, name); err != nil { + return "", err + } + if s.Audit != nil { s.Audit.Record(ctx, audit.Event{UserID: user, Action: "object.bucket.create", ResourceType: "bucket", ResourceID: name, Success: true}) } + return id, nil + } + return "", errors.New("no minio adapter configured") +} diff --git a/internal/service/shares/shares.go b/internal/service/shares/shares.go new file mode 100644 index 
0000000..f976723 --- /dev/null +++ b/internal/service/shares/shares.go @@ -0,0 +1,225 @@ +package shares + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "github.com/google/uuid" + "strings" + + "github.com/example/storage-appliance/internal/audit" + "github.com/example/storage-appliance/internal/domain" + "github.com/example/storage-appliance/internal/infra/nfs" + "github.com/example/storage-appliance/internal/infra/samba" +) + +var ( + ErrForbidden = errors.New("forbidden") +) + +type SharesService struct { + DB *sql.DB + NFS *nfs.Adapter + Samba *samba.Adapter + Audit audit.AuditLogger +} + +func NewSharesService(db *sql.DB, n *nfs.Adapter, s *samba.Adapter, a audit.AuditLogger) *SharesService { + return &SharesService{DB: db, NFS: n, Samba: s, Audit: a} +} + +func (s *SharesService) ListNFS(ctx context.Context) ([]domain.Share, error) { + rows, err := s.DB.QueryContext(ctx, `SELECT id, name, path, type, options FROM shares WHERE type = 'nfs'`) + if err != nil { + return nil, err + } + defer rows.Close() + var res []domain.Share + for rows.Next() { + var id, name, path, typ, options string + if err := rows.Scan(&id, &name, &path, &typ, &options); err != nil { + return nil, err + } + var optMap map[string]string + if options != "" { + _ = json.Unmarshal([]byte(options), &optMap) + } + res = append(res, domain.Share{ID: domain.UUID(id), Name: name, Path: path, Type: typ}) + } + return res, nil +} + +// CreateNFS stores a new NFS export, re-renders /etc/exports and applies it +func (s *SharesService) CreateNFS(ctx context.Context, user, role, name, path string, opts map[string]string) (string, error) { + if role != "admin" && role != "operator" { + return "", ErrForbidden + } + // Verify path exists and is a dataset: check dataset table for matching name + var count int + if err := s.DB.QueryRowContext(ctx, `SELECT COUNT(1) FROM datasets WHERE name = ?`, path).Scan(&count); err != nil { + return "", err + } + if count == 0 { + return 
"", fmt.Errorf("path not a known dataset: %s", path) + } + // Prevent exporting system paths: disallow leading '/' entries; require dataset name like pool/ds + if path == "/" || path == "/etc" || path == "/bin" || path == "/usr" { + return "", fmt.Errorf("can't export system path: %s", path) + } + // store options as JSON + optJSON, _ := json.Marshal(opts) + id := uuid.New().String() + if _, err := s.DB.ExecContext(ctx, `INSERT INTO shares (id, name, path, type, options) VALUES (?, ?, ?, 'nfs', ?)`, id, name, path, string(optJSON)); err != nil { + return "", err + } + if s.Audit != nil { + s.Audit.Record(ctx, audit.Event{UserID: user, Action: "nfs.create", ResourceType: "share", ResourceID: name, Success: true, Details: map[string]any{"path": path}}) + } + // re-render exports + shares, err := s.ListNFS(ctx) + if err != nil { + return id, err + } + if s.NFS != nil { + if err := s.NFS.RenderExports(ctx, shares); err != nil { + return id, err + } + if err := s.NFS.Apply(ctx); err != nil { + return id, err + } + } + return id, nil +} + +// SMB functions +func (s *SharesService) ListSMB(ctx context.Context) ([]domain.Share, error) { + rows, err := s.DB.QueryContext(ctx, `SELECT id, name, path, type, options FROM shares WHERE type = 'smb'`) + if err != nil { + return nil, err + } + defer rows.Close() + var res []domain.Share + for rows.Next() { + var id, name, path, typ, options string + if err := rows.Scan(&id, &name, &path, &typ, &options); err != nil { + return nil, err + } + var config map[string]string + if options != "" { + _ = json.Unmarshal([]byte(options), &config) + } + res = append(res, domain.Share{ID: domain.UUID(id), Name: name, Path: path, Type: typ, Config: config}) + } + return res, nil +} + +func (s *SharesService) CreateSMB(ctx context.Context, user, role, name, path string, readOnly bool, allowedUsers []string) (string, error) { + if role != "admin" && role != "operator" { + return "", ErrForbidden + } + // Verify dataset + var count int + if err := 
s.DB.QueryRowContext(ctx, `SELECT COUNT(1) FROM datasets WHERE name = ?`, path).Scan(&count); err != nil { + return "", err + } + if count == 0 { + return "", fmt.Errorf("path not a known dataset: %s", path) + } + // disallow system paths by basic checks + if path == "/" || path == "/etc" || path == "/bin" || path == "/usr" { + return "", fmt.Errorf("can't export system path: %s", path) + } + // store options as JSON (read_only, allowed_users) + cfg := map[string]string{} + if readOnly { + cfg["read_only"] = "true" + } else { + cfg["read_only"] = "false" + } + if len(allowedUsers) > 0 { + cfg["allowed_users"] = strings.Join(allowedUsers, " ") + } + optJSON, _ := json.Marshal(cfg) + id := uuid.New().String() + if _, err := s.DB.ExecContext(ctx, `INSERT INTO shares (id, name, path, type, options) VALUES (?, ?, ?, 'smb', ?)`, id, name, path, string(optJSON)); err != nil { + return "", err + } + if s.Audit != nil { + s.Audit.Record(ctx, audit.Event{UserID: user, Action: "smb.create", ResourceType: "share", ResourceID: name, Success: true, Details: map[string]any{"path": path, "read_only": readOnly}}) + } + // re-render smb conf and reload + shares, err := s.ListSMB(ctx) + if err != nil { + return id, err + } + if s.Samba != nil { + if err := s.Samba.RenderConf(ctx, shares); err != nil { + return id, err + } + if err := s.Samba.Reload(ctx); err != nil { + return id, err + } + } + return id, nil +} + +func (s *SharesService) DeleteSMB(ctx context.Context, user, role, id string) error { + if role != "admin" && role != "operator" { + return ErrForbidden + } + if _, err := s.DB.ExecContext(ctx, `DELETE FROM shares WHERE id = ?`, id); err != nil { + return err + } + if s.Audit != nil { + s.Audit.Record(ctx, audit.Event{UserID: user, Action: "smb.delete", ResourceType: "share", ResourceID: id, Success: true}) + } + shares, err := s.ListSMB(ctx) + if err != nil { + return err + } + if s.Samba != nil { + if err := s.Samba.RenderConf(ctx, shares); err != nil { + return err + } + 
if err := s.Samba.Reload(ctx); err != nil {
			return err
		}
	}
	return nil
}

// DeleteNFS removes an NFS share row, records the deletion in the audit log,
// then re-renders and applies /etc/exports from the remaining shares.
func (s *SharesService) DeleteNFS(ctx context.Context, user, role, id string) error {
	if role != "admin" && role != "operator" {
		return ErrForbidden
	}
	// NOTE(review): despite the original "verify exists" note, no existence
	// check is performed — DELETE on an unknown id is a silent no-op that
	// still reports success and still triggers a re-render. Consider checking
	// sql.Result.RowsAffected.
	if _, err := s.DB.ExecContext(ctx, `DELETE FROM shares WHERE id = ?`, id); err != nil {
		return err
	}
	if s.Audit != nil {
		s.Audit.Record(ctx, audit.Event{UserID: user, Action: "nfs.delete", ResourceType: "share", ResourceID: id, Success: true})
	}
	// re-render exports
	shares, err := s.ListNFS(ctx)
	if err != nil {
		return err
	}
	if s.NFS != nil {
		if err := s.NFS.RenderExports(ctx, shares); err != nil {
			return err
		}
		if err := s.NFS.Apply(ctx); err != nil {
			return err
		}
	}
	return nil
}

// NFSStatus reports the NFS adapter's status, or "unavailable" when no
// adapter is configured (e.g. mock mode).
func (s *SharesService) NFSStatus(ctx context.Context) (string, error) {
	if s.NFS == nil {
		return "unavailable", nil
	}
	return s.NFS.Status(ctx)
}

// ===== internal/service/storage/storage.go =====
// Hunk 1: the now-unused "strings" import is removed.
import (
	"context"
	"errors"
	"fmt"

	"github.com/example/storage-appliance/internal/audit"
	"github.com/example/storage-appliance/internal/domain"

// Hunk 2 (context, inside StorageService.CreatePool after the role check):
	}
	// Create a job to build a pool.
For skeleton, we just create a job entry with type create-pool j := domain.Job{Type: "create-pool", Status: "queued", Owner: domain.UUID(user)} + j.Details = map[string]any{"name": name, "vdevs": vdevs} id, err := s.JobRunner.Enqueue(ctx, j) // Store details in audit if s.Audit != nil { @@ -64,6 +64,7 @@ func (s *StorageService) Snapshot(ctx context.Context, user, role, dataset, snap } // call zfs snapshot, but do as job; enqueue j := domain.Job{Type: "snapshot", Status: "queued", Owner: domain.UUID(user)} + j.Details = map[string]any{"dataset": dataset, "snap_name": snapName} id, err := s.JobRunner.Enqueue(ctx, j) if s.Audit != nil { s.Audit.Record(ctx, audit.Event{UserID: user, Action: "dataset.snapshot.request", ResourceType: "snapshot", ResourceID: fmt.Sprintf("%s@%s", dataset, snapName), Success: err == nil, Details: map[string]any{"dataset": dataset}}) @@ -76,6 +77,7 @@ func (s *StorageService) ScrubStart(ctx context.Context, user, role, pool string return "", ErrForbidden } j := domain.Job{Type: "scrub", Status: "queued", Owner: domain.UUID(user)} + j.Details = map[string]any{"pool": pool} id, err := s.JobRunner.Enqueue(ctx, j) if s.Audit != nil { s.Audit.Record(ctx, audit.Event{UserID: user, Action: "pool.scrub.request", ResourceType: "pool", ResourceID: pool, Success: err == nil}) @@ -93,7 +95,11 @@ func (s *StorageService) CreateDataset(ctx context.Context, user, role, name str if role != "admin" && role != "operator" { return ErrForbidden } - return s.ZFS.CreateDataset(ctx, name, props) + err := s.ZFS.CreateDataset(ctx, name, props) + if s.Audit != nil { + s.Audit.Record(ctx, audit.Event{UserID: user, Action: "dataset.create", ResourceType: "dataset", ResourceID: name, Success: err == nil, Details: map[string]any{"props": props}}) + } + return err } // GetPoolStatus calls the adapter diff --git a/internal/templates/base.html b/internal/templates/base.html index 61534f3..2401a60 100644 --- a/internal/templates/base.html +++ b/internal/templates/base.html 
@@ -5,9 +5,20 @@ + {{.Title}} - + {{if .CSRFToken}} + + {{end}}
diff --git a/internal/templates/hx_iscsi_luns.html b/internal/templates/hx_iscsi_luns.html new file mode 100644 index 0000000..5ebd07a --- /dev/null +++ b/internal/templates/hx_iscsi_luns.html @@ -0,0 +1,32 @@ +{{ define "hx_iscsi_luns" }} +
+ + + + {{ if . }} + {{ range . }} + + + + + + + {{ end }} + {{ end }} + +
LUN IDZVolSizeAction
{{ .lun_id }}{{ .zvol }}{{ .size }} +
+
+ + +
+
+ + + + +
+
+
+
+{{ end }} diff --git a/internal/templates/hx_iscsi_target_info.html b/internal/templates/hx_iscsi_target_info.html new file mode 100644 index 0000000..ba36448 --- /dev/null +++ b/internal/templates/hx_iscsi_target_info.html @@ -0,0 +1,18 @@ +{{ define "hx_iscsi_target_info" }} +
+

Initiator Connection

+

IQN: {{ .iqn }}

+
Portals
+
    + {{ range .portals }} +
  • {{ .address }}:{{ .port }}
  • + {{ end }} +
+
Allowed Initiators
+
    + {{ range .initiators }} +
  • {{ .iqn }}
  • + {{ end }} +
+
+{{ end }} diff --git a/internal/templates/hx_iscsi_targets.html b/internal/templates/hx_iscsi_targets.html new file mode 100644 index 0000000..9cc181c --- /dev/null +++ b/internal/templates/hx_iscsi_targets.html @@ -0,0 +1,19 @@ +{{ define "hx_iscsi_targets" }} + + + + {{ if . }} + {{ range . }} + + + + + + {{ end }} + {{ end }} + +
NameIQNAction
{{ .name }}{{ .iqn }} + + +
+{{ end }} diff --git a/internal/templates/hx_nfs_shares.html b/internal/templates/hx_nfs_shares.html new file mode 100644 index 0000000..8fb3b9f --- /dev/null +++ b/internal/templates/hx_nfs_shares.html @@ -0,0 +1,23 @@ +{{define "hx_nfs_shares"}} +
+ + + + + + {{range .}} + + + + {{else}} + + {{end}} + +
NamePathType
{{.Name}}{{.Path}}{{.Type}} +
+ + +
+
No NFS shares
+
+{{end}} diff --git a/internal/templates/hx_smb_shares.html b/internal/templates/hx_smb_shares.html new file mode 100644 index 0000000..fa16010 --- /dev/null +++ b/internal/templates/hx_smb_shares.html @@ -0,0 +1,23 @@ +{{define "hx_smb_shares"}} +
+ + + + + + {{range .}} + + + + {{else}} + + {{end}} + +
NamePathTypeOptions
{{.Name}}{{.Path}}{{.Type}}{{range $k,$v := .Config}}{{$k}}={{$v}} {{end}} +
+ + +
+
No SMB shares
+
+{{end}} diff --git a/internal/templates/iscsi.html b/internal/templates/iscsi.html new file mode 100644 index 0000000..d80e9c3 --- /dev/null +++ b/internal/templates/iscsi.html @@ -0,0 +1,23 @@ +{{ define "iscsi" }} +
+

iSCSI Targets

+
+
+

Create Target

+
+ + + +
+
+
+

Create LUN

+
+ + + + +
+
+
+{{ end }} diff --git a/internal/templates/shares_nfs.html b/internal/templates/shares_nfs.html new file mode 100644 index 0000000..2c9b233 --- /dev/null +++ b/internal/templates/shares_nfs.html @@ -0,0 +1,22 @@ +{{define "content"}} +
+

NFS Shares

+
+ +
+
+ {{template "hx_nfs_shares" .}} +
+
+

Create NFS Share

+
+
+ + + + +
+
+
+
+{{end}} diff --git a/internal/templates/shares_smb.html b/internal/templates/shares_smb.html new file mode 100644 index 0000000..1ffb707 --- /dev/null +++ b/internal/templates/shares_smb.html @@ -0,0 +1,23 @@ +{{define "content"}} +
+

SMB Shares

+
+ +
+
+ {{template "hx_smb_shares" .}} +
+
+

Create SMB Share

+
+
+ + + + + +
+
+
+
+{{end}} diff --git a/internal/templates/storage.html b/internal/templates/storage.html index 8fd676e..ae19399 100644 --- a/internal/templates/storage.html +++ b/internal/templates/storage.html @@ -5,7 +5,7 @@
- {{template "hx_pools.html" .}} + {{template "hx_pools" .}}

Create Pool

diff --git a/migrations/0003_jobs_details.sql b/migrations/0003_jobs_details.sql new file mode 100644 index 0000000..ca244ad --- /dev/null +++ b/migrations/0003_jobs_details.sql @@ -0,0 +1,2 @@ +-- 0003_jobs_details.sql +ALTER TABLE jobs ADD COLUMN details TEXT; diff --git a/migrations/0004_shares.sql b/migrations/0004_shares.sql new file mode 100644 index 0000000..571b1e8 --- /dev/null +++ b/migrations/0004_shares.sql @@ -0,0 +1,9 @@ +-- 0004_shares.sql +CREATE TABLE IF NOT EXISTS shares ( + id TEXT PRIMARY KEY, + name TEXT, + path TEXT, + type TEXT, + options TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); diff --git a/migrations/0006_minio.sql b/migrations/0006_minio.sql new file mode 100644 index 0000000..ea0aac0 --- /dev/null +++ b/migrations/0006_minio.sql @@ -0,0 +1,17 @@ +-- 0006_minio.sql +CREATE TABLE IF NOT EXISTS object_storage ( + id TEXT PRIMARY KEY, + name TEXT, + access_key TEXT, + secret_key TEXT, + data_path TEXT, + port INTEGER, + tls INTEGER DEFAULT 0, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE IF NOT EXISTS buckets ( + id TEXT PRIMARY KEY, + name TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); diff --git a/migrations/0007_iscsi.sql b/migrations/0007_iscsi.sql new file mode 100644 index 0000000..5780723 --- /dev/null +++ b/migrations/0007_iscsi.sql @@ -0,0 +1,36 @@ +-- 0007_iscsi.sql +CREATE TABLE IF NOT EXISTS iscsi_targets ( + id TEXT PRIMARY KEY, + iqn TEXT NOT NULL UNIQUE, + name TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +CREATE TABLE IF NOT EXISTS iscsi_portals ( + id TEXT PRIMARY KEY, + target_id TEXT NOT NULL, + address TEXT NOT NULL, + port INTEGER DEFAULT 3260, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY(target_id) REFERENCES iscsi_targets(id) ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS iscsi_initiators ( + id TEXT PRIMARY KEY, + target_id TEXT NOT NULL, + initiator_iqn TEXT NOT NULL, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + FOREIGN 
KEY(target_id) REFERENCES iscsi_targets(id) ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS iscsi_luns ( + id TEXT PRIMARY KEY, + target_id TEXT NOT NULL, + lun_id INTEGER NOT NULL, + zvol TEXT NOT NULL, + size INTEGER, + blocksize INTEGER, + mapped INTEGER DEFAULT 0, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY(target_id) REFERENCES iscsi_targets(id) ON DELETE CASCADE +); diff --git a/migrations/0008_auth_rbac.sql b/migrations/0008_auth_rbac.sql new file mode 100644 index 0000000..e2e0210 --- /dev/null +++ b/migrations/0008_auth_rbac.sql @@ -0,0 +1,54 @@ +-- 0008_auth_rbac.sql +-- Enhanced users table (if not already exists, will be created by migrations.go) +-- Roles table +CREATE TABLE IF NOT EXISTS roles ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL UNIQUE, + description TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Permissions table +CREATE TABLE IF NOT EXISTS permissions ( + id TEXT PRIMARY KEY, + name TEXT NOT NULL UNIQUE, + description TEXT, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP +); + +-- Many-to-many: roles to permissions +CREATE TABLE IF NOT EXISTS role_permissions ( + role_id TEXT NOT NULL, + permission_id TEXT NOT NULL, + PRIMARY KEY (role_id, permission_id), + FOREIGN KEY (role_id) REFERENCES roles(id) ON DELETE CASCADE, + FOREIGN KEY (permission_id) REFERENCES permissions(id) ON DELETE CASCADE +); + +-- Many-to-many: users to roles +CREATE TABLE IF NOT EXISTS user_roles ( + user_id TEXT NOT NULL, + role_id TEXT NOT NULL, + PRIMARY KEY (user_id, role_id), + FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE, + FOREIGN KEY (role_id) REFERENCES roles(id) ON DELETE CASCADE +); + +-- Sessions table for authentication +CREATE TABLE IF NOT EXISTS sessions ( + id TEXT PRIMARY KEY, + user_id TEXT NOT NULL, + token TEXT NOT NULL UNIQUE, + expires_at DATETIME NOT NULL, + created_at DATETIME DEFAULT CURRENT_TIMESTAMP, + FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE +); + +CREATE INDEX IF 
NOT EXISTS idx_sessions_token ON sessions(token); +CREATE INDEX IF NOT EXISTS idx_sessions_user_id ON sessions(user_id); +CREATE INDEX IF NOT EXISTS idx_sessions_expires_at ON sessions(expires_at); + +-- Enhanced audit_events table (add missing columns if they don't exist) +-- Note: SQLite doesn't support ALTER TABLE ADD COLUMN IF NOT EXISTS easily, +-- so we'll handle this in the migration code +