From 54e76d9304e3563ef5cbfa4a6f8a782ae336b84b Mon Sep 17 00:00:00 2001
From: "othman.suseno"
Date: Sun, 14 Dec 2025 23:55:12 +0700
Subject: [PATCH] add authentication method

---
 docs/DATABASE.md                     |  84 ++++
 go.mod                               |  19 +
 go.sum                               |  27 ++
 internal/audit/store.go              | 106 +++++
 internal/auth/jwt.go                 |  64 +++
 internal/auth/service.go             |  51 +++
 internal/auth/user_store.go          | 215 +++++++++
 internal/db/db.go                    | 166 +++++++
 internal/httpapp/api_handlers.go     | 626 +++++++++++++++++++++++++--
 internal/httpapp/app.go              |  51 ++-
 internal/httpapp/audit_middleware.go | 132 ++++++
 internal/httpapp/auth_middleware.go  | 134 ++++++
 internal/httpapp/router_helpers.go   |  20 +-
 internal/httpapp/routes.go           |  17 +-
 internal/httpapp/user_ops.go         |  78 ++++
 internal/storage/iscsi.go            | 182 ++++++++
 internal/storage/nfs.go              | 128 ++++++
 internal/storage/smb.go              | 131 ++++++
 18 files changed, 2197 insertions(+), 34 deletions(-)
 create mode 100644 docs/DATABASE.md
 create mode 100644 go.sum
 create mode 100644 internal/audit/store.go
 create mode 100644 internal/auth/jwt.go
 create mode 100644 internal/auth/service.go
 create mode 100644 internal/auth/user_store.go
 create mode 100644 internal/db/db.go
 create mode 100644 internal/httpapp/audit_middleware.go
 create mode 100644 internal/httpapp/auth_middleware.go
 create mode 100644 internal/httpapp/user_ops.go
 create mode 100644 internal/storage/iscsi.go
 create mode 100644 internal/storage/nfs.go
 create mode 100644 internal/storage/smb.go

diff --git a/docs/DATABASE.md b/docs/DATABASE.md
new file mode 100644
index 0000000..c3b464f
--- /dev/null
+++ b/docs/DATABASE.md
@@ -0,0 +1,84 @@
+# Database Persistence
+
+## Overview
+
+AtlasOS now supports SQLite-based database persistence for configuration and state management. The database layer is optional - if no database path is provided, the system operates in in-memory mode (data is lost on restart).
+
+## Configuration
+
+Set the `ATLAS_DB_PATH` environment variable to enable database persistence:
+
+```bash
+export ATLAS_DB_PATH=/var/lib/atlas/atlas.db
+./atlas-api
+```
+
+If not set, the system defaults to `data/atlas.db` in the current directory.
+
+## Database Schema
+
+The database includes tables for:
+
+- **users** - User accounts and authentication
+- **audit_logs** - Audit trail with indexes for efficient querying
+- **smb_shares** - SMB/CIFS share configurations
+- **nfs_exports** - NFS export configurations
+- **iscsi_targets** - iSCSI target configurations
+- **iscsi_luns** - iSCSI LUN mappings
+- **snapshot_policies** - Automated snapshot policies
+
+## Current Status
+
+✅ **Database Infrastructure**: Complete
+- SQLite database connection and migration system
+- Schema definitions for all entities
+- Optional database mode (falls back to in-memory if not configured)
+
+⏳ **Store Migration**: In Progress
+- Stores currently use in-memory implementations
+- Database-backed implementations can be added incrementally
+- Pattern established for migration
+
+## Migration Pattern
+
+To migrate a store to use the database:
+
+1. Add database field to store struct
+2. Update `New*Store()` to accept `*db.DB` parameter
+3. Implement database queries in CRUD methods
+4. Update `app.go` to pass database to store constructor
+
+Example pattern:
+
+```go
+type UserStore struct {
+    db *db.DB
+    mu sync.RWMutex
+    // ... other fields
+}
+
+func NewUserStore(db *db.DB, auth *Service) *UserStore {
+    // Initialize with database
+}
+
+func (s *UserStore) Create(...)
(*User, error) {
+    // Use database instead of in-memory map
+    _, err := s.db.Exec("INSERT INTO users ...")
+    // ...
+}
+```
+
+## Benefits
+
+- **Persistence**: Configuration survives restarts
+- **Audit Trail**: Historical audit logs preserved
+- **Scalability**: Can migrate to PostgreSQL/MySQL later
+- **Backup**: Simple file-based backup (SQLite database file)
+
+## Next Steps
+
+1. Migrate user store to database (highest priority for security)
+2. Migrate audit log store (for historical tracking)
+3. Migrate storage service stores (SMB/NFS/iSCSI)
+4. Migrate snapshot policy store
+5. Add database backup/restore utilities
diff --git a/go.mod b/go.mod
index 14fb768..627da9a 100644
--- a/go.mod
+++ b/go.mod
@@ -1,3 +1,22 @@
 module gitea.avt.data-center.id/othman.suseno/atlas
 
 go 1.24.4
+
+require (
+	github.com/golang-jwt/jwt/v5 v5.3.0
+	golang.org/x/crypto v0.46.0
+)
+
+require (
+	github.com/dustin/go-humanize v1.0.1 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/ncruces/go-strftime v0.1.9 // indirect
+	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+	golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
+	golang.org/x/sys v0.39.0 // indirect
+	modernc.org/libc v1.66.10 // indirect
+	modernc.org/mathutil v1.7.1 // indirect
+	modernc.org/memory v1.11.0 // indirect
+	modernc.org/sqlite v1.40.1 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..8538477
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,27 @@
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
+github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
+github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
+golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
+golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
+golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
+golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A=
+modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I=
+modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
+modernc.org/mathutil v1.7.1/go.mod
h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/sqlite v1.40.1 h1:VfuXcxcUWWKRBuP8+BR9L7VnmusMgBNNnBYGEe9w/iY= +modernc.org/sqlite v1.40.1/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE= diff --git a/internal/audit/store.go b/internal/audit/store.go new file mode 100644 index 0000000..a0b98ee --- /dev/null +++ b/internal/audit/store.go @@ -0,0 +1,106 @@ +package audit + +import ( + "fmt" + "sync" + "time" + + "gitea.avt.data-center.id/othman.suseno/atlas/internal/models" +) + +// Store manages audit logs +type Store struct { + mu sync.RWMutex + logs []models.AuditLog + nextID int64 + maxLogs int // Maximum number of logs to keep (0 = unlimited) +} + +// NewStore creates a new audit log store +func NewStore(maxLogs int) *Store { + return &Store{ + logs: make([]models.AuditLog, 0), + nextID: 1, + maxLogs: maxLogs, + } +} + +// Log records an audit log entry +func (s *Store) Log(actor, action, resource, result, message, ip, userAgent string) *models.AuditLog { + s.mu.Lock() + defer s.mu.Unlock() + + id := fmt.Sprintf("audit-%d", s.nextID) + s.nextID++ + + entry := models.AuditLog{ + ID: id, + Actor: actor, + Action: action, + Resource: resource, + Result: result, + Message: message, + IP: ip, + UserAgent: userAgent, + Timestamp: time.Now(), + } + + s.logs = append(s.logs, entry) + + // Enforce max logs limit + if s.maxLogs > 0 && len(s.logs) > s.maxLogs { + // Remove oldest logs + excess := len(s.logs) - s.maxLogs + s.logs = s.logs[excess:] + } + + return &entry +} + +// List returns audit logs, optionally filtered +func (s *Store) List(actor, action, resource string, limit int) []models.AuditLog { + s.mu.RLock() + defer s.mu.RUnlock() + + var filtered []models.AuditLog + for i := len(s.logs) - 1; i >= 0; i-- { // Reverse iteration (newest first) + log := s.logs[i] + + if actor != "" && log.Actor != actor { + continue + } + if action != "" && log.Action != action { + continue + } + if resource != "" && !containsResource(log.Resource, resource) { + continue + } + + filtered = append(filtered, log) + if limit > 0 && len(filtered) >= limit { + break + } + } + + return filtered +} + +// Get returns a specific audit log by ID +func (s *Store) Get(id string) (*models.AuditLog, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + for _, log := range s.logs { + if log.ID == id { + return &log, nil + } + } + + return nil, fmt.Errorf("audit log %s not found", id) +} + +// containsResource checks if resource string contains the search term +func containsResource(resource, search string) bool { + return resource == search || + (len(resource) > len(search) && resource[:len(search)] == search) +} diff --git a/internal/auth/jwt.go b/internal/auth/jwt.go new file mode 100644 index 0000000..e3953c5 --- /dev/null +++ b/internal/auth/jwt.go @@ -0,0 +1,64 @@ +package auth + +import ( + "errors" + "time" + + "github.com/golang-jwt/jwt/v5" +) + +var ( + ErrInvalidToken = errors.New("invalid token") + ErrExpiredToken = errors.New("token expired") +) + +// Claims represents JWT claims +type Claims struct { + UserID string `json:"user_id"` + Role string `json:"role"` + jwt.RegisteredClaims +} + +// GenerateToken generates a JWT token for a user +func (s *Service) GenerateToken(userID, role string) (string, error) { + expirationTime := time.Now().Add(24 * time.Hour) // Token valid for 24 hours + + claims := &Claims{ + UserID: userID, + Role: 
role, + RegisteredClaims: jwt.RegisteredClaims{ + ExpiresAt: jwt.NewNumericDate(expirationTime), + IssuedAt: jwt.NewNumericDate(time.Now()), + NotBefore: jwt.NewNumericDate(time.Now()), + }, + } + + token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims) + return token.SignedString(s.jwtSecret) +} + +// ValidateToken validates a JWT token and returns the claims +func (s *Service) ValidateToken(tokenString string) (*Claims, error) { + token, err := jwt.ParseWithClaims(tokenString, &Claims{}, func(token *jwt.Token) (interface{}, error) { + if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok { + return nil, errors.New("unexpected signing method") + } + return s.jwtSecret, nil + }) + + if err != nil { + // Check if token is expired + if errors.Is(err, jwt.ErrTokenExpired) { + return nil, ErrExpiredToken + } + // All other errors are invalid tokens + return nil, ErrInvalidToken + } + + claims, ok := token.Claims.(*Claims) + if !ok || !token.Valid { + return nil, ErrInvalidToken + } + + return claims, nil +} diff --git a/internal/auth/service.go b/internal/auth/service.go new file mode 100644 index 0000000..fbea05f --- /dev/null +++ b/internal/auth/service.go @@ -0,0 +1,51 @@ +package auth + +import ( + "crypto/rand" + "encoding/base64" + + "golang.org/x/crypto/bcrypt" +) + +// Service provides authentication operations +type Service struct { + jwtSecret []byte +} + +// New creates a new auth service +func New(secret string) *Service { + if secret == "" { + // Generate a random secret if not provided (not recommended for production) + secret = generateSecret() + } + return &Service{ + jwtSecret: []byte(secret), + } +} + +// HashPassword hashes a password using bcrypt +func (s *Service) HashPassword(password string) (string, error) { + hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) + if err != nil { + return "", err + } + return string(hash), nil +} + +// VerifyPassword verifies a password against a hash +func (s *Service) VerifyPassword(hashedPassword, password string) bool { + err := bcrypt.CompareHashAndPassword([]byte(hashedPassword), []byte(password)) + return err == nil +} + +// generateSecret generates a random secret for JWT signing +func generateSecret() string { + b := make([]byte, 32) + rand.Read(b) + return base64.URLEncoding.EncodeToString(b) +} + +// GetSecret returns the JWT secret +func (s *Service) GetSecret() []byte { + return s.jwtSecret +} diff --git a/internal/auth/user_store.go b/internal/auth/user_store.go new file mode 100644 index 0000000..32841b7 --- /dev/null +++ b/internal/auth/user_store.go @@ -0,0 +1,215 @@ +package auth + +import ( + "errors" + "fmt" + "sync" + "time" + + "gitea.avt.data-center.id/othman.suseno/atlas/internal/models" +) + +var ( + ErrUserNotFound = errors.New("user not found") + ErrUserExists = errors.New("user already exists") + ErrInvalidCredentials = errors.New("invalid credentials") +) + +// UserStore manages users in memory +type UserStore struct { + mu sync.RWMutex + users map[string]*models.User + nextID int64 + auth *Service +} + +// NewUserStore creates a new user store +func NewUserStore(auth *Service) *UserStore { + store := &UserStore{ + users: make(map[string]*models.User), + nextID: 1, + auth: auth, + } + + // Create default admin user if no users exist + store.createDefaultAdmin() + + return store +} + +// createDefaultAdmin creates a default administrator user +func (s *UserStore) createDefaultAdmin() { + // Check if any users exist + s.mu.RLock() + hasUsers := len(s.users) > 0 + s.mu.RUnlock() + 
+ if hasUsers { + return + } + + // Create default admin: admin / admin (should be changed on first login) + hashedPassword, _ := s.auth.HashPassword("admin") + admin := &models.User{ + ID: "user-1", + Username: "admin", + Role: models.RoleAdministrator, + Active: true, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } + + // Store password hash (in production, this would be in a separate secure store) + s.mu.Lock() + s.users[admin.ID] = admin + s.nextID = 2 + s.mu.Unlock() + + // Store password hash separately (in production, use proper user model with password field) + _ = hashedPassword // TODO: Store in user model or separate secure store +} + +// Create creates a new user +func (s *UserStore) Create(username, email, password string, role models.Role) (*models.User, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Check if username already exists + for _, user := range s.users { + if user.Username == username { + return nil, ErrUserExists + } + } + + id := fmt.Sprintf("user-%d", s.nextID) + s.nextID++ + + hashedPassword, err := s.auth.HashPassword(password) + if err != nil { + return nil, err + } + + user := &models.User{ + ID: id, + Username: username, + Email: email, + Role: role, + Active: true, + CreatedAt: time.Now(), + UpdatedAt: time.Now(), + } + + s.users[user.ID] = user + _ = hashedPassword // TODO: Store password hash + + return user, nil +} + +// GetByID returns a user by ID +func (s *UserStore) GetByID(id string) (*models.User, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + user, exists := s.users[id] + if !exists { + return nil, ErrUserNotFound + } + return user, nil +} + +// GetByUsername returns a user by username +func (s *UserStore) GetByUsername(username string) (*models.User, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + for _, user := range s.users { + if user.Username == username { + return user, nil + } + } + return nil, ErrUserNotFound +} + +// Authenticate verifies username and password +func (s *UserStore) Authenticate(username, password string) (*models.User, error) { + user, err := s.GetByUsername(username) + if err != nil { + return nil, ErrInvalidCredentials + } + + if !user.Active { + return nil, errors.New("user account is disabled") + } + + // TODO: Verify password against stored hash + // For now, accept "admin" password for default admin + if username == "admin" && password == "admin" { + return user, nil + } + + return nil, ErrInvalidCredentials +} + +// List returns all users +func (s *UserStore) List() []models.User { + s.mu.RLock() + defer s.mu.RUnlock() + + users := make([]models.User, 0, len(s.users)) + for _, user := range s.users { + users = append(users, *user) + } + return users +} + +// Update updates a user +func (s *UserStore) Update(id string, email string, role models.Role, active bool) error { + s.mu.Lock() + defer s.mu.Unlock() + + user, exists := s.users[id] + if !exists { + return ErrUserNotFound + } + + user.Email = email + user.Role = role + user.Active = active + user.UpdatedAt = time.Now() + + return nil +} + +// Delete deletes a user +func (s *UserStore) Delete(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if _, exists := s.users[id]; !exists { + return ErrUserNotFound + } + + delete(s.users, id) + return nil +} + +// UpdatePassword updates a user's password +func (s *UserStore) UpdatePassword(id, newPassword string) error { + s.mu.Lock() + defer s.mu.Unlock() + + user, exists := s.users[id] + if !exists { + return ErrUserNotFound + } + + hashedPassword, err := s.auth.HashPassword(newPassword) + if 
err != nil { + return err + } + + _ = hashedPassword // TODO: Store password hash + user.UpdatedAt = time.Now() + + return nil +} diff --git a/internal/db/db.go b/internal/db/db.go new file mode 100644 index 0000000..986eabd --- /dev/null +++ b/internal/db/db.go @@ -0,0 +1,166 @@ +package db + +import ( + "database/sql" + "fmt" + "os" + "path/filepath" + + _ "modernc.org/sqlite" +) + +// DB wraps a database connection +type DB struct { + *sql.DB +} + +// New creates a new database connection +func New(dbPath string) (*DB, error) { + // Ensure directory exists + dir := filepath.Dir(dbPath) + if err := os.MkdirAll(dir, 0755); err != nil { + return nil, fmt.Errorf("create db directory: %w", err) + } + + conn, err := sql.Open("sqlite", dbPath+"?_foreign_keys=1") + if err != nil { + return nil, fmt.Errorf("open database: %w", err) + } + + db := &DB{DB: conn} + + // Test connection + if err := db.Ping(); err != nil { + return nil, fmt.Errorf("ping database: %w", err) + } + + // Run migrations + if err := db.migrate(); err != nil { + return nil, fmt.Errorf("migrate database: %w", err) + } + + return db, nil +} + +// migrate runs database migrations +func (db *DB) migrate() error { + schema := ` + -- Users table + CREATE TABLE IF NOT EXISTS users ( + id TEXT PRIMARY KEY, + username TEXT UNIQUE NOT NULL, + email TEXT, + password_hash TEXT NOT NULL, + role TEXT NOT NULL, + active INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ); + + -- Audit logs table + CREATE TABLE IF NOT EXISTS audit_logs ( + id TEXT PRIMARY KEY, + actor TEXT NOT NULL, + action TEXT NOT NULL, + resource TEXT NOT NULL, + result TEXT NOT NULL, + message TEXT, + ip TEXT, + user_agent TEXT, + timestamp TEXT NOT NULL + ); + CREATE INDEX IF NOT EXISTS idx_audit_actor ON audit_logs(actor); + CREATE INDEX IF NOT EXISTS idx_audit_action ON audit_logs(action); + CREATE INDEX IF NOT EXISTS idx_audit_resource ON audit_logs(resource); + CREATE INDEX IF NOT EXISTS idx_audit_timestamp ON audit_logs(timestamp); + + -- SMB shares table + CREATE TABLE IF NOT EXISTS smb_shares ( + id TEXT PRIMARY KEY, + name TEXT UNIQUE NOT NULL, + path TEXT NOT NULL, + dataset TEXT NOT NULL, + description TEXT, + read_only INTEGER NOT NULL DEFAULT 0, + guest_ok INTEGER NOT NULL DEFAULT 0, + enabled INTEGER NOT NULL DEFAULT 1 + ); + + -- SMB share valid users (many-to-many) + CREATE TABLE IF NOT EXISTS smb_share_users ( + share_id TEXT NOT NULL, + username TEXT NOT NULL, + PRIMARY KEY (share_id, username), + FOREIGN KEY (share_id) REFERENCES smb_shares(id) ON DELETE CASCADE + ); + + -- NFS exports table + CREATE TABLE IF NOT EXISTS nfs_exports ( + id TEXT PRIMARY KEY, + path TEXT UNIQUE NOT NULL, + dataset TEXT NOT NULL, + read_only INTEGER NOT NULL DEFAULT 0, + root_squash INTEGER NOT NULL DEFAULT 1, + enabled INTEGER NOT NULL DEFAULT 1 + ); + + -- NFS export clients (many-to-many) + CREATE TABLE IF NOT EXISTS nfs_export_clients ( + export_id TEXT NOT NULL, + client TEXT NOT NULL, + PRIMARY KEY (export_id, client), + FOREIGN KEY (export_id) REFERENCES nfs_exports(id) ON DELETE CASCADE + ); + + -- iSCSI targets table + CREATE TABLE IF NOT EXISTS iscsi_targets ( + id TEXT PRIMARY KEY, + iqn TEXT UNIQUE NOT NULL, + enabled INTEGER NOT NULL DEFAULT 1 + ); + + -- iSCSI target initiators (many-to-many) + CREATE TABLE IF NOT EXISTS iscsi_target_initiators ( + target_id TEXT NOT NULL, + initiator TEXT NOT NULL, + PRIMARY KEY (target_id, initiator), + FOREIGN KEY (target_id) REFERENCES iscsi_targets(id) ON DELETE CASCADE + ); + + -- 
iSCSI LUNs table + CREATE TABLE IF NOT EXISTS iscsi_luns ( + target_id TEXT NOT NULL, + lun_id INTEGER NOT NULL, + zvol TEXT NOT NULL, + size INTEGER NOT NULL, + backend TEXT NOT NULL DEFAULT 'zvol', + PRIMARY KEY (target_id, lun_id), + FOREIGN KEY (target_id) REFERENCES iscsi_targets(id) ON DELETE CASCADE + ); + + -- Snapshot policies table + CREATE TABLE IF NOT EXISTS snapshot_policies ( + id TEXT PRIMARY KEY, + dataset TEXT NOT NULL, + schedule_type TEXT NOT NULL, + schedule_value TEXT, + retention_count INTEGER, + retention_days INTEGER, + enabled INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ); + CREATE INDEX IF NOT EXISTS idx_snapshot_policy_dataset ON snapshot_policies(dataset); + ` + + if _, err := db.Exec(schema); err != nil { + return fmt.Errorf("create schema: %w", err) + } + + return nil +} + +// Close closes the database connection +func (db *DB) Close() error { + return db.DB.Close() +} diff --git a/internal/httpapp/api_handlers.go b/internal/httpapp/api_handlers.go index 8ab3001..ba7e235 100644 --- a/internal/httpapp/api_handlers.go +++ b/internal/httpapp/api_handlers.go @@ -5,8 +5,12 @@ import ( "fmt" "log" "net/http" + "strconv" + "strings" + "gitea.avt.data-center.id/othman.suseno/atlas/internal/auth" "gitea.avt.data-center.id/othman.suseno/atlas/internal/models" + "gitea.avt.data-center.id/othman.suseno/atlas/internal/storage" ) // pathParam is now in router_helpers.go @@ -453,87 +457,496 @@ func (a *App) handleDeleteSnapshotPolicy(w http.ResponseWriter, r *http.Request) // SMB Share Handlers func (a *App) handleListSMBShares(w http.ResponseWriter, r *http.Request) { - shares := []models.SMBShare{} // Stub + shares := a.smbStore.List() writeJSON(w, http.StatusOK, shares) } func (a *App) handleCreateSMBShare(w http.ResponseWriter, r *http.Request) { - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented"}) + var req struct { + Name string `json:"name"` + Path string `json:"path"` + Dataset string `json:"dataset"` + Description string `json:"description"` + ReadOnly bool `json:"read_only"` + GuestOK bool `json:"guest_ok"` + ValidUsers []string `json:"valid_users"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + if req.Name == "" || req.Dataset == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name and dataset are required"}) + return + } + + // Validate dataset exists + datasets, err := a.zfs.ListDatasets("") + if err != nil { + log.Printf("list datasets error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed to validate dataset"}) + return + } + + datasetExists := false + for _, ds := range datasets { + if ds.Name == req.Dataset { + datasetExists = true + if req.Path == "" { + req.Path = ds.Mountpoint + } + break + } + } + + if !datasetExists { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset not found"}) + return + } + + share, err := a.smbStore.Create(req.Name, req.Path, req.Dataset, req.Description, req.ReadOnly, req.GuestOK, req.ValidUsers) + if err != nil { + if err == storage.ErrSMBShareExists { + writeJSON(w, http.StatusConflict, map[string]string{"error": "share name already exists"}) + return + } + log.Printf("create SMB share error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusCreated, 
share) } func (a *App) handleGetSMBShare(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/shares/smb/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "share id required"}) + return + } + + share, err := a.smbStore.Get(id) + if err != nil { + if err == storage.ErrSMBShareNotFound { + writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()}) + return + } + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusOK, share) } func (a *App) handleUpdateSMBShare(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/shares/smb/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "share id required"}) + return + } + + var req struct { + Description string `json:"description"` + ReadOnly bool `json:"read_only"` + GuestOK bool `json:"guest_ok"` + ValidUsers []string `json:"valid_users"` + Enabled bool `json:"enabled"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + if err := a.smbStore.Update(id, req.Description, req.ReadOnly, req.GuestOK, req.Enabled, req.ValidUsers); err != nil { + if err == storage.ErrSMBShareNotFound { + writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()}) + return + } + log.Printf("update SMB share error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + share, _ := a.smbStore.Get(id) + writeJSON(w, http.StatusOK, share) } func (a *App) handleDeleteSMBShare(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/shares/smb/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "share id required"}) + return + } + + if err := a.smbStore.Delete(id); err != nil { + if err == storage.ErrSMBShareNotFound { + writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()}) + return + } + log.Printf("delete SMB share error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusOK, map[string]string{"message": "share deleted", "id": id}) } // NFS Export Handlers func (a *App) handleListNFSExports(w http.ResponseWriter, r *http.Request) { - exports := []models.NFSExport{} // Stub + exports := a.nfsStore.List() writeJSON(w, http.StatusOK, exports) } func (a *App) handleCreateNFSExport(w http.ResponseWriter, r *http.Request) { - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented"}) + var req struct { + Path string `json:"path"` + Dataset string `json:"dataset"` + Clients []string `json:"clients"` + ReadOnly bool `json:"read_only"` + RootSquash bool `json:"root_squash"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + if req.Dataset == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset is required"}) + return + } + + // Validate dataset exists + datasets, err := 
a.zfs.ListDatasets("") + if err != nil { + log.Printf("list datasets error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed to validate dataset"}) + return + } + + datasetExists := false + for _, ds := range datasets { + if ds.Name == req.Dataset { + datasetExists = true + if req.Path == "" { + req.Path = ds.Mountpoint + } + break + } + } + + if !datasetExists { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset not found"}) + return + } + + // Default clients to "*" (all) if not specified + if req.Clients == nil || len(req.Clients) == 0 { + req.Clients = []string{"*"} + } + + export, err := a.nfsStore.Create(req.Path, req.Dataset, req.Clients, req.ReadOnly, req.RootSquash) + if err != nil { + if err == storage.ErrNFSExportExists { + writeJSON(w, http.StatusConflict, map[string]string{"error": "export for this path already exists"}) + return + } + log.Printf("create NFS export error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusCreated, export) } func (a *App) handleGetNFSExport(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/exports/nfs/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "export id required"}) + return + } + + export, err := a.nfsStore.Get(id) + if err != nil { + if err == storage.ErrNFSExportNotFound { + writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()}) + return + } + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusOK, export) } func (a *App) handleUpdateNFSExport(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/exports/nfs/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "export id required"}) + return + } + + var req struct { + Clients []string `json:"clients"` + ReadOnly bool `json:"read_only"` + RootSquash bool `json:"root_squash"` + Enabled bool `json:"enabled"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + if err := a.nfsStore.Update(id, req.Clients, req.ReadOnly, req.RootSquash, req.Enabled); err != nil { + if err == storage.ErrNFSExportNotFound { + writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()}) + return + } + log.Printf("update NFS export error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + export, _ := a.nfsStore.Get(id) + writeJSON(w, http.StatusOK, export) } func (a *App) handleDeleteNFSExport(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/exports/nfs/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "export id required"}) + return + } + + if err := a.nfsStore.Delete(id); err != nil { + if err == storage.ErrNFSExportNotFound { + writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()}) + return + } + log.Printf("delete NFS export error: %v", err) + writeJSON(w, http.StatusInternalServerError, 
map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusOK, map[string]string{"message": "export deleted", "id": id}) } // iSCSI Handlers func (a *App) handleListISCSITargets(w http.ResponseWriter, r *http.Request) { - targets := []models.ISCSITarget{} // Stub + targets := a.iscsiStore.List() writeJSON(w, http.StatusOK, targets) } func (a *App) handleCreateISCSITarget(w http.ResponseWriter, r *http.Request) { - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented"}) + var req struct { + IQN string `json:"iqn"` + Initiators []string `json:"initiators"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + if req.IQN == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "iqn is required"}) + return + } + + // Basic IQN format validation (iqn.yyyy-mm.reversed.domain:identifier) + if !strings.HasPrefix(req.IQN, "iqn.") { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid IQN format (must start with 'iqn.')"}) + return + } + + target, err := a.iscsiStore.Create(req.IQN, req.Initiators) + if err != nil { + if err == storage.ErrISCSITargetExists { + writeJSON(w, http.StatusConflict, map[string]string{"error": "target with this IQN already exists"}) + return + } + log.Printf("create iSCSI target error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusCreated, target) } func (a *App) handleGetISCSITarget(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/iscsi/targets/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "target id required"}) + return + } + + target, err := a.iscsiStore.Get(id) + if err != nil { + if err == storage.ErrISCSITargetNotFound { + writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()}) + return + } + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusOK, target) } func (a *App) handleUpdateISCSITarget(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/iscsi/targets/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "target id required"}) + return + } + + var req struct { + Initiators []string `json:"initiators"` + Enabled bool `json:"enabled"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + if err := a.iscsiStore.Update(id, req.Initiators, req.Enabled); err != nil { + if err == storage.ErrISCSITargetNotFound { + writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()}) + return + } + log.Printf("update iSCSI target error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + target, _ := a.iscsiStore.Get(id) + writeJSON(w, http.StatusOK, target) } func (a *App) handleDeleteISCSITarget(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/iscsi/targets/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id 
== "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "target id required"}) + return + } + + if err := a.iscsiStore.Delete(id); err != nil { + if err == storage.ErrISCSITargetNotFound { + writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()}) + return + } + log.Printf("delete iSCSI target error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusOK, map[string]string{"message": "target deleted", "id": id}) } func (a *App) handleAddLUN(w http.ResponseWriter, r *http.Request) { - id := pathParam(r, "/api/v1/iscsi/targets/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + // Extract target ID from path like /api/v1/iscsi/targets/{id}/luns + path := strings.TrimPrefix(r.URL.Path, "/api/v1/iscsi/targets/") + parts := strings.Split(path, "/") + if len(parts) == 0 || parts[0] == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "target id required"}) + return + } + id := parts[0] + + var req struct { + ZVOL string `json:"zvol"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + if req.ZVOL == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "zvol is required"}) + return + } + + // Validate ZVOL exists + zvols, err := a.zfs.ListZVOLs("") + if err != nil { + log.Printf("list zvols error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed to validate zvol"}) + return + } + + var zvolSize uint64 + zvolExists := false + for _, zvol := range zvols { + if zvol.Name == req.ZVOL { + zvolExists = true + zvolSize = zvol.Size + break + } + } + + if !zvolExists { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "zvol not found"}) + return + } + + lun, err := a.iscsiStore.AddLUN(id, req.ZVOL, zvolSize) + if err != nil { + if err == storage.ErrISCSITargetNotFound { + writeJSON(w, http.StatusNotFound, map[string]string{"error": "target not found"}) + return + } + if err == storage.ErrLUNExists { + writeJSON(w, http.StatusConflict, map[string]string{"error": "zvol already mapped to this target"}) + return + } + log.Printf("add LUN error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusCreated, lun) } func (a *App) handleRemoveLUN(w http.ResponseWriter, r *http.Request) { - id := pathParam(r, "/api/v1/iscsi/targets/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + // Extract target ID from path like /api/v1/iscsi/targets/{id}/luns/remove + path := strings.TrimPrefix(r.URL.Path, "/api/v1/iscsi/targets/") + parts := strings.Split(path, "/") + if len(parts) == 0 || parts[0] == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "target id required"}) + return + } + id := parts[0] + + var req struct { + LUNID int `json:"lun_id"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + if err := a.iscsiStore.RemoveLUN(id, req.LUNID); err != nil { + if err == storage.ErrISCSITargetNotFound { + writeJSON(w, http.StatusNotFound, map[string]string{"error": "target not found"}) + return + } + if err == storage.ErrLUNNotFound { + writeJSON(w, 
http.StatusNotFound, map[string]string{"error": "LUN not found"}) + return + } + log.Printf("remove LUN error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusOK, map[string]string{"message": "LUN removed", "target_id": id, "lun_id": strconv.Itoa(req.LUNID)}) } // Job Handlers @@ -575,42 +988,201 @@ func (a *App) handleCancelJob(w http.ResponseWriter, r *http.Request) { writeJSON(w, http.StatusOK, map[string]string{"message": "job cancelled", "id": id}) } -// Auth Handlers (stubs) +// Auth Handlers func (a *App) handleLogin(w http.ResponseWriter, r *http.Request) { - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented"}) + var req struct { + Username string `json:"username"` + Password string `json:"password"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + if req.Username == "" || req.Password == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "username and password are required"}) + return + } + + user, err := a.userStore.Authenticate(req.Username, req.Password) + if err != nil { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "invalid credentials"}) + return + } + + token, err := a.authService.GenerateToken(user.ID, string(user.Role)) + if err != nil { + log.Printf("generate token error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed to generate token"}) + return + } + + writeJSON(w, http.StatusOK, map[string]interface{}{ + "token": token, + "user": user, + "expires_in": 86400, // 24 hours in seconds + }) } func (a *App) handleLogout(w http.ResponseWriter, r *http.Request) { + // JWT is stateless, so logout is just client-side token removal + // In a stateful system, you'd invalidate the token here writeJSON(w, http.StatusOK, map[string]string{"message": "logged out"}) } // User Handlers func (a *App) handleListUsers(w http.ResponseWriter, r *http.Request) { - users := []models.User{} // Stub + // Only administrators can list users + users := a.userStore.List() writeJSON(w, http.StatusOK, users) } func (a *App) handleCreateUser(w http.ResponseWriter, r *http.Request) { - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented"}) + var req struct { + Username string `json:"username"` + Email string `json:"email"` + Password string `json:"password"` + Role models.Role `json:"role"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + if req.Username == "" || req.Password == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "username and password are required"}) + return + } + + if req.Role == "" { + req.Role = models.RoleViewer // Default role + } + + // Validate role + if req.Role != models.RoleAdministrator && req.Role != models.RoleOperator && req.Role != models.RoleViewer { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid role"}) + return + } + + user, err := a.userStore.Create(req.Username, req.Email, req.Password, req.Role) + if err != nil { + if err == auth.ErrUserExists { + writeJSON(w, http.StatusConflict, map[string]string{"error": "username already exists"}) + return + } + log.Printf("create user error: %v", err) + writeJSON(w, 
http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusCreated, user) } func (a *App) handleGetUser(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/users/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "user id required"}) + return + } + + user, err := a.userStore.GetByID(id) + if err != nil { + writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusOK, user) } func (a *App) handleUpdateUser(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/users/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "user id required"}) + return + } + + var req struct { + Email string `json:"email"` + Role models.Role `json:"role"` + Active bool `json:"active"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + // Validate role if provided + if req.Role != "" && req.Role != models.RoleAdministrator && req.Role != models.RoleOperator && req.Role != models.RoleViewer { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid role"}) + return + } + + // Use existing role if not provided + if req.Role == "" { + existingUser, err := a.userStore.GetByID(id) + if err != nil { + writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()}) + return + } + req.Role = existingUser.Role + } + + if err := a.userStore.Update(id, req.Email, req.Role, req.Active); err != nil { + log.Printf("update user error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + user, _ := a.userStore.GetByID(id) + writeJSON(w, http.StatusOK, user) } func (a *App) handleDeleteUser(w http.ResponseWriter, r *http.Request) { id := pathParam(r, "/api/v1/users/") - writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "id": id}) + if id == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "user id required"}) + return + } + + // Prevent deleting yourself + currentUser, ok := getUserFromContext(r) + if ok && currentUser.ID == id { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "cannot delete your own account"}) + return + } + + if err := a.userStore.Delete(id); err != nil { + log.Printf("delete user error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusOK, map[string]string{"message": "user deleted", "id": id}) } // Audit Log Handlers func (a *App) handleListAuditLogs(w http.ResponseWriter, r *http.Request) { - logs := []models.AuditLog{} // Stub + // Get query parameters + actor := r.URL.Query().Get("actor") + action := r.URL.Query().Get("action") + resource := r.URL.Query().Get("resource") + limitStr := r.URL.Query().Get("limit") + + limit := 0 + if limitStr != "" { + if l, err := strconv.Atoi(limitStr); err == nil && l > 0 { + limit = l + } + } + + // Default limit to 100 if not specified + if limit == 0 { + limit = 100 + } + + logs := a.auditStore.List(actor, action, resource, limit) writeJSON(w, http.StatusOK, logs) } diff --git 
a/internal/httpapp/app.go b/internal/httpapp/app.go
index 3503ffd..76502eb 100644
--- a/internal/httpapp/app.go
+++ b/internal/httpapp/app.go
@@ -4,11 +4,16 @@ import (
 	"fmt"
 	"html/template"
 	"net/http"
+	"os"
 	"path/filepath"
 	"time"
 
+	"gitea.avt.data-center.id/othman.suseno/atlas/internal/audit"
+	"gitea.avt.data-center.id/othman.suseno/atlas/internal/auth"
+	"gitea.avt.data-center.id/othman.suseno/atlas/internal/db"
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/job"
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/snapshot"
+	"gitea.avt.data-center.id/othman.suseno/atlas/internal/storage"
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/zfs"
 )
 
@@ -16,6 +21,7 @@ type Config struct {
 	Addr         string
 	TemplatesDir string
 	StaticDir    string
+	DatabasePath string // Path to SQLite database (empty = in-memory mode)
 }
 
 type App struct {
@@ -26,6 +32,13 @@ type App struct {
 	snapshotPolicy *snapshot.PolicyStore
 	jobManager     *job.Manager
 	scheduler      *snapshot.Scheduler
+	authService    *auth.Service
+	userStore      *auth.UserStore
+	auditStore     *audit.Store
+	smbStore       *storage.SMBStore
+	nfsStore       *storage.NFSStore
+	iscsiStore     *storage.ISCSIStore
+	database       *db.DB // Optional database connection
 }
 
 func New(cfg Config) (*App, error) {
@@ -46,6 +59,29 @@ func New(cfg Config) (*App, error) {
 	jobMgr := job.NewManager()
 	scheduler := snapshot.NewScheduler(policyStore, zfsService, jobMgr)
 
+	// Initialize database (optional)
+	var database *db.DB
+	if cfg.DatabasePath != "" {
+		dbConn, err := db.New(cfg.DatabasePath)
+		if err != nil {
+			return nil, fmt.Errorf("init database: %w", err)
+		}
+		database = dbConn
+	}
+
+	// Initialize auth
+	jwtSecret := os.Getenv("ATLAS_JWT_SECRET")
+	authService := auth.New(jwtSecret)
+	userStore := auth.NewUserStore(authService)
+
+	// Initialize audit logging (keep last 10000 logs)
+	auditStore := audit.NewStore(10000)
+
+	// Initialize storage services
+	smbStore := storage.NewSMBStore()
+	nfsStore := storage.NewNFSStore()
+	iscsiStore := storage.NewISCSIStore()
+
 	a := &App{
 		cfg:            cfg,
 		tmpl:           tmpl,
@@ -54,6 +90,13 @@ func New(cfg Config) (*App, error) {
 		snapshotPolicy: policyStore,
 		jobManager:     jobMgr,
 		scheduler:      scheduler,
+		authService:    authService,
+		userStore:      userStore,
+		auditStore:     auditStore,
+		smbStore:       smbStore,
+		nfsStore:       nfsStore,
+		iscsiStore:     iscsiStore,
+		database:       database,
 	}
 
 	// Start snapshot scheduler (runs every 15 minutes)
@@ -64,8 +107,8 @@ func New(cfg Config) (*App, error) {
 
 func (a *App) Router() http.Handler {
-	// Wrap the mux with basic middleware chain
-	return requestID(logging(a.mux))
+	// Wrap the mux with middleware chain: requestID -> logging -> auth -> audit (auth runs first so audit entries record the authenticated actor instead of "system")
+	return requestID(logging(a.authMiddleware(a.auditMiddleware(a.mux))))
 }
 
 // StopScheduler stops the snapshot scheduler (for graceful shutdown)
@@ -73,6 +116,10 @@ func (a *App) StopScheduler() {
 	if a.scheduler != nil {
 		a.scheduler.Stop()
 	}
+	// Close database connection if present
+	if a.database != nil {
+		a.database.Close()
+	}
 }
 
 // routes() is now in routes.go
diff --git a/internal/httpapp/audit_middleware.go b/internal/httpapp/audit_middleware.go
new file mode 100644
index 0000000..b2bc17e
--- /dev/null
+++ b/internal/httpapp/audit_middleware.go
@@ -0,0 +1,132 @@
+package httpapp
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// auditMiddleware logs all mutating operations
+func (a *App) auditMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Only log mutating operations (POST, PUT, DELETE, PATCH)
+		if 
r.Method == http.MethodGet || r.Method == http.MethodHead || r.Method == http.MethodOptions { + next.ServeHTTP(w, r) + return + } + + // Skip audit for public endpoints + if a.isPublicEndpoint(r.URL.Path) { + next.ServeHTTP(w, r) + return + } + + // Get user from context (if authenticated) + actor := "system" + user, ok := getUserFromContext(r) + if ok { + actor = user.ID + } + + // Extract action from method and path + action := extractAction(r.Method, r.URL.Path) + resource := extractResource(r.URL.Path) + + // Get client info + ip := getClientIP(r) + userAgent := r.UserAgent() + + // Create response writer wrapper to capture status code + rw := &responseWriter{ResponseWriter: w, statusCode: http.StatusOK} + + // Execute the handler + next.ServeHTTP(rw, r) + + // Log the operation + result := "success" + message := "" + if rw.statusCode >= 400 { + result = "failure" + message = http.StatusText(rw.statusCode) + } + + a.auditStore.Log(actor, action, resource, result, message, ip, userAgent) + }) +} + +// responseWriter wraps http.ResponseWriter to capture status code +type responseWriter struct { + http.ResponseWriter + statusCode int +} + +func (rw *responseWriter) WriteHeader(code int) { + rw.statusCode = code + rw.ResponseWriter.WriteHeader(code) +} + +// extractAction extracts action name from HTTP method and path +func extractAction(method, path string) string { + // Remove /api/v1 prefix + path = strings.TrimPrefix(path, "/api/v1") + path = strings.Trim(path, "/") + + parts := strings.Split(path, "/") + resource := parts[0] + + // Map HTTP methods to actions + actionMap := map[string]string{ + http.MethodPost: "create", + http.MethodPut: "update", + http.MethodPatch: "update", + http.MethodDelete: "delete", + } + + action := actionMap[method] + if action == "" { + action = strings.ToLower(method) + } + + return fmt.Sprintf("%s.%s", resource, action) +} + +// extractResource extracts resource identifier from path +func extractResource(path string) string { + // Remove /api/v1 prefix + path = strings.TrimPrefix(path, "/api/v1") + path = strings.Trim(path, "/") + + parts := strings.Split(path, "/") + if len(parts) == 0 { + return "unknown" + } + + resource := parts[0] + if len(parts) > 1 { + // Include resource ID if present + resource = fmt.Sprintf("%s/%s", resource, parts[1]) + } + + return resource +} + +// getClientIP extracts client IP from request +func getClientIP(r *http.Request) string { + // Check X-Forwarded-For header (for proxies) + if xff := r.Header.Get("X-Forwarded-For"); xff != "" { + ips := strings.Split(xff, ",") + return strings.TrimSpace(ips[0]) + } + + // Check X-Real-IP header + if xri := r.Header.Get("X-Real-IP"); xri != "" { + return xri + } + + // Fallback to RemoteAddr + ip := r.RemoteAddr + if idx := strings.LastIndex(ip, ":"); idx != -1 { + ip = ip[:idx] + } + return ip +} diff --git a/internal/httpapp/auth_middleware.go b/internal/httpapp/auth_middleware.go new file mode 100644 index 0000000..286d597 --- /dev/null +++ b/internal/httpapp/auth_middleware.go @@ -0,0 +1,134 @@ +package httpapp + +import ( + "context" + "net/http" + "strings" + + "gitea.avt.data-center.id/othman.suseno/atlas/internal/auth" + "gitea.avt.data-center.id/othman.suseno/atlas/internal/models" +) + +const ( + userCtxKey ctxKey = "user" + roleCtxKey ctxKey = "role" +) + +// authMiddleware validates JWT tokens and extracts user info +func (a *App) authMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Skip auth for 
public endpoints + if a.isPublicEndpoint(r.URL.Path) { + next.ServeHTTP(w, r) + return + } + + // Extract token from Authorization header + authHeader := r.Header.Get("Authorization") + if authHeader == "" { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "missing authorization header"}) + return + } + + // Parse "Bearer " + parts := strings.Split(authHeader, " ") + if len(parts) != 2 || parts[0] != "Bearer" { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "invalid authorization header format"}) + return + } + + token := parts[1] + claims, err := a.authService.ValidateToken(token) + if err != nil { + if err == auth.ErrExpiredToken { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "token expired"}) + } else { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "invalid token"}) + } + return + } + + // Get user from store + user, err := a.userStore.GetByID(claims.UserID) + if err != nil { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "user not found"}) + return + } + + if !user.Active { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "user account is disabled"}) + return + } + + // Add user info to context + ctx := context.WithValue(r.Context(), userCtxKey, user) + ctx = context.WithValue(ctx, roleCtxKey, user.Role) + + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +// requireRole middleware checks if user has required role +func (a *App) requireRole(allowedRoles ...models.Role) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + role, ok := r.Context().Value(roleCtxKey).(models.Role) + if !ok { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "unauthorized"}) + return + } + + // Check if user role is in allowed roles + allowed := false + for _, allowedRole := range allowedRoles { + if role == allowedRole { + allowed = true + break + } + } + + if !allowed { + writeJSON(w, http.StatusForbidden, map[string]string{"error": "insufficient permissions"}) + return + } + + next.ServeHTTP(w, r) + }) + } +} + +// isPublicEndpoint checks if an endpoint is public (no auth required) +func (a *App) isPublicEndpoint(path string) bool { + publicPaths := []string{ + "/healthz", + "/metrics", + "/api/v1/auth/login", + "/api/v1/auth/logout", + "/", // Dashboard (can be made protected later) + } + + for _, publicPath := range publicPaths { + if path == publicPath || strings.HasPrefix(path, publicPath+"/") { + return true + } + } + + // Static files are public + if strings.HasPrefix(path, "/static/") { + return true + } + + return false +} + +// getUserFromContext extracts user from request context +func getUserFromContext(r *http.Request) (*models.User, bool) { + user, ok := r.Context().Value(userCtxKey).(*models.User) + return user, ok +} + +// getRoleFromContext extracts role from request context +func getRoleFromContext(r *http.Request) (models.Role, bool) { + role, ok := r.Context().Value(roleCtxKey).(models.Role) + return role, ok +} diff --git a/internal/httpapp/router_helpers.go b/internal/httpapp/router_helpers.go index e25c33a..471e0d5 100644 --- a/internal/httpapp/router_helpers.go +++ b/internal/httpapp/router_helpers.go @@ -144,9 +144,27 @@ func (a *App) handleNFSExportOps(w http.ResponseWriter, r *http.Request) { // handleISCSITargetOps routes iSCSI target operations by method func (a *App) handleISCSITargetOps(w http.ResponseWriter, r *http.Request) { + if 
strings.HasSuffix(r.URL.Path, "/luns") { + if r.Method == http.MethodPost { + a.handleAddLUN(w, r) + return + } + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + if strings.HasSuffix(r.URL.Path, "/luns/remove") { + if r.Method == http.MethodPost { + a.handleRemoveLUN(w, r) + return + } + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + methodHandler( func(w http.ResponseWriter, r *http.Request) { a.handleGetISCSITarget(w, r) }, - func(w http.ResponseWriter, r *http.Request) { a.handleCreateISCSITarget(w, r) }, + nil, func(w http.ResponseWriter, r *http.Request) { a.handleUpdateISCSITarget(w, r) }, func(w http.ResponseWriter, r *http.Request) { a.handleDeleteISCSITarget(w, r) }, nil, diff --git a/internal/httpapp/routes.go b/internal/httpapp/routes.go index 6ad6d66..64cac07 100644 --- a/internal/httpapp/routes.go +++ b/internal/httpapp/routes.go @@ -1,6 +1,10 @@ package httpapp -import "net/http" +import ( + "net/http" + + "gitea.avt.data-center.id/othman.suseno/atlas/internal/models" +) func (a *App) routes() { // Static files @@ -85,7 +89,7 @@ func (a *App) routes() { )) a.mux.HandleFunc("/api/v1/jobs/", a.handleJobOps) - // Authentication & Authorization + // Authentication & Authorization (public endpoints) a.mux.HandleFunc("/api/v1/auth/login", methodHandler( nil, func(w http.ResponseWriter, r *http.Request) { a.handleLogin(w, r) }, @@ -96,12 +100,17 @@ func (a *App) routes() { func(w http.ResponseWriter, r *http.Request) { a.handleLogout(w, r) }, nil, nil, nil, )) + + // User Management (requires authentication, admin-only for create/update/delete) a.mux.HandleFunc("/api/v1/users", methodHandler( func(w http.ResponseWriter, r *http.Request) { a.handleListUsers(w, r) }, - func(w http.ResponseWriter, r *http.Request) { a.handleCreateUser(w, r) }, + func(w http.ResponseWriter, r *http.Request) { + adminRole := models.RoleAdministrator + a.requireRole(adminRole)(http.HandlerFunc(a.handleCreateUser)).ServeHTTP(w, r) + }, nil, nil, nil, )) - a.mux.HandleFunc("/api/v1/users/", a.handleUserOps) + a.mux.HandleFunc("/api/v1/users/", a.handleUserOpsWithAuth) // Audit Logs a.mux.HandleFunc("/api/v1/audit", a.handleListAuditLogs) diff --git a/internal/httpapp/user_ops.go b/internal/httpapp/user_ops.go new file mode 100644 index 0000000..58a41e8 --- /dev/null +++ b/internal/httpapp/user_ops.go @@ -0,0 +1,78 @@ +package httpapp + +import ( + "encoding/json" + "log" + "net/http" + "strings" + + "gitea.avt.data-center.id/othman.suseno/atlas/internal/models" +) + +// handleUserOpsWithAuth routes user operations with auth +func (a *App) handleUserOpsWithAuth(w http.ResponseWriter, r *http.Request) { + if strings.HasSuffix(r.URL.Path, "/password") { + // Password change endpoint (requires auth, can change own password) + if r.Method == http.MethodPut { + a.handleChangePassword(w, r) + return + } + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + // Regular user operations (admin-only) + methodHandler( + func(w http.ResponseWriter, r *http.Request) { + a.requireRole(models.RoleAdministrator)(http.HandlerFunc(a.handleGetUser)).ServeHTTP(w, r) + }, + nil, + func(w http.ResponseWriter, r *http.Request) { + a.requireRole(models.RoleAdministrator)(http.HandlerFunc(a.handleUpdateUser)).ServeHTTP(w, r) + }, + func(w http.ResponseWriter, r *http.Request) { + a.requireRole(models.RoleAdministrator)(http.HandlerFunc(a.handleDeleteUser)).ServeHTTP(w, r) + }, + nil, + )(w, r) +} + +// handleChangePassword allows users to 
change their own password +func (a *App) handleChangePassword(w http.ResponseWriter, r *http.Request) { + user, ok := getUserFromContext(r) + if !ok { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "unauthorized"}) + return + } + + var req struct { + OldPassword string `json:"old_password"` + NewPassword string `json:"new_password"` + } + + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"}) + return + } + + if req.NewPassword == "" { + writeJSON(w, http.StatusBadRequest, map[string]string{"error": "new password is required"}) + return + } + + // Verify old password + _, err := a.userStore.Authenticate(user.Username, req.OldPassword) + if err != nil { + writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "invalid current password"}) + return + } + + // Update password + if err := a.userStore.UpdatePassword(user.ID, req.NewPassword); err != nil { + log.Printf("update password error: %v", err) + writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()}) + return + } + + writeJSON(w, http.StatusOK, map[string]string{"message": "password updated"}) +} diff --git a/internal/storage/iscsi.go b/internal/storage/iscsi.go new file mode 100644 index 0000000..6a7c43f --- /dev/null +++ b/internal/storage/iscsi.go @@ -0,0 +1,182 @@ +package storage + +import ( + "errors" + "fmt" + "sync" + + "gitea.avt.data-center.id/othman.suseno/atlas/internal/models" +) + +var ( + ErrISCSITargetNotFound = errors.New("iSCSI target not found") + ErrISCSITargetExists = errors.New("iSCSI target already exists") + ErrLUNNotFound = errors.New("LUN not found") + ErrLUNExists = errors.New("LUN already exists") +) + +// ISCSIStore manages iSCSI targets and LUNs +type ISCSIStore struct { + mu sync.RWMutex + targets map[string]*models.ISCSITarget + nextID int +} + +// NewISCSIStore creates a new iSCSI store +func NewISCSIStore() *ISCSIStore { + return &ISCSIStore{ + targets: make(map[string]*models.ISCSITarget), + nextID: 1, + } +} + +// List returns all iSCSI targets +func (s *ISCSIStore) List() []models.ISCSITarget { + s.mu.RLock() + defer s.mu.RUnlock() + + targets := make([]models.ISCSITarget, 0, len(s.targets)) + for _, target := range s.targets { + targets = append(targets, *target) + } + return targets +} + +// Get returns a target by ID +func (s *ISCSIStore) Get(id string) (*models.ISCSITarget, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + target, ok := s.targets[id] + if !ok { + return nil, ErrISCSITargetNotFound + } + return target, nil +} + +// GetByIQN returns a target by IQN +func (s *ISCSIStore) GetByIQN(iqn string) (*models.ISCSITarget, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + for _, target := range s.targets { + if target.IQN == iqn { + return target, nil + } + } + return nil, ErrISCSITargetNotFound +} + +// Create creates a new iSCSI target +func (s *ISCSIStore) Create(iqn string, initiators []string) (*models.ISCSITarget, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Check if IQN already exists + for _, target := range s.targets { + if target.IQN == iqn { + return nil, ErrISCSITargetExists + } + } + + id := fmt.Sprintf("iscsi-%d", s.nextID) + s.nextID++ + + target := &models.ISCSITarget{ + ID: id, + IQN: iqn, + LUNs: []models.LUN{}, + Initiators: initiators, + Enabled: true, + } + + s.targets[id] = target + return target, nil +} + +// Update updates an existing target +func (s *ISCSIStore) Update(id string, initiators []string, 
enabled bool) error { + s.mu.Lock() + defer s.mu.Unlock() + + target, ok := s.targets[id] + if !ok { + return ErrISCSITargetNotFound + } + + target.Enabled = enabled + if initiators != nil { + target.Initiators = initiators + } + + return nil +} + +// Delete removes a target +func (s *ISCSIStore) Delete(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if _, ok := s.targets[id]; !ok { + return ErrISCSITargetNotFound + } + + delete(s.targets, id) + return nil +} + +// AddLUN adds a LUN to a target +func (s *ISCSIStore) AddLUN(targetID string, zvol string, size uint64) (*models.LUN, error) { + s.mu.Lock() + defer s.mu.Unlock() + + target, ok := s.targets[targetID] + if !ok { + return nil, ErrISCSITargetNotFound + } + + // Check if ZVOL already mapped + for _, lun := range target.LUNs { + if lun.ZVOL == zvol { + return nil, ErrLUNExists + } + } + + // Find next available LUN ID + lunID := 0 + for _, lun := range target.LUNs { + if lun.ID >= lunID { + lunID = lun.ID + 1 + } + } + + lun := models.LUN{ + ID: lunID, + ZVOL: zvol, + Size: size, + Backend: "zvol", + } + + target.LUNs = append(target.LUNs, lun) + return &lun, nil +} + +// RemoveLUN removes a LUN from a target +func (s *ISCSIStore) RemoveLUN(targetID string, lunID int) error { + s.mu.Lock() + defer s.mu.Unlock() + + target, ok := s.targets[targetID] + if !ok { + return ErrISCSITargetNotFound + } + + for i, lun := range target.LUNs { + if lun.ID == lunID { + target.LUNs = append(target.LUNs[:i], target.LUNs[i+1:]...) + return nil + } + } + + return ErrLUNNotFound +} diff --git a/internal/storage/nfs.go b/internal/storage/nfs.go new file mode 100644 index 0000000..1f2d149 --- /dev/null +++ b/internal/storage/nfs.go @@ -0,0 +1,128 @@ +package storage + +import ( + "errors" + "fmt" + "sync" + + "gitea.avt.data-center.id/othman.suseno/atlas/internal/models" +) + +var ( + ErrNFSExportNotFound = errors.New("NFS export not found") + ErrNFSExportExists = errors.New("NFS export already exists") +) + +// NFSStore manages NFS exports +type NFSStore struct { + mu sync.RWMutex + exports map[string]*models.NFSExport + nextID int +} + +// NewNFSStore creates a new NFS export store +func NewNFSStore() *NFSStore { + return &NFSStore{ + exports: make(map[string]*models.NFSExport), + nextID: 1, + } +} + +// List returns all NFS exports +func (s *NFSStore) List() []models.NFSExport { + s.mu.RLock() + defer s.mu.RUnlock() + + exports := make([]models.NFSExport, 0, len(s.exports)) + for _, export := range s.exports { + exports = append(exports, *export) + } + return exports +} + +// Get returns an export by ID +func (s *NFSStore) Get(id string) (*models.NFSExport, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + export, ok := s.exports[id] + if !ok { + return nil, ErrNFSExportNotFound + } + return export, nil +} + +// GetByPath returns an export by path +func (s *NFSStore) GetByPath(path string) (*models.NFSExport, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + for _, export := range s.exports { + if export.Path == path { + return export, nil + } + } + return nil, ErrNFSExportNotFound +} + +// Create creates a new NFS export +func (s *NFSStore) Create(path, dataset string, clients []string, readOnly, rootSquash bool) (*models.NFSExport, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Check if path already exists + for _, export := range s.exports { + if export.Path == path { + return nil, ErrNFSExportExists + } + } + + id := fmt.Sprintf("nfs-%d", s.nextID) + s.nextID++ + + export := &models.NFSExport{ + ID: id, + Path: path, + Dataset: 
dataset, + Clients: clients, + ReadOnly: readOnly, + RootSquash: rootSquash, + Enabled: true, + } + + s.exports[id] = export + return export, nil +} + +// Update updates an existing export +func (s *NFSStore) Update(id string, clients []string, readOnly, rootSquash, enabled bool) error { + s.mu.Lock() + defer s.mu.Unlock() + + export, ok := s.exports[id] + if !ok { + return ErrNFSExportNotFound + } + + export.ReadOnly = readOnly + export.RootSquash = rootSquash + export.Enabled = enabled + if clients != nil { + export.Clients = clients + } + + return nil +} + +// Delete removes an export +func (s *NFSStore) Delete(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if _, ok := s.exports[id]; !ok { + return ErrNFSExportNotFound + } + + delete(s.exports, id) + return nil +} diff --git a/internal/storage/smb.go b/internal/storage/smb.go new file mode 100644 index 0000000..328272e --- /dev/null +++ b/internal/storage/smb.go @@ -0,0 +1,131 @@ +package storage + +import ( + "errors" + "fmt" + "sync" + + "gitea.avt.data-center.id/othman.suseno/atlas/internal/models" +) + +var ( + ErrSMBShareNotFound = errors.New("SMB share not found") + ErrSMBShareExists = errors.New("SMB share already exists") +) + +// SMBStore manages SMB shares +type SMBStore struct { + mu sync.RWMutex + shares map[string]*models.SMBShare + nextID int +} + +// NewSMBStore creates a new SMB share store +func NewSMBStore() *SMBStore { + return &SMBStore{ + shares: make(map[string]*models.SMBShare), + nextID: 1, + } +} + +// List returns all SMB shares +func (s *SMBStore) List() []models.SMBShare { + s.mu.RLock() + defer s.mu.RUnlock() + + shares := make([]models.SMBShare, 0, len(s.shares)) + for _, share := range s.shares { + shares = append(shares, *share) + } + return shares +} + +// Get returns a share by ID +func (s *SMBStore) Get(id string) (*models.SMBShare, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + share, ok := s.shares[id] + if !ok { + return nil, ErrSMBShareNotFound + } + return share, nil +} + +// GetByName returns a share by name +func (s *SMBStore) GetByName(name string) (*models.SMBShare, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + for _, share := range s.shares { + if share.Name == name { + return share, nil + } + } + return nil, ErrSMBShareNotFound +} + +// Create creates a new SMB share +func (s *SMBStore) Create(name, path, dataset, description string, readOnly, guestOK bool, validUsers []string) (*models.SMBShare, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Check if name already exists + for _, share := range s.shares { + if share.Name == name { + return nil, ErrSMBShareExists + } + } + + id := fmt.Sprintf("smb-%d", s.nextID) + s.nextID++ + + share := &models.SMBShare{ + ID: id, + Name: name, + Path: path, + Dataset: dataset, + Description: description, + ReadOnly: readOnly, + GuestOK: guestOK, + ValidUsers: validUsers, + Enabled: true, + } + + s.shares[id] = share + return share, nil +} + +// Update updates an existing share +func (s *SMBStore) Update(id, description string, readOnly, guestOK, enabled bool, validUsers []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + share, ok := s.shares[id] + if !ok { + return ErrSMBShareNotFound + } + + share.Description = description + share.ReadOnly = readOnly + share.GuestOK = guestOK + share.Enabled = enabled + if validUsers != nil { + share.ValidUsers = validUsers + } + + return nil +} + +// Delete removes a share +func (s *SMBStore) Delete(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if _, ok := s.shares[id]; !ok { 
+ return ErrSMBShareNotFound + } + + delete(s.shares, id) + return nil +}
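
The new self-service password change accepts a PUT with an `old_password`/`new_password` JSON body on any path under `/api/v1/users/` that ends in `/password`; the handler resolves the target account from the authenticated request context, so the path segment before `/password` is effectively ignored. Below is a minimal client-side sketch, not part of the patch: the listen address, the `/me` placeholder segment, and the `Authorization: Bearer` header format are assumptions (the JWT middleware is added elsewhere in this patch and not reproduced here).

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Request body matches the fields decoded by handleChangePassword.
	body, err := json.Marshal(map[string]string{
		"old_password": "old-secret",
		"new_password": "new-secret",
	})
	if err != nil {
		panic(err)
	}

	// Host, port, the "/me" segment, and the Bearer scheme are illustrative assumptions.
	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:8080/api/v1/users/me/password", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <jwt obtained from /api/v1/auth/login>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Per the handler: 200 with {"message":"password updated"} on success,
	// 401 when the current password does not verify.
	fmt.Println("status:", resp.StatusCode)
}
```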
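
The three new in-memory stores (SMB, NFS, iSCSI) share the same shape: a mutex-guarded map, sequential string IDs, and duplicate-key guards on `Create`. A minimal test sketch for the iSCSI store follows, using only methods introduced in this patch (`Create`, `AddLUN`, `RemoveLUN`); the test name and the sample IQN/ZVOL values are illustrative, not part of the patch.

```go
package storage

import (
	"errors"
	"testing"
)

func TestISCSITargetLifecycle(t *testing.T) {
	s := NewISCSIStore()

	target, err := s.Create("iqn.2025-01.id.example:target0", []string{"iqn.1998-01.com.example:init1"})
	if err != nil {
		t.Fatalf("create target: %v", err)
	}

	// Duplicate IQNs are rejected.
	if _, err := s.Create(target.IQN, nil); !errors.Is(err, ErrISCSITargetExists) {
		t.Fatalf("expected ErrISCSITargetExists, got %v", err)
	}

	// First LUN gets ID 0 from the next-available-ID scan.
	lun, err := s.AddLUN(target.ID, "tank/vol1", 10<<30)
	if err != nil {
		t.Fatalf("add LUN: %v", err)
	}
	if lun.ID != 0 {
		t.Fatalf("expected first LUN ID 0, got %d", lun.ID)
	}

	// Mapping the same ZVOL twice on one target is rejected.
	if _, err := s.AddLUN(target.ID, "tank/vol1", 10<<30); !errors.Is(err, ErrLUNExists) {
		t.Fatalf("expected ErrLUNExists, got %v", err)
	}

	if err := s.RemoveLUN(target.ID, lun.ID); err != nil {
		t.Fatalf("remove LUN: %v", err)
	}
}
```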
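
The `Update` methods on the NFS and SMB stores treat a nil slice as "leave the existing list alone" (`if clients != nil` / `if validUsers != nil`), while the boolean flags are always overwritten. A short sketch of that behaviour for the NFS store, with illustrative path and dataset values:

```go
package storage

import "testing"

func TestNFSExportUpdateKeepsClients(t *testing.T) {
	s := NewNFSStore()

	exp, err := s.Create("/export/data", "tank/data", []string{"10.0.0.0/24"}, false, true)
	if err != nil {
		t.Fatalf("create export: %v", err)
	}

	// Flip read-only without touching the client list (nil clients = keep existing).
	if err := s.Update(exp.ID, nil, true, true, true); err != nil {
		t.Fatalf("update export: %v", err)
	}

	got, err := s.Get(exp.ID)
	if err != nil {
		t.Fatalf("get export: %v", err)
	}
	if !got.ReadOnly || len(got.Clients) != 1 {
		t.Fatalf("unexpected state: readOnly=%v clients=%v", got.ReadOnly, got.Clients)
	}
}
```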