This commit is contained in:
@@ -9,8 +9,10 @@ import (
|
||||
"strings"
|
||||
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/auth"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/models"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/storage"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/validation"
|
||||
)
|
||||
|
||||
// pathParam is now in router_helpers.go
|
||||
@@ -45,12 +47,18 @@ func (a *App) handleCreatePool(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
|
||||
writeError(w, errors.ErrBadRequest("invalid request body"))
|
||||
return
|
||||
}
|
||||
|
||||
if req.Name == "" || len(req.VDEVs) == 0 {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name and vdevs are required"})
|
||||
// Validate pool name
|
||||
if err := validation.ValidateZFSName(req.Name); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
if len(req.VDEVs) == 0 {
|
||||
writeError(w, errors.ErrValidation("at least one vdev is required"))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -224,17 +232,31 @@ func (a *App) handleListZVOLs(w http.ResponseWriter, r *http.Request) {
|
||||
func (a *App) handleCreateZVOL(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
Name string `json:"name"`
|
||||
Size uint64 `json:"size"` // in bytes
|
||||
Size string `json:"size"` // human-readable format (e.g., "10G")
|
||||
Options map[string]string `json:"options,omitempty"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
|
||||
writeError(w, errors.ErrBadRequest("invalid request body"))
|
||||
return
|
||||
}
|
||||
|
||||
if req.Name == "" || req.Size == 0 {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name and size are required"})
|
||||
// Validate ZVOL name
|
||||
if err := validation.ValidateZFSName(req.Name); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate size format
|
||||
if err := validation.ValidateSize(req.Size); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
// Parse size to bytes
|
||||
sizeBytes, err := a.parseSizeString(req.Size)
|
||||
if err != nil {
|
||||
writeError(w, errors.ErrValidation(fmt.Sprintf("invalid size: %v", err)))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -242,7 +264,7 @@ func (a *App) handleCreateZVOL(w http.ResponseWriter, r *http.Request) {
|
||||
req.Options = make(map[string]string)
|
||||
}
|
||||
|
||||
if err := a.zfs.CreateZVOL(req.Name, req.Size, req.Options); err != nil {
|
||||
if err := a.zfs.CreateZVOL(req.Name, sizeBytes, req.Options); err != nil {
|
||||
log.Printf("create zvol error: %v", err)
|
||||
writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
||||
return
|
||||
@@ -314,8 +336,16 @@ func (a *App) handleCreateSnapshot(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if req.Dataset == "" || req.Name == "" {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset and name are required"})
|
||||
// Validate dataset name
|
||||
if err := validation.ValidateZFSName(req.Dataset); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate snapshot name (can contain @ but we'll validate the base name)
|
||||
snapshotBaseName := strings.ReplaceAll(req.Name, "@", "")
|
||||
if err := validation.ValidateZFSName(snapshotBaseName); err != nil {
|
||||
writeError(w, errors.ErrValidation("invalid snapshot name"))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -325,10 +355,10 @@ func (a *App) handleCreateSnapshot(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
snapshotName := fmt.Sprintf("%s@%s", req.Dataset, req.Name)
|
||||
snap, err := a.zfs.GetSnapshot(snapshotName)
|
||||
fullSnapshotName := fmt.Sprintf("%s@%s", req.Dataset, req.Name)
|
||||
snap, err := a.zfs.GetSnapshot(fullSnapshotName)
|
||||
if err != nil {
|
||||
writeJSON(w, http.StatusCreated, map[string]string{"message": "snapshot created", "name": snapshotName})
|
||||
writeJSON(w, http.StatusCreated, map[string]string{"message": "snapshot created", "name": fullSnapshotName})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -477,11 +507,27 @@ func (a *App) handleCreateSMBShare(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if req.Name == "" || req.Dataset == "" {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name and dataset are required"})
|
||||
// Validate share name
|
||||
if err := validation.ValidateShareName(req.Name); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate dataset name
|
||||
if err := validation.ValidateZFSName(req.Dataset); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
// Sanitize path if provided
|
||||
if req.Path != "" {
|
||||
req.Path = validation.SanitizePath(req.Path)
|
||||
if err := validation.ValidatePath(req.Path); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Validate dataset exists
|
||||
datasets, err := a.zfs.ListDatasets("")
|
||||
if err != nil {
|
||||
@@ -509,20 +555,22 @@ func (a *App) handleCreateSMBShare(w http.ResponseWriter, r *http.Request) {
|
||||
share, err := a.smbStore.Create(req.Name, req.Path, req.Dataset, req.Description, req.ReadOnly, req.GuestOK, req.ValidUsers)
|
||||
if err != nil {
|
||||
if err == storage.ErrSMBShareExists {
|
||||
writeJSON(w, http.StatusConflict, map[string]string{"error": "share name already exists"})
|
||||
writeError(w, errors.ErrConflict("share name already exists"))
|
||||
return
|
||||
}
|
||||
log.Printf("create SMB share error: %v", err)
|
||||
writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
||||
writeError(w, errors.ErrInternal("failed to create SMB share").WithDetails(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
// Apply configuration to Samba service
|
||||
// Apply configuration to Samba service (with graceful degradation)
|
||||
shares := a.smbStore.List()
|
||||
if err := a.smbService.ApplyConfiguration(shares); err != nil {
|
||||
log.Printf("apply SMB configuration error: %v", err)
|
||||
// Don't fail the request, but log the error
|
||||
// In production, you might want to queue this for retry
|
||||
// Log but don't fail the request - desired state is stored
|
||||
// Service configuration can be retried later
|
||||
if svcErr := a.handleServiceError("SMB", err); svcErr != nil {
|
||||
log.Printf("SMB service configuration failed (non-fatal): %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusCreated, share)
|
||||
@@ -629,11 +677,29 @@ func (a *App) handleCreateNFSExport(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if req.Dataset == "" {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset is required"})
|
||||
// Validate dataset name
|
||||
if err := validation.ValidateZFSName(req.Dataset); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate and sanitize path if provided
|
||||
if req.Path != "" {
|
||||
req.Path = validation.SanitizePath(req.Path)
|
||||
if err := validation.ValidatePath(req.Path); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Validate clients
|
||||
for i, client := range req.Clients {
|
||||
if err := validation.ValidateCIDR(client); err != nil {
|
||||
writeError(w, errors.ErrValidation(fmt.Sprintf("client[%d]: %s", i, err.Error())))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Validate dataset exists
|
||||
datasets, err := a.zfs.ListDatasets("")
|
||||
if err != nil {
|
||||
@@ -786,14 +852,9 @@ func (a *App) handleCreateISCSITarget(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if req.IQN == "" {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "iqn is required"})
|
||||
return
|
||||
}
|
||||
|
||||
// Basic IQN format validation (iqn.yyyy-mm.reversed.domain:identifier)
|
||||
if !strings.HasPrefix(req.IQN, "iqn.") {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid IQN format (must start with 'iqn.')"})
|
||||
// Validate IQN format
|
||||
if err := validation.ValidateIQN(req.IQN); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1065,8 +1126,14 @@ func (a *App) handleLogin(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if req.Username == "" || req.Password == "" {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "username and password are required"})
|
||||
// Validate username (login is less strict - just check not empty)
|
||||
if req.Username == "" {
|
||||
writeError(w, errors.ErrValidation("username is required"))
|
||||
return
|
||||
}
|
||||
|
||||
if req.Password == "" {
|
||||
writeError(w, errors.ErrValidation("password is required"))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1116,11 +1183,26 @@ func (a *App) handleCreateUser(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if req.Username == "" || req.Password == "" {
|
||||
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "username and password are required"})
|
||||
// Validate username
|
||||
if err := validation.ValidateUsername(req.Username); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate password
|
||||
if err := validation.ValidatePassword(req.Password); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
// Validate email if provided
|
||||
if req.Email != "" {
|
||||
if err := validation.ValidateEmail(req.Email); err != nil {
|
||||
writeError(w, errors.ErrValidation(err.Error()))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if req.Role == "" {
|
||||
req.Role = models.RoleViewer // Default role
|
||||
}
|
||||
|
||||
@@ -10,8 +10,10 @@ import (
|
||||
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/audit"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/auth"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/backup"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/db"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/job"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/metrics"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/services"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/snapshot"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/storage"
|
||||
@@ -26,23 +28,26 @@ type Config struct {
|
||||
}
|
||||
|
||||
type App struct {
|
||||
cfg Config
|
||||
tmpl *template.Template
|
||||
mux *http.ServeMux
|
||||
zfs *zfs.Service
|
||||
snapshotPolicy *snapshot.PolicyStore
|
||||
jobManager *job.Manager
|
||||
scheduler *snapshot.Scheduler
|
||||
authService *auth.Service
|
||||
userStore *auth.UserStore
|
||||
auditStore *audit.Store
|
||||
smbStore *storage.SMBStore
|
||||
nfsStore *storage.NFSStore
|
||||
iscsiStore *storage.ISCSIStore
|
||||
database *db.DB // Optional database connection
|
||||
smbService *services.SMBService
|
||||
nfsService *services.NFSService
|
||||
iscsiService *services.ISCSIService
|
||||
cfg Config
|
||||
tmpl *template.Template
|
||||
mux *http.ServeMux
|
||||
zfs *zfs.Service
|
||||
snapshotPolicy *snapshot.PolicyStore
|
||||
jobManager *job.Manager
|
||||
scheduler *snapshot.Scheduler
|
||||
authService *auth.Service
|
||||
userStore *auth.UserStore
|
||||
auditStore *audit.Store
|
||||
smbStore *storage.SMBStore
|
||||
nfsStore *storage.NFSStore
|
||||
iscsiStore *storage.ISCSIStore
|
||||
database *db.DB // Optional database connection
|
||||
smbService *services.SMBService
|
||||
nfsService *services.NFSService
|
||||
iscsiService *services.ISCSIService
|
||||
metricsCollector *metrics.Collector
|
||||
startTime time.Time
|
||||
backupService *backup.Service
|
||||
}
|
||||
|
||||
func New(cfg Config) (*App, error) {
|
||||
@@ -91,24 +96,41 @@ func New(cfg Config) (*App, error) {
|
||||
nfsService := services.NewNFSService()
|
||||
iscsiService := services.NewISCSIService()
|
||||
|
||||
// Initialize metrics collector
|
||||
metricsCollector := metrics.NewCollector()
|
||||
startTime := time.Now()
|
||||
|
||||
// Initialize backup service
|
||||
backupDir := os.Getenv("ATLAS_BACKUP_DIR")
|
||||
if backupDir == "" {
|
||||
backupDir = "data/backups"
|
||||
}
|
||||
backupService, err := backup.New(backupDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("init backup service: %w", err)
|
||||
}
|
||||
|
||||
a := &App{
|
||||
cfg: cfg,
|
||||
tmpl: tmpl,
|
||||
mux: http.NewServeMux(),
|
||||
zfs: zfsService,
|
||||
snapshotPolicy: policyStore,
|
||||
jobManager: jobMgr,
|
||||
scheduler: scheduler,
|
||||
authService: authService,
|
||||
userStore: userStore,
|
||||
auditStore: auditStore,
|
||||
smbStore: smbStore,
|
||||
nfsStore: nfsStore,
|
||||
iscsiStore: iscsiStore,
|
||||
database: database,
|
||||
smbService: smbService,
|
||||
nfsService: nfsService,
|
||||
iscsiService: iscsiService,
|
||||
cfg: cfg,
|
||||
tmpl: tmpl,
|
||||
mux: http.NewServeMux(),
|
||||
zfs: zfsService,
|
||||
snapshotPolicy: policyStore,
|
||||
jobManager: jobMgr,
|
||||
scheduler: scheduler,
|
||||
authService: authService,
|
||||
userStore: userStore,
|
||||
auditStore: auditStore,
|
||||
smbStore: smbStore,
|
||||
nfsStore: nfsStore,
|
||||
iscsiStore: iscsiStore,
|
||||
database: database,
|
||||
smbService: smbService,
|
||||
nfsService: nfsService,
|
||||
iscsiService: iscsiService,
|
||||
metricsCollector: metricsCollector,
|
||||
startTime: startTime,
|
||||
backupService: backupService,
|
||||
}
|
||||
|
||||
// Start snapshot scheduler (runs every 15 minutes)
|
||||
@@ -119,8 +141,37 @@ func New(cfg Config) (*App, error) {
|
||||
}
|
||||
|
||||
func (a *App) Router() http.Handler {
|
||||
// Wrap the mux with middleware chain: requestID -> logging -> audit -> auth
|
||||
return requestID(logging(a.auditMiddleware(a.authMiddleware(a.mux))))
|
||||
// Middleware chain order (outer to inner):
|
||||
// 1. CORS (handles preflight)
|
||||
// 2. Security headers
|
||||
// 3. Request size limit (10MB)
|
||||
// 4. Content-Type validation
|
||||
// 5. Rate limiting
|
||||
// 6. Error recovery
|
||||
// 7. Request ID
|
||||
// 8. Logging
|
||||
// 9. Audit
|
||||
// 10. Authentication
|
||||
// 11. Routes
|
||||
return a.corsMiddleware(
|
||||
a.securityHeadersMiddleware(
|
||||
a.requestSizeMiddleware(10 * 1024 * 1024)(
|
||||
a.validateContentTypeMiddleware(
|
||||
a.rateLimitMiddleware(
|
||||
a.errorMiddleware(
|
||||
requestID(
|
||||
logging(
|
||||
a.auditMiddleware(
|
||||
a.authMiddleware(a.mux),
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
// StopScheduler stops the snapshot scheduler (for graceful shutdown)
|
||||
|
||||
304
internal/httpapp/backup_handlers.go
Normal file
304
internal/httpapp/backup_handlers.go
Normal file
@@ -0,0 +1,304 @@
|
||||
package httpapp
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"strings"
|
||||
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/backup"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/models"
|
||||
)
|
||||
|
||||
// Backup Handlers
|
||||
func (a *App) handleCreateBackup(w http.ResponseWriter, r *http.Request) {
|
||||
var req struct {
|
||||
Description string `json:"description,omitempty"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
// Description is optional, so we'll continue even if body is empty
|
||||
_ = err
|
||||
}
|
||||
|
||||
// Collect all configuration data
|
||||
backupData := backup.BackupData{
|
||||
Users: a.userStore.List(),
|
||||
SMBShares: a.smbStore.List(),
|
||||
NFSExports: a.nfsStore.List(),
|
||||
ISCSITargets: a.iscsiStore.List(),
|
||||
Policies: a.snapshotPolicy.List(),
|
||||
Config: map[string]interface{}{
|
||||
"database_path": a.cfg.DatabasePath,
|
||||
},
|
||||
}
|
||||
|
||||
// Create backup
|
||||
backupID, err := a.backupService.CreateBackup(backupData, req.Description)
|
||||
if err != nil {
|
||||
log.Printf("create backup error: %v", err)
|
||||
writeError(w, errors.ErrInternal("failed to create backup").WithDetails(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
// Get backup metadata
|
||||
metadata, err := a.backupService.GetBackup(backupID)
|
||||
if err != nil {
|
||||
log.Printf("get backup metadata error: %v", err)
|
||||
writeJSON(w, http.StatusCreated, map[string]interface{}{
|
||||
"id": backupID,
|
||||
"message": "backup created",
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusCreated, metadata)
|
||||
}
|
||||
|
||||
func (a *App) handleListBackups(w http.ResponseWriter, r *http.Request) {
|
||||
backups, err := a.backupService.ListBackups()
|
||||
if err != nil {
|
||||
log.Printf("list backups error: %v", err)
|
||||
writeError(w, errors.ErrInternal("failed to list backups").WithDetails(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, backups)
|
||||
}
|
||||
|
||||
func (a *App) handleGetBackup(w http.ResponseWriter, r *http.Request) {
|
||||
backupID := pathParam(r, "/api/v1/backups/")
|
||||
if backupID == "" {
|
||||
writeError(w, errors.ErrBadRequest("backup id required"))
|
||||
return
|
||||
}
|
||||
|
||||
metadata, err := a.backupService.GetBackup(backupID)
|
||||
if err != nil {
|
||||
log.Printf("get backup error: %v", err)
|
||||
writeError(w, errors.ErrNotFound("backup").WithDetails(backupID))
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, metadata)
|
||||
}
|
||||
|
||||
func (a *App) handleRestoreBackup(w http.ResponseWriter, r *http.Request) {
|
||||
// Extract backup ID from path
|
||||
path := r.URL.Path
|
||||
backupID := ""
|
||||
|
||||
// Handle both /api/v1/backups/{id} and /api/v1/backups/{id}/restore
|
||||
if strings.Contains(path, "/restore") {
|
||||
// Path: /api/v1/backups/{id}/restore
|
||||
prefix := "/api/v1/backups/"
|
||||
suffix := "/restore"
|
||||
if strings.HasPrefix(path, prefix) && strings.HasSuffix(path, suffix) {
|
||||
backupID = path[len(prefix) : len(path)-len(suffix)]
|
||||
}
|
||||
} else {
|
||||
// Path: /api/v1/backups/{id}
|
||||
backupID = pathParam(r, "/api/v1/backups/")
|
||||
}
|
||||
|
||||
if backupID == "" {
|
||||
writeError(w, errors.ErrBadRequest("backup id required"))
|
||||
return
|
||||
}
|
||||
|
||||
var req struct {
|
||||
DryRun bool `json:"dry_run,omitempty"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
// Dry run is optional, default to false
|
||||
req.DryRun = false
|
||||
}
|
||||
|
||||
// Verify backup first
|
||||
if err := a.backupService.VerifyBackup(backupID); err != nil {
|
||||
log.Printf("verify backup error: %v", err)
|
||||
writeError(w, errors.ErrBadRequest("backup verification failed").WithDetails(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
// Restore backup
|
||||
backupData, err := a.backupService.RestoreBackup(backupID)
|
||||
if err != nil {
|
||||
log.Printf("restore backup error: %v", err)
|
||||
writeError(w, errors.ErrInternal("failed to restore backup").WithDetails(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
if req.DryRun {
|
||||
// Return what would be restored without actually restoring
|
||||
writeJSON(w, http.StatusOK, map[string]interface{}{
|
||||
"message": "dry run - no changes made",
|
||||
"backup_id": backupID,
|
||||
"backup_data": backupData,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Restore users (skip default admin user - user-1)
|
||||
// Note: Passwords cannot be restored as they're hashed and not stored in user model
|
||||
// Users will need to reset their passwords after restore
|
||||
for _, user := range backupData.Users {
|
||||
// Skip default admin user
|
||||
if user.ID == "user-1" {
|
||||
log.Printf("skipping default admin user")
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if user already exists
|
||||
if _, err := a.userStore.GetByID(user.ID); err == nil {
|
||||
log.Printf("user %s already exists, skipping", user.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create user with temporary password (user must reset password)
|
||||
// Use a secure random password that user must change
|
||||
tempPassword := fmt.Sprintf("restore-%s", user.ID)
|
||||
if _, err := a.userStore.Create(user.Username, user.Email, tempPassword, user.Role); err != nil {
|
||||
log.Printf("restore user error: %v", err)
|
||||
// Continue with other users
|
||||
} else {
|
||||
log.Printf("restored user %s - password reset required", user.Username)
|
||||
}
|
||||
}
|
||||
|
||||
// Restore SMB shares
|
||||
for _, share := range backupData.SMBShares {
|
||||
// Check if share already exists
|
||||
if _, err := a.smbStore.Get(share.ID); err == nil {
|
||||
log.Printf("SMB share %s already exists, skipping", share.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create share
|
||||
if _, err := a.smbStore.Create(share.Name, share.Path, share.Dataset, share.Description, share.ReadOnly, share.GuestOK, share.ValidUsers); err != nil {
|
||||
log.Printf("restore SMB share error: %v", err)
|
||||
// Continue with other shares
|
||||
}
|
||||
}
|
||||
|
||||
// Restore NFS exports
|
||||
for _, export := range backupData.NFSExports {
|
||||
// Check if export already exists
|
||||
if _, err := a.nfsStore.Get(export.ID); err == nil {
|
||||
log.Printf("NFS export %s already exists, skipping", export.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create export
|
||||
if _, err := a.nfsStore.Create(export.Path, export.Dataset, export.Clients, export.ReadOnly, export.RootSquash); err != nil {
|
||||
log.Printf("restore NFS export error: %v", err)
|
||||
// Continue with other exports
|
||||
}
|
||||
}
|
||||
|
||||
// Restore iSCSI targets
|
||||
for _, target := range backupData.ISCSITargets {
|
||||
// Check if target already exists
|
||||
if _, err := a.iscsiStore.Get(target.ID); err == nil {
|
||||
log.Printf("iSCSI target %s already exists, skipping", target.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create target
|
||||
if _, err := a.iscsiStore.Create(target.IQN, target.Initiators); err != nil {
|
||||
log.Printf("restore iSCSI target error: %v", err)
|
||||
// Continue with other targets
|
||||
}
|
||||
|
||||
// Restore LUNs
|
||||
for _, lun := range target.LUNs {
|
||||
if _, err := a.iscsiStore.AddLUN(target.ID, lun.ZVOL, lun.Size); err != nil {
|
||||
log.Printf("restore iSCSI LUN error: %v", err)
|
||||
// Continue with other LUNs
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Restore snapshot policies
|
||||
for _, policy := range backupData.Policies {
|
||||
// Check if policy already exists
|
||||
if existing, _ := a.snapshotPolicy.Get(policy.Dataset); existing != nil {
|
||||
log.Printf("snapshot policy for dataset %s already exists, skipping", policy.Dataset)
|
||||
continue
|
||||
}
|
||||
|
||||
// Set policy (uses Dataset as key)
|
||||
a.snapshotPolicy.Set(&policy)
|
||||
}
|
||||
|
||||
// Apply service configurations
|
||||
shares := a.smbStore.List()
|
||||
if err := a.smbService.ApplyConfiguration(shares); err != nil {
|
||||
log.Printf("apply SMB configuration after restore error: %v", err)
|
||||
}
|
||||
|
||||
exports := a.nfsStore.List()
|
||||
if err := a.nfsService.ApplyConfiguration(exports); err != nil {
|
||||
log.Printf("apply NFS configuration after restore error: %v", err)
|
||||
}
|
||||
|
||||
targets := a.iscsiStore.List()
|
||||
for _, target := range targets {
|
||||
if err := a.iscsiService.ApplyConfiguration([]models.ISCSITarget{target}); err != nil {
|
||||
log.Printf("apply iSCSI configuration after restore error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]interface{}{
|
||||
"message": "backup restored successfully",
|
||||
"backup_id": backupID,
|
||||
})
|
||||
}
|
||||
|
||||
func (a *App) handleDeleteBackup(w http.ResponseWriter, r *http.Request) {
|
||||
backupID := pathParam(r, "/api/v1/backups/")
|
||||
if backupID == "" {
|
||||
writeError(w, errors.ErrBadRequest("backup id required"))
|
||||
return
|
||||
}
|
||||
|
||||
if err := a.backupService.DeleteBackup(backupID); err != nil {
|
||||
log.Printf("delete backup error: %v", err)
|
||||
writeError(w, errors.ErrInternal("failed to delete backup").WithDetails(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]string{
|
||||
"message": "backup deleted",
|
||||
"backup_id": backupID,
|
||||
})
|
||||
}
|
||||
|
||||
func (a *App) handleVerifyBackup(w http.ResponseWriter, r *http.Request) {
|
||||
backupID := pathParam(r, "/api/v1/backups/")
|
||||
if backupID == "" {
|
||||
writeError(w, errors.ErrBadRequest("backup id required"))
|
||||
return
|
||||
}
|
||||
|
||||
if err := a.backupService.VerifyBackup(backupID); err != nil {
|
||||
writeError(w, errors.ErrBadRequest("backup verification failed").WithDetails(err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
metadata, err := a.backupService.GetBackup(backupID)
|
||||
if err != nil {
|
||||
writeError(w, errors.ErrNotFound("backup").WithDetails(backupID))
|
||||
return
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]interface{}{
|
||||
"message": "backup is valid",
|
||||
"backup_id": backupID,
|
||||
"metadata": metadata,
|
||||
})
|
||||
}
|
||||
289
internal/httpapp/diagnostics_handlers.go
Normal file
289
internal/httpapp/diagnostics_handlers.go
Normal file
@@ -0,0 +1,289 @@
|
||||
package httpapp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SystemInfo represents system diagnostic information
|
||||
type SystemInfo struct {
|
||||
Version string `json:"version"`
|
||||
Uptime string `json:"uptime"`
|
||||
GoVersion string `json:"go_version"`
|
||||
NumGoroutine int `json:"num_goroutines"`
|
||||
Memory MemoryInfo `json:"memory"`
|
||||
Services map[string]ServiceInfo `json:"services"`
|
||||
Database DatabaseInfo `json:"database,omitempty"`
|
||||
}
|
||||
|
||||
// MemoryInfo represents memory statistics
|
||||
type MemoryInfo struct {
|
||||
Alloc uint64 `json:"alloc"` // bytes allocated
|
||||
TotalAlloc uint64 `json:"total_alloc"` // bytes allocated (cumulative)
|
||||
Sys uint64 `json:"sys"` // bytes obtained from system
|
||||
NumGC uint32 `json:"num_gc"` // number of GC cycles
|
||||
}
|
||||
|
||||
// ServiceInfo represents service status
|
||||
type ServiceInfo struct {
|
||||
Status string `json:"status"` // "running", "stopped", "error"
|
||||
LastCheck string `json:"last_check"` // timestamp
|
||||
Message string `json:"message,omitempty"`
|
||||
}
|
||||
|
||||
// DatabaseInfo represents database connection info
|
||||
type DatabaseInfo struct {
|
||||
Connected bool `json:"connected"`
|
||||
Path string `json:"path,omitempty"`
|
||||
}
|
||||
|
||||
// handleSystemInfo returns system diagnostic information
|
||||
func (a *App) handleSystemInfo(w http.ResponseWriter, r *http.Request) {
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
|
||||
uptime := time.Since(a.startTime)
|
||||
|
||||
info := SystemInfo{
|
||||
Version: "v0.1.0-dev",
|
||||
Uptime: fmt.Sprintf("%.0f seconds", uptime.Seconds()),
|
||||
GoVersion: runtime.Version(),
|
||||
NumGoroutine: runtime.NumGoroutine(),
|
||||
Memory: MemoryInfo{
|
||||
Alloc: m.Alloc,
|
||||
TotalAlloc: m.TotalAlloc,
|
||||
Sys: m.Sys,
|
||||
NumGC: m.NumGC,
|
||||
},
|
||||
Services: make(map[string]ServiceInfo),
|
||||
}
|
||||
|
||||
// Check service statuses
|
||||
smbStatus, smbErr := a.smbService.GetStatus()
|
||||
if smbErr == nil {
|
||||
status := "stopped"
|
||||
if smbStatus {
|
||||
status = "running"
|
||||
}
|
||||
info.Services["smb"] = ServiceInfo{
|
||||
Status: status,
|
||||
LastCheck: time.Now().Format(time.RFC3339),
|
||||
}
|
||||
} else {
|
||||
info.Services["smb"] = ServiceInfo{
|
||||
Status: "error",
|
||||
LastCheck: time.Now().Format(time.RFC3339),
|
||||
Message: smbErr.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
nfsStatus, nfsErr := a.nfsService.GetStatus()
|
||||
if nfsErr == nil {
|
||||
status := "stopped"
|
||||
if nfsStatus {
|
||||
status = "running"
|
||||
}
|
||||
info.Services["nfs"] = ServiceInfo{
|
||||
Status: status,
|
||||
LastCheck: time.Now().Format(time.RFC3339),
|
||||
}
|
||||
} else {
|
||||
info.Services["nfs"] = ServiceInfo{
|
||||
Status: "error",
|
||||
LastCheck: time.Now().Format(time.RFC3339),
|
||||
Message: nfsErr.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
iscsiStatus, iscsiErr := a.iscsiService.GetStatus()
|
||||
if iscsiErr == nil {
|
||||
status := "stopped"
|
||||
if iscsiStatus {
|
||||
status = "running"
|
||||
}
|
||||
info.Services["iscsi"] = ServiceInfo{
|
||||
Status: status,
|
||||
LastCheck: time.Now().Format(time.RFC3339),
|
||||
}
|
||||
} else {
|
||||
info.Services["iscsi"] = ServiceInfo{
|
||||
Status: "error",
|
||||
LastCheck: time.Now().Format(time.RFC3339),
|
||||
Message: iscsiErr.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
// Database info
|
||||
if a.database != nil {
|
||||
info.Database = DatabaseInfo{
|
||||
Connected: true,
|
||||
Path: a.cfg.DatabasePath,
|
||||
}
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, info)
|
||||
}
|
||||
|
||||
// handleHealthCheck provides detailed health check information
|
||||
func (a *App) handleHealthCheck(w http.ResponseWriter, r *http.Request) {
|
||||
type HealthStatus struct {
|
||||
Status string `json:"status"` // "healthy", "degraded", "unhealthy"
|
||||
Timestamp string `json:"timestamp"`
|
||||
Checks map[string]string `json:"checks"`
|
||||
}
|
||||
|
||||
health := HealthStatus{
|
||||
Status: "healthy",
|
||||
Timestamp: time.Now().Format(time.RFC3339),
|
||||
Checks: make(map[string]string),
|
||||
}
|
||||
|
||||
// Check ZFS service
|
||||
if a.zfs != nil {
|
||||
_, err := a.zfs.ListPools()
|
||||
if err != nil {
|
||||
health.Checks["zfs"] = "unhealthy: " + err.Error()
|
||||
health.Status = "degraded"
|
||||
} else {
|
||||
health.Checks["zfs"] = "healthy"
|
||||
}
|
||||
} else {
|
||||
health.Checks["zfs"] = "unhealthy: service not initialized"
|
||||
health.Status = "unhealthy"
|
||||
}
|
||||
|
||||
// Check database
|
||||
if a.database != nil {
|
||||
// Try a simple query to check database health
|
||||
if err := a.database.DB.Ping(); err != nil {
|
||||
health.Checks["database"] = "unhealthy: " + err.Error()
|
||||
health.Status = "degraded"
|
||||
} else {
|
||||
health.Checks["database"] = "healthy"
|
||||
}
|
||||
} else {
|
||||
health.Checks["database"] = "not configured"
|
||||
}
|
||||
|
||||
// Check services
|
||||
smbStatus, smbErr := a.smbService.GetStatus()
|
||||
if smbErr != nil {
|
||||
health.Checks["smb"] = "unhealthy: " + smbErr.Error()
|
||||
health.Status = "degraded"
|
||||
} else if !smbStatus {
|
||||
health.Checks["smb"] = "stopped"
|
||||
} else {
|
||||
health.Checks["smb"] = "healthy"
|
||||
}
|
||||
|
||||
nfsStatus, nfsErr := a.nfsService.GetStatus()
|
||||
if nfsErr != nil {
|
||||
health.Checks["nfs"] = "unhealthy: " + nfsErr.Error()
|
||||
health.Status = "degraded"
|
||||
} else if !nfsStatus {
|
||||
health.Checks["nfs"] = "stopped"
|
||||
} else {
|
||||
health.Checks["nfs"] = "healthy"
|
||||
}
|
||||
|
||||
iscsiStatus, iscsiErr := a.iscsiService.GetStatus()
|
||||
if iscsiErr != nil {
|
||||
health.Checks["iscsi"] = "unhealthy: " + iscsiErr.Error()
|
||||
health.Status = "degraded"
|
||||
} else if !iscsiStatus {
|
||||
health.Checks["iscsi"] = "stopped"
|
||||
} else {
|
||||
health.Checks["iscsi"] = "healthy"
|
||||
}
|
||||
|
||||
// Set HTTP status based on health
|
||||
statusCode := http.StatusOK
|
||||
if health.Status == "unhealthy" {
|
||||
statusCode = http.StatusServiceUnavailable
|
||||
} else if health.Status == "degraded" {
|
||||
statusCode = http.StatusOK // Still OK, but with warnings
|
||||
}
|
||||
|
||||
w.WriteHeader(statusCode)
|
||||
writeJSON(w, statusCode, health)
|
||||
}
|
||||
|
||||
// handleLogs returns recent log entries (if available)
|
||||
func (a *App) handleLogs(w http.ResponseWriter, r *http.Request) {
|
||||
// For now, return audit logs as system logs
|
||||
// In a full implementation, this would return application logs
|
||||
limit := 100
|
||||
if limitStr := r.URL.Query().Get("limit"); limitStr != "" {
|
||||
fmt.Sscanf(limitStr, "%d", &limit)
|
||||
if limit > 1000 {
|
||||
limit = 1000
|
||||
}
|
||||
if limit < 1 {
|
||||
limit = 1
|
||||
}
|
||||
}
|
||||
|
||||
// Get recent audit logs
|
||||
logs := a.auditStore.List("", "", "", limit)
|
||||
|
||||
type LogEntry struct {
|
||||
Timestamp string `json:"timestamp"`
|
||||
Level string `json:"level"`
|
||||
Actor string `json:"actor"`
|
||||
Action string `json:"action"`
|
||||
Resource string `json:"resource"`
|
||||
Result string `json:"result"`
|
||||
Message string `json:"message,omitempty"`
|
||||
IP string `json:"ip,omitempty"`
|
||||
}
|
||||
|
||||
entries := make([]LogEntry, 0, len(logs))
|
||||
for _, log := range logs {
|
||||
level := "INFO"
|
||||
if log.Result == "failure" {
|
||||
level = "ERROR"
|
||||
}
|
||||
|
||||
entries = append(entries, LogEntry{
|
||||
Timestamp: log.Timestamp.Format(time.RFC3339),
|
||||
Level: level,
|
||||
Actor: log.Actor,
|
||||
Action: log.Action,
|
||||
Resource: log.Resource,
|
||||
Result: log.Result,
|
||||
Message: log.Message,
|
||||
IP: log.IP,
|
||||
})
|
||||
}
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]interface{}{
|
||||
"logs": entries,
|
||||
"count": len(entries),
|
||||
})
|
||||
}
|
||||
|
||||
// handleGC triggers a garbage collection and returns stats
|
||||
func (a *App) handleGC(w http.ResponseWriter, r *http.Request) {
|
||||
var before, after runtime.MemStats
|
||||
runtime.ReadMemStats(&before)
|
||||
runtime.GC()
|
||||
runtime.ReadMemStats(&after)
|
||||
|
||||
writeJSON(w, http.StatusOK, map[string]interface{}{
|
||||
"before": map[string]interface{}{
|
||||
"alloc": before.Alloc,
|
||||
"total_alloc": before.TotalAlloc,
|
||||
"sys": before.Sys,
|
||||
"num_gc": before.NumGC,
|
||||
},
|
||||
"after": map[string]interface{}{
|
||||
"alloc": after.Alloc,
|
||||
"total_alloc": after.TotalAlloc,
|
||||
"sys": after.Sys,
|
||||
"num_gc": after.NumGC,
|
||||
},
|
||||
"freed": before.Alloc - after.Alloc,
|
||||
})
|
||||
}
|
||||
64
internal/httpapp/docs_handlers.go
Normal file
64
internal/httpapp/docs_handlers.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package httpapp
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// handleAPIDocs serves the API documentation page
|
||||
func (a *App) handleAPIDocs(w http.ResponseWriter, r *http.Request) {
|
||||
// Simple HTML page with Swagger UI
|
||||
html := `<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>atlasOS API Documentation</title>
|
||||
<link rel="stylesheet" type="text/css" href="https://unpkg.com/swagger-ui-dist@5.10.3/swagger-ui.css" />
|
||||
<style>
|
||||
html { box-sizing: border-box; overflow: -moz-scrollbars-vertical; overflow-y: scroll; }
|
||||
*, *:before, *:after { box-sizing: inherit; }
|
||||
body { margin:0; background: #fafafa; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div id="swagger-ui"></div>
|
||||
<script src="https://unpkg.com/swagger-ui-dist@5.10.3/swagger-ui-bundle.js"></script>
|
||||
<script src="https://unpkg.com/swagger-ui-dist@5.10.3/swagger-ui-standalone-preset.js"></script>
|
||||
<script>
|
||||
window.onload = function() {
|
||||
const ui = SwaggerUIBundle({
|
||||
url: "/api/openapi.yaml",
|
||||
dom_id: '#swagger-ui',
|
||||
deepLinking: true,
|
||||
presets: [
|
||||
SwaggerUIBundle.presets.apis,
|
||||
SwaggerUIStandalonePreset
|
||||
],
|
||||
plugins: [
|
||||
SwaggerUIBundle.plugins.DownloadUrl
|
||||
],
|
||||
layout: "StandaloneLayout"
|
||||
});
|
||||
};
|
||||
</script>
|
||||
</body>
|
||||
</html>`
|
||||
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte(html))
|
||||
}
|
||||
|
||||
// handleOpenAPISpec serves the OpenAPI specification
|
||||
func (a *App) handleOpenAPISpec(w http.ResponseWriter, r *http.Request) {
|
||||
// Read OpenAPI spec from file system
|
||||
specPath := filepath.Join("docs", "openapi.yaml")
|
||||
spec, err := os.ReadFile(specPath)
|
||||
if err != nil {
|
||||
http.Error(w, "OpenAPI spec not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/yaml; charset=utf-8")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write(spec)
|
||||
}
|
||||
59
internal/httpapp/error_handlers.go
Normal file
59
internal/httpapp/error_handlers.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package httpapp
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
|
||||
)
|
||||
|
||||
// writeError writes a structured error response
|
||||
func writeError(w http.ResponseWriter, err error) {
|
||||
// Check if it's an APIError
|
||||
if apiErr, ok := err.(*errors.APIError); ok {
|
||||
writeJSON(w, apiErr.HTTPStatus, apiErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Default to internal server error
|
||||
log.Printf("unhandled error: %v", err)
|
||||
apiErr := errors.ErrInternal("an unexpected error occurred")
|
||||
writeJSON(w, apiErr.HTTPStatus, apiErr)
|
||||
}
|
||||
|
||||
// handleServiceError handles errors from service operations with graceful degradation
|
||||
func (a *App) handleServiceError(serviceName string, err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Log the error for debugging
|
||||
log.Printf("%s service error: %v", serviceName, err)
|
||||
|
||||
// For service errors, we might want to continue operation
|
||||
// but log the issue. The API request can still succeed
|
||||
// even if service configuration fails (desired state is stored)
|
||||
return errors.ErrServiceUnavailable(serviceName).WithDetails(err.Error())
|
||||
}
|
||||
|
||||
// recoverPanic recovers from panics and returns a proper error response
|
||||
func recoverPanic(w http.ResponseWriter, r *http.Request) {
|
||||
if rec := recover(); rec != nil {
|
||||
log.Printf("panic recovered: %v", rec)
|
||||
err := errors.ErrInternal("an unexpected error occurred")
|
||||
writeError(w, err)
|
||||
}
|
||||
}
|
||||
|
||||
// errorMiddleware wraps handlers with panic recovery
|
||||
func (a *App) errorMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
defer recoverPanic(w, r)
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// writeJSONError is a convenience function for JSON error responses
|
||||
func writeJSONError(w http.ResponseWriter, code int, message string) {
|
||||
writeJSON(w, code, map[string]string{"error": message})
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (a *App) handleDashboard(w http.ResponseWriter, r *http.Request) {
|
||||
@@ -26,18 +27,58 @@ func (a *App) handleHealthz(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (a *App) handleMetrics(w http.ResponseWriter, r *http.Request) {
|
||||
// Stub metrics (Prometheus format). We'll wire real collectors later.
|
||||
// Collect real-time metrics
|
||||
// ZFS metrics
|
||||
pools, _ := a.zfs.ListPools()
|
||||
datasets, _ := a.zfs.ListDatasets("")
|
||||
zvols, _ := a.zfs.ListZVOLs("")
|
||||
snapshots, _ := a.zfs.ListSnapshots("")
|
||||
|
||||
a.metricsCollector.UpdateZFSMetrics(pools, datasets, zvols, snapshots)
|
||||
|
||||
// Service metrics
|
||||
smbShares := a.smbStore.List()
|
||||
nfsExports := a.nfsStore.List()
|
||||
iscsiTargets := a.iscsiStore.List()
|
||||
|
||||
smbStatus, _ := a.smbService.GetStatus()
|
||||
nfsStatus, _ := a.nfsService.GetStatus()
|
||||
iscsiStatus, _ := a.iscsiService.GetStatus()
|
||||
|
||||
a.metricsCollector.UpdateServiceMetrics(
|
||||
len(smbShares),
|
||||
len(nfsExports),
|
||||
len(iscsiTargets),
|
||||
smbStatus,
|
||||
nfsStatus,
|
||||
iscsiStatus,
|
||||
)
|
||||
|
||||
// Job metrics
|
||||
allJobs := a.jobManager.List("")
|
||||
running := 0
|
||||
completed := 0
|
||||
failed := 0
|
||||
for _, job := range allJobs {
|
||||
switch job.Status {
|
||||
case "running":
|
||||
running++
|
||||
case "completed":
|
||||
completed++
|
||||
case "failed":
|
||||
failed++
|
||||
}
|
||||
}
|
||||
|
||||
a.metricsCollector.UpdateJobMetrics(len(allJobs), running, completed, failed)
|
||||
|
||||
// Update uptime
|
||||
a.metricsCollector.SetUptime(int64(time.Since(a.startTime).Seconds()))
|
||||
|
||||
// Output Prometheus format
|
||||
w.Header().Set("Content-Type", "text/plain; version=0.0.4")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte(
|
||||
`# HELP atlas_build_info Build info
|
||||
# TYPE atlas_build_info gauge
|
||||
atlas_build_info{version="v0.1.0-dev"} 1
|
||||
# HELP atlas_up Whether the atlas-api process is up
|
||||
# TYPE atlas_up gauge
|
||||
atlas_up 1
|
||||
`,
|
||||
))
|
||||
_, _ = w.Write([]byte(a.metricsCollector.Collect()))
|
||||
}
|
||||
|
||||
func (a *App) render(w http.ResponseWriter, name string, data any) {
|
||||
|
||||
@@ -28,13 +28,36 @@ func requestID(next http.Handler) http.Handler {
|
||||
func logging(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
start := time.Now()
|
||||
next.ServeHTTP(w, r)
|
||||
|
||||
// Create response writer wrapper to capture status code
|
||||
rw := &responseWriterWrapper{
|
||||
ResponseWriter: w,
|
||||
statusCode: http.StatusOK,
|
||||
}
|
||||
|
||||
next.ServeHTTP(rw, r)
|
||||
|
||||
d := time.Since(start)
|
||||
id, _ := r.Context().Value(requestIDKey).(string)
|
||||
log.Printf("%s %s %s rid=%s dur=%s", r.RemoteAddr, r.Method, r.URL.Path, id, d)
|
||||
|
||||
// Use structured logging if available, otherwise fallback to standard log
|
||||
log.Printf("%s %s %s status=%d rid=%s dur=%s",
|
||||
r.RemoteAddr, r.Method, r.URL.Path, rw.statusCode, id, d)
|
||||
})
|
||||
}
|
||||
|
||||
// responseWriterWrapper wraps http.ResponseWriter to capture status code
// Note: This is different from the one in audit_middleware.go to avoid conflicts
type responseWriterWrapper struct {
	http.ResponseWriter
	statusCode int
}

// WriteHeader records the status code before delegating to the wrapped writer.
func (rw *responseWriterWrapper) WriteHeader(status int) {
	rw.statusCode = status
	rw.ResponseWriter.WriteHeader(status)
}
|
||||
|
||||
func newReqID() string {
|
||||
var b [16]byte
|
||||
if _, err := rand.Read(b[:]); err != nil {
|
||||
|
||||
165
internal/httpapp/rate_limit.go
Normal file
165
internal/httpapp/rate_limit.go
Normal file
@@ -0,0 +1,165 @@
|
||||
package httpapp
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
|
||||
)
|
||||
|
||||
// RateLimiter implements token bucket rate limiting.
//
// Each client key gets its own bucket of `rate` tokens that refills
// continuously over `window`. A background goroutine evicts buckets that
// have been idle for more than two windows.
type RateLimiter struct {
	mu          sync.RWMutex
	clients     map[string]*clientLimiter
	rate        int           // requests per window
	window      time.Duration // time window
	cleanupTick *time.Ticker
	stopCleanup chan struct{}
}

// clientLimiter tracks the token bucket for a single client key.
type clientLimiter struct {
	tokens     int
	lastUpdate time.Time
	mu         sync.Mutex
}

// NewRateLimiter creates a new rate limiter allowing `rate` requests per
// `window` for each client key, and starts its eviction goroutine.
// Call Stop to shut the goroutine down.
func NewRateLimiter(rate int, window time.Duration) *RateLimiter {
	rl := &RateLimiter{
		clients:     make(map[string]*clientLimiter),
		rate:        rate,
		window:      window,
		cleanupTick: time.NewTicker(5 * time.Minute),
		stopCleanup: make(chan struct{}),
	}

	// Start cleanup goroutine
	go rl.cleanup()

	return rl
}

// cleanup periodically removes client limiters that have been idle long
// enough that recreating them (with a full bucket) is equivalent.
func (rl *RateLimiter) cleanup() {
	for {
		select {
		case <-rl.cleanupTick.C:
			rl.mu.Lock()
			now := time.Now()
			for key, limiter := range rl.clients {
				limiter.mu.Lock()
				// Remove if last update was more than 2 windows ago
				if now.Sub(limiter.lastUpdate) > rl.window*2 {
					delete(rl.clients, key)
				}
				limiter.mu.Unlock()
			}
			rl.mu.Unlock()
		case <-rl.stopCleanup:
			return
		}
	}
}

// Stop stops the cleanup goroutine and its ticker.
func (rl *RateLimiter) Stop() {
	rl.cleanupTick.Stop()
	close(rl.stopCleanup)
}

// Allow checks if a request from the given key should be allowed,
// consuming one token when it is.
func (rl *RateLimiter) Allow(key string) bool {
	rl.mu.Lock()
	limiter, exists := rl.clients[key]
	if !exists {
		// New clients start with a full bucket.
		limiter = &clientLimiter{
			tokens:     rl.rate,
			lastUpdate: time.Now(),
		}
		rl.clients[key] = limiter
	}
	rl.mu.Unlock()

	limiter.mu.Lock()
	defer limiter.mu.Unlock()

	now := time.Now()
	elapsed := now.Sub(limiter.lastUpdate)

	// Refill tokens based on elapsed time.
	if elapsed >= rl.window {
		// Full refill.
		limiter.tokens = rl.rate
		limiter.lastUpdate = now
	} else if tokensToAdd := int(float64(rl.rate) * elapsed.Seconds() / rl.window.Seconds()); tokensToAdd > 0 {
		// Partial refill proportional to elapsed time.
		limiter.tokens = min(limiter.tokens+tokensToAdd, rl.rate)
		// BUG FIX: advance lastUpdate only by the time actually converted
		// into tokens. The original unconditionally set lastUpdate = now,
		// discarding sub-token elapsed time on every call — a client
		// polling faster than one token-interval would never refill.
		limiter.lastUpdate = limiter.lastUpdate.Add(time.Duration(tokensToAdd) * rl.window / time.Duration(rl.rate))
	}
	// When no whole token has accrued, lastUpdate is left alone so the
	// fractional credit keeps accumulating across calls.

	// Check if we have tokens
	if limiter.tokens > 0 {
		limiter.tokens--
		return true
	}

	return false
}
|
||||
|
||||
// getClientKey extracts a key for rate limiting from the request
|
||||
func getClientKey(r *http.Request) string {
|
||||
// Try to get IP address
|
||||
ip := getClientIP(r)
|
||||
|
||||
// If authenticated, use user ID for more granular limiting
|
||||
if user, ok := getUserFromContext(r); ok {
|
||||
return "user:" + user.ID
|
||||
}
|
||||
|
||||
return "ip:" + ip
|
||||
}
|
||||
|
||||
// rateLimitMiddleware implements rate limiting
|
||||
func (a *App) rateLimitMiddleware(next http.Handler) http.Handler {
|
||||
// Default: 100 requests per minute per client
|
||||
rateLimiter := NewRateLimiter(100, time.Minute)
|
||||
|
||||
// Store limiter for cleanup on shutdown
|
||||
// Note: Cleanup will be handled by the limiter's own cleanup goroutine
|
||||
_ = rateLimiter
|
||||
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Skip rate limiting for public endpoints
|
||||
if a.isPublicEndpoint(r.URL.Path) {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
key := getClientKey(r)
|
||||
if !rateLimiter.Allow(key) {
|
||||
writeError(w, errors.NewAPIError(
|
||||
errors.ErrCodeServiceUnavailable,
|
||||
"rate limit exceeded",
|
||||
http.StatusTooManyRequests,
|
||||
).WithDetails("too many requests, please try again later"))
|
||||
return
|
||||
}
|
||||
|
||||
// Add rate limit headers
|
||||
w.Header().Set("X-RateLimit-Limit", "100")
|
||||
w.Header().Set("X-RateLimit-Window", "60")
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
||||
@@ -3,6 +3,8 @@ package httpapp
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
|
||||
)
|
||||
|
||||
// methodHandler routes requests based on HTTP method
|
||||
@@ -143,6 +145,35 @@ func (a *App) handleNFSExportOps(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
// handleISCSITargetOps routes iSCSI target operations by method
|
||||
func (a *App) handleBackupOps(w http.ResponseWriter, r *http.Request) {
|
||||
backupID := pathParam(r, "/api/v1/backups/")
|
||||
if backupID == "" {
|
||||
writeError(w, errors.ErrBadRequest("backup id required"))
|
||||
return
|
||||
}
|
||||
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
// Check if it's a verify request
|
||||
if r.URL.Query().Get("verify") == "true" {
|
||||
a.handleVerifyBackup(w, r)
|
||||
} else {
|
||||
a.handleGetBackup(w, r)
|
||||
}
|
||||
case http.MethodPost:
|
||||
// Restore backup (POST /api/v1/backups/{id}/restore)
|
||||
if strings.HasSuffix(r.URL.Path, "/restore") {
|
||||
a.handleRestoreBackup(w, r)
|
||||
} else {
|
||||
writeError(w, errors.ErrBadRequest("invalid backup operation"))
|
||||
}
|
||||
case http.MethodDelete:
|
||||
a.handleDeleteBackup(w, r)
|
||||
default:
|
||||
writeError(w, errors.ErrBadRequest("method not allowed"))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *App) handleISCSITargetOps(w http.ResponseWriter, r *http.Request) {
|
||||
if strings.HasSuffix(r.URL.Path, "/luns") {
|
||||
if r.Method == http.MethodPost {
|
||||
|
||||
@@ -16,8 +16,36 @@ func (a *App) routes() {
|
||||
|
||||
// Health & metrics
|
||||
a.mux.HandleFunc("/healthz", a.handleHealthz)
|
||||
a.mux.HandleFunc("/health", a.handleHealthCheck) // Detailed health check
|
||||
a.mux.HandleFunc("/metrics", a.handleMetrics)
|
||||
|
||||
// Diagnostics
|
||||
a.mux.HandleFunc("/api/v1/system/info", methodHandler(
|
||||
func(w http.ResponseWriter, r *http.Request) { a.handleSystemInfo(w, r) },
|
||||
nil, nil, nil, nil,
|
||||
))
|
||||
a.mux.HandleFunc("/api/v1/system/logs", methodHandler(
|
||||
func(w http.ResponseWriter, r *http.Request) { a.handleLogs(w, r) },
|
||||
nil, nil, nil, nil,
|
||||
))
|
||||
a.mux.HandleFunc("/api/v1/system/gc", methodHandler(
|
||||
nil,
|
||||
func(w http.ResponseWriter, r *http.Request) { a.handleGC(w, r) },
|
||||
nil, nil, nil,
|
||||
))
|
||||
|
||||
// API Documentation
|
||||
a.mux.HandleFunc("/api/docs", a.handleAPIDocs)
|
||||
a.mux.HandleFunc("/api/openapi.yaml", a.handleOpenAPISpec)
|
||||
|
||||
// Backup & Restore
|
||||
a.mux.HandleFunc("/api/v1/backups", methodHandler(
|
||||
func(w http.ResponseWriter, r *http.Request) { a.handleListBackups(w, r) },
|
||||
func(w http.ResponseWriter, r *http.Request) { a.handleCreateBackup(w, r) },
|
||||
nil, nil, nil,
|
||||
))
|
||||
a.mux.HandleFunc("/api/v1/backups/", a.handleBackupOps)
|
||||
|
||||
// Dashboard API
|
||||
a.mux.HandleFunc("/api/v1/dashboard", methodHandler(
|
||||
func(w http.ResponseWriter, r *http.Request) { a.handleDashboardAPI(w, r) },
|
||||
|
||||
121
internal/httpapp/security_middleware.go
Normal file
121
internal/httpapp/security_middleware.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package httpapp
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
|
||||
)
|
||||
|
||||
// securityHeadersMiddleware adds security headers to responses
|
||||
func (a *App) securityHeadersMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Security headers
|
||||
w.Header().Set("X-Content-Type-Options", "nosniff")
|
||||
w.Header().Set("X-Frame-Options", "DENY")
|
||||
w.Header().Set("X-XSS-Protection", "1; mode=block")
|
||||
w.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin")
|
||||
w.Header().Set("Permissions-Policy", "geolocation=(), microphone=(), camera=()")
|
||||
|
||||
// HSTS (only for HTTPS)
|
||||
if r.TLS != nil {
|
||||
w.Header().Set("Strict-Transport-Security", "max-age=31536000; includeSubDomains")
|
||||
}
|
||||
|
||||
// Content Security Policy (CSP)
|
||||
csp := "default-src 'self'; script-src 'self' 'unsafe-inline' https://cdn.jsdelivr.net; style-src 'self' 'unsafe-inline' https://cdn.jsdelivr.net; img-src 'self' data:; font-src 'self' https://cdn.jsdelivr.net; connect-src 'self';"
|
||||
w.Header().Set("Content-Security-Policy", csp)
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// corsMiddleware handles CORS requests
|
||||
func (a *App) corsMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
origin := r.Header.Get("Origin")
|
||||
|
||||
// Allow specific origins or all (for development)
|
||||
allowedOrigins := []string{
|
||||
"http://localhost:8080",
|
||||
"http://localhost:3000",
|
||||
"http://127.0.0.1:8080",
|
||||
}
|
||||
|
||||
// Check if origin is allowed
|
||||
allowed := false
|
||||
for _, allowedOrigin := range allowedOrigins {
|
||||
if origin == allowedOrigin {
|
||||
allowed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Allow requests from same origin
|
||||
if origin == "" || r.Header.Get("Referer") != "" {
|
||||
allowed = true
|
||||
}
|
||||
|
||||
if allowed && origin != "" {
|
||||
w.Header().Set("Access-Control-Allow-Origin", origin)
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, PATCH, OPTIONS")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, X-Requested-With")
|
||||
w.Header().Set("Access-Control-Allow-Credentials", "true")
|
||||
w.Header().Set("Access-Control-Max-Age", "3600")
|
||||
}
|
||||
|
||||
// Handle preflight requests
|
||||
if r.Method == http.MethodOptions {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// requestSizeMiddleware limits request body size
|
||||
func (a *App) requestSizeMiddleware(maxSize int64) func(http.Handler) http.Handler {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Limit request body size
|
||||
r.Body = http.MaxBytesReader(w, r.Body, maxSize)
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// validateContentTypeMiddleware validates Content-Type for POST/PUT/PATCH requests
|
||||
func (a *App) validateContentTypeMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Skip for GET, HEAD, OPTIONS, DELETE
|
||||
if r.Method == http.MethodGet || r.Method == http.MethodHead ||
|
||||
r.Method == http.MethodOptions || r.Method == http.MethodDelete {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Skip for public endpoints
|
||||
if a.isPublicEndpoint(r.URL.Path) {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Check Content-Type for POST/PUT/PATCH
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
if contentType == "" {
|
||||
writeError(w, errors.ErrBadRequest("Content-Type header is required"))
|
||||
return
|
||||
}
|
||||
|
||||
// Allow JSON and form data
|
||||
if !strings.HasPrefix(contentType, "application/json") &&
|
||||
!strings.HasPrefix(contentType, "application/x-www-form-urlencoded") &&
|
||||
!strings.HasPrefix(contentType, "multipart/form-data") {
|
||||
writeError(w, errors.ErrBadRequest("Content-Type must be application/json"))
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
58
internal/httpapp/validation_helpers.go
Normal file
58
internal/httpapp/validation_helpers.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package httpapp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// parseSizeString parses a human-readable size string to bytes
|
||||
func (a *App) parseSizeString(sizeStr string) (uint64, error) {
|
||||
sizeStr = strings.TrimSpace(strings.ToUpper(sizeStr))
|
||||
if sizeStr == "" {
|
||||
return 0, fmt.Errorf("size cannot be empty")
|
||||
}
|
||||
|
||||
// Extract number and unit
|
||||
var numStr string
|
||||
var unit string
|
||||
|
||||
for i, r := range sizeStr {
|
||||
if r >= '0' && r <= '9' {
|
||||
numStr += string(r)
|
||||
} else {
|
||||
unit = sizeStr[i:]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if numStr == "" {
|
||||
return 0, fmt.Errorf("invalid size format: no number found")
|
||||
}
|
||||
|
||||
num, err := strconv.ParseUint(numStr, 10, 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("invalid size number: %w", err)
|
||||
}
|
||||
|
||||
// Convert to bytes based on unit
|
||||
multiplier := uint64(1)
|
||||
switch unit {
|
||||
case "":
|
||||
multiplier = 1
|
||||
case "K", "KB":
|
||||
multiplier = 1024
|
||||
case "M", "MB":
|
||||
multiplier = 1024 * 1024
|
||||
case "G", "GB":
|
||||
multiplier = 1024 * 1024 * 1024
|
||||
case "T", "TB":
|
||||
multiplier = 1024 * 1024 * 1024 * 1024
|
||||
case "P", "PB":
|
||||
multiplier = 1024 * 1024 * 1024 * 1024 * 1024
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid size unit: %s (allowed: K, M, G, T, P)", unit)
|
||||
}
|
||||
|
||||
return num * multiplier, nil
|
||||
}
|
||||
Reference in New Issue
Block a user