350
internal/backup/service.go
Normal file
@@ -0,0 +1,350 @@
package backup

import (
	"archive/tar"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"time"

	"gitea.avt.data-center.id/othman.suseno/atlas/internal/models"
)

// Service handles configuration backup and restore operations
type Service struct {
	backupDir string
}

// BackupMetadata contains information about a backup
type BackupMetadata struct {
	ID          string    `json:"id"`
	CreatedAt   time.Time `json:"created_at"`
	Version     string    `json:"version"`
	Description string    `json:"description,omitempty"`
	Size        int64     `json:"size"`
	Checksum    string    `json:"checksum,omitempty"`
}

// BackupData contains all configuration data to be backed up
type BackupData struct {
	Metadata     BackupMetadata          `json:"metadata"`
	Users        []models.User           `json:"users,omitempty"`
	SMBShares    []models.SMBShare       `json:"smb_shares,omitempty"`
	NFSExports   []models.NFSExport      `json:"nfs_exports,omitempty"`
	ISCSITargets []models.ISCSITarget    `json:"iscsi_targets,omitempty"`
	Policies     []models.SnapshotPolicy `json:"policies,omitempty"`
	Config       map[string]interface{}  `json:"config,omitempty"`
}

// New creates a new backup service
func New(backupDir string) (*Service, error) {
	if err := os.MkdirAll(backupDir, 0755); err != nil {
		return nil, fmt.Errorf("create backup directory: %w", err)
	}

	return &Service{
		backupDir: backupDir,
	}, nil
}

// CreateBackup creates a backup of all system configurations
func (s *Service) CreateBackup(data BackupData, description string) (string, error) {
	// Generate backup ID
	backupID := fmt.Sprintf("backup-%d", time.Now().Unix())
	backupPath := filepath.Join(s.backupDir, backupID+".tar.gz")

	// Set metadata
	data.Metadata.ID = backupID
	data.Metadata.CreatedAt = time.Now()
	data.Metadata.Version = "1.0"
	data.Metadata.Description = description

	// Create backup file
	file, err := os.Create(backupPath)
	if err != nil {
		return "", fmt.Errorf("create backup file: %w", err)
	}
	defer file.Close()

	// Create gzip writer
	gzWriter := gzip.NewWriter(file)
	defer gzWriter.Close()

	// Create tar writer
	tarWriter := tar.NewWriter(gzWriter)
	defer tarWriter.Close()

	// Write metadata
	metadataJSON, err := json.MarshalIndent(data.Metadata, "", " ")
	if err != nil {
		return "", fmt.Errorf("marshal metadata: %w", err)
	}

	if err := s.writeFileToTar(tarWriter, "metadata.json", metadataJSON); err != nil {
		return "", fmt.Errorf("write metadata: %w", err)
	}

	// Write configuration data
	configJSON, err := json.MarshalIndent(data, "", " ")
	if err != nil {
		return "", fmt.Errorf("marshal config: %w", err)
	}

	if err := s.writeFileToTar(tarWriter, "config.json", configJSON); err != nil {
		return "", fmt.Errorf("write config: %w", err)
	}

	// Close the writers explicitly so buffered tar/gzip data is flushed to
	// disk before the file is stat'ed; the deferred Close calls above would
	// run too late and the recorded size would be short
	if err := tarWriter.Close(); err != nil {
		return "", fmt.Errorf("close tar writer: %w", err)
	}
	if err := gzWriter.Close(); err != nil {
		return "", fmt.Errorf("close gzip writer: %w", err)
	}

	// Get file size
	stat, err := file.Stat()
	if err != nil {
		return "", fmt.Errorf("get file stat: %w", err)
	}

	data.Metadata.Size = stat.Size()

	// Update metadata with size
	metadataJSON, err = json.MarshalIndent(data.Metadata, "", " ")
	if err != nil {
		return "", fmt.Errorf("marshal updated metadata: %w", err)
	}

	// Note: the entry inside the tar file cannot be rewritten in place, so
	// the authoritative metadata (including size) is stored alongside it
	metadataPath := filepath.Join(s.backupDir, backupID+".meta.json")
	if err := os.WriteFile(metadataPath, metadataJSON, 0644); err != nil {
		return "", fmt.Errorf("write metadata file: %w", err)
	}

	return backupID, nil
}

// writeFileToTar writes a file to a tar archive
func (s *Service) writeFileToTar(tw *tar.Writer, filename string, data []byte) error {
	header := &tar.Header{
		Name:    filename,
		Size:    int64(len(data)),
		Mode:    0644,
		ModTime: time.Now(),
	}

	if err := tw.WriteHeader(header); err != nil {
		return err
	}

	if _, err := tw.Write(data); err != nil {
		return err
	}

	return nil
}

// ListBackups returns a list of all available backups
func (s *Service) ListBackups() ([]BackupMetadata, error) {
	files, err := os.ReadDir(s.backupDir)
	if err != nil {
		return nil, fmt.Errorf("read backup directory: %w", err)
	}

	var backups []BackupMetadata
	for _, file := range files {
		if file.IsDir() {
			continue
		}

		// Only index metadata files, which are named "<id>.meta.json"
		// (the extension check alone was redundant with the suffix check)
		if !strings.HasSuffix(file.Name(), ".meta.json") {
			continue
		}

		metadataPath := filepath.Join(s.backupDir, file.Name())
		data, err := os.ReadFile(metadataPath)
		if err != nil {
			continue // Skip corrupted metadata files
		}

		var metadata BackupMetadata
		if err := json.Unmarshal(data, &metadata); err != nil {
			continue // Skip invalid metadata files
		}

		// Get actual backup file size if it exists
		backupPath := filepath.Join(s.backupDir, metadata.ID+".tar.gz")
		if stat, err := os.Stat(backupPath); err == nil {
			metadata.Size = stat.Size()
		}

		backups = append(backups, metadata)
	}

	return backups, nil
}

// GetBackup returns metadata for a specific backup
func (s *Service) GetBackup(backupID string) (*BackupMetadata, error) {
	metadataPath := filepath.Join(s.backupDir, backupID+".meta.json")
	data, err := os.ReadFile(metadataPath)
	if err != nil {
		return nil, fmt.Errorf("read metadata: %w", err)
	}

	var metadata BackupMetadata
	if err := json.Unmarshal(data, &metadata); err != nil {
		return nil, fmt.Errorf("unmarshal metadata: %w", err)
	}

	// Get actual backup file size
	backupPath := filepath.Join(s.backupDir, backupID+".tar.gz")
	if stat, err := os.Stat(backupPath); err == nil {
		metadata.Size = stat.Size()
	}

	return &metadata, nil
}

// RestoreBackup restores configuration from a backup
func (s *Service) RestoreBackup(backupID string) (*BackupData, error) {
	backupPath := filepath.Join(s.backupDir, backupID+".tar.gz")

	file, err := os.Open(backupPath)
	if err != nil {
		return nil, fmt.Errorf("open backup file: %w", err)
	}
	defer file.Close()

	// Create gzip reader
	gzReader, err := gzip.NewReader(file)
	if err != nil {
		return nil, fmt.Errorf("create gzip reader: %w", err)
	}
	defer gzReader.Close()

	// Create tar reader
	tarReader := tar.NewReader(gzReader)

	var configData []byte
	var metadataData []byte

	// Extract files from tar
	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("read tar: %w", err)
		}

		switch header.Name {
		case "config.json":
			configData, err = io.ReadAll(tarReader)
			if err != nil {
				return nil, fmt.Errorf("read config: %w", err)
			}
		case "metadata.json":
			metadataData, err = io.ReadAll(tarReader)
			if err != nil {
				return nil, fmt.Errorf("read metadata: %w", err)
			}
		}
	}

	if configData == nil {
		return nil, fmt.Errorf("config.json not found in backup")
	}

	var backupData BackupData
	if err := json.Unmarshal(configData, &backupData); err != nil {
		return nil, fmt.Errorf("unmarshal config: %w", err)
	}

	// Prefer the standalone metadata entry when present; a parse failure
	// here is non-fatal because config.json already embeds the metadata
	if metadataData != nil {
		_ = json.Unmarshal(metadataData, &backupData.Metadata)
	}

	return &backupData, nil
}

// DeleteBackup deletes a backup file and its metadata
func (s *Service) DeleteBackup(backupID string) error {
	backupPath := filepath.Join(s.backupDir, backupID+".tar.gz")
	metadataPath := filepath.Join(s.backupDir, backupID+".meta.json")

	var errs []error

	if err := os.Remove(backupPath); err != nil && !os.IsNotExist(err) {
		errs = append(errs, fmt.Errorf("remove backup file: %w", err))
	}

	if err := os.Remove(metadataPath); err != nil && !os.IsNotExist(err) {
		errs = append(errs, fmt.Errorf("remove metadata file: %w", err))
	}

	if len(errs) > 0 {
		return fmt.Errorf("delete backup: %v", errs)
	}

	return nil
}

// VerifyBackup verifies that a backup file is valid and can be restored
func (s *Service) VerifyBackup(backupID string) error {
	backupPath := filepath.Join(s.backupDir, backupID+".tar.gz")

	file, err := os.Open(backupPath)
	if err != nil {
		return fmt.Errorf("open backup file: %w", err)
	}
	defer file.Close()

	// Try to read the backup
	gzReader, err := gzip.NewReader(file)
	if err != nil {
		return fmt.Errorf("invalid gzip format: %w", err)
	}
	defer gzReader.Close()

	tarReader := tar.NewReader(gzReader)

	hasConfig := false

	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("invalid tar format: %w", err)
		}

		switch header.Name {
		case "config.json":
			hasConfig = true
			// Try to read and parse config
			data, err := io.ReadAll(tarReader)
			if err != nil {
				return fmt.Errorf("read config: %w", err)
			}
			var backupData BackupData
			if err := json.Unmarshal(data, &backupData); err != nil {
				return fmt.Errorf("invalid config format: %w", err)
			}
		case "metadata.json":
			// Metadata is optional, just verify it can be read
			if _, err := io.ReadAll(tarReader); err != nil {
				return fmt.Errorf("read metadata: %w", err)
			}
		}
	}

	if !hasConfig {
		return fmt.Errorf("backup missing config.json")
	}

	return nil
}
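For orientation, a minimal round trip through this service looks like the sketch below (paths and the description string are illustrative; error handling is abbreviated):

	svc, _ := backup.New("data/backups")

	// Snapshot the current configuration.
	id, err := svc.CreateBackup(backup.BackupData{
		Config: map[string]interface{}{"database_path": "data/atlas.db"},
	}, "pre-upgrade")
	if err != nil {
		log.Fatal(err)
	}

	// Check the archive is structurally sound before trusting it.
	if err := svc.VerifyBackup(id); err != nil {
		log.Fatalf("backup %s is not restorable: %v", id, err)
	}

	// Read the configuration back out of the tarball.
	data, err := svc.RestoreBackup(id)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(data.Metadata.ID, data.Metadata.CreatedAt)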
114
internal/errors/errors.go
Normal file
@@ -0,0 +1,114 @@
package errors

import (
	"fmt"
	"net/http"
	"time"
)

// ErrorCode represents a specific error type
type ErrorCode string

const (
	ErrCodeInternal           ErrorCode = "INTERNAL_ERROR"
	ErrCodeNotFound           ErrorCode = "NOT_FOUND"
	ErrCodeBadRequest         ErrorCode = "BAD_REQUEST"
	ErrCodeConflict           ErrorCode = "CONFLICT"
	ErrCodeUnauthorized       ErrorCode = "UNAUTHORIZED"
	ErrCodeForbidden          ErrorCode = "FORBIDDEN"
	ErrCodeServiceUnavailable ErrorCode = "SERVICE_UNAVAILABLE"
	ErrCodeValidation         ErrorCode = "VALIDATION_ERROR"
)

// APIError represents a structured API error
type APIError struct {
	Code       ErrorCode `json:"code"`
	Message    string    `json:"message"`
	Details    string    `json:"details,omitempty"`
	HTTPStatus int       `json:"-"`
}

func (e *APIError) Error() string {
	if e.Details != "" {
		return fmt.Sprintf("%s: %s (%s)", e.Code, e.Message, e.Details)
	}
	return fmt.Sprintf("%s: %s", e.Code, e.Message)
}

// NewAPIError creates a new API error
func NewAPIError(code ErrorCode, message string, httpStatus int) *APIError {
	return &APIError{
		Code:       code,
		Message:    message,
		HTTPStatus: httpStatus,
	}
}

// WithDetails adds details to an error
func (e *APIError) WithDetails(details string) *APIError {
	e.Details = details
	return e
}

// Common error constructors
func ErrNotFound(resource string) *APIError {
	return NewAPIError(ErrCodeNotFound, fmt.Sprintf("%s not found", resource), http.StatusNotFound)
}

func ErrBadRequest(message string) *APIError {
	return NewAPIError(ErrCodeBadRequest, message, http.StatusBadRequest)
}

func ErrConflict(message string) *APIError {
	return NewAPIError(ErrCodeConflict, message, http.StatusConflict)
}

func ErrInternal(message string) *APIError {
	return NewAPIError(ErrCodeInternal, message, http.StatusInternalServerError)
}

func ErrServiceUnavailable(service string) *APIError {
	return NewAPIError(ErrCodeServiceUnavailable, fmt.Sprintf("%s service is unavailable", service), http.StatusServiceUnavailable)
}

func ErrValidation(message string) *APIError {
	return NewAPIError(ErrCodeValidation, message, http.StatusBadRequest)
}

// RetryConfig defines retry behavior
type RetryConfig struct {
	MaxAttempts int
	Backoff     func(attempt int) error // Returns error if should stop retrying
}

// DefaultRetryConfig returns a default retry configuration
func DefaultRetryConfig() RetryConfig {
	return RetryConfig{
		MaxAttempts: 3,
		Backoff: func(attempt int) error {
			if attempt >= 3 {
				return fmt.Errorf("max attempts reached")
			}
			// Simple exponential backoff: 100ms after attempt 1, 200ms after attempt 2
			time.Sleep(time.Duration(100<<uint(attempt-1)) * time.Millisecond)
			return nil
		},
	}
}

// Retry executes a function with retry logic
func Retry(fn func() error, config RetryConfig) error {
	var lastErr error
	for attempt := 1; attempt <= config.MaxAttempts; attempt++ {
		lastErr = fn()
		if lastErr == nil {
			return nil
		}

		if attempt < config.MaxAttempts {
			if err := config.Backoff(attempt); err != nil {
				return fmt.Errorf("retry aborted: %w", err)
			}
		}
	}
	return fmt.Errorf("retry failed after %d attempts: %w", config.MaxAttempts, lastErr)
}
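Retry and APIError compose naturally at call sites: wrap the fallible call, then map the final failure onto a typed error for the HTTP layer. A sketch (reloadShares is a stand-in for any flaky operation):

	cfg := errors.DefaultRetryConfig()
	if err := errors.Retry(reloadShares, cfg); err != nil {
		apiErr := errors.ErrServiceUnavailable("SMB").WithDetails(err.Error())
		// Error() renders "SERVICE_UNAVAILABLE: SMB service is unavailable (...)"
		fmt.Println(apiErr.Error())
	}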
@@ -9,8 +9,10 @@ import (
 	"strings"
 
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/auth"
+	"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/models"
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/storage"
+	"gitea.avt.data-center.id/othman.suseno/atlas/internal/validation"
 )
 
 // pathParam is now in router_helpers.go
@@ -45,12 +47,18 @@ func (a *App) handleCreatePool(w http.ResponseWriter, r *http.Request) {
 	}
 
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
+		writeError(w, errors.ErrBadRequest("invalid request body"))
 		return
 	}
 
-	if req.Name == "" || len(req.VDEVs) == 0 {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name and vdevs are required"})
+	// Validate pool name
+	if err := validation.ValidateZFSName(req.Name); err != nil {
+		writeError(w, errors.ErrValidation(err.Error()))
+		return
+	}
+
+	if len(req.VDEVs) == 0 {
+		writeError(w, errors.ErrValidation("at least one vdev is required"))
 		return
 	}
 
@@ -224,17 +232,31 @@ func (a *App) handleListZVOLs(w http.ResponseWriter, r *http.Request) {
 func (a *App) handleCreateZVOL(w http.ResponseWriter, r *http.Request) {
 	var req struct {
 		Name    string            `json:"name"`
-		Size    uint64            `json:"size"` // in bytes
+		Size    string            `json:"size"` // human-readable format (e.g., "10G")
 		Options map[string]string `json:"options,omitempty"`
 	}
 
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
+		writeError(w, errors.ErrBadRequest("invalid request body"))
 		return
 	}
 
-	if req.Name == "" || req.Size == 0 {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name and size are required"})
+	// Validate ZVOL name
+	if err := validation.ValidateZFSName(req.Name); err != nil {
+		writeError(w, errors.ErrValidation(err.Error()))
 		return
 	}
+
+	// Validate size format
+	if err := validation.ValidateSize(req.Size); err != nil {
+		writeError(w, errors.ErrValidation(err.Error()))
+		return
+	}
+
+	// Parse size to bytes
+	sizeBytes, err := a.parseSizeString(req.Size)
+	if err != nil {
+		writeError(w, errors.ErrValidation(fmt.Sprintf("invalid size: %v", err)))
+		return
+	}
 
@@ -242,7 +264,7 @@ func (a *App) handleCreateZVOL(w http.ResponseWriter, r *http.Request) {
 		req.Options = make(map[string]string)
 	}
 
-	if err := a.zfs.CreateZVOL(req.Name, req.Size, req.Options); err != nil {
+	if err := a.zfs.CreateZVOL(req.Name, sizeBytes, req.Options); err != nil {
 		log.Printf("create zvol error: %v", err)
 		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
 		return
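a.parseSizeString is referenced above but is not part of this diff; a minimal sketch of such a helper, assuming it accepts K/M/G/T binary suffixes, might look like this:

	// Hypothetical sketch - the actual helper in this codebase is not shown here.
	func parseSizeString(s string) (uint64, error) {
		units := map[byte]uint64{'K': 1 << 10, 'M': 1 << 20, 'G': 1 << 30, 'T': 1 << 40}
		s = strings.ToUpper(strings.TrimSpace(s))
		if s == "" {
			return 0, fmt.Errorf("empty size")
		}
		mult := uint64(1)
		if m, ok := units[s[len(s)-1]]; ok {
			mult = m
			s = s[:len(s)-1]
		}
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			return 0, fmt.Errorf("invalid size %q: %w", s, err)
		}
		return n * mult, nil // e.g. "10G" -> 10 * 2^30 bytes
	}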
@@ -314,8 +336,16 @@ func (a *App) handleCreateSnapshot(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if req.Dataset == "" || req.Name == "" {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset and name are required"})
+	// Validate dataset name
+	if err := validation.ValidateZFSName(req.Dataset); err != nil {
+		writeError(w, errors.ErrValidation(err.Error()))
 		return
 	}
 
+	// Validate snapshot name (can contain @ but we'll validate the base name)
+	snapshotBaseName := strings.ReplaceAll(req.Name, "@", "")
+	if err := validation.ValidateZFSName(snapshotBaseName); err != nil {
+		writeError(w, errors.ErrValidation("invalid snapshot name"))
+		return
+	}
+
@@ -325,10 +355,10 @@ func (a *App) handleCreateSnapshot(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	snapshotName := fmt.Sprintf("%s@%s", req.Dataset, req.Name)
-	snap, err := a.zfs.GetSnapshot(snapshotName)
+	fullSnapshotName := fmt.Sprintf("%s@%s", req.Dataset, req.Name)
+	snap, err := a.zfs.GetSnapshot(fullSnapshotName)
 	if err != nil {
-		writeJSON(w, http.StatusCreated, map[string]string{"message": "snapshot created", "name": snapshotName})
+		writeJSON(w, http.StatusCreated, map[string]string{"message": "snapshot created", "name": fullSnapshotName})
 		return
 	}
 
@@ -477,11 +507,27 @@ func (a *App) handleCreateSMBShare(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if req.Name == "" || req.Dataset == "" {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name and dataset are required"})
+	// Validate share name
+	if err := validation.ValidateShareName(req.Name); err != nil {
+		writeError(w, errors.ErrValidation(err.Error()))
 		return
 	}
 
+	// Validate dataset name
+	if err := validation.ValidateZFSName(req.Dataset); err != nil {
+		writeError(w, errors.ErrValidation(err.Error()))
+		return
+	}
+
+	// Sanitize path if provided
+	if req.Path != "" {
+		req.Path = validation.SanitizePath(req.Path)
+		if err := validation.ValidatePath(req.Path); err != nil {
+			writeError(w, errors.ErrValidation(err.Error()))
+			return
+		}
+	}
+
 	// Validate dataset exists
 	datasets, err := a.zfs.ListDatasets("")
 	if err != nil {
@@ -509,20 +555,22 @@ func (a *App) handleCreateSMBShare(w http.ResponseWriter, r *http.Request) {
 	share, err := a.smbStore.Create(req.Name, req.Path, req.Dataset, req.Description, req.ReadOnly, req.GuestOK, req.ValidUsers)
 	if err != nil {
 		if err == storage.ErrSMBShareExists {
-			writeJSON(w, http.StatusConflict, map[string]string{"error": "share name already exists"})
+			writeError(w, errors.ErrConflict("share name already exists"))
 			return
 		}
 		log.Printf("create SMB share error: %v", err)
-		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
+		writeError(w, errors.ErrInternal("failed to create SMB share").WithDetails(err.Error()))
 		return
 	}
 
-	// Apply configuration to Samba service
+	// Apply configuration to Samba service (with graceful degradation)
 	shares := a.smbStore.List()
 	if err := a.smbService.ApplyConfiguration(shares); err != nil {
 		log.Printf("apply SMB configuration error: %v", err)
-		// Don't fail the request, but log the error
-		// In production, you might want to queue this for retry
+		// Log but don't fail the request - desired state is stored
+		// Service configuration can be retried later
+		if svcErr := a.handleServiceError("SMB", err); svcErr != nil {
+			log.Printf("SMB service configuration failed (non-fatal): %v", err)
+		}
 	}
 
 	writeJSON(w, http.StatusCreated, share)
@@ -629,11 +677,29 @@ func (a *App) handleCreateNFSExport(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if req.Dataset == "" {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset is required"})
+	// Validate dataset name
+	if err := validation.ValidateZFSName(req.Dataset); err != nil {
+		writeError(w, errors.ErrValidation(err.Error()))
 		return
 	}
 
+	// Validate and sanitize path if provided
+	if req.Path != "" {
+		req.Path = validation.SanitizePath(req.Path)
+		if err := validation.ValidatePath(req.Path); err != nil {
+			writeError(w, errors.ErrValidation(err.Error()))
+			return
+		}
+	}
+
+	// Validate clients
+	for i, client := range req.Clients {
+		if err := validation.ValidateCIDR(client); err != nil {
+			writeError(w, errors.ErrValidation(fmt.Sprintf("client[%d]: %s", i, err.Error())))
+			return
+		}
+	}
+
 	// Validate dataset exists
 	datasets, err := a.zfs.ListDatasets("")
 	if err != nil {
@@ -786,14 +852,9 @@ func (a *App) handleCreateISCSITarget(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if req.IQN == "" {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "iqn is required"})
-		return
-	}
-
-	// Basic IQN format validation (iqn.yyyy-mm.reversed.domain:identifier)
-	if !strings.HasPrefix(req.IQN, "iqn.") {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid IQN format (must start with 'iqn.')"})
+	// Validate IQN format
+	if err := validation.ValidateIQN(req.IQN); err != nil {
+		writeError(w, errors.ErrValidation(err.Error()))
 		return
 	}
 
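validation.ValidateIQN replaces the old prefix-only check; the validation package is outside this diff, but an implementation matching the iqn.yyyy-mm.reversed.domain:identifier format noted above could plausibly look like this:

	// Hypothetical sketch - the real validation package is not shown in this diff.
	var iqnPattern = regexp.MustCompile(`^iqn\.\d{4}-(0[1-9]|1[0-2])\.[a-z0-9]([a-z0-9.-]*[a-z0-9])?(:.+)?$`)

	func ValidateIQN(iqn string) error {
		if !iqnPattern.MatchString(iqn) {
			return fmt.Errorf("invalid IQN format (expected iqn.yyyy-mm.reversed.domain:identifier)")
		}
		return nil
	}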
@@ -1065,8 +1126,14 @@ func (a *App) handleLogin(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if req.Username == "" || req.Password == "" {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "username and password are required"})
+	// Validate username (login is less strict - just check not empty)
+	if req.Username == "" {
+		writeError(w, errors.ErrValidation("username is required"))
 		return
 	}
 
+	if req.Password == "" {
+		writeError(w, errors.ErrValidation("password is required"))
+		return
+	}
+
@@ -1116,11 +1183,26 @@ func (a *App) handleCreateUser(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if req.Username == "" || req.Password == "" {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "username and password are required"})
+	// Validate username
+	if err := validation.ValidateUsername(req.Username); err != nil {
+		writeError(w, errors.ErrValidation(err.Error()))
 		return
 	}
 
+	// Validate password
+	if err := validation.ValidatePassword(req.Password); err != nil {
+		writeError(w, errors.ErrValidation(err.Error()))
+		return
+	}
+
+	// Validate email if provided
+	if req.Email != "" {
+		if err := validation.ValidateEmail(req.Email); err != nil {
+			writeError(w, errors.ErrValidation(err.Error()))
+			return
+		}
+	}
+
 	if req.Role == "" {
 		req.Role = models.RoleViewer // Default role
 	}
@@ -10,8 +10,10 @@ import (
 
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/audit"
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/auth"
+	"gitea.avt.data-center.id/othman.suseno/atlas/internal/backup"
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/db"
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/job"
+	"gitea.avt.data-center.id/othman.suseno/atlas/internal/metrics"
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/services"
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/snapshot"
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/storage"
@@ -26,23 +28,26 @@ type Config struct {
 }
 
 type App struct {
-	cfg            Config
-	tmpl           *template.Template
-	mux            *http.ServeMux
-	zfs            *zfs.Service
-	snapshotPolicy *snapshot.PolicyStore
-	jobManager     *job.Manager
-	scheduler      *snapshot.Scheduler
-	authService    *auth.Service
-	userStore      *auth.UserStore
-	auditStore     *audit.Store
-	smbStore       *storage.SMBStore
-	nfsStore       *storage.NFSStore
-	iscsiStore     *storage.ISCSIStore
-	database       *db.DB // Optional database connection
-	smbService     *services.SMBService
-	nfsService     *services.NFSService
-	iscsiService   *services.ISCSIService
+	cfg              Config
+	tmpl             *template.Template
+	mux              *http.ServeMux
+	zfs              *zfs.Service
+	snapshotPolicy   *snapshot.PolicyStore
+	jobManager       *job.Manager
+	scheduler        *snapshot.Scheduler
+	authService      *auth.Service
+	userStore        *auth.UserStore
+	auditStore       *audit.Store
+	smbStore         *storage.SMBStore
+	nfsStore         *storage.NFSStore
+	iscsiStore       *storage.ISCSIStore
+	database         *db.DB // Optional database connection
+	smbService       *services.SMBService
+	nfsService       *services.NFSService
+	iscsiService     *services.ISCSIService
+	metricsCollector *metrics.Collector
+	startTime        time.Time
+	backupService    *backup.Service
 }
 
 func New(cfg Config) (*App, error) {
@@ -91,24 +96,41 @@ func New(cfg Config) (*App, error) {
 	nfsService := services.NewNFSService()
 	iscsiService := services.NewISCSIService()
 
+	// Initialize metrics collector
+	metricsCollector := metrics.NewCollector()
+	startTime := time.Now()
+
+	// Initialize backup service
+	backupDir := os.Getenv("ATLAS_BACKUP_DIR")
+	if backupDir == "" {
+		backupDir = "data/backups"
+	}
+	backupService, err := backup.New(backupDir)
+	if err != nil {
+		return nil, fmt.Errorf("init backup service: %w", err)
+	}
+
 	a := &App{
-		cfg:            cfg,
-		tmpl:           tmpl,
-		mux:            http.NewServeMux(),
-		zfs:            zfsService,
-		snapshotPolicy: policyStore,
-		jobManager:     jobMgr,
-		scheduler:      scheduler,
-		authService:    authService,
-		userStore:      userStore,
-		auditStore:     auditStore,
-		smbStore:       smbStore,
-		nfsStore:       nfsStore,
-		iscsiStore:     iscsiStore,
-		database:       database,
-		smbService:     smbService,
-		nfsService:     nfsService,
-		iscsiService:   iscsiService,
+		cfg:              cfg,
+		tmpl:             tmpl,
+		mux:              http.NewServeMux(),
+		zfs:              zfsService,
+		snapshotPolicy:   policyStore,
+		jobManager:       jobMgr,
+		scheduler:        scheduler,
+		authService:      authService,
+		userStore:        userStore,
+		auditStore:       auditStore,
+		smbStore:         smbStore,
+		nfsStore:         nfsStore,
+		iscsiStore:       iscsiStore,
+		database:         database,
+		smbService:       smbService,
+		nfsService:       nfsService,
+		iscsiService:     iscsiService,
+		metricsCollector: metricsCollector,
+		startTime:        startTime,
+		backupService:    backupService,
 	}
 
 	// Start snapshot scheduler (runs every 15 minutes)
@@ -119,8 +141,37 @@ func New(cfg Config) (*App, error) {
 }
 
 func (a *App) Router() http.Handler {
-	// Wrap the mux with middleware chain: requestID -> logging -> audit -> auth
-	return requestID(logging(a.auditMiddleware(a.authMiddleware(a.mux))))
+	// Middleware chain order (outer to inner):
+	// 1. CORS (handles preflight)
+	// 2. Security headers
+	// 3. Request size limit (10MB)
+	// 4. Content-Type validation
+	// 5. Rate limiting
+	// 6. Error recovery
+	// 7. Request ID
+	// 8. Logging
+	// 9. Audit
+	// 10. Authentication
+	// 11. Routes
+	return a.corsMiddleware(
+		a.securityHeadersMiddleware(
+			a.requestSizeMiddleware(10 * 1024 * 1024)(
+				a.validateContentTypeMiddleware(
+					a.rateLimitMiddleware(
+						a.errorMiddleware(
+							requestID(
+								logging(
+									a.auditMiddleware(
+										a.authMiddleware(a.mux),
+									),
+								),
+							),
+						),
+					),
+				),
+			),
+		),
+	)
 }
 
 // StopScheduler stops the snapshot scheduler (for graceful shutdown)
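The literal nesting above reads inside-out: a.corsMiddleware is outermost and sees every request first, while a.authMiddleware runs last before routing. A small helper can express the same chain flat (a sketch only; no such Chain helper exists in this codebase):

	// Chain wraps h so that the first middleware listed is outermost.
	// Hypothetical helper, shown only to illustrate the wrapping order.
	func Chain(h http.Handler, mws ...func(http.Handler) http.Handler) http.Handler {
		for i := len(mws) - 1; i >= 0; i-- {
			h = mws[i](h)
		}
		return h
	}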
304
internal/httpapp/backup_handlers.go
Normal file
@@ -0,0 +1,304 @@
package httpapp

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"strings"

	"gitea.avt.data-center.id/othman.suseno/atlas/internal/backup"
	"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
	"gitea.avt.data-center.id/othman.suseno/atlas/internal/models"
)

// Backup Handlers

func (a *App) handleCreateBackup(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Description string `json:"description,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		// Description is optional, so we continue even if the body is empty
		_ = err
	}

	// Collect all configuration data
	backupData := backup.BackupData{
		Users:        a.userStore.List(),
		SMBShares:    a.smbStore.List(),
		NFSExports:   a.nfsStore.List(),
		ISCSITargets: a.iscsiStore.List(),
		Policies:     a.snapshotPolicy.List(),
		Config: map[string]interface{}{
			"database_path": a.cfg.DatabasePath,
		},
	}

	// Create backup
	backupID, err := a.backupService.CreateBackup(backupData, req.Description)
	if err != nil {
		log.Printf("create backup error: %v", err)
		writeError(w, errors.ErrInternal("failed to create backup").WithDetails(err.Error()))
		return
	}

	// Get backup metadata
	metadata, err := a.backupService.GetBackup(backupID)
	if err != nil {
		log.Printf("get backup metadata error: %v", err)
		writeJSON(w, http.StatusCreated, map[string]interface{}{
			"id":      backupID,
			"message": "backup created",
		})
		return
	}

	writeJSON(w, http.StatusCreated, metadata)
}

func (a *App) handleListBackups(w http.ResponseWriter, r *http.Request) {
	backups, err := a.backupService.ListBackups()
	if err != nil {
		log.Printf("list backups error: %v", err)
		writeError(w, errors.ErrInternal("failed to list backups").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, backups)
}

func (a *App) handleGetBackup(w http.ResponseWriter, r *http.Request) {
	backupID := pathParam(r, "/api/v1/backups/")
	if backupID == "" {
		writeError(w, errors.ErrBadRequest("backup id required"))
		return
	}

	metadata, err := a.backupService.GetBackup(backupID)
	if err != nil {
		log.Printf("get backup error: %v", err)
		writeError(w, errors.ErrNotFound("backup").WithDetails(backupID))
		return
	}

	writeJSON(w, http.StatusOK, metadata)
}

func (a *App) handleRestoreBackup(w http.ResponseWriter, r *http.Request) {
	// Extract backup ID from path
	path := r.URL.Path
	backupID := ""

	// Handle both /api/v1/backups/{id} and /api/v1/backups/{id}/restore
	if strings.Contains(path, "/restore") {
		// Path: /api/v1/backups/{id}/restore
		prefix := "/api/v1/backups/"
		suffix := "/restore"
		if strings.HasPrefix(path, prefix) && strings.HasSuffix(path, suffix) {
			backupID = path[len(prefix) : len(path)-len(suffix)]
		}
	} else {
		// Path: /api/v1/backups/{id}
		backupID = pathParam(r, "/api/v1/backups/")
	}

	if backupID == "" {
		writeError(w, errors.ErrBadRequest("backup id required"))
		return
	}

	var req struct {
		DryRun bool `json:"dry_run,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		// Dry run is optional, default to false
		req.DryRun = false
	}

	// Verify backup first
	if err := a.backupService.VerifyBackup(backupID); err != nil {
		log.Printf("verify backup error: %v", err)
		writeError(w, errors.ErrBadRequest("backup verification failed").WithDetails(err.Error()))
		return
	}

	// Restore backup
	backupData, err := a.backupService.RestoreBackup(backupID)
	if err != nil {
		log.Printf("restore backup error: %v", err)
		writeError(w, errors.ErrInternal("failed to restore backup").WithDetails(err.Error()))
		return
	}

	if req.DryRun {
		// Return what would be restored without actually restoring
		writeJSON(w, http.StatusOK, map[string]interface{}{
			"message":     "dry run - no changes made",
			"backup_id":   backupID,
			"backup_data": backupData,
		})
		return
	}

	// Restore users (skip default admin user - user-1)
	// Note: passwords cannot be restored because only hashes exist and they
	// are not stored in the user model; users must reset their passwords
	// after a restore
	for _, user := range backupData.Users {
		// Skip default admin user
		if user.ID == "user-1" {
			log.Printf("skipping default admin user")
			continue
		}

		// Check if user already exists
		if _, err := a.userStore.GetByID(user.ID); err == nil {
			log.Printf("user %s already exists, skipping", user.ID)
			continue
		}

		// Create the user with a temporary placeholder password
		// NOTE: this placeholder is predictable, not random, so the account
		// must have its password reset immediately after restore
		tempPassword := fmt.Sprintf("restore-%s", user.ID)
		if _, err := a.userStore.Create(user.Username, user.Email, tempPassword, user.Role); err != nil {
			log.Printf("restore user error: %v", err)
			// Continue with other users
		} else {
			log.Printf("restored user %s - password reset required", user.Username)
		}
	}

	// Restore SMB shares
	for _, share := range backupData.SMBShares {
		// Check if share already exists
		if _, err := a.smbStore.Get(share.ID); err == nil {
			log.Printf("SMB share %s already exists, skipping", share.ID)
			continue
		}

		// Create share
		if _, err := a.smbStore.Create(share.Name, share.Path, share.Dataset, share.Description, share.ReadOnly, share.GuestOK, share.ValidUsers); err != nil {
			log.Printf("restore SMB share error: %v", err)
			// Continue with other shares
		}
	}

	// Restore NFS exports
	for _, export := range backupData.NFSExports {
		// Check if export already exists
		if _, err := a.nfsStore.Get(export.ID); err == nil {
			log.Printf("NFS export %s already exists, skipping", export.ID)
			continue
		}

		// Create export
		if _, err := a.nfsStore.Create(export.Path, export.Dataset, export.Clients, export.ReadOnly, export.RootSquash); err != nil {
			log.Printf("restore NFS export error: %v", err)
			// Continue with other exports
		}
	}

	// Restore iSCSI targets
	for _, target := range backupData.ISCSITargets {
		// Check if target already exists
		if _, err := a.iscsiStore.Get(target.ID); err == nil {
			log.Printf("iSCSI target %s already exists, skipping", target.ID)
			continue
		}

		// Create target
		if _, err := a.iscsiStore.Create(target.IQN, target.Initiators); err != nil {
			log.Printf("restore iSCSI target error: %v", err)
			// Continue with other targets
		}

		// Restore LUNs
		for _, lun := range target.LUNs {
			if _, err := a.iscsiStore.AddLUN(target.ID, lun.ZVOL, lun.Size); err != nil {
				log.Printf("restore iSCSI LUN error: %v", err)
				// Continue with other LUNs
			}
		}
	}

	// Restore snapshot policies
	for _, policy := range backupData.Policies {
		// Check if policy already exists
		if existing, _ := a.snapshotPolicy.Get(policy.Dataset); existing != nil {
			log.Printf("snapshot policy for dataset %s already exists, skipping", policy.Dataset)
			continue
		}

		// Copy the loop variable before taking its address; &policy would
		// otherwise alias the same variable on every iteration (pre-Go 1.22
		// loop semantics), storing the last policy under every key
		p := policy

		// Set policy (uses Dataset as key)
		a.snapshotPolicy.Set(&p)
	}

	// Apply service configurations
	shares := a.smbStore.List()
	if err := a.smbService.ApplyConfiguration(shares); err != nil {
		log.Printf("apply SMB configuration after restore error: %v", err)
	}

	exports := a.nfsStore.List()
	if err := a.nfsService.ApplyConfiguration(exports); err != nil {
		log.Printf("apply NFS configuration after restore error: %v", err)
	}

	targets := a.iscsiStore.List()
	for _, target := range targets {
		if err := a.iscsiService.ApplyConfiguration([]models.ISCSITarget{target}); err != nil {
			log.Printf("apply iSCSI configuration after restore error: %v", err)
		}
	}

	writeJSON(w, http.StatusOK, map[string]interface{}{
		"message":   "backup restored successfully",
		"backup_id": backupID,
	})
}

func (a *App) handleDeleteBackup(w http.ResponseWriter, r *http.Request) {
	backupID := pathParam(r, "/api/v1/backups/")
	if backupID == "" {
		writeError(w, errors.ErrBadRequest("backup id required"))
		return
	}

	if err := a.backupService.DeleteBackup(backupID); err != nil {
		log.Printf("delete backup error: %v", err)
		writeError(w, errors.ErrInternal("failed to delete backup").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{
		"message":   "backup deleted",
		"backup_id": backupID,
	})
}

func (a *App) handleVerifyBackup(w http.ResponseWriter, r *http.Request) {
	backupID := pathParam(r, "/api/v1/backups/")
	if backupID == "" {
		writeError(w, errors.ErrBadRequest("backup id required"))
		return
	}

	if err := a.backupService.VerifyBackup(backupID); err != nil {
		writeError(w, errors.ErrBadRequest("backup verification failed").WithDetails(err.Error()))
		return
	}

	metadata, err := a.backupService.GetBackup(backupID)
	if err != nil {
		writeError(w, errors.ErrNotFound("backup").WithDetails(backupID))
		return
	}

	writeJSON(w, http.StatusOK, map[string]interface{}{
		"message":   "backup is valid",
		"backup_id": backupID,
		"metadata":  metadata,
	})
}
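Driving these endpoints from a client is a two-step affair: create, then restore with dry_run to preview. A sketch (base URL and backup ID are illustrative, and authentication is omitted):

	base := "http://localhost:8080/api/v1/backups"

	// POST /api/v1/backups - create a backup
	resp, err := http.Post(base, "application/json",
		strings.NewReader(`{"description":"nightly"}`))
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// POST /api/v1/backups/{id}/restore - preview without applying changes
	resp, err = http.Post(base+"/backup-1700000000/restore", "application/json",
		strings.NewReader(`{"dry_run":true}`))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()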
289
internal/httpapp/diagnostics_handlers.go
Normal file
@@ -0,0 +1,289 @@
package httpapp

import (
	"fmt"
	"net/http"
	"runtime"
	"time"
)

// SystemInfo represents system diagnostic information
type SystemInfo struct {
	Version      string                 `json:"version"`
	Uptime       string                 `json:"uptime"`
	GoVersion    string                 `json:"go_version"`
	NumGoroutine int                    `json:"num_goroutines"`
	Memory       MemoryInfo             `json:"memory"`
	Services     map[string]ServiceInfo `json:"services"`
	Database     DatabaseInfo           `json:"database,omitempty"`
}

// MemoryInfo represents memory statistics
type MemoryInfo struct {
	Alloc      uint64 `json:"alloc"`       // bytes currently allocated
	TotalAlloc uint64 `json:"total_alloc"` // bytes allocated (cumulative)
	Sys        uint64 `json:"sys"`         // bytes obtained from the system
	NumGC      uint32 `json:"num_gc"`      // number of GC cycles
}

// ServiceInfo represents service status
type ServiceInfo struct {
	Status    string `json:"status"`     // "running", "stopped", "error"
	LastCheck string `json:"last_check"` // timestamp
	Message   string `json:"message,omitempty"`
}

// DatabaseInfo represents database connection info
type DatabaseInfo struct {
	Connected bool   `json:"connected"`
	Path      string `json:"path,omitempty"`
}

// handleSystemInfo returns system diagnostic information
func (a *App) handleSystemInfo(w http.ResponseWriter, r *http.Request) {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	uptime := time.Since(a.startTime)

	info := SystemInfo{
		Version:      "v0.1.0-dev",
		Uptime:       fmt.Sprintf("%.0f seconds", uptime.Seconds()),
		GoVersion:    runtime.Version(),
		NumGoroutine: runtime.NumGoroutine(),
		Memory: MemoryInfo{
			Alloc:      m.Alloc,
			TotalAlloc: m.TotalAlloc,
			Sys:        m.Sys,
			NumGC:      m.NumGC,
		},
		Services: make(map[string]ServiceInfo),
	}

	// Check service statuses
	smbStatus, smbErr := a.smbService.GetStatus()
	if smbErr == nil {
		status := "stopped"
		if smbStatus {
			status = "running"
		}
		info.Services["smb"] = ServiceInfo{
			Status:    status,
			LastCheck: time.Now().Format(time.RFC3339),
		}
	} else {
		info.Services["smb"] = ServiceInfo{
			Status:    "error",
			LastCheck: time.Now().Format(time.RFC3339),
			Message:   smbErr.Error(),
		}
	}

	nfsStatus, nfsErr := a.nfsService.GetStatus()
	if nfsErr == nil {
		status := "stopped"
		if nfsStatus {
			status = "running"
		}
		info.Services["nfs"] = ServiceInfo{
			Status:    status,
			LastCheck: time.Now().Format(time.RFC3339),
		}
	} else {
		info.Services["nfs"] = ServiceInfo{
			Status:    "error",
			LastCheck: time.Now().Format(time.RFC3339),
			Message:   nfsErr.Error(),
		}
	}

	iscsiStatus, iscsiErr := a.iscsiService.GetStatus()
	if iscsiErr == nil {
		status := "stopped"
		if iscsiStatus {
			status = "running"
		}
		info.Services["iscsi"] = ServiceInfo{
			Status:    status,
			LastCheck: time.Now().Format(time.RFC3339),
		}
	} else {
		info.Services["iscsi"] = ServiceInfo{
			Status:    "error",
			LastCheck: time.Now().Format(time.RFC3339),
			Message:   iscsiErr.Error(),
		}
	}

	// Database info
	if a.database != nil {
		info.Database = DatabaseInfo{
			Connected: true,
			Path:      a.cfg.DatabasePath,
		}
	}

	writeJSON(w, http.StatusOK, info)
}

// handleHealthCheck provides detailed health check information
func (a *App) handleHealthCheck(w http.ResponseWriter, r *http.Request) {
	type HealthStatus struct {
		Status    string            `json:"status"` // "healthy", "degraded", "unhealthy"
		Timestamp string            `json:"timestamp"`
		Checks    map[string]string `json:"checks"`
	}

	health := HealthStatus{
		Status:    "healthy",
		Timestamp: time.Now().Format(time.RFC3339),
		Checks:    make(map[string]string),
	}

	// Check ZFS service
	if a.zfs != nil {
		_, err := a.zfs.ListPools()
		if err != nil {
			health.Checks["zfs"] = "unhealthy: " + err.Error()
			health.Status = "degraded"
		} else {
			health.Checks["zfs"] = "healthy"
		}
	} else {
		health.Checks["zfs"] = "unhealthy: service not initialized"
		health.Status = "unhealthy"
	}

	// Check database
	if a.database != nil {
		// Try a simple query to check database health
		if err := a.database.DB.Ping(); err != nil {
			health.Checks["database"] = "unhealthy: " + err.Error()
			health.Status = "degraded"
		} else {
			health.Checks["database"] = "healthy"
		}
	} else {
		health.Checks["database"] = "not configured"
	}

	// Check services
	smbStatus, smbErr := a.smbService.GetStatus()
	if smbErr != nil {
		health.Checks["smb"] = "unhealthy: " + smbErr.Error()
		health.Status = "degraded"
	} else if !smbStatus {
		health.Checks["smb"] = "stopped"
	} else {
		health.Checks["smb"] = "healthy"
	}

	nfsStatus, nfsErr := a.nfsService.GetStatus()
	if nfsErr != nil {
		health.Checks["nfs"] = "unhealthy: " + nfsErr.Error()
		health.Status = "degraded"
	} else if !nfsStatus {
		health.Checks["nfs"] = "stopped"
	} else {
		health.Checks["nfs"] = "healthy"
	}

	iscsiStatus, iscsiErr := a.iscsiService.GetStatus()
	if iscsiErr != nil {
		health.Checks["iscsi"] = "unhealthy: " + iscsiErr.Error()
		health.Status = "degraded"
	} else if !iscsiStatus {
		health.Checks["iscsi"] = "stopped"
	} else {
		health.Checks["iscsi"] = "healthy"
	}

	// Set HTTP status based on health
	statusCode := http.StatusOK
	if health.Status == "unhealthy" {
		statusCode = http.StatusServiceUnavailable
	} else if health.Status == "degraded" {
		statusCode = http.StatusOK // Still OK, but with warnings
	}

	// writeJSON writes the status code itself; calling w.WriteHeader here as
	// well would trigger a "superfluous WriteHeader" warning and prevent the
	// Content-Type header from being set
	writeJSON(w, statusCode, health)
}

// handleLogs returns recent log entries (if available)
func (a *App) handleLogs(w http.ResponseWriter, r *http.Request) {
	// For now, return audit logs as system logs
	// In a full implementation, this would return application logs
	limit := 100
	if limitStr := r.URL.Query().Get("limit"); limitStr != "" {
		fmt.Sscanf(limitStr, "%d", &limit)
		if limit > 1000 {
			limit = 1000
		}
		if limit < 1 {
			limit = 1
		}
	}

	// Get recent audit logs
	logs := a.auditStore.List("", "", "", limit)

	type LogEntry struct {
		Timestamp string `json:"timestamp"`
		Level     string `json:"level"`
		Actor     string `json:"actor"`
		Action    string `json:"action"`
		Resource  string `json:"resource"`
		Result    string `json:"result"`
		Message   string `json:"message,omitempty"`
		IP        string `json:"ip,omitempty"`
	}

	entries := make([]LogEntry, 0, len(logs))
	for _, entry := range logs {
		level := "INFO"
		if entry.Result == "failure" {
			level = "ERROR"
		}

		entries = append(entries, LogEntry{
			Timestamp: entry.Timestamp.Format(time.RFC3339),
			Level:     level,
			Actor:     entry.Actor,
			Action:    entry.Action,
			Resource:  entry.Resource,
			Result:    entry.Result,
			Message:   entry.Message,
			IP:        entry.IP,
		})
	}

	writeJSON(w, http.StatusOK, map[string]interface{}{
		"logs":  entries,
		"count": len(entries),
	})
}

// handleGC triggers a garbage collection and returns stats
func (a *App) handleGC(w http.ResponseWriter, r *http.Request) {
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	runtime.GC()
	runtime.ReadMemStats(&after)

	writeJSON(w, http.StatusOK, map[string]interface{}{
		"before": map[string]interface{}{
			"alloc":       before.Alloc,
			"total_alloc": before.TotalAlloc,
			"sys":         before.Sys,
			"num_gc":      before.NumGC,
		},
		"after": map[string]interface{}{
			"alloc":       after.Alloc,
			"total_alloc": after.TotalAlloc,
			"sys":         after.Sys,
			"num_gc":      after.NumGC,
		},
		"freed": before.Alloc - after.Alloc,
	})
}
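Because handleHealthCheck maps "unhealthy" to 503 and everything else to 200, an external probe only needs the status code. A sketch (the route path is assumed):

	resp, err := http.Get("http://localhost:8080/api/v1/health") // path assumed
	if err != nil {
		log.Fatalf("api unreachable: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusServiceUnavailable {
		log.Println("api reports unhealthy")
	}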
64
internal/httpapp/docs_handlers.go
Normal file
@@ -0,0 +1,64 @@
package httpapp

import (
	"net/http"
	"os"
	"path/filepath"
)

// handleAPIDocs serves the API documentation page
func (a *App) handleAPIDocs(w http.ResponseWriter, r *http.Request) {
	// Simple HTML page with Swagger UI
	html := `<!DOCTYPE html>
<html>
<head>
    <title>atlasOS API Documentation</title>
    <link rel="stylesheet" type="text/css" href="https://unpkg.com/swagger-ui-dist@5.10.3/swagger-ui.css" />
    <style>
        html { box-sizing: border-box; overflow: -moz-scrollbars-vertical; overflow-y: scroll; }
        *, *:before, *:after { box-sizing: inherit; }
        body { margin: 0; background: #fafafa; }
    </style>
</head>
<body>
    <div id="swagger-ui"></div>
    <script src="https://unpkg.com/swagger-ui-dist@5.10.3/swagger-ui-bundle.js"></script>
    <script src="https://unpkg.com/swagger-ui-dist@5.10.3/swagger-ui-standalone-preset.js"></script>
    <script>
        window.onload = function() {
            const ui = SwaggerUIBundle({
                url: "/api/openapi.yaml",
                dom_id: '#swagger-ui',
                deepLinking: true,
                presets: [
                    SwaggerUIBundle.presets.apis,
                    SwaggerUIStandalonePreset
                ],
                plugins: [
                    SwaggerUIBundle.plugins.DownloadUrl
                ],
                layout: "StandaloneLayout"
            });
        };
    </script>
</body>
</html>`
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	w.Write([]byte(html))
}

// handleOpenAPISpec serves the OpenAPI specification
func (a *App) handleOpenAPISpec(w http.ResponseWriter, r *http.Request) {
	// Read the OpenAPI spec from the file system
	specPath := filepath.Join("docs", "openapi.yaml")
	spec, err := os.ReadFile(specPath)
	if err != nil {
		http.Error(w, "OpenAPI spec not found", http.StatusNotFound)
		return
	}

	w.Header().Set("Content-Type", "application/yaml; charset=utf-8")
	w.WriteHeader(http.StatusOK)
	w.Write(spec)
}
59
internal/httpapp/error_handlers.go
Normal file
@@ -0,0 +1,59 @@
package httpapp

import (
	"log"
	"net/http"

	"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
)

// writeError writes a structured error response
func writeError(w http.ResponseWriter, err error) {
	// Check if it's an APIError
	if apiErr, ok := err.(*errors.APIError); ok {
		writeJSON(w, apiErr.HTTPStatus, apiErr)
		return
	}

	// Default to internal server error
	log.Printf("unhandled error: %v", err)
	apiErr := errors.ErrInternal("an unexpected error occurred")
	writeJSON(w, apiErr.HTTPStatus, apiErr)
}

// handleServiceError handles errors from service operations with graceful degradation
func (a *App) handleServiceError(serviceName string, err error) error {
	if err == nil {
		return nil
	}

	// Log the error for debugging
	log.Printf("%s service error: %v", serviceName, err)

	// For service errors we may want to continue operation but log the
	// issue: the API request can still succeed even if service
	// configuration fails, because the desired state is stored
	return errors.ErrServiceUnavailable(serviceName).WithDetails(err.Error())
}

// recoverPanic recovers from panics and returns a proper error response
func recoverPanic(w http.ResponseWriter, r *http.Request) {
	if rec := recover(); rec != nil {
		log.Printf("panic recovered: %v", rec)
		err := errors.ErrInternal("an unexpected error occurred")
		writeError(w, err)
	}
}

// errorMiddleware wraps handlers with panic recovery
func (a *App) errorMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer recoverPanic(w, r)
		next.ServeHTTP(w, r)
	})
}

// writeJSONError is a convenience function for JSON error responses
func writeJSONError(w http.ResponseWriter, code int, message string) {
	writeJSON(w, code, map[string]string{"error": message})
}
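With writeError in place, a handler just passes a typed error and the status code and JSON shape follow from it. A sketch (handleGetPool and a.zfs.GetPool are illustrative, not part of this diff):

	func (a *App) handleGetPool(w http.ResponseWriter, r *http.Request) {
		name := pathParam(r, "/api/v1/pools/")
		pool, err := a.zfs.GetPool(name) // hypothetical lookup
		if err != nil {
			// Serializes as {"code":"NOT_FOUND","message":"pool not found","details":"..."} with HTTP 404.
			writeError(w, errors.ErrNotFound("pool").WithDetails(name))
			return
		}
		writeJSON(w, http.StatusOK, pool)
	}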
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"log"
 	"net/http"
+	"time"
 )
 
 func (a *App) handleDashboard(w http.ResponseWriter, r *http.Request) {
@@ -26,18 +27,58 @@ func (a *App) handleHealthz(w http.ResponseWriter, r *http.Request) {
 }
 
 func (a *App) handleMetrics(w http.ResponseWriter, r *http.Request) {
-	// Stub metrics (Prometheus format). We'll wire real collectors later.
+	// Collect real-time metrics
+	// ZFS metrics
+	pools, _ := a.zfs.ListPools()
+	datasets, _ := a.zfs.ListDatasets("")
+	zvols, _ := a.zfs.ListZVOLs("")
+	snapshots, _ := a.zfs.ListSnapshots("")
+
+	a.metricsCollector.UpdateZFSMetrics(pools, datasets, zvols, snapshots)
+
+	// Service metrics
+	smbShares := a.smbStore.List()
+	nfsExports := a.nfsStore.List()
+	iscsiTargets := a.iscsiStore.List()
+
+	smbStatus, _ := a.smbService.GetStatus()
+	nfsStatus, _ := a.nfsService.GetStatus()
+	iscsiStatus, _ := a.iscsiService.GetStatus()
+
+	a.metricsCollector.UpdateServiceMetrics(
+		len(smbShares),
+		len(nfsExports),
+		len(iscsiTargets),
+		smbStatus,
+		nfsStatus,
+		iscsiStatus,
+	)
+
+	// Job metrics
+	allJobs := a.jobManager.List("")
+	running := 0
+	completed := 0
+	failed := 0
+	for _, job := range allJobs {
+		switch job.Status {
+		case "running":
+			running++
+		case "completed":
+			completed++
+		case "failed":
+			failed++
+		}
+	}
+
+	a.metricsCollector.UpdateJobMetrics(len(allJobs), running, completed, failed)
+
+	// Update uptime
+	a.metricsCollector.SetUptime(int64(time.Since(a.startTime).Seconds()))
+
+	// Output Prometheus format
 	w.Header().Set("Content-Type", "text/plain; version=0.0.4")
 	w.WriteHeader(http.StatusOK)
-	_, _ = w.Write([]byte(
-		`# HELP atlas_build_info Build info
-# TYPE atlas_build_info gauge
-atlas_build_info{version="v0.1.0-dev"} 1
-# HELP atlas_up Whether the atlas-api process is up
-# TYPE atlas_up gauge
-atlas_up 1
-`,
-	))
+	_, _ = w.Write([]byte(a.metricsCollector.Collect()))
 }
 
 func (a *App) render(w http.ResponseWriter, name string, data any) {
@@ -28,13 +28,36 @@ func requestID(next http.Handler) http.Handler {
func logging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next.ServeHTTP(w, r)

		// Create response writer wrapper to capture status code
		rw := &responseWriterWrapper{
			ResponseWriter: w,
			statusCode:     http.StatusOK,
		}

		next.ServeHTTP(rw, r)

		d := time.Since(start)
		id, _ := r.Context().Value(requestIDKey).(string)
		log.Printf("%s %s %s rid=%s dur=%s", r.RemoteAddr, r.Method, r.URL.Path, id, d)

		// Use structured logging if available, otherwise fall back to the standard log
		log.Printf("%s %s %s status=%d rid=%s dur=%s",
			r.RemoteAddr, r.Method, r.URL.Path, rw.statusCode, id, d)
	})
}

// responseWriterWrapper wraps http.ResponseWriter to capture the status code.
// Note: this is distinct from the wrapper in audit_middleware.go to avoid conflicts.
type responseWriterWrapper struct {
	http.ResponseWriter
	statusCode int
}

func (rw *responseWriterWrapper) WriteHeader(code int) {
	rw.statusCode = code
	rw.ResponseWriter.WriteHeader(code)
}
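The wrapper's behavior is easy to verify in isolation. A minimal, self-contained demonstration (using a hypothetical statusWriter with the same shape, not the repository's type) follows:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// statusWriter records the status code passed to WriteHeader, just like
// responseWriterWrapper above.
type statusWriter struct {
	http.ResponseWriter
	status int
}

func (w *statusWriter) WriteHeader(code int) {
	w.status = code
	w.ResponseWriter.WriteHeader(code)
}

func main() {
	rec := httptest.NewRecorder()
	sw := &statusWriter{ResponseWriter: rec, status: http.StatusOK}

	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNotFound)
	})
	h.ServeHTTP(sw, httptest.NewRequest("GET", "/missing", nil))

	fmt.Println(sw.status) // 404
}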

func newReqID() string {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
165
internal/httpapp/rate_limit.go
Normal file
@@ -0,0 +1,165 @@
package httpapp

import (
	"net/http"
	"sync"
	"time"

	"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
)

// RateLimiter implements token bucket rate limiting
type RateLimiter struct {
	mu          sync.RWMutex
	clients     map[string]*clientLimiter
	rate        int           // requests per window
	window      time.Duration // time window
	cleanupTick *time.Ticker
	stopCleanup chan struct{}
}

type clientLimiter struct {
	tokens     int
	lastUpdate time.Time
	mu         sync.Mutex
}

// NewRateLimiter creates a new rate limiter
func NewRateLimiter(rate int, window time.Duration) *RateLimiter {
	rl := &RateLimiter{
		clients:     make(map[string]*clientLimiter),
		rate:        rate,
		window:      window,
		cleanupTick: time.NewTicker(5 * time.Minute),
		stopCleanup: make(chan struct{}),
	}

	// Start cleanup goroutine
	go rl.cleanup()

	return rl
}

// cleanup periodically removes old client limiters
func (rl *RateLimiter) cleanup() {
	for {
		select {
		case <-rl.cleanupTick.C:
			rl.mu.Lock()
			now := time.Now()
			for key, limiter := range rl.clients {
				limiter.mu.Lock()
				// Remove if last update was more than 2 windows ago
				if now.Sub(limiter.lastUpdate) > rl.window*2 {
					delete(rl.clients, key)
				}
				limiter.mu.Unlock()
			}
			rl.mu.Unlock()
		case <-rl.stopCleanup:
			return
		}
	}
}

// Stop stops the cleanup goroutine
func (rl *RateLimiter) Stop() {
	rl.cleanupTick.Stop()
	close(rl.stopCleanup)
}

// Allow checks if a request from the given key should be allowed
func (rl *RateLimiter) Allow(key string) bool {
	rl.mu.Lock()
	limiter, exists := rl.clients[key]
	if !exists {
		limiter = &clientLimiter{
			tokens:     rl.rate,
			lastUpdate: time.Now(),
		}
		rl.clients[key] = limiter
	}
	rl.mu.Unlock()

	limiter.mu.Lock()
	defer limiter.mu.Unlock()

	now := time.Now()
	elapsed := now.Sub(limiter.lastUpdate)

	// Refill tokens based on elapsed time
	if elapsed >= rl.window {
		// Full refill
		limiter.tokens = rl.rate
	} else {
		// Partial refill based on elapsed time
		tokensToAdd := int(float64(rl.rate) * elapsed.Seconds() / rl.window.Seconds())
		if tokensToAdd > 0 {
			limiter.tokens = min(limiter.tokens+tokensToAdd, rl.rate)
		}
	}

	limiter.lastUpdate = now

	// Check if we have tokens
	if limiter.tokens > 0 {
		limiter.tokens--
		return true
	}

	return false
}

// getClientKey extracts a key for rate limiting from the request
func getClientKey(r *http.Request) string {
	// Try to get IP address
	ip := getClientIP(r)

	// If authenticated, use the user ID for more granular limiting
	if user, ok := getUserFromContext(r); ok {
		return "user:" + user.ID
	}

	return "ip:" + ip
}

// rateLimitMiddleware implements rate limiting
func (a *App) rateLimitMiddleware(next http.Handler) http.Handler {
	// Default: 100 requests per minute per client
	rateLimiter := NewRateLimiter(100, time.Minute)

	// Store limiter for cleanup on shutdown
	// Note: cleanup is handled by the limiter's own cleanup goroutine
	_ = rateLimiter

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Skip rate limiting for public endpoints
		if a.isPublicEndpoint(r.URL.Path) {
			next.ServeHTTP(w, r)
			return
		}

		key := getClientKey(r)
		if !rateLimiter.Allow(key) {
			writeError(w, errors.NewAPIError(
				errors.ErrCodeServiceUnavailable,
				"rate limit exceeded",
				http.StatusTooManyRequests,
			).WithDetails("too many requests, please try again later"))
			return
		}

		// Add rate limit headers
		w.Header().Set("X-RateLimit-Limit", "100")
		w.Header().Set("X-RateLimit-Window", "60")

		next.ServeHTTP(w, r)
	})
}

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
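The partial-refill branch in Allow is plain proportional arithmetic. A tiny standalone program (with illustrative numbers, not values taken from any deployment) shows what it computes:

package main

import (
	"fmt"
	"time"
)

func main() {
	// With rate=100 per 60s window, 15s of inactivity restores 25 tokens.
	rate := 100
	window := time.Minute
	elapsed := 15 * time.Second

	tokensToAdd := int(float64(rate) * elapsed.Seconds() / window.Seconds())
	fmt.Println(tokensToAdd) // 25
}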
@@ -3,6 +3,8 @@ package httpapp

import (
	"net/http"
	"strings"

	"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
)

// methodHandler routes requests based on HTTP method
@@ -143,6 +145,35 @@ func (a *App) handleNFSExportOps(w http.ResponseWriter, r *http.Request) {
}

// handleBackupOps routes backup operations by method
func (a *App) handleBackupOps(w http.ResponseWriter, r *http.Request) {
	backupID := pathParam(r, "/api/v1/backups/")
	if backupID == "" {
		writeError(w, errors.ErrBadRequest("backup id required"))
		return
	}

	switch r.Method {
	case http.MethodGet:
		// Check if it's a verify request
		if r.URL.Query().Get("verify") == "true" {
			a.handleVerifyBackup(w, r)
		} else {
			a.handleGetBackup(w, r)
		}
	case http.MethodPost:
		// Restore backup (POST /api/v1/backups/{id}/restore)
		if strings.HasSuffix(r.URL.Path, "/restore") {
			a.handleRestoreBackup(w, r)
		} else {
			writeError(w, errors.ErrBadRequest("invalid backup operation"))
		}
	case http.MethodDelete:
		a.handleDeleteBackup(w, r)
	default:
		writeError(w, errors.ErrBadRequest("method not allowed"))
	}
}
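Taken together with the route registration later in this commit, the dispatcher supports the following operations ({id} is a placeholder):

    GET    /api/v1/backups                    -> list backups
    POST   /api/v1/backups                    -> create a backup
    GET    /api/v1/backups/{id}               -> fetch one backup
    GET    /api/v1/backups/{id}?verify=true   -> verify a backup
    POST   /api/v1/backups/{id}/restore       -> restore a backup
    DELETE /api/v1/backups/{id}               -> delete a backup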

func (a *App) handleISCSITargetOps(w http.ResponseWriter, r *http.Request) {
	if strings.HasSuffix(r.URL.Path, "/luns") {
		if r.Method == http.MethodPost {
@@ -16,8 +16,36 @@ func (a *App) routes() {

	// Health & metrics
	a.mux.HandleFunc("/healthz", a.handleHealthz)
	a.mux.HandleFunc("/health", a.handleHealthCheck) // Detailed health check
	a.mux.HandleFunc("/metrics", a.handleMetrics)

	// Diagnostics
	a.mux.HandleFunc("/api/v1/system/info", methodHandler(
		func(w http.ResponseWriter, r *http.Request) { a.handleSystemInfo(w, r) },
		nil, nil, nil, nil,
	))
	a.mux.HandleFunc("/api/v1/system/logs", methodHandler(
		func(w http.ResponseWriter, r *http.Request) { a.handleLogs(w, r) },
		nil, nil, nil, nil,
	))
	a.mux.HandleFunc("/api/v1/system/gc", methodHandler(
		nil,
		func(w http.ResponseWriter, r *http.Request) { a.handleGC(w, r) },
		nil, nil, nil,
	))

	// API Documentation
	a.mux.HandleFunc("/api/docs", a.handleAPIDocs)
	a.mux.HandleFunc("/api/openapi.yaml", a.handleOpenAPISpec)

	// Backup & Restore
	a.mux.HandleFunc("/api/v1/backups", methodHandler(
		func(w http.ResponseWriter, r *http.Request) { a.handleListBackups(w, r) },
		func(w http.ResponseWriter, r *http.Request) { a.handleCreateBackup(w, r) },
		nil, nil, nil,
	))
	a.mux.HandleFunc("/api/v1/backups/", a.handleBackupOps)

	// Dashboard API
	a.mux.HandleFunc("/api/v1/dashboard", methodHandler(
		func(w http.ResponseWriter, r *http.Request) { a.handleDashboardAPI(w, r) },
121
internal/httpapp/security_middleware.go
Normal file
@@ -0,0 +1,121 @@
package httpapp

import (
	"net/http"
	"strings"

	"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
)

// securityHeadersMiddleware adds security headers to responses
func (a *App) securityHeadersMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Security headers
		w.Header().Set("X-Content-Type-Options", "nosniff")
		w.Header().Set("X-Frame-Options", "DENY")
		w.Header().Set("X-XSS-Protection", "1; mode=block")
		w.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin")
		w.Header().Set("Permissions-Policy", "geolocation=(), microphone=(), camera=()")

		// HSTS (only for HTTPS)
		if r.TLS != nil {
			w.Header().Set("Strict-Transport-Security", "max-age=31536000; includeSubDomains")
		}

		// Content Security Policy (CSP)
		csp := "default-src 'self'; script-src 'self' 'unsafe-inline' https://cdn.jsdelivr.net; style-src 'self' 'unsafe-inline' https://cdn.jsdelivr.net; img-src 'self' data:; font-src 'self' https://cdn.jsdelivr.net; connect-src 'self';"
		w.Header().Set("Content-Security-Policy", csp)

		next.ServeHTTP(w, r)
	})
}

// corsMiddleware handles CORS requests
func (a *App) corsMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		origin := r.Header.Get("Origin")

		// Allow specific origins or all (for development)
		allowedOrigins := []string{
			"http://localhost:8080",
			"http://localhost:3000",
			"http://127.0.0.1:8080",
		}

		// Check if the origin is allowed
		allowed := false
		for _, allowedOrigin := range allowedOrigins {
			if origin == allowedOrigin {
				allowed = true
				break
			}
		}

		// Requests without an Origin header (same-origin or non-browser
		// clients) pass; a request carrying a Referer is treated the same way here.
		if origin == "" || r.Header.Get("Referer") != "" {
			allowed = true
		}

		if allowed && origin != "" {
			w.Header().Set("Access-Control-Allow-Origin", origin)
			w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, PATCH, OPTIONS")
			w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, X-Requested-With")
			w.Header().Set("Access-Control-Allow-Credentials", "true")
			w.Header().Set("Access-Control-Max-Age", "3600")
		}

		// Handle preflight requests
		if r.Method == http.MethodOptions {
			w.WriteHeader(http.StatusNoContent)
			return
		}

		next.ServeHTTP(w, r)
	})
}

// requestSizeMiddleware limits request body size
func (a *App) requestSizeMiddleware(maxSize int64) func(http.Handler) http.Handler {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// Limit request body size
			r.Body = http.MaxBytesReader(w, r.Body, maxSize)
			next.ServeHTTP(w, r)
		})
	}
}

// validateContentTypeMiddleware validates Content-Type for POST/PUT/PATCH requests
func (a *App) validateContentTypeMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Skip for GET, HEAD, OPTIONS, DELETE
		if r.Method == http.MethodGet || r.Method == http.MethodHead ||
			r.Method == http.MethodOptions || r.Method == http.MethodDelete {
			next.ServeHTTP(w, r)
			return
		}

		// Skip for public endpoints
		if a.isPublicEndpoint(r.URL.Path) {
			next.ServeHTTP(w, r)
			return
		}

		// Check Content-Type for POST/PUT/PATCH
		contentType := r.Header.Get("Content-Type")
		if contentType == "" {
			writeError(w, errors.ErrBadRequest("Content-Type header is required"))
			return
		}

		// Allow JSON and form data
		if !strings.HasPrefix(contentType, "application/json") &&
			!strings.HasPrefix(contentType, "application/x-www-form-urlencoded") &&
			!strings.HasPrefix(contentType, "multipart/form-data") {
			writeError(w, errors.ErrBadRequest("Content-Type must be application/json or form data"))
			return
		}

		next.ServeHTTP(w, r)
	})
}
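A plausible way to compose these middlewares is innermost-handler-last chaining. The sketch below is a self-contained illustration with a hypothetical chain helper and a single header middleware, not the App's actual wiring:

package main

import (
	"fmt"
	"log"
	"net/http"
)

// chain wraps h so the first middleware in the list runs outermost.
func chain(h http.Handler, mws ...func(http.Handler) http.Handler) http.Handler {
	for i := len(mws) - 1; i >= 0; i-- {
		h = mws[i](h)
	}
	return h
}

// withHeader mimics the shape of securityHeadersMiddleware above.
func withHeader(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Content-Type-Options", "nosniff")
		next.ServeHTTP(w, r)
	})
}

func main() {
	root := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})
	log.Fatal(http.ListenAndServe(":8080", chain(root, withHeader)))
}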
58
internal/httpapp/validation_helpers.go
Normal file
@@ -0,0 +1,58 @@
package httpapp

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSizeString parses a human-readable size string to bytes
func (a *App) parseSizeString(sizeStr string) (uint64, error) {
	sizeStr = strings.TrimSpace(strings.ToUpper(sizeStr))
	if sizeStr == "" {
		return 0, fmt.Errorf("size cannot be empty")
	}

	// Extract number and unit
	var numStr string
	var unit string

	for i, r := range sizeStr {
		if r >= '0' && r <= '9' {
			numStr += string(r)
		} else {
			unit = sizeStr[i:]
			break
		}
	}

	if numStr == "" {
		return 0, fmt.Errorf("invalid size format: no number found")
	}

	num, err := strconv.ParseUint(numStr, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid size number: %w", err)
	}

	// Convert to bytes based on unit
	multiplier := uint64(1)
	switch unit {
	case "":
		multiplier = 1
	case "K", "KB":
		multiplier = 1024
	case "M", "MB":
		multiplier = 1024 * 1024
	case "G", "GB":
		multiplier = 1024 * 1024 * 1024
	case "T", "TB":
		multiplier = 1024 * 1024 * 1024 * 1024
	case "P", "PB":
		multiplier = 1024 * 1024 * 1024 * 1024 * 1024
	default:
		return 0, fmt.Errorf("invalid size unit: %s (allowed: K, M, G, T, P, optionally with a trailing B)", unit)
	}

	return num * multiplier, nil
}
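As a worked example of the unit table above, a standalone copy of the parsing rule converts "10G" to 10 * 1024^3 bytes. The helper below is illustrative, not the repository's code, and omits the larger units for brevity:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSize mirrors the rule above: leading digits, then an optional unit.
func parseSize(s string) (uint64, error) {
	s = strings.TrimSpace(strings.ToUpper(s))
	i := strings.IndexFunc(s, func(r rune) bool { return r < '0' || r > '9' })
	numStr, unit := s, ""
	if i >= 0 {
		numStr, unit = s[:i], s[i:]
	}
	n, err := strconv.ParseUint(numStr, 10, 64)
	if err != nil {
		return 0, err
	}
	// Units T/P omitted for brevity; the real helper supports them too.
	mult := map[string]uint64{"": 1, "K": 1 << 10, "KB": 1 << 10, "M": 1 << 20, "MB": 1 << 20, "G": 1 << 30, "GB": 1 << 30}[unit]
	if mult == 0 {
		return 0, fmt.Errorf("invalid unit %q", unit)
	}
	return n * mult, nil
}

func main() {
	v, _ := parseSize("10G")
	fmt.Println(v) // 10737418240
}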
215
internal/logger/logger.go
Normal file
@@ -0,0 +1,215 @@
package logger

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"sync"
	"time"
)

// Level represents log level
type Level int

const (
	LevelDebug Level = iota
	LevelInfo
	LevelWarn
	LevelError
)

var levelNames = map[Level]string{
	LevelDebug: "DEBUG",
	LevelInfo:  "INFO",
	LevelWarn:  "WARN",
	LevelError: "ERROR",
}

// Logger provides structured logging
type Logger struct {
	mu       sync.Mutex
	level    Level
	output   io.Writer
	jsonMode bool
	prefix   string
	fields   map[string]interface{} // base fields attached via WithFields
}

// LogEntry represents a structured log entry
type LogEntry struct {
	Timestamp string                 `json:"timestamp"`
	Level     string                 `json:"level"`
	Message   string                 `json:"message"`
	Fields    map[string]interface{} `json:"fields,omitempty"`
	Error     string                 `json:"error,omitempty"`
}

// New creates a new logger
func New(level Level, output io.Writer, jsonMode bool) *Logger {
	if output == nil {
		output = os.Stdout
	}
	return &Logger{
		level:    level,
		output:   output,
		jsonMode: jsonMode,
	}
}

// SetLevel sets the log level
func (l *Logger) SetLevel(level Level) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.level = level
}

// SetOutput sets the output writer
func (l *Logger) SetOutput(w io.Writer) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.output = w
}

// Debug logs a debug message
func (l *Logger) Debug(msg string, fields ...map[string]interface{}) {
	l.log(LevelDebug, msg, nil, fields...)
}

// Info logs an info message
func (l *Logger) Info(msg string, fields ...map[string]interface{}) {
	l.log(LevelInfo, msg, nil, fields...)
}

// Warn logs a warning message
func (l *Logger) Warn(msg string, fields ...map[string]interface{}) {
	l.log(LevelWarn, msg, nil, fields...)
}

// Error logs an error message
func (l *Logger) Error(msg string, err error, fields ...map[string]interface{}) {
	l.log(LevelError, msg, err, fields...)
}

// log writes a log entry
func (l *Logger) log(level Level, msg string, err error, fields ...map[string]interface{}) {
	if level < l.level {
		return
	}

	l.mu.Lock()
	defer l.mu.Unlock()

	entry := LogEntry{
		Timestamp: time.Now().Format(time.RFC3339),
		Level:     levelNames[level],
		Message:   msg,
	}

	if err != nil {
		entry.Error = err.Error()
	}

	// Merge base fields and per-call fields
	if len(fields) > 0 || len(l.fields) > 0 {
		entry.Fields = make(map[string]interface{})
		for k, v := range l.fields {
			entry.Fields[k] = v
		}
		for _, f := range fields {
			for k, v := range f {
				entry.Fields[k] = v
			}
		}
	}

	var output string
	if l.jsonMode {
		jsonData, jsonErr := json.Marshal(entry)
		if jsonErr != nil {
			// Fall back to text format if JSON marshalling fails
			output = fmt.Sprintf("%s [%s] %s", entry.Timestamp, entry.Level, msg)
			if err != nil {
				output += fmt.Sprintf(" error=%v", err)
			}
		} else {
			output = string(jsonData)
		}
	} else {
		// Text format
		output = fmt.Sprintf("%s [%s] %s", entry.Timestamp, entry.Level, msg)
		if err != nil {
			output += fmt.Sprintf(" error=%v", err)
		}
		if len(entry.Fields) > 0 {
			for k, v := range entry.Fields {
				output += fmt.Sprintf(" %s=%v", k, v)
			}
		}
	}

	fmt.Fprintln(l.output, output)
}

// WithFields returns a logger that attaches the given fields to every entry
// (the original version dropped its argument; the fields are now carried over)
func (l *Logger) WithFields(fields map[string]interface{}) *Logger {
	merged := make(map[string]interface{}, len(l.fields)+len(fields))
	for k, v := range l.fields {
		merged[k] = v
	}
	for k, v := range fields {
		merged[k] = v
	}
	return &Logger{
		level:    l.level,
		output:   l.output,
		jsonMode: l.jsonMode,
		prefix:   l.prefix,
		fields:   merged,
	}
}

// ParseLevel parses a log level string
func ParseLevel(s string) Level {
	switch s {
	case "DEBUG", "debug":
		return LevelDebug
	case "INFO", "info":
		return LevelInfo
	case "WARN", "warn", "WARNING", "warning":
		return LevelWarn
	case "ERROR", "error":
		return LevelError
	default:
		return LevelInfo
	}
}

// Default logger instance
var defaultLogger *Logger

func init() {
	levelStr := os.Getenv("ATLAS_LOG_LEVEL")
	level := ParseLevel(levelStr)
	jsonMode := os.Getenv("ATLAS_LOG_FORMAT") == "json"

	defaultLogger = New(level, os.Stdout, jsonMode)
}

// Debug logs using the default logger
func Debug(msg string, fields ...map[string]interface{}) {
	defaultLogger.Debug(msg, fields...)
}

// Info logs using the default logger
func Info(msg string, fields ...map[string]interface{}) {
	defaultLogger.Info(msg, fields...)
}

// Warn logs using the default logger
func Warn(msg string, fields ...map[string]interface{}) {
	defaultLogger.Warn(msg, fields...)
}

// Error logs using the default logger
func Error(msg string, err error, fields ...map[string]interface{}) {
	defaultLogger.Error(msg, err, fields...)
}

// SetLevel sets the default logger level
func SetLevel(level Level) {
	defaultLogger.SetLevel(level)
}

// GetLogger returns the default logger
func GetLogger() *Logger {
	return defaultLogger
}
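Assuming the module path resolves inside this repository, typical use of the package might look like the following; setting ATLAS_LOG_FORMAT=json switches the same calls to JSON output:

package main

import "gitea.avt.data-center.id/othman.suseno/atlas/internal/logger"

func main() {
	// Package-level helpers use the environment-configured default logger.
	logger.Info("pool imported", map[string]interface{}{"pool": "tank"})

	// A derived logger attaches base fields to every entry it writes.
	smbLog := logger.GetLogger().WithFields(map[string]interface{}{"service": "smb"})
	smbLog.Warn("reload took longer than expected")
}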
217
internal/metrics/collector.go
Normal file
@@ -0,0 +1,217 @@
package metrics

import (
	"fmt"
	"sync"
	"time"

	"gitea.avt.data-center.id/othman.suseno/atlas/internal/models"
)

// Collector gathers system metrics
type Collector struct {
	mu sync.RWMutex

	// ZFS metrics
	poolCount      int
	datasetCount   int
	zvolCount      int
	snapshotCount  int
	totalCapacity  uint64
	totalAllocated uint64
	totalFree      uint64

	// Service metrics
	smbSharesCount     int
	nfsExportsCount    int
	iscsiTargetsCount  int
	smbServiceStatus   int // 1 = running, 0 = stopped
	nfsServiceStatus   int
	iscsiServiceStatus int

	// Job metrics
	jobsTotal     int
	jobsRunning   int
	jobsCompleted int
	jobsFailed    int

	// System metrics
	uptimeSeconds int64
	lastUpdate    time.Time
}

// NewCollector creates a new metrics collector
func NewCollector() *Collector {
	return &Collector{
		lastUpdate: time.Now(),
	}
}

// UpdateZFSMetrics updates ZFS-related metrics
func (c *Collector) UpdateZFSMetrics(pools []models.Pool, datasets []models.Dataset, zvols []models.ZVOL, snapshots []models.Snapshot) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.poolCount = len(pools)
	c.datasetCount = len(datasets)
	c.zvolCount = len(zvols)
	c.snapshotCount = len(snapshots)

	c.totalCapacity = 0
	c.totalAllocated = 0
	c.totalFree = 0

	for _, pool := range pools {
		c.totalCapacity += pool.Size
		c.totalAllocated += pool.Allocated
		c.totalFree += pool.Free
	}

	c.lastUpdate = time.Now()
}

// UpdateServiceMetrics updates storage service metrics
func (c *Collector) UpdateServiceMetrics(smbShares, nfsExports, iscsiTargets int, smbStatus, nfsStatus, iscsiStatus bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.smbSharesCount = smbShares
	c.nfsExportsCount = nfsExports
	c.iscsiTargetsCount = iscsiTargets

	if smbStatus {
		c.smbServiceStatus = 1
	} else {
		c.smbServiceStatus = 0
	}

	if nfsStatus {
		c.nfsServiceStatus = 1
	} else {
		c.nfsServiceStatus = 0
	}

	if iscsiStatus {
		c.iscsiServiceStatus = 1
	} else {
		c.iscsiServiceStatus = 0
	}

	c.lastUpdate = time.Now()
}

// UpdateJobMetrics updates job-related metrics
func (c *Collector) UpdateJobMetrics(total, running, completed, failed int) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.jobsTotal = total
	c.jobsRunning = running
	c.jobsCompleted = completed
	c.jobsFailed = failed

	c.lastUpdate = time.Now()
}

// SetUptime sets the system uptime
func (c *Collector) SetUptime(seconds int64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.uptimeSeconds = seconds
}

// Collect returns metrics in Prometheus exposition format
func (c *Collector) Collect() string {
	c.mu.RLock()
	defer c.mu.RUnlock()

	var output string

	// Build info
	output += "# HELP atlas_build_info Build information\n"
	output += "# TYPE atlas_build_info gauge\n"
	output += `atlas_build_info{version="v0.1.0-dev"} 1` + "\n\n"

	// System uptime
	output += "# HELP atlas_uptime_seconds System uptime in seconds\n"
	output += "# TYPE atlas_uptime_seconds gauge\n"
	output += fmt.Sprintf("atlas_uptime_seconds %d\n\n", c.uptimeSeconds)

	// ZFS metrics
	output += "# HELP atlas_zfs_pools_total Total number of ZFS pools\n"
	output += "# TYPE atlas_zfs_pools_total gauge\n"
	output += fmt.Sprintf("atlas_zfs_pools_total %d\n\n", c.poolCount)

	output += "# HELP atlas_zfs_datasets_total Total number of ZFS datasets\n"
	output += "# TYPE atlas_zfs_datasets_total gauge\n"
	output += fmt.Sprintf("atlas_zfs_datasets_total %d\n\n", c.datasetCount)

	output += "# HELP atlas_zfs_zvols_total Total number of ZFS ZVOLs\n"
	output += "# TYPE atlas_zfs_zvols_total gauge\n"
	output += fmt.Sprintf("atlas_zfs_zvols_total %d\n\n", c.zvolCount)

	output += "# HELP atlas_zfs_snapshots_total Total number of ZFS snapshots\n"
	output += "# TYPE atlas_zfs_snapshots_total gauge\n"
	output += fmt.Sprintf("atlas_zfs_snapshots_total %d\n\n", c.snapshotCount)

	output += "# HELP atlas_zfs_capacity_bytes Total ZFS pool capacity in bytes\n"
	output += "# TYPE atlas_zfs_capacity_bytes gauge\n"
	output += fmt.Sprintf("atlas_zfs_capacity_bytes %d\n\n", c.totalCapacity)

	output += "# HELP atlas_zfs_allocated_bytes Total ZFS pool allocated space in bytes\n"
	output += "# TYPE atlas_zfs_allocated_bytes gauge\n"
	output += fmt.Sprintf("atlas_zfs_allocated_bytes %d\n\n", c.totalAllocated)

	output += "# HELP atlas_zfs_free_bytes Total ZFS pool free space in bytes\n"
	output += "# TYPE atlas_zfs_free_bytes gauge\n"
	output += fmt.Sprintf("atlas_zfs_free_bytes %d\n\n", c.totalFree)

	// Service metrics
	output += "# HELP atlas_smb_shares_total Total number of SMB shares\n"
	output += "# TYPE atlas_smb_shares_total gauge\n"
	output += fmt.Sprintf("atlas_smb_shares_total %d\n\n", c.smbSharesCount)

	output += "# HELP atlas_nfs_exports_total Total number of NFS exports\n"
	output += "# TYPE atlas_nfs_exports_total gauge\n"
	output += fmt.Sprintf("atlas_nfs_exports_total %d\n\n", c.nfsExportsCount)

	output += "# HELP atlas_iscsi_targets_total Total number of iSCSI targets\n"
	output += "# TYPE atlas_iscsi_targets_total gauge\n"
	output += fmt.Sprintf("atlas_iscsi_targets_total %d\n\n", c.iscsiTargetsCount)

	output += "# HELP atlas_smb_service_status SMB service status (1=running, 0=stopped)\n"
	output += "# TYPE atlas_smb_service_status gauge\n"
	output += fmt.Sprintf("atlas_smb_service_status %d\n\n", c.smbServiceStatus)

	output += "# HELP atlas_nfs_service_status NFS service status (1=running, 0=stopped)\n"
	output += "# TYPE atlas_nfs_service_status gauge\n"
	output += fmt.Sprintf("atlas_nfs_service_status %d\n\n", c.nfsServiceStatus)

	output += "# HELP atlas_iscsi_service_status iSCSI service status (1=running, 0=stopped)\n"
	output += "# TYPE atlas_iscsi_service_status gauge\n"
	output += fmt.Sprintf("atlas_iscsi_service_status %d\n\n", c.iscsiServiceStatus)

	// Job metrics
	output += "# HELP atlas_jobs_total Total number of jobs\n"
	output += "# TYPE atlas_jobs_total gauge\n"
	output += fmt.Sprintf("atlas_jobs_total %d\n\n", c.jobsTotal)

	output += "# HELP atlas_jobs_running Number of running jobs\n"
	output += "# TYPE atlas_jobs_running gauge\n"
	output += fmt.Sprintf("atlas_jobs_running %d\n\n", c.jobsRunning)

	output += "# HELP atlas_jobs_completed_total Total number of completed jobs\n"
	output += "# TYPE atlas_jobs_completed_total counter\n"
	output += fmt.Sprintf("atlas_jobs_completed_total %d\n\n", c.jobsCompleted)

	output += "# HELP atlas_jobs_failed_total Total number of failed jobs\n"
	output += "# TYPE atlas_jobs_failed_total counter\n"
	output += fmt.Sprintf("atlas_jobs_failed_total %d\n\n", c.jobsFailed)

	// API status
	output += "# HELP atlas_up Whether the atlas-api process is up\n"
	output += "# TYPE atlas_up gauge\n"
	output += "atlas_up 1\n"

	return output
}
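A minimal sketch of wiring the collector into a plain HTTP server (the real handler is handleMetrics earlier in this commit; the port and sample values here are illustrative):

package main

import (
	"log"
	"net/http"

	"gitea.avt.data-center.id/othman.suseno/atlas/internal/metrics"
)

func main() {
	c := metrics.NewCollector()
	c.UpdateJobMetrics(4, 1, 2, 1) // total, running, completed, failed
	c.SetUptime(3600)

	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain; version=0.0.4")
		_, _ = w.Write([]byte(c.Collect()))
	})
	log.Fatal(http.ListenAndServe(":9090", nil))
}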
@@ -52,13 +52,16 @@ func (s *NFSService) ApplyConfiguration(exports []models.NFSExport) error {
		return fmt.Errorf("replace exports: %w", err)
	}

	// Reload NFS exports
	if err := s.reloadExports(); err != nil {
	// Reload NFS exports with error recovery
	reloadErr := s.reloadExports()
	if reloadErr != nil {
		// Try to restore backup on failure
		if _, err2 := os.Stat(backupPath); err2 == nil {
			os.Rename(backupPath, s.exportsPath)
			if restoreErr := os.Rename(backupPath, s.exportsPath); restoreErr != nil {
				return fmt.Errorf("reload failed and backup restore failed: reload=%v, restore=%v", reloadErr, restoreErr)
			}
		}
		return fmt.Errorf("reload exports: %w", err)
		return fmt.Errorf("reload exports: %w", reloadErr)
	}

	return nil

@@ -55,13 +55,16 @@ func (s *SMBService) ApplyConfiguration(shares []models.SMBShare) error {
		return fmt.Errorf("replace config: %w", err)
	}

	// Reload Samba service
	if err := s.reloadService(); err != nil {
	// Reload Samba service with retry
	reloadErr := s.reloadService()
	if reloadErr != nil {
		// Try to restore backup on failure
		if _, err2 := os.Stat(backupPath); err2 == nil {
			os.Rename(backupPath, s.configPath)
			if restoreErr := os.Rename(backupPath, s.configPath); restoreErr != nil {
				return fmt.Errorf("reload failed and backup restore failed: reload=%v, restore=%v", reloadErr, restoreErr)
			}
		}
		return fmt.Errorf("reload service: %w", err)
		return fmt.Errorf("reload service: %w", reloadErr)
	}

	return nil
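Both hunks follow the same write-then-restore pattern: back up the live config, write the new one, and put the backup back if the reload fails. Below is a self-contained sketch of that pattern, with hypothetical paths and a simulated reload failure:

package main

import (
	"fmt"
	"os"
)

// applyConfig illustrates the recovery flow above; the function name and
// paths are placeholders, not the repository's API.
func applyConfig(path, backup string, newContents []byte, reload func() error) error {
	if err := os.Rename(path, backup); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("backup config: %w", err)
	}
	if err := os.WriteFile(path, newContents, 0o644); err != nil {
		return fmt.Errorf("write config: %w", err)
	}
	if reloadErr := reload(); reloadErr != nil {
		// Restore the previous config only if the backup actually exists.
		if _, err := os.Stat(backup); err == nil {
			if restoreErr := os.Rename(backup, path); restoreErr != nil {
				return fmt.Errorf("reload failed and restore failed: reload=%v, restore=%v", reloadErr, restoreErr)
			}
		}
		return fmt.Errorf("reload: %w", reloadErr)
	}
	return nil
}

func main() {
	err := applyConfig("/tmp/demo.conf", "/tmp/demo.conf.bak", []byte("x = 1\n"),
		func() error { return fmt.Errorf("simulated reload failure") })
	fmt.Println(err)
}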
278
internal/validation/validator.go
Normal file
@@ -0,0 +1,278 @@
package validation

import (
	"fmt"
	"regexp"
	"strings"
	"unicode"
)

var (
	// Valid pool/dataset name pattern (ZFS naming rules)
	zfsNamePattern = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_\-\.:]*$`)

	// Valid username pattern
	usernamePattern = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_\-\.]{2,31}$`)

	// Valid share name pattern (SMB naming rules)
	shareNamePattern = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_\-\.]{0,79}$`)

	// IQN pattern (simplified - iqn.yyyy-mm.reversed.domain:identifier)
	iqnPattern = regexp.MustCompile(`^iqn\.\d{4}-\d{2}\.[a-zA-Z0-9][a-zA-Z0-9\-\.]*:[a-zA-Z0-9][a-zA-Z0-9\-_\.]*$`)

	// Email pattern (basic)
	emailPattern = regexp.MustCompile(`^[a-zA-Z0-9._%+\-]+@[a-zA-Z0-9.\-]+\.[a-zA-Z]{2,}$`)

	// CIDR pattern for NFS clients
	cidrPattern = regexp.MustCompile(`^(\d{1,3}\.){3}\d{1,3}(/\d{1,2})?$`)
)

// ValidationError represents a validation error
type ValidationError struct {
	Field   string
	Message string
}

func (e *ValidationError) Error() string {
	if e.Field != "" {
		return fmt.Sprintf("validation error on field '%s': %s", e.Field, e.Message)
	}
	return fmt.Sprintf("validation error: %s", e.Message)
}

// ValidateZFSName validates a ZFS pool or dataset name
func ValidateZFSName(name string) error {
	if name == "" {
		return &ValidationError{Field: "name", Message: "name cannot be empty"}
	}

	if len(name) > 256 {
		return &ValidationError{Field: "name", Message: "name too long (max 256 characters)"}
	}

	if !zfsNamePattern.MatchString(name) {
		return &ValidationError{Field: "name", Message: "invalid characters (allowed: a-z, A-Z, 0-9, _, -, ., :)"}
	}

	// ZFS names cannot start with certain characters
	if strings.HasPrefix(name, "-") || strings.HasPrefix(name, ".") {
		return &ValidationError{Field: "name", Message: "name cannot start with '-' or '.'"}
	}

	return nil
}

// ValidateUsername validates a username
func ValidateUsername(username string) error {
	if username == "" {
		return &ValidationError{Field: "username", Message: "username cannot be empty"}
	}

	if len(username) < 3 {
		return &ValidationError{Field: "username", Message: "username too short (min 3 characters)"}
	}

	if len(username) > 32 {
		return &ValidationError{Field: "username", Message: "username too long (max 32 characters)"}
	}

	if !usernamePattern.MatchString(username) {
		return &ValidationError{Field: "username", Message: "invalid characters (allowed: a-z, A-Z, 0-9, _, -, .)"}
	}

	return nil
}

// ValidatePassword validates a password
func ValidatePassword(password string) error {
	if password == "" {
		return &ValidationError{Field: "password", Message: "password cannot be empty"}
	}

	if len(password) < 8 {
		return &ValidationError{Field: "password", Message: "password too short (min 8 characters)"}
	}

	if len(password) > 128 {
		return &ValidationError{Field: "password", Message: "password too long (max 128 characters)"}
	}

	// Check for at least one letter and one number
	hasLetter := false
	hasNumber := false

	for _, r := range password {
		if unicode.IsLetter(r) {
			hasLetter = true
		}
		if unicode.IsNumber(r) {
			hasNumber = true
		}
	}

	if !hasLetter {
		return &ValidationError{Field: "password", Message: "password must contain at least one letter"}
	}

	if !hasNumber {
		return &ValidationError{Field: "password", Message: "password must contain at least one number"}
	}

	return nil
}

// ValidateEmail validates an email address
func ValidateEmail(email string) error {
	if email == "" {
		return nil // Email is optional
	}

	if len(email) > 254 {
		return &ValidationError{Field: "email", Message: "email too long (max 254 characters)"}
	}

	if !emailPattern.MatchString(email) {
		return &ValidationError{Field: "email", Message: "invalid email format"}
	}

	return nil
}

// ValidateShareName validates an SMB share name
func ValidateShareName(name string) error {
	if name == "" {
		return &ValidationError{Field: "name", Message: "share name cannot be empty"}
	}

	if len(name) > 80 {
		return &ValidationError{Field: "name", Message: "share name too long (max 80 characters)"}
	}

	if !shareNamePattern.MatchString(name) {
		return &ValidationError{Field: "name", Message: "invalid share name (allowed: a-z, A-Z, 0-9, _, -, .)"}
	}

	// Reserved names
	reserved := []string{"CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"}
	upperName := strings.ToUpper(name)
	for _, r := range reserved {
		if upperName == r {
			return &ValidationError{Field: "name", Message: fmt.Sprintf("share name '%s' is reserved", name)}
		}
	}

	return nil
}

// ValidateIQN validates an iSCSI Qualified Name
func ValidateIQN(iqn string) error {
	if iqn == "" {
		return &ValidationError{Field: "iqn", Message: "IQN cannot be empty"}
	}

	if len(iqn) > 223 {
		return &ValidationError{Field: "iqn", Message: "IQN too long (max 223 characters)"}
	}

	if !strings.HasPrefix(iqn, "iqn.") {
		return &ValidationError{Field: "iqn", Message: "IQN must start with 'iqn.'"}
	}

	// Basic format validation (can be made stricter)
	if !iqnPattern.MatchString(iqn) {
		return &ValidationError{Field: "iqn", Message: "invalid IQN format (expected: iqn.yyyy-mm.reversed.domain:identifier)"}
	}

	return nil
}

// ValidateSize validates a size string (e.g., "10G", "1T")
func ValidateSize(sizeStr string) error {
	if sizeStr == "" {
		return &ValidationError{Field: "size", Message: "size cannot be empty"}
	}

	// Pattern: number followed by an optional unit (K, M, G, T, P);
	// the original character class omitted P despite the message allowing it
	sizePattern := regexp.MustCompile(`^(\d+)([KMGTP]?)$`)
	if !sizePattern.MatchString(strings.ToUpper(sizeStr)) {
		return &ValidationError{Field: "size", Message: "invalid size format (expected: number with optional unit K, M, G, T, P)"}
	}

	return nil
}

// ValidatePath validates a filesystem path
func ValidatePath(path string) error {
	if path == "" {
		return nil // Path is optional (can be auto-filled)
	}

	if !strings.HasPrefix(path, "/") {
		return &ValidationError{Field: "path", Message: "path must be absolute (start with /)"}
	}

	if len(path) > 4096 {
		return &ValidationError{Field: "path", Message: "path too long (max 4096 characters)"}
	}

	// Check for dangerous path components
	dangerous := []string{"..", "//", "\x00"}
	for _, d := range dangerous {
		if strings.Contains(path, d) {
			return &ValidationError{Field: "path", Message: fmt.Sprintf("path contains invalid component: %s", d)}
		}
	}

	return nil
}

// ValidateCIDR validates a CIDR notation or hostname
func ValidateCIDR(cidr string) error {
	if cidr == "" {
		return &ValidationError{Field: "client", Message: "client cannot be empty"}
	}

	// Allow wildcard
	if cidr == "*" {
		return nil
	}

	// Check if it's a CIDR
	if cidrPattern.MatchString(cidr) {
		return nil
	}

	// Check if it's a valid hostname
	hostnamePattern := regexp.MustCompile(`^[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?(\.[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)*$`)
	if hostnamePattern.MatchString(cidr) {
		return nil
	}

	return &ValidationError{Field: "client", Message: "invalid client format (expected: CIDR, hostname, or '*')"}
}

// SanitizeString removes potentially dangerous characters
func SanitizeString(s string) string {
	// Remove null bytes and control characters
	var result strings.Builder
	for _, r := range s {
		if r >= 32 && r != 127 {
			result.WriteRune(r)
		}
	}
	return strings.TrimSpace(result.String())
}

// SanitizePath sanitizes a filesystem path
func SanitizePath(path string) string {
	// Remove leading/trailing whitespace and normalize slashes
	path = strings.TrimSpace(path)
	path = strings.ReplaceAll(path, "\\", "/")

	// Collapse repeated slashes
	for strings.Contains(path, "//") {
		path = strings.ReplaceAll(path, "//", "/")
	}

	return path
}
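Assuming the module path resolves inside this repository, the validators compose naturally at API boundaries; a small illustrative program:

package main

import (
	"fmt"

	"gitea.avt.data-center.id/othman.suseno/atlas/internal/validation"
)

func main() {
	fmt.Println(validation.ValidateZFSName("tank"))                       // <nil>
	fmt.Println(validation.ValidateZFSName("-tank"))                      // validation error on field 'name': ...
	fmt.Println(validation.ValidateIQN("iqn.2024-01.id.example:atlas1"))  // <nil>
	fmt.Println(validation.ValidateSize("10G"))                           // <nil>
	fmt.Println(validation.ValidateCIDR("10.0.0.0/24"))                   // <nil>
}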