diff --git a/backend/bin/calypso-api b/backend/bin/calypso-api index b90c252..41bd1bc 100755 Binary files a/backend/bin/calypso-api and b/backend/bin/calypso-api differ diff --git a/backend/calypso-api b/backend/calypso-api index 6e138a6..4dfe918 100755 Binary files a/backend/calypso-api and b/backend/calypso-api differ diff --git a/backend/internal/backup/handler.go b/backend/internal/backup/handler.go new file mode 100644 index 0000000..c3e7709 --- /dev/null +++ b/backend/internal/backup/handler.go @@ -0,0 +1,118 @@ +package backup + +import ( + "fmt" + "net/http" + + "github.com/atlasos/calypso/internal/common/logger" + "github.com/gin-gonic/gin" +) + +// Handler handles backup-related API requests +type Handler struct { + service *Service + logger *logger.Logger +} + +// NewHandler creates a new backup handler +func NewHandler(service *Service, log *logger.Logger) *Handler { + return &Handler{ + service: service, + logger: log, + } +} + +// ListJobs lists backup jobs with optional filters +func (h *Handler) ListJobs(c *gin.Context) { + opts := ListJobsOptions{ + Status: c.Query("status"), + JobType: c.Query("job_type"), + ClientName: c.Query("client_name"), + JobName: c.Query("job_name"), + } + + // Parse pagination + var limit, offset int + if limitStr := c.Query("limit"); limitStr != "" { + if _, err := fmt.Sscanf(limitStr, "%d", &limit); err == nil { + opts.Limit = limit + } + } + if offsetStr := c.Query("offset"); offsetStr != "" { + if _, err := fmt.Sscanf(offsetStr, "%d", &offset); err == nil { + opts.Offset = offset + } + } + + jobs, totalCount, err := h.service.ListJobs(c.Request.Context(), opts) + if err != nil { + h.logger.Error("Failed to list jobs", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list jobs"}) + return + } + + if jobs == nil { + jobs = []Job{} + } + + c.JSON(http.StatusOK, gin.H{ + "jobs": jobs, + "total": totalCount, + "limit": opts.Limit, + "offset": opts.Offset, + }) +} + +// GetJob retrieves a job by ID +func (h *Handler) GetJob(c *gin.Context) { + id := c.Param("id") + + job, err := h.service.GetJob(c.Request.Context(), id) + if err != nil { + if err.Error() == "job not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "job not found"}) + return + } + h.logger.Error("Failed to get job", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get job"}) + return + } + + c.JSON(http.StatusOK, job) +} + +// CreateJob creates a new backup job +func (h *Handler) CreateJob(c *gin.Context) { + var req CreateJobRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + // Validate job type + validJobTypes := map[string]bool{ + "Backup": true, "Restore": true, "Verify": true, "Copy": true, "Migrate": true, + } + if !validJobTypes[req.JobType] { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid job_type"}) + return + } + + // Validate job level + validJobLevels := map[string]bool{ + "Full": true, "Incremental": true, "Differential": true, "Since": true, + } + if !validJobLevels[req.JobLevel] { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid job_level"}) + return + } + + job, err := h.service.CreateJob(c.Request.Context(), req) + if err != nil { + h.logger.Error("Failed to create job", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create job"}) + return + } + + c.JSON(http.StatusCreated, job) +} diff --git a/backend/internal/backup/service.go b/backend/internal/backup/service.go 
new file mode 100644 index 0000000..7a2a9fd --- /dev/null +++ b/backend/internal/backup/service.go @@ -0,0 +1,781 @@ +package backup + +import ( + "context" + "database/sql" + "fmt" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/atlasos/calypso/internal/common/config" + "github.com/atlasos/calypso/internal/common/database" + "github.com/atlasos/calypso/internal/common/logger" +) + +// Service handles backup job operations +type Service struct { + db *database.DB + baculaDB *database.DB // Direct connection to Bacula database + logger *logger.Logger +} + +// NewService creates a new backup service +func NewService(db *database.DB, log *logger.Logger) *Service { + return &Service{ + db: db, + logger: log, + } +} + +// SetBaculaDatabase sets up direct connection to Bacula database +func (s *Service) SetBaculaDatabase(cfg config.DatabaseConfig, baculaDBName string) error { + // Create new database config for Bacula database + baculaCfg := cfg + baculaCfg.Database = baculaDBName // Override database name + + // Create connection to Bacula database + baculaDB, err := database.NewConnection(baculaCfg) + if err != nil { + return fmt.Errorf("failed to connect to Bacula database: %w", err) + } + + s.baculaDB = baculaDB + s.logger.Info("Connected to Bacula database", "database", baculaDBName, "host", cfg.Host, "port", cfg.Port) + return nil +} + +// Job represents a backup job +type Job struct { + ID string `json:"id"` + JobID int `json:"job_id"` + JobName string `json:"job_name"` + ClientName string `json:"client_name"` + JobType string `json:"job_type"` + JobLevel string `json:"job_level"` + Status string `json:"status"` + BytesWritten int64 `json:"bytes_written"` + FilesWritten int `json:"files_written"` + DurationSeconds *int `json:"duration_seconds,omitempty"` + StartedAt *time.Time `json:"started_at,omitempty"` + EndedAt *time.Time `json:"ended_at,omitempty"` + ErrorMessage *string `json:"error_message,omitempty"` + StorageName *string `json:"storage_name,omitempty"` + PoolName *string `json:"pool_name,omitempty"` + VolumeName *string `json:"volume_name,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// ListJobsOptions represents filtering and pagination options +type ListJobsOptions struct { + Status string // Filter by status: "Running", "Completed", "Failed", etc. + JobType string // Filter by job type: "Backup", "Restore", etc. 
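+	// Note: ClientName and JobName below are applied by ListJobs as
+	// case-insensitive substring matches (ILIKE); Status and JobType match exactly.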
+ ClientName string // Filter by client name + JobName string // Filter by job name + Limit int // Number of results to return + Offset int // Offset for pagination +} + +// SyncJobsFromBacula syncs jobs from Bacula/Bareos to the database +// Tries to query Bacula database directly first, falls back to bconsole if database access fails +func (s *Service) SyncJobsFromBacula(ctx context.Context) error { + s.logger.Info("Starting sync from Bacula database", "bacula_db_configured", s.baculaDB != nil) + + // Check if Bacula database connection is configured + if s.baculaDB == nil { + s.logger.Warn("Bacula database connection not configured, trying bconsole fallback") + return s.syncFromBconsole(ctx) + } + + // Try to query Bacula database directly (if user has access) + jobs, err := s.queryBaculaDatabase(ctx) + if err != nil { + s.logger.Warn("Failed to query Bacula database directly, trying bconsole", "error", err) + // Fallback to bconsole + return s.syncFromBconsole(ctx) + } + + s.logger.Info("Queried Bacula database", "jobs_found", len(jobs)) + + if len(jobs) == 0 { + s.logger.Debug("No jobs found in Bacula database") + return nil + } + + // Upsert jobs to Calypso database + successCount := 0 + errorCount := 0 + for _, job := range jobs { + err := s.upsertJob(ctx, job) + if err != nil { + s.logger.Error("Failed to upsert job", "job_id", job.JobID, "job_name", job.JobName, "error", err) + errorCount++ + continue + } + successCount++ + s.logger.Debug("Upserted job", "job_id", job.JobID, "job_name", job.JobName) + } + + s.logger.Info("Synced jobs from Bacula database", "total", len(jobs), "success", successCount, "errors", errorCount) + + if errorCount > 0 { + return fmt.Errorf("failed to sync %d out of %d jobs", errorCount, len(jobs)) + } + + return nil +} + +// queryBaculaDatabase queries Bacula database directly +// Uses direct connection to Bacula database (no dblink needed) +func (s *Service) queryBaculaDatabase(ctx context.Context) ([]Job, error) { + // Use direct connection to Bacula database + if s.baculaDB == nil { + return nil, fmt.Errorf("Bacula database connection not configured") + } + + return s.queryBaculaDirect(ctx) +} + +// queryBaculaDirect queries Job table directly (Bacularis approach) +// Assumes Bacula tables are in same database or accessible via search_path +func (s *Service) queryBaculaDirect(ctx context.Context) ([]Job, error) { + // Bacularis-style query: direct query to Job table with JOIN to Client + // This is the standard way Bacularis queries Bacula database + query := ` + SELECT + j.JobId as job_id, + j.Name as job_name, + COALESCE(c.Name, 'unknown') as client_name, + CASE + WHEN j.Type = 'B' THEN 'Backup' + WHEN j.Type = 'R' THEN 'Restore' + WHEN j.Type = 'V' THEN 'Verify' + WHEN j.Type = 'C' THEN 'Copy' + WHEN j.Type = 'M' THEN 'Migrate' + ELSE 'Backup' + END as job_type, + CASE + WHEN j.Level = 'F' THEN 'Full' + WHEN j.Level = 'I' THEN 'Incremental' + WHEN j.Level = 'D' THEN 'Differential' + WHEN j.Level = 'S' THEN 'Since' + ELSE 'Full' + END as job_level, + CASE + WHEN j.JobStatus = 'T' THEN 'Running' + WHEN j.JobStatus = 'C' THEN 'Completed' + WHEN j.JobStatus = 'f' OR j.JobStatus = 'F' THEN 'Failed' + WHEN j.JobStatus = 'A' THEN 'Canceled' + WHEN j.JobStatus = 'W' THEN 'Waiting' + ELSE 'Waiting' + END as status, + COALESCE(j.JobBytes, 0) as bytes_written, + COALESCE(j.JobFiles, 0) as files_written, + j.StartTime as started_at, + j.EndTime as ended_at + FROM Job j + LEFT JOIN Client c ON j.ClientId = c.ClientId + ORDER BY j.StartTime DESC + LIMIT 1000 + ` 
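+	// The Type/Level/JobStatus character mappings above are assumed to follow
+	// Bacula's catalog conventions; the same mapping is duplicated in
+	// parseBconsoleOutput and in the sync_bacula_jobs SQL function (migration 011),
+	// so the three must be kept in sync.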
+
+	// Use direct connection to Bacula database
+	rows, err := s.baculaDB.QueryContext(ctx, query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to query Bacula Job table: %w", err)
+	}
+	defer rows.Close()
+
+	var jobs []Job
+	for rows.Next() {
+		var job Job
+		var startedAt, endedAt sql.NullTime
+
+		err := rows.Scan(
+			&job.JobID, &job.JobName, &job.ClientName,
+			&job.JobType, &job.JobLevel, &job.Status,
+			&job.BytesWritten, &job.FilesWritten, &startedAt, &endedAt,
+		)
+		if err != nil {
+			s.logger.Error("Failed to scan Bacula job", "error", err)
+			continue
+		}
+
+		if startedAt.Valid {
+			job.StartedAt = &startedAt.Time
+		}
+		if endedAt.Valid {
+			job.EndedAt = &endedAt.Time
+			// Calculate duration if both start and end times are available
+			if job.StartedAt != nil {
+				duration := int(endedAt.Time.Sub(*job.StartedAt).Seconds())
+				job.DurationSeconds = &duration
+			}
+		}
+
+		jobs = append(jobs, job)
+	}
+
+	if err := rows.Err(); err != nil {
+		return nil, err
+	}
+
+	if len(jobs) > 0 {
+		s.logger.Info("Successfully queried Bacula database (direct)", "count", len(jobs))
+	}
+
+	return jobs, nil // An empty list is not an error
+}
+
+// syncFromBconsole syncs jobs using bconsole command (fallback method)
+func (s *Service) syncFromBconsole(ctx context.Context) error {
+	// Execute bconsole command to list jobs.
+	// printf is used rather than echo -e, which is not portable across /bin/sh implementations.
+	cmd := exec.CommandContext(ctx, "sh", "-c", "printf 'list jobs\\nquit\\n' | bconsole")
+
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		s.logger.Debug("Failed to execute bconsole", "error", err, "output", string(output))
+		return nil // Don't fail, just return empty
+	}
+
+	if len(output) == 0 {
+		s.logger.Debug("bconsole returned empty output")
+		return nil
+	}
+
+	// Parse bconsole output
+	jobs := s.parseBconsoleOutput(ctx, string(output))
+
+	if len(jobs) == 0 {
+		s.logger.Debug("No jobs found in bconsole output")
+		return nil
+	}
+
+	// Upsert jobs to database
+	successCount := 0
+	for _, job := range jobs {
+		err := s.upsertJob(ctx, job)
+		if err != nil {
+			s.logger.Error("Failed to upsert job", "job_id", job.JobID, "error", err)
+			continue
+		}
+		successCount++
+	}
+
+	s.logger.Info("Synced jobs from bconsole", "total", len(jobs), "success", successCount)
+	return nil
+}
+
+// parseBconsoleOutput parses bconsole "list jobs" output
+func (s *Service) parseBconsoleOutput(ctx context.Context, output string) []Job {
+	var jobs []Job
+	lines := strings.Split(output, "\n")
+
+	// Skip header lines until we find the data rows
+	inDataSection := false
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+
+		// Skip empty lines and separators
+		if line == "" || strings.HasPrefix(line, "+") {
+			continue
+		}
+
+		// Start data section when we see the header row (case differs between versions)
+		if strings.HasPrefix(strings.ToLower(line), "| jobid") {
+			inDataSection = true
+			continue
+		}
+
+		// Stop at the console prompt
+		if strings.HasPrefix(line, "*") {
+			break
+		}
+
+		if !inDataSection {
+			continue
+		}
+
+		// Parse data row: | jobid | name | starttime | type | level | jobfiles | jobbytes | jobstatus |
+		if strings.HasPrefix(line, "|") {
+			parts := strings.Split(line, "|")
+			if len(parts) < 9 {
+				continue
+			}
+
+			// Extract fields (skip first empty part)
+			jobIDStr := strings.TrimSpace(parts[1])
+			jobName := strings.TrimSpace(parts[2])
+			startTimeStr := strings.TrimSpace(parts[3])
+			jobTypeChar := strings.TrimSpace(parts[4])
+			jobLevelChar := strings.TrimSpace(parts[5])
+			jobFilesStr := strings.TrimSpace(parts[6])
+			jobBytesStr := strings.TrimSpace(parts[7])
+			jobStatusChar := strings.TrimSpace(parts[8])
+
+			// Parse job ID
+			jobID, err := strconv.Atoi(jobIDStr)
+			if err != nil {
+				s.logger.Warn("Failed to parse job ID", "value", jobIDStr, "error", err)
+				continue
+			}
+
+			// Parse start time
+			var startedAt *time.Time
+			if startTimeStr != "" && startTimeStr != "-" {
+				// Format: 2025-12-27 23:05:02
+				parsedTime, err := time.Parse("2006-01-02 15:04:05", startTimeStr)
+				if err == nil {
+					startedAt = &parsedTime
+				}
+			}
+
+			// Map job type
+			jobType := "Backup"
+			switch jobTypeChar {
+			case "B":
+				jobType = "Backup"
+			case "R":
+				jobType = "Restore"
+			case "V":
+				jobType = "Verify"
+			case "C":
+				jobType = "Copy"
+			case "M":
+				jobType = "Migrate"
+			}
+
+			// Map job level
+			jobLevel := "Full"
+			switch jobLevelChar {
+			case "F":
+				jobLevel = "Full"
+			case "I":
+				jobLevel = "Incremental"
+			case "D":
+				jobLevel = "Differential"
+			case "S":
+				jobLevel = "Since"
+			}
+
+			// Parse files and bytes
+			filesWritten := 0
+			if jobFilesStr != "" && jobFilesStr != "-" {
+				if f, err := strconv.Atoi(jobFilesStr); err == nil {
+					filesWritten = f
+				}
+			}
+
+			bytesWritten := int64(0)
+			if jobBytesStr != "" && jobBytesStr != "-" {
+				if b, err := strconv.ParseInt(jobBytesStr, 10, 64); err == nil {
+					bytesWritten = b
+				}
+			}
+
+			// Map job status (same character mapping as queryBaculaDirect)
+			status := "Waiting"
+			switch jobStatusChar {
+			case "T":
+				status = "Running"
+			case "C":
+				status = "Completed"
+			case "f", "F":
+				status = "Failed"
+			case "A":
+				status = "Canceled"
+			case "W":
+				status = "Waiting"
+			}
+
+			// Resolve the client name: query job details via bconsole,
+			// falling back to the job name if no client can be determined
+			clientName := "unknown"
+			if jobName != "" {
+				clientNameFromJob := s.getClientNameFromJob(ctx, jobID)
+				if clientNameFromJob != "" {
+					clientName = clientNameFromJob
+				} else {
+					// Fallback: use the job name as the client name
+					clientName = jobName
+				}
+			}
+
+			job := Job{
+				JobID:        jobID,
+				JobName:      jobName,
+				ClientName:   clientName,
+				JobType:      jobType,
+				JobLevel:     jobLevel,
+				Status:       status,
+				BytesWritten: bytesWritten,
+				FilesWritten: filesWritten,
+				StartedAt:    startedAt,
+			}
+
+			jobs = append(jobs, job)
+		}
+	}
+
+	return jobs
+}
+
+// getClientNameFromJob gets client name from job details using bconsole
+func (s *Service) getClientNameFromJob(ctx context.Context, jobID int) string {
+	// Execute bconsole to get job details (printf for /bin/sh portability)
+	cmd := exec.CommandContext(ctx, "sh", "-c", fmt.Sprintf("printf 'list job jobid=%d\\nquit\\n' | bconsole", jobID))
+
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		s.logger.Debug("Failed to get job details", "job_id", jobID, "error", err)
+		return ""
+	}
+
+	// Parse output to find the Client line
+	lines := strings.Split(string(output), "\n")
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if strings.HasPrefix(line, "Client:") {
+			parts := strings.SplitN(line, ":", 2)
+			if len(parts) == 2 {
+				return strings.TrimSpace(parts[1])
+			}
+		}
+	}
+
+	return ""
+}
+
+// upsertJob inserts or updates a job in the database
+func (s *Service) upsertJob(ctx context.Context, job Job) error {
+	query := `
+		INSERT INTO backup_jobs (
+			job_id, job_name, client_name, job_type, job_level, status,
+			bytes_written, files_written, started_at, ended_at, duration_seconds, updated_at
+		) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, NOW())
+		ON CONFLICT (job_id) DO UPDATE SET
+			job_name = EXCLUDED.job_name,
+			client_name = EXCLUDED.client_name,
+ job_type = EXCLUDED.job_type, + job_level = EXCLUDED.job_level, + status = EXCLUDED.status, + bytes_written = EXCLUDED.bytes_written, + files_written = EXCLUDED.files_written, + started_at = EXCLUDED.started_at, + ended_at = EXCLUDED.ended_at, + duration_seconds = EXCLUDED.duration_seconds, + updated_at = NOW() + ` + + // Use job name as client name if client_name is empty (we'll improve this later) + clientName := job.ClientName + if clientName == "" { + clientName = "unknown" + } + + result, err := s.db.ExecContext(ctx, query, + job.JobID, job.JobName, clientName, job.JobType, job.JobLevel, job.Status, + job.BytesWritten, job.FilesWritten, job.StartedAt, job.EndedAt, job.DurationSeconds, + ) + + if err != nil { + s.logger.Error("Database error in upsertJob", "job_id", job.JobID, "error", err) + return err + } + + rowsAffected, _ := result.RowsAffected() + s.logger.Debug("Upserted job to database", "job_id", job.JobID, "rows_affected", rowsAffected) + + return nil +} + +// ListJobs lists backup jobs with optional filters +func (s *Service) ListJobs(ctx context.Context, opts ListJobsOptions) ([]Job, int, error) { + // Try to sync jobs from Bacula first (non-blocking - if it fails, continue with database) + // Don't return error if sync fails, just log it and continue + // This allows the API to work even if bconsole is not available + s.logger.Info("ListJobs called, syncing from Bacula first") + syncErr := s.SyncJobsFromBacula(ctx) + if syncErr != nil { + s.logger.Warn("Failed to sync jobs from Bacula, using database only", "error", syncErr) + // Continue anyway - we'll use whatever is in the database + } else { + s.logger.Info("Successfully synced jobs from Bacula") + } + + // Build WHERE clause + whereClauses := []string{"1=1"} + args := []interface{}{} + argIndex := 1 + + if opts.Status != "" { + whereClauses = append(whereClauses, fmt.Sprintf("status = $%d", argIndex)) + args = append(args, opts.Status) + argIndex++ + } + + if opts.JobType != "" { + whereClauses = append(whereClauses, fmt.Sprintf("job_type = $%d", argIndex)) + args = append(args, opts.JobType) + argIndex++ + } + + if opts.ClientName != "" { + whereClauses = append(whereClauses, fmt.Sprintf("client_name ILIKE $%d", argIndex)) + args = append(args, "%"+opts.ClientName+"%") + argIndex++ + } + + if opts.JobName != "" { + whereClauses = append(whereClauses, fmt.Sprintf("job_name ILIKE $%d", argIndex)) + args = append(args, "%"+opts.JobName+"%") + argIndex++ + } + + whereClause := "" + if len(whereClauses) > 0 { + whereClause = "WHERE " + whereClauses[0] + for i := 1; i < len(whereClauses); i++ { + whereClause += " AND " + whereClauses[i] + } + } + + // Get total count + countQuery := fmt.Sprintf("SELECT COUNT(*) FROM backup_jobs %s", whereClause) + var totalCount int + err := s.db.QueryRowContext(ctx, countQuery, args...).Scan(&totalCount) + if err != nil { + return nil, 0, fmt.Errorf("failed to count jobs: %w", err) + } + + // Set default limit + limit := opts.Limit + if limit <= 0 { + limit = 50 + } + if limit > 100 { + limit = 100 + } + + // Build query with pagination + query := fmt.Sprintf(` + SELECT id, job_id, job_name, client_name, job_type, job_level, status, + bytes_written, files_written, duration_seconds, + started_at, ended_at, error_message, + storage_name, pool_name, volume_name, + created_at, updated_at + FROM backup_jobs + %s + ORDER BY started_at DESC NULLS LAST, created_at DESC + LIMIT $%d OFFSET $%d + `, whereClause, argIndex, argIndex+1) + + args = append(args, limit, opts.Offset) + + rows, err := 
s.db.QueryContext(ctx, query, args...) + if err != nil { + return nil, 0, fmt.Errorf("failed to query jobs: %w", err) + } + defer rows.Close() + + var jobs []Job + for rows.Next() { + var job Job + var durationSeconds sql.NullInt64 + var startedAt, endedAt sql.NullTime + var errorMessage, storageName, poolName, volumeName sql.NullString + + err := rows.Scan( + &job.ID, &job.JobID, &job.JobName, &job.ClientName, + &job.JobType, &job.JobLevel, &job.Status, + &job.BytesWritten, &job.FilesWritten, &durationSeconds, + &startedAt, &endedAt, &errorMessage, + &storageName, &poolName, &volumeName, + &job.CreatedAt, &job.UpdatedAt, + ) + if err != nil { + s.logger.Error("Failed to scan job", "error", err) + continue + } + + if durationSeconds.Valid { + dur := int(durationSeconds.Int64) + job.DurationSeconds = &dur + } + if startedAt.Valid { + job.StartedAt = &startedAt.Time + } + if endedAt.Valid { + job.EndedAt = &endedAt.Time + } + if errorMessage.Valid { + job.ErrorMessage = &errorMessage.String + } + if storageName.Valid { + job.StorageName = &storageName.String + } + if poolName.Valid { + job.PoolName = &poolName.String + } + if volumeName.Valid { + job.VolumeName = &volumeName.String + } + + jobs = append(jobs, job) + } + + return jobs, totalCount, rows.Err() +} + +// GetJob retrieves a job by ID +func (s *Service) GetJob(ctx context.Context, id string) (*Job, error) { + query := ` + SELECT id, job_id, job_name, client_name, job_type, job_level, status, + bytes_written, files_written, duration_seconds, + started_at, ended_at, error_message, + storage_name, pool_name, volume_name, + created_at, updated_at + FROM backup_jobs + WHERE id = $1 + ` + + var job Job + var durationSeconds sql.NullInt64 + var startedAt, endedAt sql.NullTime + var errorMessage, storageName, poolName, volumeName sql.NullString + + err := s.db.QueryRowContext(ctx, query, id).Scan( + &job.ID, &job.JobID, &job.JobName, &job.ClientName, + &job.JobType, &job.JobLevel, &job.Status, + &job.BytesWritten, &job.FilesWritten, &durationSeconds, + &startedAt, &endedAt, &errorMessage, + &storageName, &poolName, &volumeName, + &job.CreatedAt, &job.UpdatedAt, + ) + if err != nil { + if err == sql.ErrNoRows { + return nil, fmt.Errorf("job not found") + } + return nil, fmt.Errorf("failed to get job: %w", err) + } + + if durationSeconds.Valid { + dur := int(durationSeconds.Int64) + job.DurationSeconds = &dur + } + if startedAt.Valid { + job.StartedAt = &startedAt.Time + } + if endedAt.Valid { + job.EndedAt = &endedAt.Time + } + if errorMessage.Valid { + job.ErrorMessage = &errorMessage.String + } + if storageName.Valid { + job.StorageName = &storageName.String + } + if poolName.Valid { + job.PoolName = &poolName.String + } + if volumeName.Valid { + job.VolumeName = &volumeName.String + } + + return &job, nil +} + +// CreateJobRequest represents a request to create a new backup job +type CreateJobRequest struct { + JobName string `json:"job_name" binding:"required"` + ClientName string `json:"client_name" binding:"required"` + JobType string `json:"job_type" binding:"required"` // 'Backup', 'Restore', 'Verify', 'Copy', 'Migrate' + JobLevel string `json:"job_level" binding:"required"` // 'Full', 'Incremental', 'Differential', 'Since' + StorageName *string `json:"storage_name,omitempty"` + PoolName *string `json:"pool_name,omitempty"` +} + +// CreateJob creates a new backup job +func (s *Service) CreateJob(ctx context.Context, req CreateJobRequest) (*Job, error) { + // Generate a unique job ID (in real implementation, this would come from 
Bareos) + // For now, we'll use a simple incrementing approach or timestamp-based ID + var jobID int + err := s.db.QueryRowContext(ctx, ` + SELECT COALESCE(MAX(job_id), 0) + 1 FROM backup_jobs + `).Scan(&jobID) + if err != nil { + return nil, fmt.Errorf("failed to generate job ID: %w", err) + } + + // Insert the job into database + query := ` + INSERT INTO backup_jobs ( + job_id, job_name, client_name, job_type, job_level, + status, bytes_written, files_written, + storage_name, pool_name, started_at + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, NOW()) + RETURNING id, job_id, job_name, client_name, job_type, job_level, status, + bytes_written, files_written, duration_seconds, + started_at, ended_at, error_message, + storage_name, pool_name, volume_name, + created_at, updated_at + ` + + var job Job + var durationSeconds sql.NullInt64 + var startedAt, endedAt sql.NullTime + var errorMessage, storageName, poolName, volumeName sql.NullString + + err = s.db.QueryRowContext(ctx, query, + jobID, req.JobName, req.ClientName, req.JobType, req.JobLevel, + "Waiting", 0, 0, + req.StorageName, req.PoolName, + ).Scan( + &job.ID, &job.JobID, &job.JobName, &job.ClientName, + &job.JobType, &job.JobLevel, &job.Status, + &job.BytesWritten, &job.FilesWritten, &durationSeconds, + &startedAt, &endedAt, &errorMessage, + &storageName, &poolName, &volumeName, + &job.CreatedAt, &job.UpdatedAt, + ) + if err != nil { + return nil, fmt.Errorf("failed to create job: %w", err) + } + + if durationSeconds.Valid { + dur := int(durationSeconds.Int64) + job.DurationSeconds = &dur + } + if startedAt.Valid { + job.StartedAt = &startedAt.Time + } + if endedAt.Valid { + job.EndedAt = &endedAt.Time + } + if errorMessage.Valid { + job.ErrorMessage = &errorMessage.String + } + if storageName.Valid { + job.StorageName = &storageName.String + } + if poolName.Valid { + job.PoolName = &poolName.String + } + if volumeName.Valid { + job.VolumeName = &volumeName.String + } + + s.logger.Info("Backup job created", + "job_id", job.JobID, + "job_name", job.JobName, + "client_name", job.ClientName, + "job_type", job.JobType, + ) + + return &job, nil +} diff --git a/backend/internal/common/database/migrations.go b/backend/internal/common/database/migrations.go index 82a831a..54353af 100644 --- a/backend/internal/common/database/migrations.go +++ b/backend/internal/common/database/migrations.go @@ -59,7 +59,7 @@ func RunMigrations(ctx context.Context, db *DB) error { if _, err := tx.ExecContext(ctx, string(sql)); err != nil { tx.Rollback() - return fmt.Errorf("failed to execute migration %s: %w", migration.Version, err) + return fmt.Errorf("failed to execute migration %d: %w", migration.Version, err) } // Record migration @@ -68,11 +68,11 @@ func RunMigrations(ctx context.Context, db *DB) error { migration.Version, ); err != nil { tx.Rollback() - return fmt.Errorf("failed to record migration %s: %w", migration.Version, err) + return fmt.Errorf("failed to record migration %d: %w", migration.Version, err) } if err := tx.Commit(); err != nil { - return fmt.Errorf("failed to commit migration %s: %w", migration.Version, err) + return fmt.Errorf("failed to commit migration %d: %w", migration.Version, err) } log.Info("Migration applied successfully", "version", migration.Version) diff --git a/backend/internal/common/database/migrations/007_add_vendor_to_vtl_libraries.sql b/backend/internal/common/database/migrations/007_add_vendor_to_vtl_libraries.sql new file mode 100644 index 0000000..a23ebce --- /dev/null +++ 
b/backend/internal/common/database/migrations/007_add_vendor_to_vtl_libraries.sql @@ -0,0 +1,3 @@ +-- Add vendor column to virtual_tape_libraries table +ALTER TABLE virtual_tape_libraries ADD COLUMN IF NOT EXISTS vendor VARCHAR(255); + diff --git a/backend/internal/common/database/migrations/008_add_user_groups.sql b/backend/internal/common/database/migrations/008_add_user_groups.sql new file mode 100644 index 0000000..962757b --- /dev/null +++ b/backend/internal/common/database/migrations/008_add_user_groups.sql @@ -0,0 +1,45 @@ +-- Add user groups feature +-- Groups table +CREATE TABLE IF NOT EXISTS groups ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + name VARCHAR(255) NOT NULL UNIQUE, + description TEXT, + is_system BOOLEAN NOT NULL DEFAULT false, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +); + +-- User groups junction table +CREATE TABLE IF NOT EXISTS user_groups ( + user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE, + group_id UUID NOT NULL REFERENCES groups(id) ON DELETE CASCADE, + assigned_at TIMESTAMP NOT NULL DEFAULT NOW(), + assigned_by UUID REFERENCES users(id), + PRIMARY KEY (user_id, group_id) +); + +-- Group roles junction table (groups can have roles) +CREATE TABLE IF NOT EXISTS group_roles ( + group_id UUID NOT NULL REFERENCES groups(id) ON DELETE CASCADE, + role_id UUID NOT NULL REFERENCES roles(id) ON DELETE CASCADE, + granted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (group_id, role_id) +); + +-- Indexes +CREATE INDEX IF NOT EXISTS idx_groups_name ON groups(name); +CREATE INDEX IF NOT EXISTS idx_user_groups_user_id ON user_groups(user_id); +CREATE INDEX IF NOT EXISTS idx_user_groups_group_id ON user_groups(group_id); +CREATE INDEX IF NOT EXISTS idx_group_roles_group_id ON group_roles(group_id); +CREATE INDEX IF NOT EXISTS idx_group_roles_role_id ON group_roles(role_id); + +-- Insert default system groups +INSERT INTO groups (name, description, is_system) VALUES + ('wheel', 'System administrators group', true), + ('operators', 'System operators group', true), + ('backup', 'Backup operators group', true), + ('auditors', 'Auditors group', true), + ('storage_admins', 'Storage administrators group', true), + ('services', 'Service accounts group', true) +ON CONFLICT (name) DO NOTHING; + diff --git a/backend/internal/common/database/migrations/009_backup_jobs_schema.sql b/backend/internal/common/database/migrations/009_backup_jobs_schema.sql new file mode 100644 index 0000000..b4b6dc6 --- /dev/null +++ b/backend/internal/common/database/migrations/009_backup_jobs_schema.sql @@ -0,0 +1,34 @@ +-- AtlasOS - Calypso +-- Backup Jobs Schema +-- Version: 9.0 + +-- Backup jobs table +CREATE TABLE IF NOT EXISTS backup_jobs ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + job_id INTEGER NOT NULL UNIQUE, -- Bareos job ID + job_name VARCHAR(255) NOT NULL, + client_name VARCHAR(255) NOT NULL, + job_type VARCHAR(50) NOT NULL, -- 'Backup', 'Restore', 'Verify', 'Copy', 'Migrate' + job_level VARCHAR(50) NOT NULL, -- 'Full', 'Incremental', 'Differential', 'Since' + status VARCHAR(50) NOT NULL, -- 'Running', 'Completed', 'Failed', 'Canceled', 'Waiting' + bytes_written BIGINT NOT NULL DEFAULT 0, + files_written INTEGER NOT NULL DEFAULT 0, + duration_seconds INTEGER, + started_at TIMESTAMP, + ended_at TIMESTAMP, + error_message TEXT, + storage_name VARCHAR(255), + pool_name VARCHAR(255), + volume_name VARCHAR(255), + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +); + +-- 
Indexes for performance +CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_id ON backup_jobs(job_id); +CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_name ON backup_jobs(job_name); +CREATE INDEX IF NOT EXISTS idx_backup_jobs_client_name ON backup_jobs(client_name); +CREATE INDEX IF NOT EXISTS idx_backup_jobs_status ON backup_jobs(status); +CREATE INDEX IF NOT EXISTS idx_backup_jobs_started_at ON backup_jobs(started_at DESC); +CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_type ON backup_jobs(job_type); + diff --git a/backend/internal/common/database/migrations/010_add_backup_permissions.sql b/backend/internal/common/database/migrations/010_add_backup_permissions.sql new file mode 100644 index 0000000..6c8eb5d --- /dev/null +++ b/backend/internal/common/database/migrations/010_add_backup_permissions.sql @@ -0,0 +1,39 @@ +-- AtlasOS - Calypso +-- Add Backup Permissions +-- Version: 10.0 + +-- Insert backup permissions +INSERT INTO permissions (name, resource, action, description) VALUES + ('backup:read', 'backup', 'read', 'View backup jobs and history'), + ('backup:write', 'backup', 'write', 'Create and manage backup jobs'), + ('backup:manage', 'backup', 'manage', 'Full backup management') +ON CONFLICT (name) DO NOTHING; + +-- Assign backup permissions to roles + +-- Admin gets all backup permissions (explicitly assign since admin query in 001 only runs once) +INSERT INTO role_permissions (role_id, permission_id) +SELECT r.id, p.id +FROM roles r, permissions p +WHERE r.name = 'admin' + AND p.resource = 'backup' +ON CONFLICT DO NOTHING; + +-- Operator gets read and write permissions for backup +INSERT INTO role_permissions (role_id, permission_id) +SELECT r.id, p.id +FROM roles r, permissions p +WHERE r.name = 'operator' + AND p.resource = 'backup' + AND p.action IN ('read', 'write') +ON CONFLICT DO NOTHING; + +-- ReadOnly gets only read permission for backup +INSERT INTO role_permissions (role_id, permission_id) +SELECT r.id, p.id +FROM roles r, permissions p +WHERE r.name = 'readonly' + AND p.resource = 'backup' + AND p.action = 'read' +ON CONFLICT DO NOTHING; + diff --git a/backend/internal/common/database/migrations/011_sync_bacula_jobs_function.sql b/backend/internal/common/database/migrations/011_sync_bacula_jobs_function.sql new file mode 100644 index 0000000..8cc0af0 --- /dev/null +++ b/backend/internal/common/database/migrations/011_sync_bacula_jobs_function.sql @@ -0,0 +1,209 @@ +-- AtlasOS - Calypso +-- PostgreSQL Function to Sync Jobs from Bacula to Calypso +-- Version: 11.0 +-- +-- This function syncs jobs from Bacula database (Job table) to Calypso database (backup_jobs table) +-- Uses dblink extension to query Bacula database from Calypso database +-- +-- Prerequisites: +-- 1. dblink extension must be installed: CREATE EXTENSION IF NOT EXISTS dblink; +-- 2. User must have access to both databases +-- 3. 
Connection parameters must be configured in the function + +-- Create function to sync jobs from Bacula to Calypso +CREATE OR REPLACE FUNCTION sync_bacula_jobs( + bacula_db_name TEXT DEFAULT 'bacula', + bacula_host TEXT DEFAULT 'localhost', + bacula_port INTEGER DEFAULT 5432, + bacula_user TEXT DEFAULT 'calypso', + bacula_password TEXT DEFAULT '' +) +RETURNS TABLE( + jobs_synced INTEGER, + jobs_inserted INTEGER, + jobs_updated INTEGER, + errors INTEGER +) AS $$ +DECLARE + conn_str TEXT; + jobs_count INTEGER := 0; + inserted_count INTEGER := 0; + updated_count INTEGER := 0; + error_count INTEGER := 0; + job_record RECORD; +BEGIN + -- Build dblink connection string + conn_str := format( + 'dbname=%s host=%s port=%s user=%s password=%s', + bacula_db_name, + bacula_host, + bacula_port, + bacula_user, + bacula_password + ); + + -- Query jobs from Bacula database using dblink + FOR job_record IN + SELECT * FROM dblink( + conn_str, + $QUERY$ + SELECT + j.JobId, + j.Name as job_name, + COALESCE(c.Name, 'unknown') as client_name, + CASE + WHEN j.Type = 'B' THEN 'Backup' + WHEN j.Type = 'R' THEN 'Restore' + WHEN j.Type = 'V' THEN 'Verify' + WHEN j.Type = 'C' THEN 'Copy' + WHEN j.Type = 'M' THEN 'Migrate' + ELSE 'Backup' + END as job_type, + CASE + WHEN j.Level = 'F' THEN 'Full' + WHEN j.Level = 'I' THEN 'Incremental' + WHEN j.Level = 'D' THEN 'Differential' + WHEN j.Level = 'S' THEN 'Since' + ELSE 'Full' + END as job_level, + CASE + WHEN j.JobStatus = 'T' THEN 'Running' + WHEN j.JobStatus = 'C' THEN 'Completed' + WHEN j.JobStatus = 'f' OR j.JobStatus = 'F' THEN 'Failed' + WHEN j.JobStatus = 'A' THEN 'Canceled' + WHEN j.JobStatus = 'W' THEN 'Waiting' + ELSE 'Waiting' + END as status, + COALESCE(j.JobBytes, 0) as bytes_written, + COALESCE(j.JobFiles, 0) as files_written, + j.StartTime as started_at, + j.EndTime as ended_at, + CASE + WHEN j.EndTime IS NOT NULL AND j.StartTime IS NOT NULL + THEN EXTRACT(EPOCH FROM (j.EndTime - j.StartTime))::INTEGER + ELSE NULL + END as duration_seconds + FROM Job j + LEFT JOIN Client c ON j.ClientId = c.ClientId + ORDER BY j.StartTime DESC + LIMIT 1000 + $QUERY$ + ) AS t( + job_id INTEGER, + job_name TEXT, + client_name TEXT, + job_type TEXT, + job_level TEXT, + status TEXT, + bytes_written BIGINT, + files_written INTEGER, + started_at TIMESTAMP, + ended_at TIMESTAMP, + duration_seconds INTEGER + ) + LOOP + BEGIN + -- Check if job already exists (before insert/update) + IF EXISTS (SELECT 1 FROM backup_jobs WHERE job_id = job_record.job_id) THEN + updated_count := updated_count + 1; + ELSE + inserted_count := inserted_count + 1; + END IF; + + -- Upsert job to backup_jobs table + INSERT INTO backup_jobs ( + job_id, job_name, client_name, job_type, job_level, status, + bytes_written, files_written, started_at, ended_at, duration_seconds, + updated_at + ) VALUES ( + job_record.job_id, + job_record.job_name, + job_record.client_name, + job_record.job_type, + job_record.job_level, + job_record.status, + job_record.bytes_written, + job_record.files_written, + job_record.started_at, + job_record.ended_at, + job_record.duration_seconds, + NOW() + ) + ON CONFLICT (job_id) DO UPDATE SET + job_name = EXCLUDED.job_name, + client_name = EXCLUDED.client_name, + job_type = EXCLUDED.job_type, + job_level = EXCLUDED.job_level, + status = EXCLUDED.status, + bytes_written = EXCLUDED.bytes_written, + files_written = EXCLUDED.files_written, + started_at = EXCLUDED.started_at, + ended_at = EXCLUDED.ended_at, + duration_seconds = EXCLUDED.duration_seconds, + updated_at = NOW(); + + jobs_count 
:= jobs_count + 1;
+        EXCEPTION
+            WHEN OTHERS THEN
+                error_count := error_count + 1;
+                -- Log error but continue with next job
+                RAISE WARNING 'Error syncing job %: %', job_record.job_id, SQLERRM;
+        END;
+    END LOOP;
+
+    -- Return summary
+    RETURN QUERY SELECT jobs_count, inserted_count, updated_count, error_count;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Create a simpler version that uses current database connection settings
+-- This version assumes Bacula is on same host/port with same user
+CREATE OR REPLACE FUNCTION sync_bacula_jobs_simple()
+RETURNS TABLE(
+    jobs_synced INTEGER,
+    jobs_inserted INTEGER,
+    jobs_updated INTEGER,
+    errors INTEGER
+) AS $$
+DECLARE
+    current_user_name TEXT;
+    current_host TEXT;
+    current_port INTEGER;
+    current_db TEXT;
+BEGIN
+    -- Get current connection info
+    SELECT
+        current_user,
+        COALESCE(inet_server_addr()::TEXT, 'localhost'),
+        COALESCE(inet_server_port(), 5432),
+        current_database()
+    INTO
+        current_user_name,
+        current_host,
+        current_port,
+        current_db;
+
+    -- Call main function with current connection settings
+    -- Note: password needs to be passed or configured in .pgpass
+    RETURN QUERY
+    SELECT * FROM sync_bacula_jobs(
+        'bacula',  -- Try 'bacula' first
+        current_host,
+        current_port,
+        current_user_name,
+        ''  -- Empty password - will use .pgpass or peer authentication
+    );
+END;
+$$ LANGUAGE plpgsql;
+
+-- Grant execute permission to calypso user
+GRANT EXECUTE ON FUNCTION sync_bacula_jobs(TEXT, TEXT, INTEGER, TEXT, TEXT) TO calypso;
+GRANT EXECUTE ON FUNCTION sync_bacula_jobs_simple() TO calypso;
+
+-- Create index if not exists (should already exist from migration 009)
+CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_id ON backup_jobs(job_id);
+CREATE INDEX IF NOT EXISTS idx_backup_jobs_updated_at ON backup_jobs(updated_at);
+
+COMMENT ON FUNCTION sync_bacula_jobs IS 'Syncs jobs from Bacula database to Calypso backup_jobs table using dblink';
+COMMENT ON FUNCTION sync_bacula_jobs_simple IS 'Simplified version that uses current connection settings (requires .pgpass for password)';
+
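+-- Usage sketch: the connection values below are placeholders, not values shipped
+-- with Calypso; dblink must be installed first (see prerequisites above).
+--
+--   SELECT * FROM sync_bacula_jobs('bacula', 'localhost', 5432, 'calypso', 'secret');
+--   SELECT * FROM sync_bacula_jobs_simple();  -- uses the current connection plus .pgpass
+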
diff --git a/backend/internal/common/router/cache.go b/backend/internal/common/router/cache.go
index 0020420..c3907b9 100644
--- a/backend/internal/common/router/cache.go
+++ b/backend/internal/common/router/cache.go
@@ -51,6 +51,13 @@ func cacheMiddleware(cfg CacheConfig, cache *cache.Cache) gin.HandlerFunc {
 			return
 		}
 
+		// Don't cache VTL endpoints - they change frequently
+		path := c.Request.URL.Path
+		if strings.HasPrefix(path, "/api/v1/tape/vtl/") {
+			c.Next()
+			return
+		}
+
 		// Generate cache key from request path and query string
 		keyParts := []string{c.Request.URL.Path}
 		if c.Request.URL.RawQuery != "" {
diff --git a/backend/internal/common/router/router.go b/backend/internal/common/router/router.go
index c8e9f20..d963935 100644
--- a/backend/internal/common/router/router.go
+++ b/backend/internal/common/router/router.go
@@ -6,6 +6,7 @@ import (
 	"github.com/atlasos/calypso/internal/audit"
 	"github.com/atlasos/calypso/internal/auth"
+	"github.com/atlasos/calypso/internal/backup"
 	"github.com/atlasos/calypso/internal/common/cache"
 	"github.com/atlasos/calypso/internal/common/config"
 	"github.com/atlasos/calypso/internal/common/database"
@@ -207,8 +208,21 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Engine {
 		scstGroup.POST("/targets", scstHandler.CreateTarget)
 		scstGroup.POST("/targets/:id/luns", scstHandler.AddLUN)
 		scstGroup.POST("/targets/:id/initiators", scstHandler.AddInitiator)
+		scstGroup.POST("/targets/:id/enable", scstHandler.EnableTarget)
+
scstGroup.POST("/targets/:id/disable", scstHandler.DisableTarget) + scstGroup.GET("/initiators", scstHandler.ListAllInitiators) + scstGroup.GET("/initiators/:id", scstHandler.GetInitiator) + scstGroup.DELETE("/initiators/:id", scstHandler.RemoveInitiator) + scstGroup.GET("/extents", scstHandler.ListExtents) + scstGroup.POST("/extents", scstHandler.CreateExtent) + scstGroup.DELETE("/extents/:device", scstHandler.DeleteExtent) scstGroup.POST("/config/apply", scstHandler.ApplyConfig) scstGroup.GET("/handlers", scstHandler.ListHandlers) + scstGroup.GET("/portals", scstHandler.ListPortals) + scstGroup.GET("/portals/:id", scstHandler.GetPortal) + scstGroup.POST("/portals", scstHandler.CreatePortal) + scstGroup.PUT("/portals/:id", scstHandler.UpdatePortal) + scstGroup.DELETE("/portals/:id", scstHandler.DeletePortal) } // Physical Tape Libraries @@ -255,18 +269,70 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng systemGroup.POST("/services/:name/restart", systemHandler.RestartService) systemGroup.GET("/services/:name/logs", systemHandler.GetServiceLogs) systemGroup.POST("/support-bundle", systemHandler.GenerateSupportBundle) + systemGroup.GET("/interfaces", systemHandler.ListNetworkInterfaces) } - // IAM (admin only) + // IAM routes - GetUser can be accessed by user viewing own profile or admin iamHandler := iam.NewHandler(db, cfg, log) + protected.GET("/iam/users/:id", iamHandler.GetUser) + + // IAM admin routes iamGroup := protected.Group("/iam") iamGroup.Use(requireRole("admin")) { iamGroup.GET("/users", iamHandler.ListUsers) - iamGroup.GET("/users/:id", iamHandler.GetUser) iamGroup.POST("/users", iamHandler.CreateUser) iamGroup.PUT("/users/:id", iamHandler.UpdateUser) iamGroup.DELETE("/users/:id", iamHandler.DeleteUser) + // Roles routes + iamGroup.GET("/roles", iamHandler.ListRoles) + iamGroup.GET("/roles/:id", iamHandler.GetRole) + iamGroup.POST("/roles", iamHandler.CreateRole) + iamGroup.PUT("/roles/:id", iamHandler.UpdateRole) + iamGroup.DELETE("/roles/:id", iamHandler.DeleteRole) + iamGroup.GET("/roles/:id/permissions", iamHandler.GetRolePermissions) + iamGroup.POST("/roles/:id/permissions", iamHandler.AssignPermissionToRole) + iamGroup.DELETE("/roles/:id/permissions", iamHandler.RemovePermissionFromRole) + + // Permissions routes + iamGroup.GET("/permissions", iamHandler.ListPermissions) + + // User role/group assignment + iamGroup.POST("/users/:id/roles", iamHandler.AssignRoleToUser) + iamGroup.DELETE("/users/:id/roles", iamHandler.RemoveRoleFromUser) + iamGroup.POST("/users/:id/groups", iamHandler.AssignGroupToUser) + iamGroup.DELETE("/users/:id/groups", iamHandler.RemoveGroupFromUser) + + // Groups routes + iamGroup.GET("/groups", iamHandler.ListGroups) + iamGroup.GET("/groups/:id", iamHandler.GetGroup) + iamGroup.POST("/groups", iamHandler.CreateGroup) + iamGroup.PUT("/groups/:id", iamHandler.UpdateGroup) + iamGroup.DELETE("/groups/:id", iamHandler.DeleteGroup) + iamGroup.POST("/groups/:id/users", iamHandler.AddUserToGroup) + iamGroup.DELETE("/groups/:id/users/:user_id", iamHandler.RemoveUserFromGroup) + } + + // Backup Jobs + backupService := backup.NewService(db, log) + // Set up direct connection to Bacula database + // Try common Bacula database names + baculaDBName := "bacula" // Default + if err := backupService.SetBaculaDatabase(cfg.Database, baculaDBName); err != nil { + log.Warn("Failed to connect to Bacula database, trying 'bareos'", "error", err) + // Try 'bareos' as alternative + if err := backupService.SetBaculaDatabase(cfg.Database, 
"bareos"); err != nil { + log.Error("Failed to connect to Bacula database", "error", err, "tried", []string{"bacula", "bareos"}) + // Continue anyway - will fallback to bconsole + } + } + backupHandler := backup.NewHandler(backupService, log) + backupGroup := protected.Group("/backup") + backupGroup.Use(requirePermission("backup", "read")) + { + backupGroup.GET("/jobs", backupHandler.ListJobs) + backupGroup.GET("/jobs/:id", backupHandler.GetJob) + backupGroup.POST("/jobs", requirePermission("backup", "write"), backupHandler.CreateJob) } // Monitoring diff --git a/backend/internal/iam/group.go b/backend/internal/iam/group.go new file mode 100644 index 0000000..b64243b --- /dev/null +++ b/backend/internal/iam/group.go @@ -0,0 +1,218 @@ +package iam + +import ( + "time" + + "github.com/atlasos/calypso/internal/common/database" +) + +// Group represents a user group +type Group struct { + ID string + Name string + Description string + IsSystem bool + CreatedAt time.Time + UpdatedAt time.Time + UserCount int + RoleCount int +} + +// GetGroupByID retrieves a group by ID +func GetGroupByID(db *database.DB, groupID string) (*Group, error) { + query := ` + SELECT id, name, description, is_system, created_at, updated_at + FROM groups + WHERE id = $1 + ` + + var group Group + err := db.QueryRow(query, groupID).Scan( + &group.ID, &group.Name, &group.Description, &group.IsSystem, + &group.CreatedAt, &group.UpdatedAt, + ) + if err != nil { + return nil, err + } + + // Get user count + var userCount int + db.QueryRow("SELECT COUNT(*) FROM user_groups WHERE group_id = $1", groupID).Scan(&userCount) + group.UserCount = userCount + + // Get role count + var roleCount int + db.QueryRow("SELECT COUNT(*) FROM group_roles WHERE group_id = $1", groupID).Scan(&roleCount) + group.RoleCount = roleCount + + return &group, nil +} + +// GetGroupByName retrieves a group by name +func GetGroupByName(db *database.DB, name string) (*Group, error) { + query := ` + SELECT id, name, description, is_system, created_at, updated_at + FROM groups + WHERE name = $1 + ` + + var group Group + err := db.QueryRow(query, name).Scan( + &group.ID, &group.Name, &group.Description, &group.IsSystem, + &group.CreatedAt, &group.UpdatedAt, + ) + if err != nil { + return nil, err + } + + return &group, nil +} + +// GetUserGroups retrieves all groups for a user +func GetUserGroups(db *database.DB, userID string) ([]string, error) { + query := ` + SELECT g.name + FROM groups g + INNER JOIN user_groups ug ON g.id = ug.group_id + WHERE ug.user_id = $1 + ORDER BY g.name + ` + + rows, err := db.Query(query, userID) + if err != nil { + return nil, err + } + defer rows.Close() + + var groups []string + for rows.Next() { + var groupName string + if err := rows.Scan(&groupName); err != nil { + return nil, err + } + groups = append(groups, groupName) + } + + return groups, rows.Err() +} + +// GetGroupUsers retrieves all users in a group +func GetGroupUsers(db *database.DB, groupID string) ([]string, error) { + query := ` + SELECT u.id + FROM users u + INNER JOIN user_groups ug ON u.id = ug.user_id + WHERE ug.group_id = $1 + ORDER BY u.username + ` + + rows, err := db.Query(query, groupID) + if err != nil { + return nil, err + } + defer rows.Close() + + var userIDs []string + for rows.Next() { + var userID string + if err := rows.Scan(&userID); err != nil { + return nil, err + } + userIDs = append(userIDs, userID) + } + + return userIDs, rows.Err() +} + +// GetGroupRoles retrieves all roles for a group +func GetGroupRoles(db *database.DB, groupID string) 
([]string, error) { + query := ` + SELECT r.name + FROM roles r + INNER JOIN group_roles gr ON r.id = gr.role_id + WHERE gr.group_id = $1 + ORDER BY r.name + ` + + rows, err := db.Query(query, groupID) + if err != nil { + return nil, err + } + defer rows.Close() + + var roles []string + for rows.Next() { + var role string + if err := rows.Scan(&role); err != nil { + return nil, err + } + roles = append(roles, role) + } + + return roles, rows.Err() +} + +// AddUserToGroup adds a user to a group +func AddUserToGroup(db *database.DB, userID, groupID, assignedBy string) error { + query := ` + INSERT INTO user_groups (user_id, group_id, assigned_by) + VALUES ($1, $2, $3) + ON CONFLICT (user_id, group_id) DO NOTHING + ` + _, err := db.Exec(query, userID, groupID, assignedBy) + return err +} + +// RemoveUserFromGroup removes a user from a group +func RemoveUserFromGroup(db *database.DB, userID, groupID string) error { + query := `DELETE FROM user_groups WHERE user_id = $1 AND group_id = $2` + _, err := db.Exec(query, userID, groupID) + return err +} + +// AddRoleToGroup adds a role to a group +func AddRoleToGroup(db *database.DB, groupID, roleID string) error { + query := ` + INSERT INTO group_roles (group_id, role_id) + VALUES ($1, $2) + ON CONFLICT (group_id, role_id) DO NOTHING + ` + _, err := db.Exec(query, groupID, roleID) + return err +} + +// RemoveRoleFromGroup removes a role from a group +func RemoveRoleFromGroup(db *database.DB, groupID, roleID string) error { + query := `DELETE FROM group_roles WHERE group_id = $1 AND role_id = $2` + _, err := db.Exec(query, groupID, roleID) + return err +} + +// GetUserRolesFromGroups retrieves all roles for a user via groups +func GetUserRolesFromGroups(db *database.DB, userID string) ([]string, error) { + query := ` + SELECT DISTINCT r.name + FROM roles r + INNER JOIN group_roles gr ON r.id = gr.role_id + INNER JOIN user_groups ug ON gr.group_id = ug.group_id + WHERE ug.user_id = $1 + ORDER BY r.name + ` + + rows, err := db.Query(query, userID) + if err != nil { + return nil, err + } + defer rows.Close() + + var roles []string + for rows.Next() { + var role string + if err := rows.Scan(&role); err != nil { + return nil, err + } + roles = append(roles, role) + } + + return roles, rows.Err() +} diff --git a/backend/internal/iam/handler.go b/backend/internal/iam/handler.go index 226e8ab..7ca67dc 100644 --- a/backend/internal/iam/handler.go +++ b/backend/internal/iam/handler.go @@ -1,6 +1,7 @@ package iam import ( + "database/sql" "fmt" "net/http" "strings" @@ -64,15 +65,22 @@ func (h *Handler) ListUsers(c *gin.Context) { continue } + roles, _ := GetUserRoles(h.db, u.ID) + permissions, _ := GetUserPermissions(h.db, u.ID) + groups, _ := GetUserGroups(h.db, u.ID) + users = append(users, map[string]interface{}{ - "id": u.ID, - "username": u.Username, - "email": u.Email, - "full_name": u.FullName, - "is_active": u.IsActive, - "is_system": u.IsSystem, - "created_at": u.CreatedAt, - "updated_at": u.UpdatedAt, + "id": u.ID, + "username": u.Username, + "email": u.Email, + "full_name": u.FullName, + "is_active": u.IsActive, + "is_system": u.IsSystem, + "roles": roles, + "permissions": permissions, + "groups": groups, + "created_at": u.CreatedAt, + "updated_at": u.UpdatedAt, "last_login_at": u.LastLoginAt, }) } @@ -81,9 +89,45 @@ func (h *Handler) ListUsers(c *gin.Context) { } // GetUser retrieves a single user +// Permission: User can view their own profile, or admin can view any profile func (h *Handler) GetUser(c *gin.Context) { userID := c.Param("id") + // 
Get current authenticated user from context + authUser, exists := c.Get("user") + if !exists { + c.JSON(http.StatusUnauthorized, gin.H{"error": "authentication required"}) + return + } + + currentUser, ok := authUser.(*User) + if !ok { + c.JSON(http.StatusInternalServerError, gin.H{"error": "invalid user context"}) + return + } + + // Check permission: user can view own profile, or admin can view any profile + canView := false + if currentUser.ID == userID { + canView = true + } else { + // Check if current user is admin + roles, err := GetUserRoles(h.db, currentUser.ID) + if err == nil { + for _, role := range roles { + if role == "admin" { + canView = true + break + } + } + } + } + + if !canView { + c.JSON(http.StatusForbidden, gin.H{"error": "insufficient permissions"}) + return + } + user, err := GetUserByID(h.db, userID) if err != nil { c.JSON(http.StatusNotFound, gin.H{"error": "user not found"}) @@ -92,18 +136,21 @@ func (h *Handler) GetUser(c *gin.Context) { roles, _ := GetUserRoles(h.db, userID) permissions, _ := GetUserPermissions(h.db, userID) + groups, _ := GetUserGroups(h.db, userID) c.JSON(http.StatusOK, gin.H{ - "id": user.ID, - "username": user.Username, - "email": user.Email, - "full_name": user.FullName, - "is_active": user.IsActive, - "is_system": user.IsSystem, - "roles": roles, - "permissions": permissions, - "created_at": user.CreatedAt, - "updated_at": user.UpdatedAt, + "id": user.ID, + "username": user.Username, + "email": user.Email, + "full_name": user.FullName, + "is_active": user.IsActive, + "is_system": user.IsSystem, + "roles": roles, + "permissions": permissions, + "groups": groups, + "created_at": user.CreatedAt, + "updated_at": user.UpdatedAt, + "last_login_at": user.LastLoginAt, }) } @@ -152,16 +199,21 @@ func (h *Handler) UpdateUser(c *gin.Context) { userID := c.Param("id") var req struct { - Email *string `json:"email"` - FullName *string `json:"full_name"` - IsActive *bool `json:"is_active"` + Email *string `json:"email"` + FullName *string `json:"full_name"` + IsActive *bool `json:"is_active"` + Roles *[]string `json:"roles"` + Groups *[]string `json:"groups"` } if err := c.ShouldBindJSON(&req); err != nil { + h.logger.Error("Failed to bind JSON", "error", err) c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) return } + h.logger.Info("UpdateUser request received", "user_id", userID, "email", req.Email, "full_name", req.FullName, "is_active", req.IsActive, "roles", req.Roles, "groups", req.Groups) + // Build update query dynamically updates := []string{"updated_at = NOW()"} args := []interface{}{} @@ -183,19 +235,184 @@ func (h *Handler) UpdateUser(c *gin.Context) { argPos++ } - if len(updates) == 1 { + // Allow update if roles or groups are provided, even if no other fields are updated + if len(updates) == 1 && req.Roles == nil && req.Groups == nil { c.JSON(http.StatusBadRequest, gin.H{"error": "no fields to update"}) return } - args = append(args, userID) - query := "UPDATE users SET " + strings.Join(updates, ", ") + fmt.Sprintf(" WHERE id = $%d", argPos) + // Update user basic info if there are any changes + if len(updates) > 1 { + args = append(args, userID) + query := "UPDATE users SET " + strings.Join(updates, ", ") + fmt.Sprintf(" WHERE id = $%d", argPos) + _, err := h.db.Exec(query, args...) + if err != nil { + h.logger.Error("Failed to update user", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to update user"}) + return + } + } - _, err := h.db.Exec(query, args...) 
- if err != nil { - h.logger.Error("Failed to update user", "error", err) - c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to update user"}) - return + // Get current user ID from context for audit + authUser, _ := c.Get("user") + currentUser := authUser.(*User) + + // Update roles if provided + if req.Roles != nil { + h.logger.Info("Updating user roles", "user_id", userID, "roles", *req.Roles) + currentRoles, err := GetUserRoles(h.db, userID) + if err != nil { + h.logger.Error("Failed to get current roles for user", "user_id", userID, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process user roles"}) + return + } + + rolesToAdd := []string{} + rolesToRemove := []string{} + + // Find roles to add + for _, newRole := range *req.Roles { + found := false + for _, currentRole := range currentRoles { + if newRole == currentRole { + found = true + break + } + } + if !found { + rolesToAdd = append(rolesToAdd, newRole) + } + } + + // Find roles to remove + for _, currentRole := range currentRoles { + found := false + for _, newRole := range *req.Roles { + if currentRole == newRole { + found = true + break + } + } + if !found { + rolesToRemove = append(rolesToRemove, currentRole) + } + } + + // Add new roles + for _, roleName := range rolesToAdd { + roleID, err := GetRoleIDByName(h.db, roleName) + if err != nil { + if err == sql.ErrNoRows { + h.logger.Warn("Attempted to add non-existent role to user", "user_id", userID, "role_name", roleName) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("role '%s' not found", roleName)}) + return + } + h.logger.Error("Failed to get role ID by name", "role_name", roleName, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process roles"}) + return + } + if err := AddUserRole(h.db, userID, roleID, currentUser.ID); err != nil { + h.logger.Error("Failed to add role to user", "user_id", userID, "role_id", roleID, "error", err) + // Don't return early, continue with other roles + continue + } + h.logger.Info("Role added to user", "user_id", userID, "role_name", roleName) + } + + // Remove old roles + for _, roleName := range rolesToRemove { + roleID, err := GetRoleIDByName(h.db, roleName) + if err != nil { + // This case should be rare, but handle it defensively + h.logger.Error("Failed to get role ID for role to be removed", "role_name", roleName, "error", err) + continue + } + if err := RemoveUserRole(h.db, userID, roleID); err != nil { + h.logger.Error("Failed to remove role from user", "user_id", userID, "role_id", roleID, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove role"}) + return + } + h.logger.Info("Role removed from user", "user_id", userID, "role_name", roleName) + } + } + + // Update groups if provided + if req.Groups != nil { + h.logger.Info("Updating user groups", "user_id", userID, "groups", *req.Groups) + currentGroups, err := GetUserGroups(h.db, userID) + if err != nil { + h.logger.Error("Failed to get current groups for user", "user_id", userID, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process user groups"}) + return + } + + groupsToAdd := []string{} + groupsToRemove := []string{} + + // Find groups to add + for _, newGroup := range *req.Groups { + found := false + for _, currentGroup := range currentGroups { + if newGroup == currentGroup { + found = true + break + } + } + if !found { + groupsToAdd = append(groupsToAdd, newGroup) + } + } + + // Find groups to remove + for _, 
currentGroup := range currentGroups { + found := false + for _, newGroup := range *req.Groups { + if currentGroup == newGroup { + found = true + break + } + } + if !found { + groupsToRemove = append(groupsToRemove, currentGroup) + } + } + + // Add new groups + for _, groupName := range groupsToAdd { + group, err := GetGroupByName(h.db, groupName) + if err != nil { + if err == sql.ErrNoRows { + h.logger.Warn("Attempted to add user to non-existent group", "user_id", userID, "group_name", groupName) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("group '%s' not found", groupName)}) + return + } + h.logger.Error("Failed to get group by name", "group_name", groupName, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process groups"}) + return + } + if err := AddUserToGroup(h.db, userID, group.ID, currentUser.ID); err != nil { + h.logger.Error("Failed to add user to group", "user_id", userID, "group_id", group.ID, "error", err) + // Don't return early, continue with other groups + continue + } + h.logger.Info("User added to group", "user_id", userID, "group_name", groupName) + } + + // Remove old groups + for _, groupName := range groupsToRemove { + group, err := GetGroupByName(h.db, groupName) + if err != nil { + // This case should be rare, but handle it defensively + h.logger.Error("Failed to get group ID for group to be removed", "group_name", groupName, "error", err) + continue + } + if err := RemoveUserFromGroup(h.db, userID, group.ID); err != nil { + h.logger.Error("Failed to remove user from group", "user_id", userID, "group_id", group.ID, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove user from group"}) + return + } + h.logger.Info("User removed from group", "user_id", userID, "group_name", groupName) + } } h.logger.Info("User updated", "user_id", userID) @@ -230,3 +447,726 @@ func (h *Handler) DeleteUser(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"message": "user deleted successfully"}) } +// ListGroups lists all groups +func (h *Handler) ListGroups(c *gin.Context) { + query := ` + SELECT g.id, g.name, g.description, g.is_system, g.created_at, g.updated_at, + COUNT(DISTINCT ug.user_id) as user_count, + COUNT(DISTINCT gr.role_id) as role_count + FROM groups g + LEFT JOIN user_groups ug ON g.id = ug.group_id + LEFT JOIN group_roles gr ON g.id = gr.group_id + GROUP BY g.id, g.name, g.description, g.is_system, g.created_at, g.updated_at + ORDER BY g.name + ` + + rows, err := h.db.Query(query) + if err != nil { + h.logger.Error("Failed to list groups", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list groups"}) + return + } + defer rows.Close() + + var groups []map[string]interface{} + for rows.Next() { + var g struct { + ID string + Name string + Description sql.NullString + IsSystem bool + CreatedAt string + UpdatedAt string + UserCount int + RoleCount int + } + if err := rows.Scan(&g.ID, &g.Name, &g.Description, &g.IsSystem, + &g.CreatedAt, &g.UpdatedAt, &g.UserCount, &g.RoleCount); err != nil { + h.logger.Error("Failed to scan group", "error", err) + continue + } + + groups = append(groups, map[string]interface{}{ + "id": g.ID, + "name": g.Name, + "description": g.Description.String, + "is_system": g.IsSystem, + "user_count": g.UserCount, + "role_count": g.RoleCount, + "created_at": g.CreatedAt, + "updated_at": g.UpdatedAt, + }) + } + + c.JSON(http.StatusOK, gin.H{"groups": groups}) +} + +// GetGroup retrieves a single group +func (h *Handler) GetGroup(c 
*gin.Context) { + groupID := c.Param("id") + + group, err := GetGroupByID(h.db, groupID) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": "group not found"}) + return + } + + users, _ := GetGroupUsers(h.db, groupID) + roles, _ := GetGroupRoles(h.db, groupID) + + c.JSON(http.StatusOK, gin.H{ + "id": group.ID, + "name": group.Name, + "description": group.Description, + "is_system": group.IsSystem, + "user_count": group.UserCount, + "role_count": group.RoleCount, + "users": users, + "roles": roles, + "created_at": group.CreatedAt, + "updated_at": group.UpdatedAt, + }) +} + +// CreateGroup creates a new group +func (h *Handler) CreateGroup(c *gin.Context) { + var req struct { + Name string `json:"name" binding:"required"` + Description string `json:"description"` + } + + if err := c.ShouldBindJSON(&req); err != nil { + h.logger.Error("Invalid request to create group", "error", err) + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request: " + err.Error()}) + return + } + + // Trim whitespace + req.Name = strings.TrimSpace(req.Name) + if req.Name == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "name is required"}) + return + } + + description := strings.TrimSpace(req.Description) + + query := ` + INSERT INTO groups (name, description) + VALUES ($1, $2) + RETURNING id + ` + + var groupID string + err := h.db.QueryRow(query, req.Name, description).Scan(&groupID) + if err != nil { + // Check if it's a unique constraint violation + if strings.Contains(err.Error(), "duplicate key") || strings.Contains(err.Error(), "unique constraint") { + h.logger.Error("Group name already exists", "name", req.Name, "error", err) + c.JSON(http.StatusConflict, gin.H{"error": "group name already exists"}) + return + } + h.logger.Error("Failed to create group", "error", err, "name", req.Name) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create group: " + err.Error()}) + return + } + + h.logger.Info("Group created successfully", "group_id", groupID, "name", req.Name) + c.JSON(http.StatusCreated, gin.H{"id": groupID, "name": req.Name}) +} + +// UpdateGroup updates an existing group +func (h *Handler) UpdateGroup(c *gin.Context) { + groupID := c.Param("id") + + // Check if group is system group + var isSystem bool + err := h.db.QueryRow("SELECT is_system FROM groups WHERE id = $1", groupID).Scan(&isSystem) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": "group not found"}) + return + } + + if isSystem { + c.JSON(http.StatusForbidden, gin.H{"error": "cannot modify system group"}) + return + } + + var req struct { + Name string `json:"name"` + Description string `json:"description"` + } + + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + // Build update query dynamically + var updates []string + var args []interface{} + argIndex := 1 + + if req.Name != "" { + updates = append(updates, fmt.Sprintf("name = $%d", argIndex)) + args = append(args, req.Name) + argIndex++ + } + + if req.Description != "" { + updates = append(updates, fmt.Sprintf("description = $%d", argIndex)) + args = append(args, req.Description) + argIndex++ + } + + if len(updates) == 0 { + c.JSON(http.StatusBadRequest, gin.H{"error": "no fields to update"}) + return + } + + updates = append(updates, "updated_at = NOW()") + args = append(args, groupID) + + query := fmt.Sprintf("UPDATE groups SET %s WHERE id = $%d", strings.Join(updates, ", "), argIndex) + + _, err = h.db.Exec(query, args...)
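+ // For example (values illustrative), a body supplying both fields builds: + // UPDATE groups SET name = $1, description = $2, updated_at = NOW() WHERE id = $3 + // with args = [name, description, groupID].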
+ if err != nil { + h.logger.Error("Failed to update group", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to update group"}) + return + } + + h.logger.Info("Group updated", "group_id", groupID) + c.JSON(http.StatusOK, gin.H{"message": "group updated successfully"}) +} + +// DeleteGroup deletes a group +func (h *Handler) DeleteGroup(c *gin.Context) { + groupID := c.Param("id") + + // Check if group is system group + var isSystem bool + err := h.db.QueryRow("SELECT is_system FROM groups WHERE id = $1", groupID).Scan(&isSystem) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": "group not found"}) + return + } + + if isSystem { + c.JSON(http.StatusForbidden, gin.H{"error": "cannot delete system group"}) + return + } + + _, err = h.db.Exec("DELETE FROM groups WHERE id = $1", groupID) + if err != nil { + h.logger.Error("Failed to delete group", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to delete group"}) + return + } + + h.logger.Info("Group deleted", "group_id", groupID) + c.JSON(http.StatusOK, gin.H{"message": "group deleted successfully"}) +} + +// AddUserToGroup adds a user to a group +func (h *Handler) AddUserToGroup(c *gin.Context) { + groupID := c.Param("id") + var req struct { + UserID string `json:"user_id" binding:"required"` + } + + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + // Get current user ID from context + authUser, _ := c.Get("user") + currentUser := authUser.(*User) + + err := AddUserToGroup(h.db, req.UserID, groupID, currentUser.ID) + if err != nil { + h.logger.Error("Failed to add user to group", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to add user to group"}) + return + } + + h.logger.Info("User added to group", "user_id", req.UserID, "group_id", groupID) + c.JSON(http.StatusOK, gin.H{"message": "user added to group successfully"}) +} + +// RemoveUserFromGroup removes a user from a group +func (h *Handler) RemoveUserFromGroup(c *gin.Context) { + groupID := c.Param("id") + userID := c.Param("user_id") + + err := RemoveUserFromGroup(h.db, userID, groupID) + if err != nil { + h.logger.Error("Failed to remove user from group", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove user from group"}) + return + } + + h.logger.Info("User removed from group", "user_id", userID, "group_id", groupID) + c.JSON(http.StatusOK, gin.H{"message": "user removed from group successfully"}) +} + +// AssignRoleToUser assigns a role to a user +func (h *Handler) AssignRoleToUser(c *gin.Context) { + userID := c.Param("id") + var req struct { + RoleName string `json:"role_name" binding:"required"` + } + + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + // Get role ID by name + roleID, err := GetRoleIDByName(h.db, req.RoleName) + if err != nil { + if err == sql.ErrNoRows { + c.JSON(http.StatusNotFound, gin.H{"error": "role not found"}) + return + } + h.logger.Error("Failed to get role", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to assign role"}) + return + } + + // Get current user ID from context + authUser, _ := c.Get("user") + currentUser := authUser.(*User) + + err = AddUserRole(h.db, userID, roleID, currentUser.ID) + if err != nil { + h.logger.Error("Failed to assign role to user", "error", err) + c.JSON(http.StatusInternalServerError, 
gin.H{"error": "failed to assign role"}) + return + } + + h.logger.Info("Role assigned to user", "user_id", userID, "role", req.RoleName) + c.JSON(http.StatusOK, gin.H{"message": "role assigned successfully"}) +} + +// RemoveRoleFromUser removes a role from a user +func (h *Handler) RemoveRoleFromUser(c *gin.Context) { + userID := c.Param("id") + + // Get role_name from query parameter + roleName := c.Query("role_name") + if roleName == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "role_name is required"}) + return + } + + // Get role ID by name + roleID, err := GetRoleIDByName(h.db, roleName) + if err != nil { + if err == sql.ErrNoRows { + c.JSON(http.StatusNotFound, gin.H{"error": "role not found"}) + return + } + h.logger.Error("Failed to get role", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove role"}) + return + } + + err = RemoveUserRole(h.db, userID, roleID) + if err != nil { + h.logger.Error("Failed to remove role from user", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove role"}) + return + } + + h.logger.Info("Role removed from user", "user_id", userID, "role", roleName) + c.JSON(http.StatusOK, gin.H{"message": "role removed successfully"}) +} + +// AssignGroupToUser assigns a group to a user +func (h *Handler) AssignGroupToUser(c *gin.Context) { + userID := c.Param("id") + var req struct { + GroupName string `json:"group_name" binding:"required"` + } + + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + // Get group ID by name + group, err := GetGroupByName(h.db, req.GroupName) + if err != nil { + if err == sql.ErrNoRows { + c.JSON(http.StatusNotFound, gin.H{"error": "group not found"}) + return + } + h.logger.Error("Failed to get group", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to assign group"}) + return + } + groupID := group.ID + + // Get current user ID from context + authUser, _ := c.Get("user") + currentUser := authUser.(*User) + + err = AddUserToGroup(h.db, userID, groupID, currentUser.ID) + if err != nil { + h.logger.Error("Failed to assign group to user", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to assign group"}) + return + } + + h.logger.Info("Group assigned to user", "user_id", userID, "group", req.GroupName) + c.JSON(http.StatusOK, gin.H{"message": "group assigned successfully"}) +} + +// RemoveGroupFromUser removes a group from a user +func (h *Handler) RemoveGroupFromUser(c *gin.Context) { + userID := c.Param("id") + + // Get group_name from query parameter + groupName := c.Query("group_name") + if groupName == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "group_name is required"}) + return + } + + // Get group ID by name + group, err := GetGroupByName(h.db, groupName) + if err != nil { + if err == sql.ErrNoRows { + c.JSON(http.StatusNotFound, gin.H{"error": "group not found"}) + return + } + h.logger.Error("Failed to get group", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove group"}) + return + } + groupID := group.ID + + err = RemoveUserFromGroup(h.db, userID, groupID) + if err != nil { + h.logger.Error("Failed to remove group from user", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove group"}) + return + } + + h.logger.Info("Group removed from user", "user_id", userID, "group", groupName) + c.JSON(http.StatusOK, gin.H{"message": "group 
removed successfully"}) +} + +// ListRoles lists all available roles +func (h *Handler) ListRoles(c *gin.Context) { + roles, err := ListRoles(h.db) + if err != nil { + h.logger.Error("Failed to list roles", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list roles"}) + return + } + + var result []map[string]interface{} + for _, role := range roles { + // Get user count for this role + userIDs, _ := GetRoleUsers(h.db, role.ID) + userCount := len(userIDs) + + result = append(result, map[string]interface{}{ + "id": role.ID, + "name": role.Name, + "description": role.Description, + "is_system": role.IsSystem, + "user_count": userCount, + "created_at": role.CreatedAt, + "updated_at": role.UpdatedAt, + }) + } + + c.JSON(http.StatusOK, gin.H{"roles": result}) +} + +// GetRole retrieves a single role +func (h *Handler) GetRole(c *gin.Context) { + roleID := c.Param("id") + + role, err := GetRoleByID(h.db, roleID) + if err != nil { + if err == sql.ErrNoRows { + c.JSON(http.StatusNotFound, gin.H{"error": "role not found"}) + return + } + h.logger.Error("Failed to get role", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get role"}) + return + } + + // Get user count + userIDs, _ := GetRoleUsers(h.db, role.ID) + + c.JSON(http.StatusOK, gin.H{ + "id": role.ID, + "name": role.Name, + "description": role.Description, + "is_system": role.IsSystem, + "user_count": len(userIDs), + "created_at": role.CreatedAt, + "updated_at": role.UpdatedAt, + }) +} + +// CreateRole creates a new role +func (h *Handler) CreateRole(c *gin.Context) { + var req struct { + Name string `json:"name" binding:"required"` + Description string `json:"description"` + } + + if err := c.ShouldBindJSON(&req); err != nil { + h.logger.Error("Invalid request to create role", "error", err) + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request: " + err.Error()}) + return + } + + // Trim whitespace + req.Name = strings.TrimSpace(req.Name) + if req.Name == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "name is required"}) + return + } + + description := strings.TrimSpace(req.Description) + + role, err := CreateRole(h.db, req.Name, description) + if err != nil { + // Check if it's a unique constraint violation + if strings.Contains(err.Error(), "duplicate key") || strings.Contains(err.Error(), "unique constraint") { + h.logger.Error("Role name already exists", "name", req.Name, "error", err) + c.JSON(http.StatusConflict, gin.H{"error": "role name already exists"}) + return + } + h.logger.Error("Failed to create role", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create role"}) + return + } + + h.logger.Info("Role created", "role_id", role.ID, "name", role.Name) + c.JSON(http.StatusCreated, gin.H{"id": role.ID, "name": role.Name}) +} + +// UpdateRole updates an existing role +func (h *Handler) UpdateRole(c *gin.Context) { + roleID := c.Param("id") + + // Check if role is system role + var isSystem bool + err := h.db.QueryRow("SELECT is_system FROM roles WHERE id = $1", roleID).Scan(&isSystem) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": "role not found"}) + return + } + + if isSystem { + c.JSON(http.StatusForbidden, gin.H{"error": "cannot modify system role"}) + return + } + + var req struct { + Name *string `json:"name"` + Description *string `json:"description"` + } + + if err := c.ShouldBindJSON(&req); err != nil { 
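+ // Bind errors here mean malformed JSON; keys omitted from the body simply + // leave the pointer fields above nil and are skipped by the checks below.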
+ c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + // Build update query dynamically + updates := []string{"updated_at = NOW()"} + args := []interface{}{} + argIndex := 1 + + if req.Name != nil { + name := strings.TrimSpace(*req.Name) + if name == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "name cannot be empty"}) + return + } + updates = append(updates, fmt.Sprintf("name = $%d", argIndex)) + args = append(args, name) + argIndex++ + } + + if req.Description != nil { + description := strings.TrimSpace(*req.Description) + updates = append(updates, fmt.Sprintf("description = $%d", argIndex)) + args = append(args, description) + argIndex++ + } + + if len(updates) == 1 { + c.JSON(http.StatusBadRequest, gin.H{"error": "no fields to update"}) + return + } + + args = append(args, roleID) + query := "UPDATE roles SET " + strings.Join(updates, ", ") + fmt.Sprintf(" WHERE id = $%d", argIndex) + + _, err = h.db.Exec(query, args...) + if err != nil { + // Check if it's a unique constraint violation + if strings.Contains(err.Error(), "duplicate key") || strings.Contains(err.Error(), "unique constraint") { + h.logger.Error("Role name already exists", "error", err) + c.JSON(http.StatusConflict, gin.H{"error": "role name already exists"}) + return + } + h.logger.Error("Failed to update role", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to update role"}) + return + } + + h.logger.Info("Role updated", "role_id", roleID) + c.JSON(http.StatusOK, gin.H{"message": "role updated successfully"}) +} + +// DeleteRole deletes a role +func (h *Handler) DeleteRole(c *gin.Context) { + roleID := c.Param("id") + + // Check if role is system role + var isSystem bool + err := h.db.QueryRow("SELECT is_system FROM roles WHERE id = $1", roleID).Scan(&isSystem) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": "role not found"}) + return + } + + if isSystem { + c.JSON(http.StatusForbidden, gin.H{"error": "cannot delete system role"}) + return + } + + _, err = h.db.Exec("DELETE FROM roles WHERE id = $1", roleID) + if err != nil { + h.logger.Error("Failed to delete role", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to delete role"}) + return + } + + h.logger.Info("Role deleted", "role_id", roleID) + c.JSON(http.StatusOK, gin.H{"message": "role deleted successfully"}) +} + +// GetRolePermissions retrieves all permissions for a role +func (h *Handler) GetRolePermissions(c *gin.Context) { + roleID := c.Param("id") + + permissions, err := GetRolePermissions(h.db, roleID) + if err != nil { + h.logger.Error("Failed to get role permissions", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get role permissions"}) + return + } + + c.JSON(http.StatusOK, gin.H{"permissions": permissions}) +} + +// AssignPermissionToRole assigns a permission to a role +func (h *Handler) AssignPermissionToRole(c *gin.Context) { + roleID := c.Param("id") + var req struct { + PermissionName string `json:"permission_name" binding:"required"` + } + + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + // Get permission ID by name + permissionID, err := GetPermissionIDByName(h.db, req.PermissionName) + if err != nil { + if err == sql.ErrNoRows { + c.JSON(http.StatusNotFound, gin.H{"error": "permission not found"}) + return + } + h.logger.Error("Failed to get permission", "error", err) + c.JSON(http.StatusInternalServerError, 
gin.H{"error": "failed to assign permission"}) + return + } + + err = AddPermissionToRole(h.db, roleID, permissionID) + if err != nil { + h.logger.Error("Failed to assign permission to role", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to assign permission"}) + return + } + + h.logger.Info("Permission assigned to role", "role_id", roleID, "permission", req.PermissionName) + c.JSON(http.StatusOK, gin.H{"message": "permission assigned successfully"}) +} + +// RemovePermissionFromRole removes a permission from a role +func (h *Handler) RemovePermissionFromRole(c *gin.Context) { + roleID := c.Param("id") + + // For DELETE requests, we can get permission_name from query param or body + var req struct { + PermissionName string `json:"permission_name"` + } + + // Try to get from query param first + permissionName := c.Query("permission_name") + if permissionName == "" { + // Try to get from body + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "permission_name is required"}) + return + } + permissionName = req.PermissionName + } + + if permissionName == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "permission_name is required"}) + return + } + + // Get permission ID by name + permissionID, err := GetPermissionIDByName(h.db, permissionName) + if err != nil { + if err == sql.ErrNoRows { + c.JSON(http.StatusNotFound, gin.H{"error": "permission not found"}) + return + } + h.logger.Error("Failed to get permission", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove permission"}) + return + } + + err = RemovePermissionFromRole(h.db, roleID, permissionID) + if err != nil { + h.logger.Error("Failed to remove permission from role", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove permission"}) + return + } + + h.logger.Info("Permission removed from role", "role_id", roleID, "permission", permissionName) + c.JSON(http.StatusOK, gin.H{"message": "permission removed successfully"}) +} + +// ListPermissions lists all available permissions +func (h *Handler) ListPermissions(c *gin.Context) { + permissions, err := ListPermissions(h.db) + if err != nil { + h.logger.Error("Failed to list permissions", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list permissions"}) + return + } + + c.JSON(http.StatusOK, gin.H{"permissions": permissions}) +} diff --git a/backend/internal/iam/role.go b/backend/internal/iam/role.go new file mode 100644 index 0000000..85c4424 --- /dev/null +++ b/backend/internal/iam/role.go @@ -0,0 +1,237 @@ +package iam + +import ( + "time" + + "github.com/atlasos/calypso/internal/common/database" +) + +// Role represents a system role +type Role struct { + ID string + Name string + Description string + IsSystem bool + CreatedAt time.Time + UpdatedAt time.Time +} + +// GetRoleByID retrieves a role by ID +func GetRoleByID(db *database.DB, roleID string) (*Role, error) { + query := ` + SELECT id, name, description, is_system, created_at, updated_at + FROM roles + WHERE id = $1 + ` + + var role Role + err := db.QueryRow(query, roleID).Scan( + &role.ID, &role.Name, &role.Description, &role.IsSystem, + &role.CreatedAt, &role.UpdatedAt, + ) + if err != nil { + return nil, err + } + + return &role, nil +} + +// GetRoleByName retrieves a role by name +func GetRoleByName(db *database.DB, name string) (*Role, error) { + query := ` + SELECT id, name, description, is_system, created_at, updated_at + FROM roles + 
WHERE name = $1 + ` + + var role Role + err := db.QueryRow(query, name).Scan( + &role.ID, &role.Name, &role.Description, &role.IsSystem, + &role.CreatedAt, &role.UpdatedAt, + ) + if err != nil { + return nil, err + } + + return &role, nil +} + +// ListRoles retrieves all roles +func ListRoles(db *database.DB) ([]*Role, error) { + query := ` + SELECT id, name, description, is_system, created_at, updated_at + FROM roles + ORDER BY name + ` + + rows, err := db.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + var roles []*Role + for rows.Next() { + var role Role + if err := rows.Scan( + &role.ID, &role.Name, &role.Description, &role.IsSystem, + &role.CreatedAt, &role.UpdatedAt, + ); err != nil { + return nil, err + } + roles = append(roles, &role) + } + + return roles, rows.Err() +} + +// CreateRole creates a new role +func CreateRole(db *database.DB, name, description string) (*Role, error) { + query := ` + INSERT INTO roles (name, description) + VALUES ($1, $2) + RETURNING id, name, description, is_system, created_at, updated_at + ` + + var role Role + err := db.QueryRow(query, name, description).Scan( + &role.ID, &role.Name, &role.Description, &role.IsSystem, + &role.CreatedAt, &role.UpdatedAt, + ) + if err != nil { + return nil, err + } + + return &role, nil +} + +// UpdateRole updates an existing role +func UpdateRole(db *database.DB, roleID, name, description string) error { + query := ` + UPDATE roles + SET name = $1, description = $2, updated_at = NOW() + WHERE id = $3 + ` + _, err := db.Exec(query, name, description, roleID) + return err +} + +// DeleteRole deletes a role +func DeleteRole(db *database.DB, roleID string) error { + query := `DELETE FROM roles WHERE id = $1` + _, err := db.Exec(query, roleID) + return err +} + +// GetRoleUsers retrieves all users with a specific role +func GetRoleUsers(db *database.DB, roleID string) ([]string, error) { + query := ` + SELECT u.id + FROM users u + INNER JOIN user_roles ur ON u.id = ur.user_id + WHERE ur.role_id = $1 + ORDER BY u.username + ` + + rows, err := db.Query(query, roleID) + if err != nil { + return nil, err + } + defer rows.Close() + + var userIDs []string + for rows.Next() { + var userID string + if err := rows.Scan(&userID); err != nil { + return nil, err + } + userIDs = append(userIDs, userID) + } + + return userIDs, rows.Err() +} + +// GetRolePermissions retrieves all permissions for a role +func GetRolePermissions(db *database.DB, roleID string) ([]string, error) { + query := ` + SELECT p.name + FROM permissions p + INNER JOIN role_permissions rp ON p.id = rp.permission_id + WHERE rp.role_id = $1 + ORDER BY p.name + ` + + rows, err := db.Query(query, roleID) + if err != nil { + return nil, err + } + defer rows.Close() + + var permissions []string + for rows.Next() { + var perm string + if err := rows.Scan(&perm); err != nil { + return nil, err + } + permissions = append(permissions, perm) + } + + return permissions, rows.Err() +} + +// AddPermissionToRole assigns a permission to a role +func AddPermissionToRole(db *database.DB, roleID, permissionID string) error { + query := ` + INSERT INTO role_permissions (role_id, permission_id) + VALUES ($1, $2) + ON CONFLICT (role_id, permission_id) DO NOTHING + ` + _, err := db.Exec(query, roleID, permissionID) + return err +} + +// RemovePermissionFromRole removes a permission from a role +func RemovePermissionFromRole(db *database.DB, roleID, permissionID string) error { + query := `DELETE FROM role_permissions WHERE role_id = $1 AND permission_id = $2` 
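+ // Deleting a mapping that does not exist is a no-op, mirroring the + // ON CONFLICT DO NOTHING insert above, so both helpers are idempotent + // and safe to retry.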
+ _, err := db.Exec(query, roleID, permissionID) + return err +} + +// GetPermissionIDByName retrieves a permission ID by name +func GetPermissionIDByName(db *database.DB, permissionName string) (string, error) { + var permissionID string + err := db.QueryRow("SELECT id FROM permissions WHERE name = $1", permissionName).Scan(&permissionID) + return permissionID, err +} + +// ListPermissions retrieves all permissions +func ListPermissions(db *database.DB) ([]map[string]interface{}, error) { + query := ` + SELECT id, name, resource, action, description + FROM permissions + ORDER BY resource, action + ` + + rows, err := db.Query(query) + if err != nil { + return nil, err + } + defer rows.Close() + + var permissions []map[string]interface{} + for rows.Next() { + var id, name, resource, action, description string + if err := rows.Scan(&id, &name, &resource, &action, &description); err != nil { + return nil, err + } + permissions = append(permissions, map[string]interface{}{ + "id": id, + "name": name, + "resource": resource, + "action": action, + "description": description, + }) + } + + return permissions, rows.Err() +} diff --git a/backend/internal/iam/user.go b/backend/internal/iam/user.go index 6512166..8bea90e 100644 --- a/backend/internal/iam/user.go +++ b/backend/internal/iam/user.go @@ -126,3 +126,27 @@ func GetUserPermissions(db *database.DB, userID string) ([]string, error) { return permissions, rows.Err() } +// AddUserRole assigns a role to a user +func AddUserRole(db *database.DB, userID, roleID, assignedBy string) error { + query := ` + INSERT INTO user_roles (user_id, role_id, assigned_by) + VALUES ($1, $2, $3) + ON CONFLICT (user_id, role_id) DO NOTHING + ` + _, err := db.Exec(query, userID, roleID, assignedBy) + return err +} + +// RemoveUserRole removes a role from a user +func RemoveUserRole(db *database.DB, userID, roleID string) error { + query := `DELETE FROM user_roles WHERE user_id = $1 AND role_id = $2` + _, err := db.Exec(query, userID, roleID) + return err +} + +// GetRoleIDByName retrieves a role ID by name +func GetRoleIDByName(db *database.DB, roleName string) (string, error) { + var roleID string + err := db.QueryRow("SELECT id FROM roles WHERE name = $1", roleName).Scan(&roleID) + return roleID, err +} diff --git a/backend/internal/monitoring/metrics.go b/backend/internal/monitoring/metrics.go index 075c28e..4df10e5 100644 --- a/backend/internal/monitoring/metrics.go +++ b/backend/internal/monitoring/metrics.go @@ -1,10 +1,14 @@ package monitoring import ( + "bufio" "context" "database/sql" "fmt" + "os" "runtime" + "strconv" + "strings" "time" "github.com/atlasos/calypso/internal/common/database" @@ -13,14 +17,14 @@ import ( // Metrics represents system metrics type Metrics struct { - System SystemMetrics `json:"system"` - Storage StorageMetrics `json:"storage"` - SCST SCSTMetrics `json:"scst"` - Tape TapeMetrics `json:"tape"` - VTL VTLMetrics `json:"vtl"` - Tasks TaskMetrics `json:"tasks"` - API APIMetrics `json:"api"` - CollectedAt time.Time `json:"collected_at"` + System SystemMetrics `json:"system"` + Storage StorageMetrics `json:"storage"` + SCST SCSTMetrics `json:"scst"` + Tape TapeMetrics `json:"tape"` + VTL VTLMetrics `json:"vtl"` + Tasks TaskMetrics `json:"tasks"` + API APIMetrics `json:"api"` + CollectedAt time.Time `json:"collected_at"` } // SystemMetrics represents system-level metrics @@ -37,11 +41,11 @@ type SystemMetrics struct { // StorageMetrics represents storage metrics type StorageMetrics struct { - TotalDisks int `json:"total_disks"` - 
TotalRepositories int `json:"total_repositories"` - TotalCapacityBytes int64 `json:"total_capacity_bytes"` - UsedCapacityBytes int64 `json:"used_capacity_bytes"` - AvailableBytes int64 `json:"available_bytes"` + TotalDisks int `json:"total_disks"` + TotalRepositories int `json:"total_repositories"` + TotalCapacityBytes int64 `json:"total_capacity_bytes"` + UsedCapacityBytes int64 `json:"used_capacity_bytes"` + AvailableBytes int64 `json:"available_bytes"` UsagePercent float64 `json:"usage_percent"` } @@ -72,28 +76,43 @@ type VTLMetrics struct { // TaskMetrics represents task execution metrics type TaskMetrics struct { - TotalTasks int `json:"total_tasks"` - PendingTasks int `json:"pending_tasks"` - RunningTasks int `json:"running_tasks"` - CompletedTasks int `json:"completed_tasks"` - FailedTasks int `json:"failed_tasks"` - AvgDurationSec float64 `json:"avg_duration_seconds"` + TotalTasks int `json:"total_tasks"` + PendingTasks int `json:"pending_tasks"` + RunningTasks int `json:"running_tasks"` + CompletedTasks int `json:"completed_tasks"` + FailedTasks int `json:"failed_tasks"` + AvgDurationSec float64 `json:"avg_duration_seconds"` } // APIMetrics represents API metrics type APIMetrics struct { - TotalRequests int64 `json:"total_requests"` - RequestsPerSec float64 `json:"requests_per_second"` - ErrorRate float64 `json:"error_rate"` - AvgLatencyMs float64 `json:"avg_latency_ms"` - ActiveConnections int `json:"active_connections"` + TotalRequests int64 `json:"total_requests"` + RequestsPerSec float64 `json:"requests_per_second"` + ErrorRate float64 `json:"error_rate"` + AvgLatencyMs float64 `json:"avg_latency_ms"` + ActiveConnections int `json:"active_connections"` } // MetricsService collects and provides system metrics type MetricsService struct { - db *database.DB - logger *logger.Logger - startTime time.Time + db *database.DB + logger *logger.Logger + startTime time.Time + lastCPU *cpuStats // For CPU usage calculation + lastCPUTime time.Time +} + +// cpuStats represents CPU statistics from /proc/stat +type cpuStats struct { + user uint64 + nice uint64 + system uint64 + idle uint64 + iowait uint64 + irq uint64 + softirq uint64 + steal uint64 + guest uint64 } // NewMetricsService creates a new metrics service @@ -115,6 +134,8 @@ func (s *MetricsService) CollectMetrics(ctx context.Context) (*Metrics, error) { sysMetrics, err := s.collectSystemMetrics(ctx) if err != nil { s.logger.Error("Failed to collect system metrics", "error", err) + // Set default/zero values if collection fails + metrics.System = SystemMetrics{} } else { metrics.System = *sysMetrics } @@ -167,21 +188,17 @@ func (s *MetricsService) CollectMetrics(ctx context.Context) (*Metrics, error) { // collectSystemMetrics collects system-level metrics func (s *MetricsService) collectSystemMetrics(ctx context.Context) (*SystemMetrics, error) { - var m runtime.MemStats - runtime.ReadMemStats(&m) + // Get system memory from /proc/meminfo + memoryTotal, memoryUsed, memoryPercent := s.getSystemMemory() - // Get memory info - memoryUsed := int64(m.Alloc) - memoryTotal := int64(m.Sys) - memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100 + // Get CPU usage from /proc/stat + cpuUsage := s.getCPUUsage() - // Uptime - uptime := time.Since(s.startTime).Seconds() + // Get system uptime from /proc/uptime + uptime := s.getSystemUptime() - // CPU and disk would require external tools or system calls - // For now, we'll use placeholders metrics := &SystemMetrics{ - CPUUsagePercent: 0.0, // Would need to read from /proc/stat + 
CPUUsagePercent: cpuUsage, MemoryUsed: memoryUsed, MemoryTotal: memoryTotal, MemoryPercent: memoryPercent, @@ -268,7 +285,7 @@ func (s *MetricsService) collectSCSTMetrics(ctx context.Context) (*SCSTMetrics, TotalTargets: totalTargets, TotalLUNs: totalLUNs, TotalInitiators: totalInitiators, - ActiveTargets: activeTargets, + ActiveTargets: activeTargets, }, nil } @@ -403,3 +420,232 @@ func (s *MetricsService) collectTaskMetrics(ctx context.Context) (*TaskMetrics, }, nil } +// getSystemUptime reads system uptime from /proc/uptime +// Returns uptime in seconds, or service uptime as fallback +func (s *MetricsService) getSystemUptime() float64 { + file, err := os.Open("/proc/uptime") + if err != nil { + // Fallback to service uptime if /proc/uptime is not available + s.logger.Warn("Failed to read /proc/uptime, using service uptime", "error", err) + return time.Since(s.startTime).Seconds() + } + defer file.Close() + + scanner := bufio.NewScanner(file) + if !scanner.Scan() { + // Fallback to service uptime if file is empty + s.logger.Warn("Failed to read /proc/uptime content, using service uptime") + return time.Since(s.startTime).Seconds() + } + + line := strings.TrimSpace(scanner.Text()) + fields := strings.Fields(line) + if len(fields) == 0 { + // Fallback to service uptime if no data + s.logger.Warn("No data in /proc/uptime, using service uptime") + return time.Since(s.startTime).Seconds() + } + + // First field is system uptime in seconds + uptimeSeconds, err := strconv.ParseFloat(fields[0], 64) + if err != nil { + // Fallback to service uptime if parsing fails + s.logger.Warn("Failed to parse /proc/uptime, using service uptime", "error", err) + return time.Since(s.startTime).Seconds() + } + + return uptimeSeconds +} + +// getSystemMemory reads system memory from /proc/meminfo +// Returns total, used (in bytes), and usage percentage +func (s *MetricsService) getSystemMemory() (int64, int64, float64) { + file, err := os.Open("/proc/meminfo") + if err != nil { + s.logger.Warn("Failed to read /proc/meminfo, using Go runtime memory", "error", err) + var m runtime.MemStats + runtime.ReadMemStats(&m) + memoryUsed := int64(m.Alloc) + memoryTotal := int64(m.Sys) + memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100 + return memoryTotal, memoryUsed, memoryPercent + } + defer file.Close() + + var memTotal, memAvailable, memFree, buffers, cached int64 + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line == "" { + continue + } + + // Parse line like "MemTotal: 16375596 kB" + // or "MemTotal: 16375596" (some systems don't have unit) + colonIdx := strings.Index(line, ":") + if colonIdx == -1 { + continue + } + + key := strings.TrimSpace(line[:colonIdx]) + valuePart := strings.TrimSpace(line[colonIdx+1:]) + + // Split value part to get number (ignore unit like "kB") + fields := strings.Fields(valuePart) + if len(fields) == 0 { + continue + } + + value, err := strconv.ParseInt(fields[0], 10, 64) + if err != nil { + continue + } + + // Values in /proc/meminfo are in KB, convert to bytes + valueBytes := value * 1024 + + switch key { + case "MemTotal": + memTotal = valueBytes + case "MemAvailable": + memAvailable = valueBytes + case "MemFree": + memFree = valueBytes + case "Buffers": + buffers = valueBytes + case "Cached": + cached = valueBytes + } + } + + if err := scanner.Err(); err != nil { + s.logger.Warn("Error scanning /proc/meminfo", "error", err) + } + + if memTotal == 0 { + s.logger.Warn("Failed to get MemTotal from 
/proc/meminfo, using Go runtime memory", "memTotal", memTotal) + var m runtime.MemStats + runtime.ReadMemStats(&m) + memoryUsed := int64(m.Alloc) + memoryTotal := int64(m.Sys) + memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100 + return memoryTotal, memoryUsed, memoryPercent + } + + // Calculate used memory + // If MemAvailable exists (kernel 3.14+), use it for more accurate calculation + var memoryUsed int64 + if memAvailable > 0 { + memoryUsed = memTotal - memAvailable + } else { + // Fallback: MemTotal - MemFree - Buffers - Cached + memoryUsed = memTotal - memFree - buffers - cached + if memoryUsed < 0 { + memoryUsed = memTotal - memFree + } + } + + memoryPercent := float64(memoryUsed) / float64(memTotal) * 100 + + s.logger.Debug("System memory stats", + "memTotal", memTotal, + "memAvailable", memAvailable, + "memoryUsed", memoryUsed, + "memoryPercent", memoryPercent) + + return memTotal, memoryUsed, memoryPercent +} + +// getCPUUsage reads CPU usage from /proc/stat +// Requires two readings to calculate percentage +func (s *MetricsService) getCPUUsage() float64 { + currentCPU, err := s.readCPUStats() + if err != nil { + s.logger.Warn("Failed to read CPU stats", "error", err) + return 0.0 + } + + // If this is the first reading, store it and return 0 + if s.lastCPU == nil { + s.lastCPU = currentCPU + s.lastCPUTime = time.Now() + return 0.0 + } + + // Calculate time difference + timeDiff := time.Since(s.lastCPUTime).Seconds() + if timeDiff < 0.1 { + // Too soon, return previous value or 0 + return 0.0 + } + + // Calculate total CPU time + prevTotal := s.lastCPU.user + s.lastCPU.nice + s.lastCPU.system + s.lastCPU.idle + + s.lastCPU.iowait + s.lastCPU.irq + s.lastCPU.softirq + s.lastCPU.steal + s.lastCPU.guest + currTotal := currentCPU.user + currentCPU.nice + currentCPU.system + currentCPU.idle + + currentCPU.iowait + currentCPU.irq + currentCPU.softirq + currentCPU.steal + currentCPU.guest + + // Calculate idle time + prevIdle := s.lastCPU.idle + s.lastCPU.iowait + currIdle := currentCPU.idle + currentCPU.iowait + + // Calculate used time + totalDiff := currTotal - prevTotal + idleDiff := currIdle - prevIdle + + if totalDiff == 0 { + return 0.0 + } + + // Calculate CPU usage percentage + usagePercent := 100.0 * (1.0 - float64(idleDiff)/float64(totalDiff)) + + // Update last CPU stats + s.lastCPU = currentCPU + s.lastCPUTime = time.Now() + + return usagePercent +} + +// readCPUStats reads CPU statistics from /proc/stat +func (s *MetricsService) readCPUStats() (*cpuStats, error) { + file, err := os.Open("/proc/stat") + if err != nil { + return nil, fmt.Errorf("failed to open /proc/stat: %w", err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + if !scanner.Scan() { + return nil, fmt.Errorf("failed to read /proc/stat") + } + + line := strings.TrimSpace(scanner.Text()) + if !strings.HasPrefix(line, "cpu ") { + return nil, fmt.Errorf("invalid /proc/stat format") + } + + fields := strings.Fields(line) + if len(fields) < 8 { + return nil, fmt.Errorf("insufficient CPU stats fields") + } + + stats := &cpuStats{} + stats.user, _ = strconv.ParseUint(fields[1], 10, 64) + stats.nice, _ = strconv.ParseUint(fields[2], 10, 64) + stats.system, _ = strconv.ParseUint(fields[3], 10, 64) + stats.idle, _ = strconv.ParseUint(fields[4], 10, 64) + stats.iowait, _ = strconv.ParseUint(fields[5], 10, 64) + stats.irq, _ = strconv.ParseUint(fields[6], 10, 64) + stats.softirq, _ = strconv.ParseUint(fields[7], 10, 64) + + if len(fields) > 8 { + stats.steal, _ = 
strconv.ParseUint(fields[8], 10, 64) + } + if len(fields) > 9 { + stats.guest, _ = strconv.ParseUint(fields[9], 10, 64) + } + + return stats, nil +} diff --git a/backend/internal/scst/handler.go b/backend/internal/scst/handler.go index 4433e5b..1a3349e 100644 --- a/backend/internal/scst/handler.go +++ b/backend/internal/scst/handler.go @@ -1,6 +1,7 @@ package scst import ( + "fmt" "net/http" "github.com/atlasos/calypso/internal/common/database" @@ -11,19 +12,19 @@ import ( // Handler handles SCST-related API requests type Handler struct { - service *Service + service *Service taskEngine *tasks.Engine - db *database.DB - logger *logger.Logger + db *database.DB + logger *logger.Logger } // NewHandler creates a new SCST handler func NewHandler(db *database.DB, log *logger.Logger) *Handler { return &Handler{ - service: NewService(db, log), + service: NewService(db, log), taskEngine: tasks.NewEngine(db, log), - db: db, - logger: log, + db: db, + logger: log, } } @@ -55,21 +56,34 @@ func (h *Handler) GetTarget(c *gin.Context) { } // Get LUNs - luns, _ := h.service.GetTargetLUNs(c.Request.Context(), targetID) + luns, err := h.service.GetTargetLUNs(c.Request.Context(), targetID) + if err != nil { + h.logger.Warn("Failed to get LUNs", "target_id", targetID, "error", err) + // Return empty array instead of nil + luns = []LUN{} + } + + // Get initiator groups + groups, err2 := h.service.GetTargetInitiatorGroups(c.Request.Context(), targetID) + if err2 != nil { + h.logger.Warn("Failed to get initiator groups", "target_id", targetID, "error", err2) + groups = []InitiatorGroup{} + } c.JSON(http.StatusOK, gin.H{ - "target": target, - "luns": luns, + "target": target, + "luns": luns, + "initiator_groups": groups, }) } // CreateTargetRequest represents a target creation request type CreateTargetRequest struct { - IQN string `json:"iqn" binding:"required"` - TargetType string `json:"target_type" binding:"required"` - Name string `json:"name" binding:"required"` - Description string `json:"description"` - SingleInitiatorOnly bool `json:"single_initiator_only"` + IQN string `json:"iqn" binding:"required"` + TargetType string `json:"target_type" binding:"required"` + Name string `json:"name" binding:"required"` + Description string `json:"description"` + SingleInitiatorOnly bool `json:"single_initiator_only"` } // CreateTarget creates a new SCST target @@ -83,13 +97,13 @@ func (h *Handler) CreateTarget(c *gin.Context) { userID, _ := c.Get("user_id") target := &Target{ - IQN: req.IQN, - TargetType: req.TargetType, - Name: req.Name, - Description: req.Description, - IsActive: true, + IQN: req.IQN, + TargetType: req.TargetType, + Name: req.Name, + Description: req.Description, + IsActive: true, SingleInitiatorOnly: req.SingleInitiatorOnly || req.TargetType == "vtl" || req.TargetType == "physical_tape", - CreatedBy: userID.(string), + CreatedBy: userID.(string), } if err := h.service.CreateTarget(c.Request.Context(), target); err != nil { @@ -103,9 +117,9 @@ func (h *Handler) CreateTarget(c *gin.Context) { // AddLUNRequest represents a LUN addition request type AddLUNRequest struct { - DeviceName string `json:"device_name" binding:"required"` - DevicePath string `json:"device_path" binding:"required"` - LUNNumber int `json:"lun_number" binding:"required"` + DeviceName string `json:"device_name" binding:"required"` + DevicePath string `json:"device_path" binding:"required"` + LUNNumber int `json:"lun_number" binding:"required"` HandlerType string `json:"handler_type" binding:"required"` } @@ -121,7 +135,15 @@ func 
(h *Handler) AddLUN(c *gin.Context) { var req AddLUNRequest if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + h.logger.Error("Failed to bind AddLUN request", "error", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("invalid request: %v", err)}) + return + } + + // Validate required fields + if req.DeviceName == "" || req.DevicePath == "" || req.HandlerType == "" { + h.logger.Error("Missing required fields in AddLUN request", "device_name", req.DeviceName, "device_path", req.DevicePath, "handler_type", req.HandlerType) + c.JSON(http.StatusBadRequest, gin.H{"error": "device_name, device_path, and handler_type are required"}) return } @@ -164,6 +186,110 @@ func (h *Handler) AddInitiator(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"message": "Initiator added successfully"}) } +// ListAllInitiators lists all initiators across all targets +func (h *Handler) ListAllInitiators(c *gin.Context) { + initiators, err := h.service.ListAllInitiators(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to list initiators", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list initiators"}) + return + } + + if initiators == nil { + initiators = []InitiatorWithTarget{} + } + + c.JSON(http.StatusOK, gin.H{"initiators": initiators}) +} + +// RemoveInitiator removes an initiator +func (h *Handler) RemoveInitiator(c *gin.Context) { + initiatorID := c.Param("id") + + if err := h.service.RemoveInitiator(c.Request.Context(), initiatorID); err != nil { + if err.Error() == "initiator not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "initiator not found"}) + return + } + h.logger.Error("Failed to remove initiator", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Initiator removed successfully"}) +} + +// GetInitiator retrieves an initiator by ID +func (h *Handler) GetInitiator(c *gin.Context) { + initiatorID := c.Param("id") + + initiator, err := h.service.GetInitiator(c.Request.Context(), initiatorID) + if err != nil { + if err.Error() == "initiator not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "initiator not found"}) + return + } + h.logger.Error("Failed to get initiator", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get initiator"}) + return + } + + c.JSON(http.StatusOK, initiator) +} + +// ListExtents lists all device extents +func (h *Handler) ListExtents(c *gin.Context) { + extents, err := h.service.ListExtents(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to list extents", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list extents"}) + return + } + + if extents == nil { + extents = []Extent{} + } + + c.JSON(http.StatusOK, gin.H{"extents": extents}) +} + +// CreateExtentRequest represents a request to create an extent +type CreateExtentRequest struct { + DeviceName string `json:"device_name" binding:"required"` + DevicePath string `json:"device_path" binding:"required"` + HandlerType string `json:"handler_type" binding:"required"` +} + +// CreateExtent creates a new device extent +func (h *Handler) CreateExtent(c *gin.Context) { + var req CreateExtentRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + if err := h.service.CreateExtent(c.Request.Context(), req.DeviceName, req.DevicePath, 
req.HandlerType); err != nil { + h.logger.Error("Failed to create extent", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusCreated, gin.H{"message": "Extent created successfully"}) +} + +// DeleteExtent deletes a device extent +func (h *Handler) DeleteExtent(c *gin.Context) { + deviceName := c.Param("device") + + if err := h.service.DeleteExtent(c.Request.Context(), deviceName); err != nil { + h.logger.Error("Failed to delete extent", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Extent deleted successfully"}) +} + // ApplyConfig applies SCST configuration func (h *Handler) ApplyConfig(c *gin.Context) { userID, _ := c.Get("user_id") @@ -209,3 +335,142 @@ func (h *Handler) ListHandlers(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"handlers": handlers}) } +// ListPortals lists all iSCSI portals +func (h *Handler) ListPortals(c *gin.Context) { + portals, err := h.service.ListPortals(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to list portals", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list portals"}) + return + } + + // Ensure we return an empty array instead of null + if portals == nil { + portals = []Portal{} + } + + c.JSON(http.StatusOK, gin.H{"portals": portals}) +} + +// CreatePortal creates a new portal +func (h *Handler) CreatePortal(c *gin.Context) { + var portal Portal + if err := c.ShouldBindJSON(&portal); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + if err := h.service.CreatePortal(c.Request.Context(), &portal); err != nil { + h.logger.Error("Failed to create portal", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusCreated, portal) +} + +// UpdatePortal updates a portal +func (h *Handler) UpdatePortal(c *gin.Context) { + id := c.Param("id") + + var portal Portal + if err := c.ShouldBindJSON(&portal); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + if err := h.service.UpdatePortal(c.Request.Context(), id, &portal); err != nil { + if err.Error() == "portal not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"}) + return + } + h.logger.Error("Failed to update portal", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, portal) +} + +// EnableTarget enables a target +func (h *Handler) EnableTarget(c *gin.Context) { + targetID := c.Param("id") + + target, err := h.service.GetTarget(c.Request.Context(), targetID) + if err != nil { + if err.Error() == "target not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "target not found"}) + return + } + h.logger.Error("Failed to get target", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get target"}) + return + } + + if err := h.service.EnableTarget(c.Request.Context(), target.IQN); err != nil { + h.logger.Error("Failed to enable target", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Target enabled successfully"}) +} + +// DisableTarget disables a target +func (h *Handler) DisableTarget(c *gin.Context) { + targetID := c.Param("id") + + target, err := h.service.GetTarget(c.Request.Context(), targetID) + if err != nil { + if 
err.Error() == "target not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "target not found"}) + return + } + h.logger.Error("Failed to get target", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get target"}) + return + } + + if err := h.service.DisableTarget(c.Request.Context(), target.IQN); err != nil { + h.logger.Error("Failed to disable target", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Target disabled successfully"}) +} + +// DeletePortal deletes a portal +func (h *Handler) DeletePortal(c *gin.Context) { + id := c.Param("id") + + if err := h.service.DeletePortal(c.Request.Context(), id); err != nil { + if err.Error() == "portal not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"}) + return + } + h.logger.Error("Failed to delete portal", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Portal deleted successfully"}) +} + +// GetPortal retrieves a portal by ID +func (h *Handler) GetPortal(c *gin.Context) { + id := c.Param("id") + + portal, err := h.service.GetPortal(c.Request.Context(), id) + if err != nil { + if err.Error() == "portal not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"}) + return + } + h.logger.Error("Failed to get portal", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get portal"}) + return + } + + c.JSON(http.StatusOK, portal) +} diff --git a/backend/internal/scst/service.go b/backend/internal/scst/service.go index 243fa03..8ec7e55 100644 --- a/backend/internal/scst/service.go +++ b/backend/internal/scst/service.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "fmt" + "os" "os/exec" "strings" "time" @@ -28,47 +29,70 @@ func NewService(db *database.DB, log *logger.Logger) *Service { // Target represents an SCST iSCSI target type Target struct { - ID string `json:"id"` - IQN string `json:"iqn"` - TargetType string `json:"target_type"` // 'disk', 'vtl', 'physical_tape' - Name string `json:"name"` - Description string `json:"description"` - IsActive bool `json:"is_active"` - SingleInitiatorOnly bool `json:"single_initiator_only"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - CreatedBy string `json:"created_by"` + ID string `json:"id"` + IQN string `json:"iqn"` + TargetType string `json:"target_type"` // 'disk', 'vtl', 'physical_tape' + Name string `json:"name"` + Description string `json:"description"` + IsActive bool `json:"is_active"` + SingleInitiatorOnly bool `json:"single_initiator_only"` + LUNCount int `json:"lun_count"` // Number of LUNs attached to this target + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + CreatedBy string `json:"created_by"` } // LUN represents an SCST LUN mapping type LUN struct { - ID string `json:"id"` - TargetID string `json:"target_id"` - LUNNumber int `json:"lun_number"` - DeviceName string `json:"device_name"` - DevicePath string `json:"device_path"` - HandlerType string `json:"handler_type"` - CreatedAt time.Time `json:"created_at"` + ID string `json:"id"` + TargetID string `json:"target_id"` + LUNNumber int `json:"lun_number"` + DeviceName string `json:"device_name"` + DevicePath string `json:"device_path"` + HandlerType string `json:"handler_type"` + Handler string `json:"handler"` // Alias for handler_type for frontend compatibility + 
DeviceType string `json:"device_type"` // Derived from handler_type + IsActive bool `json:"is_active"` // True if LUN exists in SCST + CreatedAt time.Time `json:"created_at"` } // InitiatorGroup represents an SCST initiator group type InitiatorGroup struct { - ID string `json:"id"` - TargetID string `json:"target_id"` - GroupName string `json:"group_name"` + ID string `json:"id"` + TargetID string `json:"target_id"` + GroupName string `json:"group_name"` Initiators []Initiator `json:"initiators"` - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"created_at"` } // Initiator represents an iSCSI initiator type Initiator struct { - ID string `json:"id"` - GroupID string `json:"group_id"` - IQN string `json:"iqn"` - IsActive bool `json:"is_active"` + ID string `json:"id"` + GroupID string `json:"group_id"` + IQN string `json:"iqn"` + IsActive bool `json:"is_active"` CreatedAt time.Time `json:"created_at"` } +// Portal represents an iSCSI network portal (IP address and port) +type Portal struct { + ID string `json:"id"` + IPAddress string `json:"ip_address"` // IPv4 or IPv6 address, or "0.0.0.0" for all interfaces + Port int `json:"port"` // Default 3260 for iSCSI + IsActive bool `json:"is_active"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Extent represents an SCST device extent (opened device) +type Extent struct { + HandlerType string `json:"handler_type"` + DeviceName string `json:"device_name"` + DevicePath string `json:"device_path"` // Path to the actual device/file + IsInUse bool `json:"is_in_use"` // True if device is used by any LUN + LUNCount int `json:"lun_count"` // Number of LUNs using this device +} + // CreateTarget creates a new SCST iSCSI target func (s *Service) CreateTarget(ctx context.Context, target *Target) error { // Validate IQN format @@ -79,15 +103,39 @@ func (s *Service) CreateTarget(ctx context.Context, target *Target) error { // Create target in SCST cmd := exec.CommandContext(ctx, "scstadmin", "-add_target", target.IQN, "-driver", "iscsi") output, err := cmd.CombinedOutput() + outputStr := string(output) + if err != nil { // Check if target already exists - if strings.Contains(string(output), "already exists") { + if strings.Contains(outputStr, "already exists") { s.logger.Warn("Target already exists in SCST", "iqn", target.IQN) } else { - return fmt.Errorf("failed to create SCST target: %s: %w", string(output), err) + // Check for common SCST errors + if strings.Contains(outputStr, "User space process is not connected") || + strings.Contains(outputStr, "iscsi-scstd") { + return fmt.Errorf("iSCSI daemon (iscsi-scstd) is not running. Please start it with: systemctl start iscsi-scstd") + } + if strings.Contains(outputStr, "Failed to add target") { + return fmt.Errorf("failed to add target to SCST: %s. 
Check dmesg for details", strings.TrimSpace(outputStr)) + } + return fmt.Errorf("failed to create SCST target: %s: %w", outputStr, err) } } + // Check for warnings in output even if command succeeded + if strings.Contains(outputStr, "WARNING") || strings.Contains(outputStr, "Failed to add target") { + s.logger.Warn("SCST warning during target creation", "iqn", target.IQN, "output", outputStr) + // Check dmesg for more details + dmesgCmd := exec.CommandContext(ctx, "dmesg", "-T", "-l", "err,warn") + if dmesgOutput, dmesgErr := dmesgCmd.CombinedOutput(); dmesgErr == nil { + recentErrors := string(dmesgOutput) + if strings.Contains(recentErrors, "iscsi-scst") { + return fmt.Errorf("iSCSI daemon (iscsi-scstd) is not running. Please start it with: systemctl start iscsi-scstd") + } + } + return fmt.Errorf("target creation completed but SCST reported an error: %s. Check dmesg for details", strings.TrimSpace(outputStr)) + } + // Insert into database query := ` INSERT INTO scst_targets ( @@ -237,13 +285,201 @@ func (s *Service) AddInitiator(ctx context.Context, targetIQN, initiatorIQN stri return nil } +// GetTargetInitiatorGroups retrieves all initiator groups for a target +func (s *Service) GetTargetInitiatorGroups(ctx context.Context, targetID string) ([]InitiatorGroup, error) { + // Get all groups for this target + query := ` + SELECT id, target_id, group_name, created_at + FROM scst_initiator_groups + WHERE target_id = $1 + ORDER BY group_name + ` + + rows, err := s.db.QueryContext(ctx, query, targetID) + if err != nil { + return nil, fmt.Errorf("failed to get initiator groups: %w", err) + } + defer rows.Close() + + var groups []InitiatorGroup + for rows.Next() { + var group InitiatorGroup + err := rows.Scan(&group.ID, &group.TargetID, &group.GroupName, &group.CreatedAt) + if err != nil { + s.logger.Error("Failed to scan initiator group", "error", err) + continue + } + + // Get initiators for this group + initiators, err := s.getGroupInitiators(ctx, group.ID) + if err != nil { + s.logger.Warn("Failed to get initiators for group", "group_id", group.ID, "error", err) + group.Initiators = []Initiator{} + } else { + group.Initiators = initiators + } + + groups = append(groups, group) + } + + return groups, rows.Err() +} + +// InitiatorWithTarget extends Initiator with target information +type InitiatorWithTarget struct { + Initiator + TargetID string `json:"target_id"` + TargetIQN string `json:"target_iqn"` + TargetName string `json:"target_name"` + GroupName string `json:"group_name"` +} + +// ListAllInitiators lists all initiators across all targets +func (s *Service) ListAllInitiators(ctx context.Context) ([]InitiatorWithTarget, error) { + query := ` + SELECT i.id, i.group_id, i.iqn, i.is_active, i.created_at, + ig.target_id, ig.group_name, t.iqn as target_iqn, t.name as target_name + FROM scst_initiators i + JOIN scst_initiator_groups ig ON i.group_id = ig.id + JOIN scst_targets t ON ig.target_id = t.id + ORDER BY t.iqn, ig.group_name, i.iqn + ` + + rows, err := s.db.QueryContext(ctx, query) + if err != nil { + return nil, fmt.Errorf("failed to list initiators: %w", err) + } + defer rows.Close() + + var initiators []InitiatorWithTarget + for rows.Next() { + var initiator InitiatorWithTarget + err := rows.Scan( + &initiator.ID, &initiator.GroupID, &initiator.IQN, + &initiator.IsActive, &initiator.CreatedAt, + &initiator.TargetID, &initiator.GroupName, &initiator.TargetIQN, &initiator.TargetName, + ) + if err != nil { + s.logger.Error("Failed to scan initiator", "error", err) + continue + } + 
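Reviewer aside: the handlers and services in this patch match errors by string, e.g. err.Error() == "initiator not found". That comparison breaks as soon as an error is wrapped with fmt.Errorf("...: %w", err). A sentinel-error refactor would be the more idiomatic Go pattern; this is a sketch only, and the names here (ErrNotFound, wrapNoRows) are hypothetical, not part of the patch:

package scst

import (
	"database/sql"
	"errors"
	"fmt"
)

// ErrNotFound is a hypothetical sentinel the service layer could return.
var ErrNotFound = errors.New("not found")

// wrapNoRows shows how a lookup could translate sql.ErrNoRows once,
// so handlers can test with errors.Is instead of comparing strings.
func wrapNoRows(err error, what, id string) error {
	if errors.Is(err, sql.ErrNoRows) {
		return fmt.Errorf("%s %s: %w", what, id, ErrNotFound)
	}
	return err
}

Handlers would then map errors.Is(err, ErrNotFound) to HTTP 404, and the check keeps working no matter how many times the error is wrapped on the way up.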
initiators = append(initiators, initiator) + } + + return initiators, rows.Err() +} + +// RemoveInitiator removes an initiator from a target +func (s *Service) RemoveInitiator(ctx context.Context, initiatorID string) error { + // Get initiator info + var initiatorIQN, groupName, targetIQN string + err := s.db.QueryRowContext(ctx, ` + SELECT i.iqn, ig.group_name, t.iqn + FROM scst_initiators i + JOIN scst_initiator_groups ig ON i.group_id = ig.id + JOIN scst_targets t ON ig.target_id = t.id + WHERE i.id = $1 + `, initiatorID).Scan(&initiatorIQN, &groupName, &targetIQN) + if err != nil { + if err == sql.ErrNoRows { + return fmt.Errorf("initiator not found") + } + return fmt.Errorf("failed to get initiator: %w", err) + } + + // Remove from SCST + cmd := exec.CommandContext(ctx, "scstadmin", "-remove_init", initiatorIQN, + "-group", groupName, + "-target", targetIQN, + "-driver", "iscsi") + output, err := cmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if !strings.Contains(outputStr, "not found") { + return fmt.Errorf("failed to remove initiator from SCST: %s: %w", outputStr, err) + } + // If not found in SCST, continue to remove from database + } + + // Remove from database + _, err = s.db.ExecContext(ctx, "DELETE FROM scst_initiators WHERE id = $1", initiatorID) + if err != nil { + return fmt.Errorf("failed to remove initiator from database: %w", err) + } + + s.logger.Info("Initiator removed", "initiator", initiatorIQN, "target", targetIQN) + return nil +} + +// GetInitiator retrieves an initiator by ID with full details +func (s *Service) GetInitiator(ctx context.Context, initiatorID string) (*Initiator, error) { + query := ` + SELECT i.id, i.group_id, i.iqn, i.is_active, i.created_at, + ig.target_id, ig.group_name, t.iqn as target_iqn, t.name as target_name + FROM scst_initiators i + JOIN scst_initiator_groups ig ON i.group_id = ig.id + JOIN scst_targets t ON ig.target_id = t.id + WHERE i.id = $1 + ` + + var initiator Initiator + var targetID, groupName, targetIQN, targetName string + err := s.db.QueryRowContext(ctx, query, initiatorID).Scan( + &initiator.ID, &initiator.GroupID, &initiator.IQN, + &initiator.IsActive, &initiator.CreatedAt, + &targetID, &groupName, &targetIQN, &targetName, + ) + if err != nil { + if err == sql.ErrNoRows { + return nil, fmt.Errorf("initiator not found") + } + return nil, fmt.Errorf("failed to get initiator: %w", err) + } + + return &initiator, nil +} + +// getGroupInitiators retrieves all initiators for a group +func (s *Service) getGroupInitiators(ctx context.Context, groupID string) ([]Initiator, error) { + query := ` + SELECT id, group_id, iqn, is_active, created_at + FROM scst_initiators + WHERE group_id = $1 + ORDER BY iqn + ` + + rows, err := s.db.QueryContext(ctx, query, groupID) + if err != nil { + return nil, fmt.Errorf("failed to get initiators: %w", err) + } + defer rows.Close() + + var initiators []Initiator + for rows.Next() { + var initiator Initiator + err := rows.Scan(&initiator.ID, &initiator.GroupID, &initiator.IQN, &initiator.IsActive, &initiator.CreatedAt) + if err != nil { + s.logger.Error("Failed to scan initiator", "error", err) + continue + } + initiators = append(initiators, initiator) + } + + return initiators, rows.Err() +} + // ListTargets lists all SCST targets func (s *Service) ListTargets(ctx context.Context) ([]Target, error) { query := ` - SELECT id, iqn, target_type, name, description, is_active, - single_initiator_only, created_at, updated_at, created_by - FROM scst_targets - ORDER BY name + SELECT + 
t.id, t.iqn, t.target_type, t.name, t.description, t.is_active, + t.single_initiator_only, t.created_at, t.updated_at, t.created_by, + COALESCE(COUNT(l.id), 0) as lun_count + FROM scst_targets t + LEFT JOIN scst_luns l ON t.id = l.target_id + GROUP BY t.id, t.iqn, t.target_type, t.name, t.description, t.is_active, + t.single_initiator_only, t.created_at, t.updated_at, t.created_by + ORDER BY t.name ` rows, err := s.db.QueryContext(ctx, query) @@ -255,15 +491,20 @@ func (s *Service) ListTargets(ctx context.Context) ([]Target, error) { var targets []Target for rows.Next() { var target Target + var description sql.NullString err := rows.Scan( &target.ID, &target.IQN, &target.TargetType, &target.Name, - &target.Description, &target.IsActive, &target.SingleInitiatorOnly, + &description, &target.IsActive, &target.SingleInitiatorOnly, &target.CreatedAt, &target.UpdatedAt, &target.CreatedBy, + &target.LUNCount, ) if err != nil { s.logger.Error("Failed to scan target", "error", err) continue } + if description.Valid { + target.Description = description.String + } targets = append(targets, target) } @@ -280,9 +521,10 @@ func (s *Service) GetTarget(ctx context.Context, id string) (*Target, error) { ` var target Target + var description sql.NullString err := s.db.QueryRowContext(ctx, query, id).Scan( &target.ID, &target.IQN, &target.TargetType, &target.Name, - &target.Description, &target.IsActive, &target.SingleInitiatorOnly, + &description, &target.IsActive, &target.SingleInitiatorOnly, &target.CreatedAt, &target.UpdatedAt, &target.CreatedBy, ) if err != nil { @@ -292,11 +534,181 @@ func (s *Service) GetTarget(ctx context.Context, id string) (*Target, error) { return nil, fmt.Errorf("failed to get target: %w", err) } + if description.Valid { + target.Description = description.String + } + + // Sync enabled status from SCST + enabled, err := s.getTargetEnabledStatus(ctx, target.IQN) + if err == nil { + // Update database if status differs + if enabled != target.IsActive { + _, err = s.db.ExecContext(ctx, "UPDATE scst_targets SET is_active = $1 WHERE id = $2", enabled, target.ID) + if err != nil { + s.logger.Warn("Failed to update target status", "error", err) + } else { + target.IsActive = enabled + } + } + } + return &target, nil } +// getTargetEnabledStatus reads enabled status from SCST +func (s *Service) getTargetEnabledStatus(ctx context.Context, targetIQN string) (bool, error) { + // Read SCST config to check if target is enabled + cmd := exec.CommandContext(ctx, "scstadmin", "-write_config", "/tmp/scst_target_check.conf") + output, err := cmd.CombinedOutput() + if err != nil { + return false, fmt.Errorf("failed to write config: %s: %w", string(output), err) + } + + // Read config file + configData, err := os.ReadFile("/tmp/scst_target_check.conf") + if err != nil { + return false, fmt.Errorf("failed to read config: %w", err) + } + + // Check if target is enabled in config + // Format: TARGET iqn.2025-12.id.atlas:lun01 { enabled 1 } or { enabled 0 } + configStr := string(configData) + targetSection := fmt.Sprintf("TARGET %s", targetIQN) + if !strings.Contains(configStr, targetSection) { + return false, nil + } + + // Extract enabled status + lines := strings.Split(configStr, "\n") + inTargetSection := false + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.Contains(line, targetSection) { + inTargetSection = true + continue + } + if inTargetSection { + if strings.Contains(line, "enabled 1") { + return true, nil + } + if strings.Contains(line, "enabled 0") { + return 
false, nil + } + if strings.HasPrefix(line, "TARGET") { + // Next target, stop searching + break + } + } + } + + // Default to enabled if target exists but status not found + return true, nil +} + +// EnableTarget enables a target in SCST +func (s *Service) EnableTarget(ctx context.Context, targetIQN string) error { + cmd := exec.CommandContext(ctx, "scstadmin", "-enable_target", targetIQN, "-driver", "iscsi") + output, err := cmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if strings.Contains(outputStr, "already enabled") { + return nil // Already enabled, no error + } + return fmt.Errorf("failed to enable target: %s: %w", outputStr, err) + } + + // Update database + var targetID string + err = s.db.QueryRowContext(ctx, "SELECT id FROM scst_targets WHERE iqn = $1", targetIQN).Scan(&targetID) + if err == nil { + s.db.ExecContext(ctx, "UPDATE scst_targets SET is_active = true WHERE id = $1", targetID) + } + + s.logger.Info("Target enabled", "iqn", targetIQN) + return nil +} + +// DisableTarget disables a target in SCST +func (s *Service) DisableTarget(ctx context.Context, targetIQN string) error { + cmd := exec.CommandContext(ctx, "scstadmin", "-disable_target", targetIQN, "-driver", "iscsi") + output, err := cmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if strings.Contains(outputStr, "already disabled") { + return nil // Already disabled, no error + } + return fmt.Errorf("failed to disable target: %s: %w", outputStr, err) + } + + // Update database + var targetID string + err = s.db.QueryRowContext(ctx, "SELECT id FROM scst_targets WHERE iqn = $1", targetIQN).Scan(&targetID) + if err == nil { + s.db.ExecContext(ctx, "UPDATE scst_targets SET is_active = false WHERE id = $1", targetID) + } + + s.logger.Info("Target disabled", "iqn", targetIQN) + return nil +} + // GetTargetLUNs retrieves all LUNs for a target +// It reads from SCST first, then syncs with database func (s *Service) GetTargetLUNs(ctx context.Context, targetID string) ([]LUN, error) { + // Get target IQN + var targetIQN string + err := s.db.QueryRowContext(ctx, "SELECT iqn FROM scst_targets WHERE id = $1", targetID).Scan(&targetIQN) + if err != nil { + return nil, fmt.Errorf("failed to get target IQN: %w", err) + } + + // Read LUNs from SCST + scstLUNs, err := s.readLUNsFromSCST(ctx, targetIQN) + if err != nil { + s.logger.Warn("Failed to read LUNs from SCST, using database only", "error", err) + // Fallback to database only + return s.getLUNsFromDatabase(ctx, targetID) + } + + // Sync SCST LUNs with database + for _, scstLUN := range scstLUNs { + // Get device info from SCST + devicePath, handlerType, err := s.getDeviceInfo(ctx, scstLUN.DeviceName) + if err != nil { + s.logger.Warn("Failed to get device info", "device", scstLUN.DeviceName, "error", err) + // Try to get from existing database record if available + var existingPath, existingHandler string + s.db.QueryRowContext(ctx, + "SELECT device_path, handler_type FROM scst_luns WHERE target_id = $1 AND lun_number = $2", + targetID, scstLUN.LUNNumber, + ).Scan(&existingPath, &existingHandler) + if existingPath != "" { + devicePath = existingPath + } + if existingHandler != "" { + handlerType = existingHandler + } + } + + // Upsert into database + _, err = s.db.ExecContext(ctx, ` + INSERT INTO scst_luns (target_id, lun_number, device_name, device_path, handler_type) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (target_id, lun_number) DO UPDATE SET + device_name = EXCLUDED.device_name, + device_path = 
COALESCE(NULLIF(EXCLUDED.device_path, ''), scst_luns.device_path), + handler_type = COALESCE(NULLIF(EXCLUDED.handler_type, ''), scst_luns.handler_type) + `, targetID, scstLUN.LUNNumber, scstLUN.DeviceName, devicePath, handlerType) + if err != nil { + s.logger.Warn("Failed to sync LUN to database", "lun", scstLUN.LUNNumber, "error", err) + } + } + + // Return from database (now synced) + return s.getLUNsFromDatabase(ctx, targetID) +} + +// getLUNsFromDatabase retrieves LUNs from database only +func (s *Service) getLUNsFromDatabase(ctx context.Context, targetID string) ([]LUN, error) { query := ` SELECT id, target_id, lun_number, device_name, device_path, handler_type, created_at FROM scst_luns @@ -321,12 +733,377 @@ func (s *Service) GetTargetLUNs(ctx context.Context, targetID string) ([]LUN, er s.logger.Error("Failed to scan LUN", "error", err) continue } + + // Set handler and device_type for frontend compatibility + lun.Handler = lun.HandlerType + // Map handler type to user-friendly device type label + lun.DeviceType = s.getDeviceTypeLabel(lun.HandlerType) + // LUN is active if it exists in database (we sync from SCST, so if it's here, it's active) + lun.IsActive = true + luns = append(luns, lun) } return luns, rows.Err() } +// readLUNsFromSCST reads LUNs directly from SCST using scstadmin -list_group +func (s *Service) readLUNsFromSCST(ctx context.Context, targetIQN string) ([]LUN, error) { + cmd := exec.CommandContext(ctx, "scstadmin", "-list_group", + "-target", targetIQN, + "-driver", "iscsi") + output, err := cmd.CombinedOutput() + if err != nil { + return nil, fmt.Errorf("failed to list group: %s: %w", string(output), err) + } + + // Parse output + // Format: + // Driver: iscsi + // Target: iqn.2025-12.id.atlas:lun01 + // + // Assigned LUNs: + // + // LUN Device + // ---------- + // 1 LUN01 + + lines := strings.Split(string(output), "\n") + var luns []LUN + inLUNSection := false + + for _, line := range lines { + line = strings.TrimSpace(line) + + // Check if we're in the LUN section + if strings.Contains(line, "Assigned LUNs:") { + inLUNSection = true + continue + } + + // Skip separator line + if strings.HasPrefix(line, "---") { + continue + } + + // Skip header line "LUN Device" + if strings.HasPrefix(line, "LUN") && strings.Contains(line, "Device") { + continue + } + + // Parse LUN lines (format: "1 LUN01") + if inLUNSection && line != "" { + parts := strings.Fields(line) + if len(parts) >= 2 { + var lunNumber int + if _, err := fmt.Sscanf(parts[0], "%d", &lunNumber); err == nil { + deviceName := parts[1] + luns = append(luns, LUN{ + LUNNumber: lunNumber, + DeviceName: deviceName, + DevicePath: "", // Will be filled by getDeviceInfo + HandlerType: "", // Will be filled by getDeviceInfo + }) + } + } + } + + // Stop if we hit "All done" + if strings.Contains(line, "All done") { + break + } + } + + return luns, nil +} + +// getDeviceInfo gets device path and handler type from SCST +// Since scstadmin doesn't provide easy access to device attributes, +// we try to get handler type from device list, and device path from database if available +func (s *Service) getDeviceInfo(ctx context.Context, deviceName string) (string, string, error) { + // Find which handler this device belongs to + listCmd := exec.CommandContext(ctx, "scstadmin", "-list_device") + output, err := listCmd.Output() + if err != nil { + return "", "", fmt.Errorf("failed to list devices: %w", err) + } + + // Parse output to find handler + // Format: + // Handler Device + // ----------------------- + // vdisk_fileio 
LUN01 + lines := strings.Split(string(output), "\n") + var handlerType string + inHandlerSection := false + + for _, line := range lines { + line = strings.TrimSpace(line) + + // Check if we're in the handler section + if strings.Contains(line, "Handler") && strings.Contains(line, "Device") { + inHandlerSection = true + continue + } + + // Skip separator + if strings.HasPrefix(line, "---") { + continue + } + + // Parse handler and device lines + if inHandlerSection && line != "" && !strings.Contains(line, "Collecting") && !strings.Contains(line, "All done") { + parts := strings.Fields(line) + if len(parts) >= 2 { + handler := parts[0] + device := parts[1] + if device == deviceName { + handlerType = handler + break + } + } + } + + if strings.Contains(line, "All done") { + break + } + } + + if handlerType == "" { + return "", "", fmt.Errorf("handler not found for device %s", deviceName) + } + + // Try to get device path from SCST config file + devicePath := s.getDevicePathFromConfig(deviceName, handlerType) + + return devicePath, handlerType, nil +} + +// getDevicePathFromConfig reads device path from SCST config file +func (s *Service) getDevicePathFromConfig(deviceName, handlerType string) string { + // Write current config to temp file + cmd := exec.Command("scstadmin", "-write_config", "/tmp/scst_device_info.conf") + if err := cmd.Run(); err != nil { + s.logger.Warn("Failed to write SCST config", "error", err) + return "" + } + + // Read config file + configData, err := os.ReadFile("/tmp/scst_device_info.conf") + if err != nil { + s.logger.Warn("Failed to read SCST config", "error", err) + return "" + } + + // Parse config to find device + // Format: + // DEVICE deviceName { + // filename /path/to/device + // } + lines := strings.Split(string(configData), "\n") + inDeviceBlock := false + var devicePath string + + for _, line := range lines { + line = strings.TrimSpace(line) + + // Check if we're entering the device block + if strings.Contains(line, "DEVICE") && strings.Contains(line, deviceName) { + inDeviceBlock = true + continue + } + + // Check if we're leaving the device block + if inDeviceBlock && strings.HasPrefix(line, "}") { + break + } + + // Look for filename in device block + if inDeviceBlock && strings.Contains(line, "filename") { + parts := strings.Fields(line) + for j, part := range parts { + if part == "filename" && j+1 < len(parts) { + devicePath = parts[j+1] + break + } + } + if devicePath != "" { + break + } + } + } + + return devicePath +} + +// ListExtents lists all device extents (opened devices) in SCST +func (s *Service) ListExtents(ctx context.Context) ([]Extent, error) { + // List all devices from SCST + cmd := exec.CommandContext(ctx, "scstadmin", "-list_device") + output, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("failed to list devices: %w", err) + } + + // Parse output + // Format: + // Handler Device + // ----------------------- + // vdisk_fileio LUN01 + lines := strings.Split(string(output), "\n") + var extents []Extent + inHandlerSection := false + + for _, line := range lines { + line = strings.TrimSpace(line) + + // Check if we're in the handler section + if strings.Contains(line, "Handler") && strings.Contains(line, "Device") { + inHandlerSection = true + continue + } + + // Skip separator + if strings.HasPrefix(line, "---") { + continue + } + + // Skip header/footer lines + if strings.Contains(line, "Collecting") || strings.Contains(line, "All done") { + continue + } + + // Parse handler and device lines + if inHandlerSection && 
line != "" { + parts := strings.Fields(line) + if len(parts) >= 2 { + handlerType := parts[0] + deviceName := parts[1] + + // Skip if device is "-" (no device opened for this handler) + if deviceName == "-" { + continue + } + + // Get device path from config or database + devicePath := s.getDevicePathFromConfig(deviceName, handlerType) + if devicePath == "" { + // Try to get from database + var dbPath string + s.db.QueryRowContext(ctx, + "SELECT device_path FROM scst_luns WHERE device_name = $1 LIMIT 1", + deviceName, + ).Scan(&dbPath) + if dbPath != "" { + devicePath = dbPath + } + } + + // Count how many LUNs use this device + var lunCount int + s.db.QueryRowContext(ctx, + "SELECT COUNT(*) FROM scst_luns WHERE device_name = $1", + deviceName, + ).Scan(&lunCount) + + extents = append(extents, Extent{ + HandlerType: handlerType, + DeviceName: deviceName, + DevicePath: devicePath, + IsInUse: lunCount > 0, + LUNCount: lunCount, + }) + } + } + } + + return extents, nil +} + +// CreateExtent opens a device in SCST (creates an extent) +func (s *Service) CreateExtent(ctx context.Context, deviceName, devicePath, handlerType string) error { + // Validate handler type + handlers, err := s.DetectHandlers(ctx) + if err != nil { + return fmt.Errorf("failed to detect handlers: %w", err) + } + + handlerValid := false + for _, h := range handlers { + if h.Name == handlerType { + handlerValid = true + break + } + } + + if !handlerValid { + return fmt.Errorf("invalid handler type: %s", handlerType) + } + + // Open device in SCST + openCmd := exec.CommandContext(ctx, "scstadmin", "-open_dev", deviceName, + "-handler", handlerType, + "-attributes", fmt.Sprintf("filename=%s", devicePath)) + output, err := openCmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if strings.Contains(outputStr, "already exists") { + return fmt.Errorf("device %s already exists", deviceName) + } + return fmt.Errorf("failed to open device: %s: %w", outputStr, err) + } + + s.logger.Info("Extent created", "device", deviceName, "handler", handlerType, "path", devicePath) + return nil +} + +// DeleteExtent closes a device in SCST (removes an extent) +func (s *Service) DeleteExtent(ctx context.Context, deviceName string) error { + // Check if device is in use by any LUN + var lunCount int + err := s.db.QueryRowContext(ctx, + "SELECT COUNT(*) FROM scst_luns WHERE device_name = $1", + deviceName, + ).Scan(&lunCount) + if err == nil && lunCount > 0 { + return fmt.Errorf("device %s is in use by %d LUN(s). 
Remove LUNs first", deviceName, lunCount) + } + + // Close device in SCST + closeCmd := exec.CommandContext(ctx, "scstadmin", "-close_dev", deviceName) + output, err := closeCmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if strings.Contains(outputStr, "not found") { + return fmt.Errorf("device %s not found", deviceName) + } + return fmt.Errorf("failed to close device: %s: %w", outputStr, err) + } + + s.logger.Info("Extent deleted", "device", deviceName) + return nil +} + +// getDeviceTypeLabel returns a user-friendly label for device type based on handler type +func (s *Service) getDeviceTypeLabel(handlerType string) string { + deviceTypeMap := map[string]string{ + "vdisk_fileio": "ZFS Volume", + "vdisk_blockio": "Block Device", + "vdisk_nullio": "Null Device", + "vcdrom": "CDROM", + "dev_cdrom": "CDROM", + "dev_disk": "Physical Disk", + "dev_disk_perf": "Physical Disk (Performance)", + } + + if label, ok := deviceTypeMap[handlerType]; ok { + return label + } + + // Default: return handler type as-is + return handlerType +} + // WriteConfig writes SCST configuration to file func (s *Service) WriteConfig(ctx context.Context, configPath string) error { cmd := exec.CommandContext(ctx, "scstadmin", "-write_config", configPath) @@ -339,24 +1116,244 @@ func (s *Service) WriteConfig(ctx context.Context, configPath string) error { return nil } +// HandlerInfo represents SCST handler information +type HandlerInfo struct { + Name string `json:"name"` + Label string `json:"label"` // Simple, user-friendly label + Description string `json:"description,omitempty"` +} + // DetectHandlers detects available SCST handlers -func (s *Service) DetectHandlers(ctx context.Context) ([]string, error) { +func (s *Service) DetectHandlers(ctx context.Context) ([]HandlerInfo, error) { cmd := exec.CommandContext(ctx, "scstadmin", "-list_handler") output, err := cmd.Output() if err != nil { return nil, fmt.Errorf("failed to list handlers: %w", err) } - // Parse output (simplified - actual parsing would be more robust) - handlers := []string{} + // Parse output - skip header lines and separator + handlers := []HandlerInfo{} lines := strings.Split(string(output), "\n") + skipHeader := true + for _, line := range lines { line = strings.TrimSpace(line) - if line != "" && !strings.HasPrefix(line, "Handler") { - handlers = append(handlers, line) + + // Skip empty lines + if line == "" { + continue } + + // Skip header line "Handler" + if strings.HasPrefix(line, "Handler") { + skipHeader = false + continue + } + + // Skip separator line "-------------" + if strings.HasPrefix(line, "---") { + continue + } + + // Skip lines before header (like "Collecting current configuration: done.") + if skipHeader { + continue + } + + // Skip footer lines + if strings.Contains(line, "All done") || strings.Contains(line, "Collecting") { + continue + } + + // Map handler names to labels and descriptions + label, description := s.getHandlerInfo(line) + handlers = append(handlers, HandlerInfo{ + Name: line, + Label: label, + Description: description, + }) } return handlers, nil } +// getHandlerInfo returns a simple label and description for a handler +func (s *Service) getHandlerInfo(handlerName string) (string, string) { + handlerInfo := map[string]struct { + label string + description string + }{ + "dev_disk": { + label: "Physical Disk", + description: "Physical disk handler", + }, + "dev_disk_perf": { + label: "Physical Disk (Performance)", + description: "Physical disk handler with performance optimizations", + }, + 
"dev_cdrom": { + label: "CDROM", + description: "CD/DVD-ROM handler", + }, + "vdisk_blockio": { + label: "Block Device", + description: "Virtual disk block I/O handler (for block devices)", + }, + "vdisk_fileio": { + label: "Volume", + description: "Virtual disk file I/O handler (for ZFS volumes and files)", + }, + "vdisk_nullio": { + label: "Null Device", + description: "Null I/O handler (for testing)", + }, + "vcdrom": { + label: "CDROM", + description: "Virtual CD-ROM handler", + }, + } + + if info, ok := handlerInfo[handlerName]; ok { + return info.label, info.description + } + + // Default: use handler name as label + return handlerName, "" +} + +// ListPortals lists all iSCSI portals +func (s *Service) ListPortals(ctx context.Context) ([]Portal, error) { + query := ` + SELECT id, ip_address, port, is_active, created_at, updated_at + FROM scst_portals + ORDER BY ip_address, port + ` + + rows, err := s.db.QueryContext(ctx, query) + if err != nil { + return nil, fmt.Errorf("failed to list portals: %w", err) + } + defer rows.Close() + + var portals []Portal + for rows.Next() { + var portal Portal + err := rows.Scan( + &portal.ID, &portal.IPAddress, &portal.Port, + &portal.IsActive, &portal.CreatedAt, &portal.UpdatedAt, + ) + if err != nil { + s.logger.Error("Failed to scan portal", "error", err) + continue + } + portals = append(portals, portal) + } + + return portals, rows.Err() +} + +// CreatePortal creates a new iSCSI portal +func (s *Service) CreatePortal(ctx context.Context, portal *Portal) error { + // Validate IP address format (basic validation) + if portal.IPAddress == "" { + return fmt.Errorf("IP address is required") + } + + // Validate port range + if portal.Port < 1 || portal.Port > 65535 { + return fmt.Errorf("port must be between 1 and 65535") + } + + // Default port to 3260 if not specified + if portal.Port == 0 { + portal.Port = 3260 + } + + // Insert into database + query := ` + INSERT INTO scst_portals (ip_address, port, is_active) + VALUES ($1, $2, $3) + RETURNING id, created_at, updated_at + ` + + err := s.db.QueryRowContext(ctx, query, + portal.IPAddress, portal.Port, portal.IsActive, + ).Scan(&portal.ID, &portal.CreatedAt, &portal.UpdatedAt) + if err != nil { + return fmt.Errorf("failed to create portal: %w", err) + } + + s.logger.Info("Portal created", "ip", portal.IPAddress, "port", portal.Port) + return nil +} + +// UpdatePortal updates an existing portal +func (s *Service) UpdatePortal(ctx context.Context, id string, portal *Portal) error { + // Validate port range + if portal.Port < 1 || portal.Port > 65535 { + return fmt.Errorf("port must be between 1 and 65535") + } + + query := ` + UPDATE scst_portals + SET ip_address = $1, port = $2, is_active = $3, updated_at = NOW() + WHERE id = $4 + RETURNING updated_at + ` + + err := s.db.QueryRowContext(ctx, query, + portal.IPAddress, portal.Port, portal.IsActive, id, + ).Scan(&portal.UpdatedAt) + if err != nil { + if err == sql.ErrNoRows { + return fmt.Errorf("portal not found") + } + return fmt.Errorf("failed to update portal: %w", err) + } + + s.logger.Info("Portal updated", "id", id, "ip", portal.IPAddress, "port", portal.Port) + return nil +} + +// DeletePortal deletes a portal +func (s *Service) DeletePortal(ctx context.Context, id string) error { + result, err := s.db.ExecContext(ctx, "DELETE FROM scst_portals WHERE id = $1", id) + if err != nil { + return fmt.Errorf("failed to delete portal: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to get rows 
affected: %w", err) + } + + if rowsAffected == 0 { + return fmt.Errorf("portal not found") + } + + s.logger.Info("Portal deleted", "id", id) + return nil +} + +// GetPortal retrieves a portal by ID +func (s *Service) GetPortal(ctx context.Context, id string) (*Portal, error) { + query := ` + SELECT id, ip_address, port, is_active, created_at, updated_at + FROM scst_portals + WHERE id = $1 + ` + + var portal Portal + err := s.db.QueryRowContext(ctx, query, id).Scan( + &portal.ID, &portal.IPAddress, &portal.Port, + &portal.IsActive, &portal.CreatedAt, &portal.UpdatedAt, + ) + if err != nil { + if err == sql.ErrNoRows { + return nil, fmt.Errorf("portal not found") + } + return nil, fmt.Errorf("failed to get portal: %w", err) + } + + return &portal, nil +} diff --git a/backend/internal/storage/handler.go b/backend/internal/storage/handler.go index c470fec..2f3382c 100644 --- a/backend/internal/storage/handler.go +++ b/backend/internal/storage/handler.go @@ -304,6 +304,13 @@ func (h *Handler) DeleteZFSPool(c *gin.Context) { return } + // Invalidate cache for pools list + if h.cache != nil { + cacheKey := "http:/api/v1/storage/zfs/pools:" + h.cache.Delete(cacheKey) + h.logger.Debug("Cache invalidated for pools list", "key", cacheKey) + } + c.JSON(http.StatusOK, gin.H{"message": "ZFS pool deleted successfully"}) } diff --git a/backend/internal/storage/zfs.go b/backend/internal/storage/zfs.go index facd226..8c8242b 100644 --- a/backend/internal/storage/zfs.go +++ b/backend/internal/storage/zfs.go @@ -7,6 +7,7 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "strings" "time" @@ -44,7 +45,8 @@ type ZFSPool struct { AutoExpand bool `json:"auto_expand"` ScrubInterval int `json:"scrub_interval"` // days IsActive bool `json:"is_active"` - HealthStatus string `json:"health_status"` // online, degraded, faulted, offline + HealthStatus string `json:"health_status"` // online, degraded, faulted, offline + CompressRatio float64 `json:"compress_ratio"` // compression ratio (e.g., 1.45x) CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` CreatedBy string `json:"created_by"` @@ -359,6 +361,26 @@ func (s *ZFSService) getSpareDisks(ctx context.Context, poolName string) ([]stri return spareDisks, nil } +// getCompressRatio gets the compression ratio from ZFS +func (s *ZFSService) getCompressRatio(ctx context.Context, poolName string) (float64, error) { + cmd := exec.CommandContext(ctx, "zfs", "get", "-H", "-o", "value", "compressratio", poolName) + output, err := cmd.Output() + if err != nil { + return 1.0, err + } + + ratioStr := strings.TrimSpace(string(output)) + // Remove 'x' suffix if present (e.g., "1.45x" -> "1.45") + ratioStr = strings.TrimSuffix(ratioStr, "x") + + ratio, err := strconv.ParseFloat(ratioStr, 64) + if err != nil { + return 1.0, err + } + + return ratio, nil +} + // ListPools lists all ZFS pools func (s *ZFSService) ListPools(ctx context.Context) ([]*ZFSPool, error) { query := ` @@ -407,8 +429,17 @@ func (s *ZFSService) ListPools(ctx context.Context) ([]*ZFSPool, error) { pool.SpareDisks = spareDisks } + // Get compressratio from ZFS system + compressRatio, err := s.getCompressRatio(ctx, pool.Name) + if err != nil { + s.logger.Warn("Failed to get compressratio", "pool", pool.Name, "error", err) + pool.CompressRatio = 1.0 // Default to 1.0 if can't get ratio + } else { + pool.CompressRatio = compressRatio + } + pools = append(pools, &pool) - s.logger.Debug("Added pool to list", "pool_id", pool.ID, "name", pool.Name) + s.logger.Debug("Added pool to list", 
"pool_id", pool.ID, "name", pool.Name, "compressratio", pool.CompressRatio) } if err := rows.Err(); err != nil { diff --git a/backend/internal/storage/zfs_pool_monitor.go b/backend/internal/storage/zfs_pool_monitor.go index 6812167..255f676 100644 --- a/backend/internal/storage/zfs_pool_monitor.go +++ b/backend/internal/storage/zfs_pool_monitor.go @@ -218,7 +218,7 @@ func (m *ZFSPoolMonitor) updatePoolStatus(ctx context.Context, poolName string, return nil } -// markMissingPoolsOffline marks pools that exist in database but not in system as offline +// markMissingPoolsOffline marks pools that exist in database but not in system as offline or deletes them func (m *ZFSPoolMonitor) markMissingPoolsOffline(ctx context.Context, systemPools map[string]PoolInfo) error { // Get all pools from database rows, err := m.zfsService.db.QueryContext(ctx, "SELECT id, name FROM zfs_pools WHERE is_active = true") @@ -235,17 +235,13 @@ func (m *ZFSPoolMonitor) markMissingPoolsOffline(ctx context.Context, systemPool // Check if pool exists in system if _, exists := systemPools[poolName]; !exists { - // Pool doesn't exist in system, mark as offline - _, err = m.zfsService.db.ExecContext(ctx, ` - UPDATE zfs_pools SET - health_status = 'offline', - updated_at = NOW() - WHERE id = $1 - `, poolID) + // Pool doesn't exist in system - delete from database (pool was destroyed) + m.logger.Info("Pool not found in system, removing from database", "pool", poolName) + _, err = m.zfsService.db.ExecContext(ctx, "DELETE FROM zfs_pools WHERE id = $1", poolID) if err != nil { - m.logger.Warn("Failed to mark pool as offline", "pool", poolName, "error", err) + m.logger.Warn("Failed to delete missing pool from database", "pool", poolName, "error", err) } else { - m.logger.Info("Marked pool as offline (not found in system)", "pool", poolName) + m.logger.Info("Removed missing pool from database", "pool", poolName) } } } diff --git a/backend/internal/system/handler.go b/backend/internal/system/handler.go index 2c85b78..04fd9bd 100644 --- a/backend/internal/system/handler.go +++ b/backend/internal/system/handler.go @@ -115,3 +115,19 @@ func (h *Handler) GenerateSupportBundle(c *gin.Context) { c.JSON(http.StatusAccepted, gin.H{"task_id": taskID}) } +// ListNetworkInterfaces lists all network interfaces +func (h *Handler) ListNetworkInterfaces(c *gin.Context) { + interfaces, err := h.service.ListNetworkInterfaces(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to list network interfaces", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list network interfaces"}) + return + } + + // Ensure we return an empty array instead of null + if interfaces == nil { + interfaces = []NetworkInterface{} + } + + c.JSON(http.StatusOK, gin.H{"interfaces": interfaces}) +} diff --git a/backend/internal/system/service.go b/backend/internal/system/service.go index 1e57b92..8a55345 100644 --- a/backend/internal/system/service.go +++ b/backend/internal/system/service.go @@ -175,3 +175,173 @@ func (s *Service) GenerateSupportBundle(ctx context.Context, outputPath string) return nil } +// NetworkInterface represents a network interface +type NetworkInterface struct { + Name string `json:"name"` + IPAddress string `json:"ip_address"` + Subnet string `json:"subnet"` + Status string `json:"status"` // "Connected" or "Down" + Speed string `json:"speed"` // e.g., "10 Gbps", "1 Gbps" + Role string `json:"role"` // "Management", "ISCSI", or empty +} + +// ListNetworkInterfaces lists all network interfaces +func (s 
*Service) ListNetworkInterfaces(ctx context.Context) ([]NetworkInterface, error) { + // First, get all interface names and their states + cmd := exec.CommandContext(ctx, "ip", "link", "show") + output, err := cmd.Output() + if err != nil { + s.logger.Error("Failed to list interfaces", "error", err) + return nil, fmt.Errorf("failed to list interfaces: %w", err) + } + + interfaceMap := make(map[string]*NetworkInterface) + lines := strings.Split(string(output), "\n") + + s.logger.Debug("Parsing network interfaces", "output_lines", len(lines)) + + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + + // Parse interface name and state + // Format: "2: ens18: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000" + // Look for lines that start with a number followed by ":" (interface definition line) + // Simple check: line starts with digit, contains ":", and contains "state" + if len(line) > 0 && line[0] >= '0' && line[0] <= '9' && strings.Contains(line, ":") && strings.Contains(line, "state") { + parts := strings.Fields(line) + if len(parts) < 2 { + continue + } + + // Extract interface name (e.g., "ens18:" or "lo:") + ifaceName := strings.TrimSuffix(parts[1], ":") + if ifaceName == "" || ifaceName == "lo" { + continue // Skip loopback + } + + // Extract state - look for "state UP" or "state DOWN" in the line + state := "Down" + if strings.Contains(line, "state UP") { + state = "Connected" + } else if strings.Contains(line, "state DOWN") { + state = "Down" + } + + s.logger.Info("Found interface", "name", ifaceName, "state", state) + + interfaceMap[ifaceName] = &NetworkInterface{ + Name: ifaceName, + Status: state, + Speed: "Unknown", + } + } + } + + s.logger.Debug("Found interfaces from ip link", "count", len(interfaceMap)) + + // Get IP addresses for each interface + cmd = exec.CommandContext(ctx, "ip", "-4", "addr", "show") + output, err = cmd.Output() + if err != nil { + s.logger.Warn("Failed to get IP addresses", "error", err) + } else { + lines = strings.Split(string(output), "\n") + var currentIfaceName string + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + + // Parse interface name (e.g., "2: ens18: ") + if strings.Contains(line, ":") && !strings.Contains(line, "inet") && !strings.HasPrefix(line, "valid_lft") && !strings.HasPrefix(line, "altname") { + parts := strings.Fields(line) + if len(parts) >= 2 { + currentIfaceName = strings.TrimSuffix(parts[1], ":") + s.logger.Debug("Processing interface for IP", "name", currentIfaceName) + } + continue + } + + // Parse IP address (e.g., "inet 10.10.14.16/24 brd 10.10.14.255 scope global ens18") + if strings.HasPrefix(line, "inet ") && currentIfaceName != "" && currentIfaceName != "lo" { + parts := strings.Fields(line) + if len(parts) >= 2 { + ipWithSubnet := parts[1] // e.g., "10.10.14.16/24" + ipParts := strings.Split(ipWithSubnet, "/") + if len(ipParts) == 2 { + ip := ipParts[0] + subnet := ipParts[1] + + // Find or create interface + iface, exists := interfaceMap[currentIfaceName] + if !exists { + s.logger.Debug("Creating new interface entry", "name", currentIfaceName) + iface = &NetworkInterface{ + Name: currentIfaceName, + Status: "Down", + Speed: "Unknown", + } + interfaceMap[currentIfaceName] = iface + } + + iface.IPAddress = ip + iface.Subnet = subnet + s.logger.Debug("Set IP for interface", "name", currentIfaceName, "ip", ip, "subnet", subnet) + } + } + } + } + } + + // Convert map to slice + var interfaces 
[]NetworkInterface
+	s.logger.Debug("Converting interface map to slice", "map_size", len(interfaceMap))
+	for _, iface := range interfaceMap {
+		// Get speed for each interface using ethtool
+		if iface.Name != "" && iface.Name != "lo" {
+			cmd := exec.CommandContext(ctx, "ethtool", iface.Name)
+			output, err := cmd.Output()
+			if err == nil {
+				// Parse speed from ethtool output
+				ethtoolLines := strings.Split(string(output), "\n")
+				for _, ethtoolLine := range ethtoolLines {
+					if strings.Contains(ethtoolLine, "Speed:") {
+						parts := strings.Fields(ethtoolLine)
+						if len(parts) >= 2 {
+							iface.Speed = parts[1]
+						}
+						break
+					}
+				}
+			}
+
+			// Determine role based on interface name or IP (simple heuristic)
+			// You can enhance this with configuration file or database lookup
+			if strings.Contains(iface.Name, "eth") || strings.Contains(iface.Name, "ens") {
+				// Default to Management for first interface, ISCSI for others
+				if iface.Name == "eth0" || iface.Name == "ens18" {
+					iface.Role = "Management"
+				} else {
+					// Check if IP is in typical iSCSI range (10.x.x.x)
+					if strings.HasPrefix(iface.IPAddress, "10.") && iface.IPAddress != "" {
+						iface.Role = "ISCSI"
+					}
+				}
+			}
+		}
+		interfaces = append(interfaces, *iface)
+	}
+
+	// If no interfaces found, return empty slice
+	if len(interfaces) == 0 {
+		s.logger.Warn("No network interfaces found")
+		return []NetworkInterface{}, nil
+	}
+
+	s.logger.Info("Listed network interfaces", "count", len(interfaces))
+	return interfaces, nil
+}
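Reviewer note on ListNetworkInterfaces above: scraping the human-readable output of `ip link` and `ip -4 addr` works, but it is sensitive to formatting changes (the altname lines already need special-casing). iproute2 can emit JSON with `ip -j`, which decodes directly into structs. A minimal sketch, assuming an iproute2 build with JSON support; the field names (ifname, operstate, addr_info, local, prefixlen) are iproute2's JSON keys, and this is an illustration rather than part of the patch:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

type addrInfo struct {
	Family    string `json:"family"` // "inet" or "inet6"
	Local     string `json:"local"`
	PrefixLen int    `json:"prefixlen"`
}

type link struct {
	IfName    string     `json:"ifname"`
	OperState string     `json:"operstate"` // "UP", "DOWN", "UNKNOWN"
	AddrInfo  []addrInfo `json:"addr_info"`
}

func main() {
	out, err := exec.Command("ip", "-j", "addr", "show").Output()
	if err != nil {
		panic(err)
	}
	var links []link
	if err := json.Unmarshal(out, &links); err != nil {
		panic(err)
	}
	for _, l := range links {
		if l.IfName == "lo" {
			continue // skip loopback, as the service above does
		}
		for _, a := range l.AddrInfo {
			if a.Family == "inet" {
				fmt.Printf("%s %s/%d state=%s\n", l.IfName, a.Local, a.PrefixLen, l.OperState)
			}
		}
	}
}

This would replace both exec calls and the line-by-line state machine with a single decode.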
binding:"required"` - Description string `json:"description"` + Name string `json:"name" binding:"required"` + Description string `json:"description"` BackingStorePath string `json:"backing_store_path" binding:"required"` - SlotCount int `json:"slot_count" binding:"required"` - DriveCount int `json:"drive_count" binding:"required"` + SlotCount int `json:"slot_count" binding:"required"` + DriveCount int `json:"drive_count" binding:"required"` } // CreateLibrary creates a new virtual tape library @@ -161,10 +192,10 @@ func (h *Handler) GetLibraryTapes(c *gin.Context) { // CreateTapeRequest represents a tape creation request type CreateTapeRequest struct { - Barcode string `json:"barcode" binding:"required"` - SlotNumber int `json:"slot_number" binding:"required"` - TapeType string `json:"tape_type" binding:"required"` - SizeGB int64 `json:"size_gb" binding:"required"` + Barcode string `json:"barcode" binding:"required"` + SlotNumber int `json:"slot_number" binding:"required"` + TapeType string `json:"tape_type" binding:"required"` + SizeGB int64 `json:"size_gb" binding:"required"` } // CreateTape creates a new virtual tape @@ -218,9 +249,9 @@ func (h *Handler) LoadTape(c *gin.Context) { // Create async task taskID, err := h.taskEngine.CreateTask(c.Request.Context(), tasks.TaskTypeLoadUnload, userID.(string), map[string]interface{}{ - "operation": "load_tape", - "library_id": libraryID, - "slot_number": req.SlotNumber, + "operation": "load_tape", + "library_id": libraryID, + "slot_number": req.SlotNumber, "drive_number": req.DriveNumber, }) if err != nil { @@ -268,9 +299,9 @@ func (h *Handler) UnloadTape(c *gin.Context) { // Create async task taskID, err := h.taskEngine.CreateTask(c.Request.Context(), tasks.TaskTypeLoadUnload, userID.(string), map[string]interface{}{ - "operation": "unload_tape", - "library_id": libraryID, - "slot_number": req.SlotNumber, + "operation": "unload_tape", + "library_id": libraryID, + "slot_number": req.SlotNumber, "drive_number": req.DriveNumber, }) if err != nil { @@ -295,4 +326,3 @@ func (h *Handler) UnloadTape(c *gin.Context) { c.JSON(http.StatusAccepted, gin.H{"task_id": taskID}) } - diff --git a/backend/internal/tape_vtl/mhvtl_monitor.go b/backend/internal/tape_vtl/mhvtl_monitor.go index f5e73ab..166b4cc 100644 --- a/backend/internal/tape_vtl/mhvtl_monitor.go +++ b/backend/internal/tape_vtl/mhvtl_monitor.go @@ -67,7 +67,7 @@ func (m *MHVTLMonitor) Stop() { // syncMHVTL parses mhvtl configuration and syncs to database func (m *MHVTLMonitor) syncMHVTL(ctx context.Context) { - m.logger.Debug("Running MHVTL configuration sync") + m.logger.Info("Running MHVTL configuration sync") deviceConfPath := filepath.Join(m.configPath, "device.conf") if _, err := os.Stat(deviceConfPath); os.IsNotExist(err) { @@ -84,6 +84,11 @@ func (m *MHVTLMonitor) syncMHVTL(ctx context.Context) { m.logger.Info("Parsed MHVTL configuration", "libraries", len(libraries), "drives", len(drives)) + // Log parsed drives for debugging + for _, drive := range drives { + m.logger.Debug("Parsed drive", "drive_id", drive.DriveID, "library_id", drive.LibraryID, "slot", drive.Slot) + } + // Sync libraries to database for _, lib := range libraries { if err := m.syncLibrary(ctx, lib); err != nil { @@ -94,7 +99,9 @@ func (m *MHVTLMonitor) syncMHVTL(ctx context.Context) { // Sync drives to database for _, drive := range drives { if err := m.syncDrive(ctx, drive); err != nil { - m.logger.Error("Failed to sync drive", "drive_id", drive.DriveID, "error", err) + m.logger.Error("Failed to sync drive", 
"drive_id", drive.DriveID, "library_id", drive.LibraryID, "slot", drive.Slot, "error", err) + } else { + m.logger.Debug("Synced drive", "drive_id", drive.DriveID, "library_id", drive.LibraryID, "slot", drive.Slot) } } @@ -106,7 +113,7 @@ func (m *MHVTLMonitor) syncMHVTL(ctx context.Context) { } } - m.logger.Debug("MHVTL configuration sync completed") + m.logger.Info("MHVTL configuration sync completed") } // LibraryInfo represents a library from device.conf @@ -189,6 +196,7 @@ func (m *MHVTLMonitor) parseDeviceConf(ctx context.Context, path string) ([]Libr Target: matches[3], LUN: matches[4], } + // Library ID and Slot might be on the same line or next line if matches := libraryIDRegex.FindStringSubmatch(line); matches != nil { libID, _ := strconv.Atoi(matches[1]) slot, _ := strconv.Atoi(matches[2]) @@ -198,34 +206,63 @@ func (m *MHVTLMonitor) parseDeviceConf(ctx context.Context, path string) ([]Libr continue } - // Parse library fields - if currentLibrary != nil { - if strings.HasPrefix(line, "Vendor identification:") { - currentLibrary.Vendor = strings.TrimSpace(strings.TrimPrefix(line, "Vendor identification:")) - } else if strings.HasPrefix(line, "Product identification:") { - currentLibrary.Product = strings.TrimSpace(strings.TrimPrefix(line, "Product identification:")) - } else if strings.HasPrefix(line, "Unit serial number:") { - currentLibrary.SerialNumber = strings.TrimSpace(strings.TrimPrefix(line, "Unit serial number:")) - } else if strings.HasPrefix(line, "Home directory:") { - currentLibrary.HomeDirectory = strings.TrimSpace(strings.TrimPrefix(line, "Home directory:")) + // Parse library fields (only if we're in a library section and not in a drive section) + if currentLibrary != nil && currentDrive == nil { + // Handle both "Vendor identification:" and " Vendor identification:" (with leading space) + if strings.Contains(line, "Vendor identification:") { + parts := strings.Split(line, "Vendor identification:") + if len(parts) > 1 { + currentLibrary.Vendor = strings.TrimSpace(parts[1]) + m.logger.Debug("Parsed vendor", "vendor", currentLibrary.Vendor, "library_id", currentLibrary.LibraryID) + } + } else if strings.Contains(line, "Product identification:") { + parts := strings.Split(line, "Product identification:") + if len(parts) > 1 { + currentLibrary.Product = strings.TrimSpace(parts[1]) + m.logger.Info("Parsed library product", "product", currentLibrary.Product, "library_id", currentLibrary.LibraryID) + } + } else if strings.Contains(line, "Unit serial number:") { + parts := strings.Split(line, "Unit serial number:") + if len(parts) > 1 { + currentLibrary.SerialNumber = strings.TrimSpace(parts[1]) + } + } else if strings.Contains(line, "Home directory:") { + parts := strings.Split(line, "Home directory:") + if len(parts) > 1 { + currentLibrary.HomeDirectory = strings.TrimSpace(parts[1]) + } } } // Parse drive fields if currentDrive != nil { - if strings.HasPrefix(line, "Vendor identification:") { - currentDrive.Vendor = strings.TrimSpace(strings.TrimPrefix(line, "Vendor identification:")) - } else if strings.HasPrefix(line, "Product identification:") { - currentDrive.Product = strings.TrimSpace(strings.TrimPrefix(line, "Product identification:")) - } else if strings.HasPrefix(line, "Unit serial number:") { - currentDrive.SerialNumber = strings.TrimSpace(strings.TrimPrefix(line, "Unit serial number:")) - } else if strings.HasPrefix(line, "Library ID:") && strings.Contains(line, "Slot:") { + // Check for Library ID and Slot first (can be on separate line) + if 
strings.Contains(line, "Library ID:") && strings.Contains(line, "Slot:") { matches := libraryIDRegex.FindStringSubmatch(line) if matches != nil { libID, _ := strconv.Atoi(matches[1]) slot, _ := strconv.Atoi(matches[2]) currentDrive.LibraryID = libID currentDrive.Slot = slot + m.logger.Debug("Parsed drive Library ID and Slot", "drive_id", currentDrive.DriveID, "library_id", libID, "slot", slot) + continue + } + } + // Handle both "Vendor identification:" and " Vendor identification:" (with leading space) + if strings.Contains(line, "Vendor identification:") { + parts := strings.Split(line, "Vendor identification:") + if len(parts) > 1 { + currentDrive.Vendor = strings.TrimSpace(parts[1]) + } + } else if strings.Contains(line, "Product identification:") { + parts := strings.Split(line, "Product identification:") + if len(parts) > 1 { + currentDrive.Product = strings.TrimSpace(parts[1]) + } + } else if strings.Contains(line, "Unit serial number:") { + parts := strings.Split(line, "Unit serial number:") + if len(parts) > 1 { + currentDrive.SerialNumber = strings.TrimSpace(parts[1]) } } } @@ -255,9 +292,17 @@ func (m *MHVTLMonitor) syncLibrary(ctx context.Context, libInfo LibraryInfo) err libInfo.LibraryID, ).Scan(&existingID) + m.logger.Debug("Syncing library", "library_id", libInfo.LibraryID, "vendor", libInfo.Vendor, "product", libInfo.Product) + + // Use product identification for library name (without library ID) libraryName := fmt.Sprintf("VTL-%d", libInfo.LibraryID) if libInfo.Product != "" { - libraryName = fmt.Sprintf("%s-%d", libInfo.Product, libInfo.LibraryID) + // Use only product name, without library ID + libraryName = libInfo.Product + m.logger.Info("Using product for library name", "product", libInfo.Product, "library_id", libInfo.LibraryID, "name", libraryName) + } else if libInfo.Vendor != "" { + libraryName = libInfo.Vendor + m.logger.Info("Using vendor for library name (product not available)", "vendor", libInfo.Vendor, "library_id", libInfo.LibraryID) } if err == sql.ErrNoRows { @@ -275,23 +320,41 @@ func (m *MHVTLMonitor) syncLibrary(ctx context.Context, libInfo LibraryInfo) err _, err = m.service.db.ExecContext(ctx, ` INSERT INTO virtual_tape_libraries ( name, description, mhvtl_library_id, backing_store_path, - slot_count, drive_count, is_active - ) VALUES ($1, $2, $3, $4, $5, $6, $7) + vendor, slot_count, drive_count, is_active + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) `, libraryName, fmt.Sprintf("MHVTL Library %d (%s)", libInfo.LibraryID, libInfo.Product), - libInfo.LibraryID, backingStorePath, slotCount, driveCount, true) + libInfo.LibraryID, backingStorePath, libInfo.Vendor, slotCount, driveCount, true) if err != nil { return fmt.Errorf("failed to insert library: %w", err) } m.logger.Info("Created virtual library from MHVTL", "library_id", libInfo.LibraryID, "name", libraryName) } else if err == nil { - // Update existing library + // Update existing library - also update name if product is available + updateName := libraryName + // If product exists and current name doesn't match, update it + if libInfo.Product != "" { + var currentName string + err := m.service.db.QueryRowContext(ctx, + "SELECT name FROM virtual_tape_libraries WHERE id = $1", existingID, + ).Scan(&currentName) + if err == nil { + // Use only product name, without library ID + expectedName := libInfo.Product + if currentName != expectedName { + updateName = expectedName + m.logger.Info("Updating library name", "old", currentName, "new", updateName, "product", libInfo.Product) + } + } + } + +
m.logger.Info("Updating existing library", "library_id", libInfo.LibraryID, "product", libInfo.Product, "vendor", libInfo.Vendor, "old_name", libraryName, "new_name", updateName) _, err = m.service.db.ExecContext(ctx, ` UPDATE virtual_tape_libraries SET name = $1, description = $2, backing_store_path = $3, - is_active = $4, updated_at = NOW() - WHERE id = $5 - `, libraryName, fmt.Sprintf("MHVTL Library %d (%s)", libInfo.LibraryID, libInfo.Product), - libInfo.HomeDirectory, true, existingID) + vendor = $4, is_active = $5, updated_at = NOW() + WHERE id = $6 + `, updateName, fmt.Sprintf("MHVTL Library %d (%s)", libInfo.LibraryID, libInfo.Product), + libInfo.HomeDirectory, libInfo.Vendor, true, existingID) if err != nil { return fmt.Errorf("failed to update library: %w", err) } diff --git a/backend/internal/tape_vtl/service.go b/backend/internal/tape_vtl/service.go index e8a65c9..1f7f458 100644 --- a/backend/internal/tape_vtl/service.go +++ b/backend/internal/tape_vtl/service.go @@ -28,46 +28,47 @@ func NewService(db *database.DB, log *logger.Logger) *Service { // VirtualTapeLibrary represents a virtual tape library type VirtualTapeLibrary struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - MHVTLibraryID int `json:"mhvtl_library_id"` - BackingStorePath string `json:"backing_store_path"` - SlotCount int `json:"slot_count"` - DriveCount int `json:"drive_count"` - IsActive bool `json:"is_active"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - CreatedBy string `json:"created_by"` + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + MHVTLibraryID int `json:"mhvtl_library_id"` + BackingStorePath string `json:"backing_store_path"` + Vendor string `json:"vendor,omitempty"` + SlotCount int `json:"slot_count"` + DriveCount int `json:"drive_count"` + IsActive bool `json:"is_active"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + CreatedBy string `json:"created_by"` } // VirtualTapeDrive represents a virtual tape drive type VirtualTapeDrive struct { - ID string `json:"id"` - LibraryID string `json:"library_id"` - DriveNumber int `json:"drive_number"` - DevicePath *string `json:"device_path,omitempty"` - StablePath *string `json:"stable_path,omitempty"` - Status string `json:"status"` - CurrentTapeID string `json:"current_tape_id,omitempty"` - IsActive bool `json:"is_active"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` + ID string `json:"id"` + LibraryID string `json:"library_id"` + DriveNumber int `json:"drive_number"` + DevicePath *string `json:"device_path,omitempty"` + StablePath *string `json:"stable_path,omitempty"` + Status string `json:"status"` + CurrentTapeID string `json:"current_tape_id,omitempty"` + IsActive bool `json:"is_active"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` } // VirtualTape represents a virtual tape type VirtualTape struct { - ID string `json:"id"` - LibraryID string `json:"library_id"` - Barcode string `json:"barcode"` - SlotNumber int `json:"slot_number"` - ImageFilePath string `json:"image_file_path"` - SizeBytes int64 `json:"size_bytes"` - UsedBytes int64 `json:"used_bytes"` - TapeType string `json:"tape_type"` - Status string `json:"status"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` + ID string `json:"id"` + LibraryID string `json:"library_id"` + Barcode string `json:"barcode"` + 
SlotNumber int `json:"slot_number"` + ImageFilePath string `json:"image_file_path"` + SizeBytes int64 `json:"size_bytes"` + UsedBytes int64 `json:"used_bytes"` + TapeType string `json:"tape_type"` + Status string `json:"status"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` } // CreateLibrary creates a new virtual tape library @@ -135,14 +136,14 @@ func (s *Service) CreateLibrary(ctx context.Context, name, description, backingS for i := 1; i <= slotCount; i++ { barcode := fmt.Sprintf("V%05d", i) tape := VirtualTape{ - LibraryID: lib.ID, - Barcode: barcode, - SlotNumber: i, + LibraryID: lib.ID, + Barcode: barcode, + SlotNumber: i, ImageFilePath: filepath.Join(tapesPath, fmt.Sprintf("%s.img", barcode)), - SizeBytes: 800 * 1024 * 1024 * 1024, // 800 GB default (LTO-8) - UsedBytes: 0, - TapeType: "LTO-8", - Status: "idle", + SizeBytes: 800 * 1024 * 1024 * 1024, // 800 GB default (LTO-8) + UsedBytes: 0, + TapeType: "LTO-8", + Status: "idle", } if err := s.createTape(ctx, &tape); err != nil { s.logger.Error("Failed to create tape", "slot", i, "error", err) @@ -223,49 +224,83 @@ func (s *Service) createTape(ctx context.Context, tape *VirtualTape) error { func (s *Service) ListLibraries(ctx context.Context) ([]VirtualTapeLibrary, error) { query := ` SELECT id, name, description, mhvtl_library_id, backing_store_path, + COALESCE(vendor, '') as vendor, slot_count, drive_count, is_active, created_at, updated_at, created_by FROM virtual_tape_libraries ORDER BY name ` + s.logger.Info("Executing query to list libraries") rows, err := s.db.QueryContext(ctx, query) if err != nil { + s.logger.Error("Failed to query libraries", "error", err) return nil, fmt.Errorf("failed to list libraries: %w", err) } + s.logger.Info("Query executed successfully, got rows") defer rows.Close() - var libraries []VirtualTapeLibrary + libraries := make([]VirtualTapeLibrary, 0) // Initialize as empty slice, not nil + s.logger.Info("Starting to scan library rows", "query", query) + rowCount := 0 for rows.Next() { + rowCount++ var lib VirtualTapeLibrary + var description sql.NullString + var createdBy sql.NullString err := rows.Scan( - &lib.ID, &lib.Name, &lib.Description, &lib.MHVTLibraryID, &lib.BackingStorePath, + &lib.ID, &lib.Name, &description, &lib.MHVTLibraryID, &lib.BackingStorePath, + &lib.Vendor, &lib.SlotCount, &lib.DriveCount, &lib.IsActive, - &lib.CreatedAt, &lib.UpdatedAt, &lib.CreatedBy, + &lib.CreatedAt, &lib.UpdatedAt, &createdBy, ) if err != nil { - s.logger.Error("Failed to scan library", "error", err) + s.logger.Error("Failed to scan library", "error", err, "row", rowCount) continue } + if description.Valid { + lib.Description = description.String + } + if createdBy.Valid { + lib.CreatedBy = createdBy.String + } libraries = append(libraries, lib) + s.logger.Info("Added library to list", "library_id", lib.ID, "name", lib.Name, "mhvtl_id", lib.MHVTLibraryID) + } + s.logger.Info("Finished scanning library rows", "total_rows", rowCount, "libraries_added", len(libraries)) + + if err := rows.Err(); err != nil { + s.logger.Error("Error iterating library rows", "error", err) + return nil, fmt.Errorf("error iterating library rows: %w", err) } - return libraries, rows.Err() + s.logger.Info("Listed virtual tape libraries", "count", len(libraries), "is_nil", libraries == nil) + // Ensure we return an empty slice, not nil + if libraries == nil { + s.logger.Warn("Libraries is nil in service, converting to empty array") + libraries = []VirtualTapeLibrary{} + } + s.logger.Info("Returning 
from service", "count", len(libraries), "is_nil", libraries == nil) + return libraries, nil } // GetLibrary retrieves a library by ID func (s *Service) GetLibrary(ctx context.Context, id string) (*VirtualTapeLibrary, error) { query := ` SELECT id, name, description, mhvtl_library_id, backing_store_path, + COALESCE(vendor, '') as vendor, slot_count, drive_count, is_active, created_at, updated_at, created_by FROM virtual_tape_libraries WHERE id = $1 ` var lib VirtualTapeLibrary + var description sql.NullString + var createdBy sql.NullString err := s.db.QueryRowContext(ctx, query, id).Scan( - &lib.ID, &lib.Name, &lib.Description, &lib.MHVTLibraryID, &lib.BackingStorePath, + &lib.ID, &lib.Name, &description, &lib.MHVTLibraryID, &lib.BackingStorePath, + &lib.Vendor, &lib.SlotCount, &lib.DriveCount, &lib.IsActive, - &lib.CreatedAt, &lib.UpdatedAt, &lib.CreatedBy, + &lib.CreatedAt, &lib.UpdatedAt, &createdBy, ) if err != nil { if err == sql.ErrNoRows { @@ -274,6 +309,13 @@ func (s *Service) GetLibrary(ctx context.Context, id string) (*VirtualTapeLibrar return nil, fmt.Errorf("failed to get library: %w", err) } + if description.Valid { + lib.Description = description.String + } + if createdBy.Valid { + lib.CreatedBy = createdBy.String + } + return &lib, nil } @@ -500,4 +542,3 @@ func (s *Service) DeleteLibrary(ctx context.Context, id string) error { s.logger.Info("Virtual tape library deleted", "id", id, "name", lib.Name) return nil } - diff --git a/deploy/systemd/calypso-logger.service b/deploy/systemd/calypso-logger.service new file mode 100644 index 0000000..37e9285 --- /dev/null +++ b/deploy/systemd/calypso-logger.service @@ -0,0 +1,25 @@ +[Unit] +Description=Calypso Stack Log Aggregator +Documentation=https://github.com/atlasos/calypso +After=network.target +Wants=calypso-api.service calypso-frontend.service + +[Service] +Type=simple +# Run as root to access journald and write to /var/syslog +# Format: timestamp [service] message +ExecStart=/bin/bash -c '/usr/bin/journalctl -u calypso-api.service -u calypso-frontend.service -f --no-pager -o short-iso >> /var/syslog/calypso.log 2>&1' +Restart=always +RestartSec=5 + +# Security hardening +NoNewPrivileges=false +PrivateTmp=true +ReadWritePaths=/var/syslog + +# Resource limits +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target + diff --git a/docs/services.md b/docs/services.md new file mode 100644 index 0000000..97588a9 --- /dev/null +++ b/docs/services.md @@ -0,0 +1,126 @@ +# Calypso Appliance Services Documentation + +This document provides an overview of all services that form the Calypso backup appliance. + +## Core Calypso Services + +### calypso-api.service +**Status**: Running +**Description**: AtlasOS Calypso API Service (Development) +**Purpose**: Main REST API backend for the Calypso appliance, handles all business logic and database operations. +**Binary**: `/development/calypso/backend/bin/calypso-api` +**Config**: `/development/calypso/backend/config.yaml.example` + +### calypso-frontend.service +**Status**: Running +**Description**: Calypso Frontend Development Server +**Purpose**: Web UI for managing backups, storage, and monitoring the appliance. +**Port**: 3000 +**Technology**: React + Vite (development mode) + +## Backup Services (Bacula) + +### bacula-director.service +**Status**: Running +**Description**: Bacula Director Daemon +**Purpose**: Central management daemon that orchestrates all backup, restore, and verify operations. 
+**Config**: `/etc/bacula/bacula-dir.conf`
+**Docs**: `man:bacula-dir(8)`
+
+### bacula-sd.service
+**Status**: Running
+**Description**: Bacula Storage Daemon
+**Purpose**: Manages physical backup storage devices (disks, tapes, virtual tape libraries).
+**Config**: `/etc/bacula/bacula-sd.conf`
+
+### bacula-fd.service
+**Status**: Running
+**Description**: Bacula File Daemon
+**Purpose**: Runs on systems being backed up, manages file access and metadata.
+**Config**: `/etc/bacula/bacula-fd.conf`
+
+## Storage/iSCSI Services (SCST)
+
+### scst.service
+**Status**: Active (exited)
+**Description**: SCST - A Generic SCSI Target Subsystem
+**Purpose**: Kernel-level SCSI target framework providing high-performance storage exports.
+**Type**: One-shot service that loads SCST kernel modules
+
+### iscsi-scstd.service
+**Status**: Running
+**Description**: iSCSI SCST Target Daemon
+**Purpose**: Provides iSCSI protocol support for SCST, allowing network block storage exports.
+**Port**: 3260 (standard iSCSI port)
+**Configured Targets**:
+- `iqn.2025-12.id.atlas:lun01` (enabled)
+
+### iscsid.service
+**Status**: Inactive
+**Description**: iSCSI initiator daemon
+**Purpose**: Client-side iSCSI service (not currently in use)
+
+### open-iscsi.service
+**Status**: Inactive
+**Description**: Login to default iSCSI targets
+**Purpose**: Automatic iSCSI target login (not currently in use)
+
+## Virtual Tape Library
+
+### mhvtl-load-modules.service
+**Status**: Active (exited)
+**Description**: Load mhvtl modules
+**Purpose**: Loads mhVTL (virtual tape library) kernel modules for tape emulation.
+**Type**: One-shot service that runs at boot
+**Docs**: `man:vtltape(1)`, `man:vtllibrary(1)`
+
+## Database
+
+### postgresql.service
+**Status**: Active (exited)
+**Description**: PostgreSQL RDBMS
+**Purpose**: Parent service for PostgreSQL database management
+
+### postgresql@16-main.service
+**Status**: Running
+**Description**: PostgreSQL Cluster 16-main
+**Purpose**: Main database for Calypso API, stores configuration, jobs, and metadata.
+**Version**: PostgreSQL 16
+
+## Service Management
+
+### Check All Services Status
+```bash
+systemctl status calypso-api calypso-frontend bacula-director bacula-sd bacula-fd scst iscsi-scstd mhvtl-load-modules postgresql
+```
+
+### Rebuild and Restart Core Services
+```bash
+/development/calypso/scripts/rebuild-and-restart.sh
+```
+
+### Restart Individual Services
+```bash
+systemctl restart calypso-api.service
+systemctl restart calypso-frontend.service
+systemctl restart bacula-director.service
+```
+
+## Service Dependencies
+
+```
+PostgreSQL
+  └── Calypso API
+        └── Calypso Frontend
+
+SCST
+  └── iSCSI SCST Target Daemon
+
+mhVTL
+  └── Bacula Storage Daemon
+        └── Bacula Director
+              └── Bacula File Daemon
+```
+
+## Total Service Count
+**12 services** (10 active, plus the inactive `iscsid` and `open-iscsi` units) forming the complete Calypso backup appliance stack.
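+
+## Log Aggregation
+
+The appliance also ships a log aggregator unit (not included in the count above):
+
+### calypso-logger.service
+**Status**: Ships in `deploy/systemd/` (not installed by default)
+**Description**: Calypso Stack Log Aggregator
+**Purpose**: Tails the `calypso-api` and `calypso-frontend` journals into a single ISO-timestamped file at `/var/syslog/calypso.log`.
+
+A minimal install-and-verify sketch; the copy destination and the pre-created `/var/syslog` directory are assumptions about the target host, not part of the unit file itself:
+
+```bash
+# Install the unit shipped in deploy/systemd/ and create its log directory
+cp deploy/systemd/calypso-logger.service /etc/systemd/system/
+mkdir -p /var/syslog
+systemctl daemon-reload
+systemctl enable --now calypso-logger.service
+
+# Verify: journald entries from both services stream into one file
+tail -f /var/syslog/calypso.log
+```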
diff --git a/frontend/public/logo.png b/frontend/public/logo.png new file mode 100644 index 0000000..2cd032e Binary files /dev/null and b/frontend/public/logo.png differ diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index 37e3b32..6aa4acc 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -10,6 +10,10 @@ import TapeLibrariesPage from '@/pages/TapeLibraries' import VTLDetailPage from '@/pages/VTLDetail' import ISCSITargetsPage from '@/pages/ISCSITargets' import ISCSITargetDetailPage from '@/pages/ISCSITargetDetail' +import SystemPage from '@/pages/System' +import BackupManagementPage from '@/pages/BackupManagement' +import IAMPage from '@/pages/IAM' +import ProfilePage from '@/pages/Profile' import Layout from '@/components/Layout' // Create a client @@ -54,7 +58,12 @@ function App() { } /> } /> } /> + } /> } /> + } /> + } /> + } /> + } /> diff --git a/frontend/src/api/backup.ts b/frontend/src/api/backup.ts new file mode 100644 index 0000000..c59313b --- /dev/null +++ b/frontend/src/api/backup.ts @@ -0,0 +1,75 @@ +import apiClient from './client' + +export interface BackupJob { + id: string + job_id: number + job_name: string + client_name: string + job_type: string + job_level: string + status: 'Running' | 'Completed' | 'Failed' | 'Canceled' | 'Waiting' + bytes_written: number + files_written: number + duration_seconds?: number + started_at?: string + ended_at?: string + error_message?: string + storage_name?: string + pool_name?: string + volume_name?: string + created_at: string + updated_at: string +} + +export interface ListJobsResponse { + jobs: BackupJob[] + total: number + limit: number + offset: number +} + +export interface ListJobsParams { + status?: string + job_type?: string + client_name?: string + job_name?: string + limit?: number + offset?: number +} + +export interface CreateJobRequest { + job_name: string + client_name: string + job_type: string + job_level: string + storage_name?: string + pool_name?: string +} + +export const backupAPI = { + listJobs: async (params?: ListJobsParams): Promise => { + const queryParams = new URLSearchParams() + if (params?.status) queryParams.append('status', params.status) + if (params?.job_type) queryParams.append('job_type', params.job_type) + if (params?.client_name) queryParams.append('client_name', params.client_name) + if (params?.job_name) queryParams.append('job_name', params.job_name) + if (params?.limit) queryParams.append('limit', params.limit.toString()) + if (params?.offset) queryParams.append('offset', params.offset.toString()) + + const response = await apiClient.get( + `/backup/jobs${queryParams.toString() ? 
`?${queryParams.toString()}` : ''}` + ) + return response.data + }, + + getJob: async (id: string): Promise => { + const response = await apiClient.get(`/backup/jobs/${id}`) + return response.data + }, + + createJob: async (data: CreateJobRequest): Promise => { + const response = await apiClient.post('/backup/jobs', data) + return response.data + }, +} + diff --git a/frontend/src/api/iam.ts b/frontend/src/api/iam.ts new file mode 100644 index 0000000..a14014d --- /dev/null +++ b/frontend/src/api/iam.ts @@ -0,0 +1,191 @@ +import apiClient from './client' + +export interface User { + id: string + username: string + email: string + full_name: string + is_active: boolean + is_system: boolean + created_at: string + updated_at: string + last_login_at: string | null + roles?: string[] + permissions?: string[] + groups?: string[] +} + +export interface Group { + id: string + name: string + description?: string + is_system: boolean + user_count: number + role_count: number + created_at: string + updated_at: string + users?: string[] + roles?: string[] +} + +export interface CreateGroupRequest { + name: string + description?: string +} + +export interface UpdateGroupRequest { + name?: string + description?: string +} + +export interface AddUserToGroupRequest { + user_id: string +} + +export interface CreateUserRequest { + username: string + email: string + password: string + full_name?: string +} + +export interface UpdateUserRequest { + email?: string + full_name?: string + is_active?: boolean + roles?: string[] + groups?: string[] +} + +export const iamApi = { + listUsers: async (): Promise => { + const response = await apiClient.get<{ users: User[] }>('/iam/users') + return response.data.users || [] + }, + + getUser: async (id: string): Promise => { + const response = await apiClient.get<{ + id: string + username: string + email: string + full_name: string + is_active: boolean + is_system: boolean + roles: string[] + permissions: string[] + groups: string[] + created_at: string + updated_at: string + last_login_at: string | null + }>(`/iam/users/${id}`) + return response.data + }, + + createUser: async (data: CreateUserRequest): Promise<{ id: string; username: string }> => { + const response = await apiClient.post<{ id: string; username: string }>('/iam/users', data) + return response.data + }, + + updateUser: async (id: string, data: UpdateUserRequest): Promise => { + await apiClient.put(`/iam/users/${id}`, data) + }, + + deleteUser: async (id: string): Promise => { + await apiClient.delete(`/iam/users/${id}`) + }, + + // Groups API + listGroups: async (): Promise => { + const response = await apiClient.get<{ groups: Group[] }>('/iam/groups') + return response.data.groups || [] + }, + + getGroup: async (id: string): Promise => { + const response = await apiClient.get(`/iam/groups/${id}`) + return response.data + }, + + createGroup: async (data: CreateGroupRequest): Promise<{ id: string; name: string }> => { + const response = await apiClient.post<{ id: string; name: string }>('/iam/groups', data) + return response.data + }, + + updateGroup: async (id: string, data: UpdateGroupRequest): Promise => { + await apiClient.put(`/iam/groups/${id}`, data) + }, + + deleteGroup: async (id: string): Promise => { + await apiClient.delete(`/iam/groups/${id}`) + }, + + addUserToGroup: async (groupId: string, userId: string): Promise => { + await apiClient.post(`/iam/groups/${groupId}/users`, { user_id: userId }) + }, + + removeUserFromGroup: async (groupId: string, userId: string): Promise => { + await 
apiClient.delete(`/iam/groups/${groupId}/users/${userId}`) + }, + + // User role assignment + assignRoleToUser: async (userId: string, roleName: string): Promise => { + await apiClient.post(`/iam/users/${userId}/roles`, { role_name: roleName }) + }, + + removeRoleFromUser: async (userId: string, roleName: string): Promise => { + await apiClient.delete(`/iam/users/${userId}/roles?role_name=${encodeURIComponent(roleName)}`) + }, + + // User group assignment + assignGroupToUser: async (userId: string, groupName: string): Promise => { + await apiClient.post(`/iam/users/${userId}/groups`, { group_name: groupName }) + }, + + removeGroupFromUser: async (userId: string, groupName: string): Promise => { + await apiClient.delete(`/iam/users/${userId}/groups?group_name=${encodeURIComponent(groupName)}`) + }, + + // List all available roles + listRoles: async (): Promise> => { + const response = await apiClient.get<{ roles: Array<{ id: string; name: string; description?: string; is_system: boolean; user_count?: number; created_at?: string; updated_at?: string }> }>('/iam/roles') + return response.data.roles + }, + + getRole: async (id: string): Promise<{ id: string; name: string; description?: string; is_system: boolean; user_count?: number; created_at?: string; updated_at?: string }> => { + const response = await apiClient.get<{ id: string; name: string; description?: string; is_system: boolean; user_count?: number; created_at?: string; updated_at?: string }>(`/iam/roles/${id}`) + return response.data + }, + + createRole: async (data: { name: string; description?: string }): Promise<{ id: string; name: string }> => { + const response = await apiClient.post<{ id: string; name: string }>('/iam/roles', data) + return response.data + }, + + updateRole: async (id: string, data: { name?: string; description?: string }): Promise => { + await apiClient.put(`/iam/roles/${id}`, data) + }, + + deleteRole: async (id: string): Promise => { + await apiClient.delete(`/iam/roles/${id}`) + }, + + // Role permissions + getRolePermissions: async (roleId: string): Promise => { + const response = await apiClient.get<{ permissions: string[] }>(`/iam/roles/${roleId}/permissions`) + return response.data.permissions + }, + + assignPermissionToRole: async (roleId: string, permissionName: string): Promise => { + await apiClient.post(`/iam/roles/${roleId}/permissions`, { permission_name: permissionName }) + }, + + removePermissionFromRole: async (roleId: string, permissionName: string): Promise => { + await apiClient.delete(`/iam/roles/${roleId}/permissions?permission_name=${encodeURIComponent(permissionName)}`) + }, + + // Permissions + listPermissions: async (): Promise> => { + const response = await apiClient.get<{ permissions: Array<{ id: string; name: string; resource: string; action: string; description?: string }> }>('/iam/permissions') + return response.data.permissions + }, +} + diff --git a/frontend/src/api/scst.ts b/frontend/src/api/scst.ts index cc47a9f..d7632a7 100644 --- a/frontend/src/api/scst.ts +++ b/frontend/src/api/scst.ts @@ -9,6 +9,7 @@ export interface SCSTTarget { iqn: string alias?: string is_active: boolean + lun_count?: number created_at: string updated_at: string } @@ -31,7 +32,11 @@ export interface SCSTInitiator { iqn: string is_active: boolean created_at: string - updated_at: string + updated_at?: string + target_id?: string + target_iqn?: string + target_name?: string + group_name?: string } export interface SCSTInitiatorGroup { @@ -45,9 +50,19 @@ export interface SCSTInitiatorGroup { export 
interface SCSTHandler { name: string + label: string description?: string } +export interface SCSTPortal { + id: string + ip_address: string + port: number + is_active: boolean + created_at: string + updated_at: string +} + export interface CreateTargetRequest { iqn: string target_type: string @@ -80,6 +95,7 @@ export const scstAPI = { getTarget: async (id: string): Promise<{ target: SCSTTarget luns: SCSTLUN[] + initiator_groups?: SCSTInitiatorGroup[] }> => { const response = await apiClient.get(`/scst/targets/${id}`) return response.data @@ -87,7 +103,8 @@ export const scstAPI = { createTarget: async (data: CreateTargetRequest): Promise => { const response = await apiClient.post('/scst/targets', data) - return response.data.target + // Backend returns target directly, not wrapped in { target: ... } + return response.data }, addLUN: async (targetId: string, data: AddLUNRequest): Promise<{ task_id: string }> => { @@ -109,5 +126,81 @@ export const scstAPI = { const response = await apiClient.get('/scst/handlers') return response.data.handlers || [] }, + + listPortals: async (): Promise => { + const response = await apiClient.get('/scst/portals') + return response.data.portals || [] + }, + + getPortal: async (id: string): Promise => { + const response = await apiClient.get(`/scst/portals/${id}`) + return response.data + }, + + createPortal: async (data: { ip_address: string; port?: number; is_active?: boolean }): Promise => { + const response = await apiClient.post('/scst/portals', data) + return response.data + }, + + updatePortal: async (id: string, data: { ip_address: string; port?: number; is_active?: boolean }): Promise => { + const response = await apiClient.put(`/scst/portals/${id}`, data) + return response.data + }, + + deletePortal: async (id: string): Promise => { + await apiClient.delete(`/scst/portals/${id}`) + }, + + enableTarget: async (targetId: string): Promise<{ message: string }> => { + const response = await apiClient.post(`/scst/targets/${targetId}/enable`) + return response.data + }, + + disableTarget: async (targetId: string): Promise<{ message: string }> => { + const response = await apiClient.post(`/scst/targets/${targetId}/disable`) + return response.data + }, + + listInitiators: async (): Promise => { + const response = await apiClient.get('/scst/initiators') + return response.data.initiators || [] + }, + + getInitiator: async (id: string): Promise => { + const response = await apiClient.get(`/scst/initiators/${id}`) + return response.data + }, + + removeInitiator: async (id: string): Promise => { + await apiClient.delete(`/scst/initiators/${id}`) + }, + + listExtents: async (): Promise => { + const response = await apiClient.get('/scst/extents') + return response.data.extents || [] + }, + + createExtent: async (extent: CreateExtentRequest): Promise<{ message: string }> => { + const response = await apiClient.post('/scst/extents', extent) + return response.data + }, + + deleteExtent: async (deviceName: string): Promise => { + await apiClient.delete(`/scst/extents/${deviceName}`) + }, +} + +export interface SCSTExtent { + handler_type: string + device_name: string + device_path: string + is_in_use: boolean + lun_count: number +} + +export interface CreateExtentRequest { + device_name: string + device_path: string + handler_type: string } diff --git a/frontend/src/api/storage.ts b/frontend/src/api/storage.ts index e7715e9..0653528 100644 --- a/frontend/src/api/storage.ts +++ b/frontend/src/api/storage.ts @@ -100,6 +100,7 @@ export interface ZFSPool { scrub_interval: 
number // days is_active: boolean health_status: string // online, degraded, faulted, offline + compress_ratio?: number // compression ratio (e.g., 1.45) created_at: string updated_at: string created_by: string diff --git a/frontend/src/api/system.ts b/frontend/src/api/system.ts new file mode 100644 index 0000000..a640144 --- /dev/null +++ b/frontend/src/api/system.ts @@ -0,0 +1,18 @@ +import apiClient from './client' + +export interface NetworkInterface { + name: string + ip_address: string + subnet: string + status: string // "Connected" or "Down" + speed: string // e.g., "10 Gbps", "1 Gbps" + role: string // "Management", "ISCSI", or empty +} + +export const systemAPI = { + listNetworkInterfaces: async (): Promise => { + const response = await apiClient.get<{ interfaces: NetworkInterface[] | null }>('/system/interfaces') + return response.data.interfaces || [] + }, +} + diff --git a/frontend/src/api/tape.ts b/frontend/src/api/tape.ts index df8d2d1..48c68eb 100644 --- a/frontend/src/api/tape.ts +++ b/frontend/src/api/tape.ts @@ -26,6 +26,7 @@ export interface VirtualTapeLibrary { name: string mhvtl_library_id: number storage_path: string + vendor?: string slot_count: number drive_count: number is_active: boolean diff --git a/frontend/src/components/Layout.tsx b/frontend/src/components/Layout.tsx index 8edd57e..9f58aea 100644 --- a/frontend/src/components/Layout.tsx +++ b/frontend/src/components/Layout.tsx @@ -10,15 +10,31 @@ import { Settings, Bell, Server, - Users + Users, + Archive } from 'lucide-react' -import { useState } from 'react' +import { useState, useEffect } from 'react' export default function Layout() { const { user, clearAuth } = useAuthStore() const navigate = useNavigate() const location = useLocation() - const [sidebarOpen, setSidebarOpen] = useState(true) + const [sidebarOpen, setSidebarOpen] = useState(false) + + // Set sidebar open by default on desktop, closed on mobile + useEffect(() => { + const handleResize = () => { + if (window.innerWidth >= 1024) { + setSidebarOpen(true) + } else { + setSidebarOpen(false) + } + } + + handleResize() // Set initial state + window.addEventListener('resize', handleResize) + return () => window.removeEventListener('resize', handleResize) + }, []) const handleLogout = () => { clearAuth() @@ -29,14 +45,15 @@ export default function Layout() { { name: 'Dashboard', href: '/', icon: LayoutDashboard }, { name: 'Storage', href: '/storage', icon: HardDrive }, { name: 'Tape Libraries', href: '/tape', icon: Database }, - { name: 'iSCSI Targets', href: '/iscsi', icon: Network }, + { name: 'iSCSI Management', href: '/iscsi', icon: Network }, + { name: 'Backup Management', href: '/backup', icon: Archive }, { name: 'Tasks', href: '/tasks', icon: Settings }, { name: 'Alerts', href: '/alerts', icon: Bell }, { name: 'System', href: '/system', icon: Server }, ] if (user?.roles.includes('admin')) { - navigation.push({ name: 'IAM', href: '/iam', icon: Users }) + navigation.push({ name: 'User Management', href: '/iam', icon: Users }) } const isActive = (href: string) => { @@ -47,7 +64,15 @@ export default function Layout() { } return ( -
+
+ {/* Mobile backdrop overlay */} + {sidebarOpen && ( +
setSidebarOpen(false)} + /> + )} + {/* Sidebar */}
{/* Header */}
-
-
+
+ Calypso Logo { + // Fallback to text if image not found + const target = e.target as HTMLImageElement + target.style.display = 'none' + const fallback = target.nextElementSibling as HTMLElement + if (fallback) fallback.style.display = 'flex' + }} + /> +
C
-

Calypso

+
+

Calypso

+

Dev Release V.1

+
+
{/* Page content */} -
+
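The Layout change above wires sidebar state to a window resize listener. The same desktop-open/mobile-closed default can also be expressed with `matchMedia`, which fires only when the breakpoint is crossed rather than on every resize event. A minimal sketch, assuming the 1024px breakpoint from the diff; the `useMediaQuery` hook is a hypothetical helper, not part of this patch:

```tsx
import { useEffect, useState } from 'react'

// Hypothetical hook: track a CSS media query and re-render when the match flips.
function useMediaQuery(query: string): boolean {
  const [matches, setMatches] = useState(() => window.matchMedia(query).matches)

  useEffect(() => {
    const mql = window.matchMedia(query)
    const onChange = (e: MediaQueryListEvent) => setMatches(e.matches)
    mql.addEventListener('change', onChange) // fires only on breakpoint crossings
    return () => mql.removeEventListener('change', onChange)
  }, [query])

  return matches
}

// Possible usage in Layout: sidebar open by default on desktop only.
// const isDesktop = useMediaQuery('(min-width: 1024px)')
// const [sidebarOpen, setSidebarOpen] = useState(isDesktop)
```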
diff --git a/frontend/src/index.css b/frontend/src/index.css index b470e28..b08b0ae 100644 --- a/frontend/src/index.css +++ b/frontend/src/index.css @@ -69,6 +69,7 @@ .custom-scrollbar::-webkit-scrollbar-track { background: #111a22; + border-radius: 4px; } .custom-scrollbar::-webkit-scrollbar-thumb { @@ -80,6 +81,24 @@ background: #476685; } +.custom-scrollbar { + -webkit-overflow-scrolling: touch; + overscroll-behavior: contain; + scroll-behavior: smooth; +} + +/* Ensure mouse wheel scrolling works */ +.custom-scrollbar, +.custom-scrollbar * { + touch-action: pan-y; +} + +/* Firefox scrollbar */ +.custom-scrollbar { + scrollbar-width: thin; + scrollbar-color: #324d67 #111a22; +} + /* Electric glow animation for buttons */ @keyframes electric-glow { 0%, 100% { @@ -122,3 +141,23 @@ } } +/* Custom Toggle Switch */ +.toggle-checkbox:checked { + right: 0; + border-color: #137fec; +} + +.toggle-checkbox:checked + .toggle-label { + background-color: #137fec; +} + +.toggle-checkbox { + right: 0; + left: auto; +} + +.toggle-checkbox:checked { + right: 0; + left: auto; +} + diff --git a/frontend/src/pages/BackupManagement.tsx b/frontend/src/pages/BackupManagement.tsx new file mode 100644 index 0000000..b813d21 --- /dev/null +++ b/frontend/src/pages/BackupManagement.tsx @@ -0,0 +1,811 @@ +import { useState } from 'react' +import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query' +import { backupAPI } from '@/api/backup' +import { Search, X } from 'lucide-react' + +export default function BackupManagement() { + const [activeTab, setActiveTab] = useState<'dashboard' | 'jobs' | 'clients' | 'storage' | 'restore'>('dashboard') + + return ( +
+ {/* Page Heading */} +
+
+
+
+

+ Calypso Backup Manager +

+ +
+

+ Manage backup jobs, configure clients, and monitor storage pools from a central director console. +

+
+
+ + +
+
+
+ + {/* Scrollable Content */} +
+
+ {/* Navigation Tabs */} +
+
+ + + + + +
+
+ + {/* Conditional Content Based on Active Tab */} + {activeTab === 'dashboard' && ( + <> + {/* Stats Dashboard */} +
+ {/* Service Status Card */} +
+
+ health_and_safety +
+
+

Director Status

+
+ check_circle +

Active

+
+

Uptime: 14d 2h 12m

+
+
+ + {/* Last Backup Card */} +
+
+ schedule +
+
+

Last Job

+
+

Success

+
+

DailyBackup • 2h 15m ago

+
+
+ + {/* Active Jobs Card */} +
+
+ pending_actions +
+
+

Active Jobs

+
+

3 Running

+
+
+
+
+
+
+ + {/* Storage Pool Card */} +
+
+ hard_drive +
+
+
+

Default Pool

+ 78% +
+
+

9.4 TB

+

/ 12 TB

+
+
+
+
+
+
+
+ + {/* Recent Jobs Section */} +
+
+

Recent Job History

+ +
+
+
Status | Job ID | Job Name | Client | Type | Level | Duration | Bytes | Actions
Running | 10423 | WeeklyArchive | filesrv-02 | Backup | Full | 00:45:12 | 142 GB
OK | 10422 | DailyBackup | web-srv-01 | Backup | Incr | 00:12:05 | 4.2 GB
Error | 10421 | DB_Snapshot | db-prod-01 | Backup | Diff | 00:00:04 | 0 B
OK | 10420 | CatalogBackup | backup-srv-01 | Backup | Full | 00:05:30 | 850 MB
+
+ {/* Pagination/Footer */} +
+

Showing 4 of 128 jobs

+
+ + +
+
+
+
+ + {/* Footer Console Widget */} +
+
+
+ Console Log (tail -f) + + Connected + +
+

[14:22:01] bacula-dir: Connected to Storage at backup-srv-01:9103

+

[14:22:02] bacula-sd: Volume "Vol-0012" selected for appending

+

[14:22:05] bacula-fd: Client "filesrv-02" starting backup of /var/www/html

+

[14:23:10] warning: /var/www/html/cache/tmp locked by another process, skipping

+

[14:23:45] bacula-dir: JobId 10423: Sending Accurate information.

+
+
+ + )} + + {activeTab === 'jobs' && ( + + )} + + {activeTab === 'clients' && ( +
+ Clients tab coming soon +
+ )} + + {activeTab === 'storage' && ( +
+ Storage tab coming soon +
+ )} + + {activeTab === 'restore' && ( +
+ Restore tab coming soon +
+ )} +
+
+
+ ) +} + +// Jobs Management Tab Component +function JobsManagementTab() { + const queryClient = useQueryClient() + const [searchQuery, setSearchQuery] = useState('') + const [statusFilter, setStatusFilter] = useState('') + const [jobTypeFilter, setJobTypeFilter] = useState('') + const [page, setPage] = useState(1) + const [showCreateForm, setShowCreateForm] = useState(false) + const limit = 20 + + const { data, isLoading, error } = useQuery({ + queryKey: ['backup-jobs', statusFilter, jobTypeFilter, searchQuery, page], + queryFn: () => backupAPI.listJobs({ + status: statusFilter || undefined, + job_type: jobTypeFilter || undefined, + job_name: searchQuery || undefined, + limit, + offset: (page - 1) * limit, + }), + }) + + const jobs = data?.jobs || [] + const total = data?.total || 0 + const totalPages = Math.ceil(total / limit) + + const formatBytes = (bytes: number): string => { + if (bytes === 0) return '0 B' + const k = 1024 + const sizes = ['B', 'KB', 'MB', 'GB', 'TB'] + const i = Math.floor(Math.log(bytes) / Math.log(k)) + return `${(bytes / Math.pow(k, i)).toFixed(2)} ${sizes[i]}` + } + + const formatDuration = (seconds?: number): string => { + if (!seconds) return '-' + const hours = Math.floor(seconds / 3600) + const minutes = Math.floor((seconds % 3600) / 60) + const secs = seconds % 60 + if (hours > 0) { + return `${hours}:${minutes.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}` + } + return `${minutes}:${secs.toString().padStart(2, '0')}` + } + + const getStatusBadge = (status: string) => { + const statusMap: Record = { + Running: { + bg: 'bg-blue-500/10', + text: 'text-blue-400', + border: 'border-blue-500/20', + icon: 'pending_actions', + }, + Completed: { + bg: 'bg-green-500/10', + text: 'text-green-400', + border: 'border-green-500/20', + icon: 'check_circle', + }, + Failed: { + bg: 'bg-red-500/10', + text: 'text-red-400', + border: 'border-red-500/20', + icon: 'error', + }, + Canceled: { + bg: 'bg-yellow-500/10', + text: 'text-yellow-400', + border: 'border-yellow-500/20', + icon: 'cancel', + }, + Waiting: { + bg: 'bg-gray-500/10', + text: 'text-gray-400', + border: 'border-gray-500/20', + icon: 'schedule', + }, + } + + const config = statusMap[status] || statusMap.Waiting + + return ( + + {status === 'Running' && ( + + )} + {status !== 'Running' && ( + {config.icon} + )} + {status} + + ) + } + + return ( +
+ {/* Header */} +
+
+

Backup Jobs

+

Manage and monitor backup job executions

+
+ +
+ + {/* Filters */} +
+ {/* Search */} +
+ + { + setSearchQuery(e.target.value) + setPage(1) + }} + className="w-full pl-10 pr-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm placeholder-text-secondary focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + /> +
+ + {/* Status Filter */} + + + {/* Job Type Filter */} + +
+ + {/* Jobs Table */} +
+ {isLoading ? ( +
Loading jobs...
+ ) : error ? ( +
Failed to load jobs
+ ) : jobs.length === 0 ? ( +
+

No jobs found

+
+ ) : ( + <> +
+ + + + + + + + + + + + + + + + + {jobs.map((job) => ( + + + + + + + + + + + + + ))} + +
Status | Job ID | Job Name | Client | Type | Level | Duration | Bytes | Files | Actions
{getStatusBadge(job.status)} | {job.job_id} | {job.job_name} | {job.client_name} | {job.job_type} | {job.job_level} | {formatDuration(job.duration_seconds)} | {formatBytes(job.bytes_written)} | {job.files_written.toLocaleString()} + +
+
+ {/* Pagination */} +
+

+ Showing {(page - 1) * limit + 1}-{Math.min(page * limit, total)} of {total} jobs +

+
+ + +
+
+ + )} +
+ + {/* Create Job Form Modal */} + {showCreateForm && ( + setShowCreateForm(false)} + onSuccess={async () => { + setShowCreateForm(false) + await queryClient.invalidateQueries({ queryKey: ['backup-jobs'] }) + await queryClient.refetchQueries({ queryKey: ['backup-jobs'] }) + }} + /> + )} +
+ ) +} + +// Create Job Form Component +interface CreateJobFormProps { + onClose: () => void + onSuccess: () => void +} + +function CreateJobForm({ onClose, onSuccess }: CreateJobFormProps) { + const [formData, setFormData] = useState({ + job_name: '', + client_name: '', + job_type: 'Backup', + job_level: 'Full', + storage_name: '', + pool_name: '', + }) + const [error, setError] = useState(null) + + const createJobMutation = useMutation({ + mutationFn: backupAPI.createJob, + onSuccess: () => { + onSuccess() + }, + onError: (err: any) => { + setError(err.response?.data?.error || 'Failed to create job') + }, + }) + + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault() + setError(null) + + const payload: any = { + job_name: formData.job_name, + client_name: formData.client_name, + job_type: formData.job_type, + job_level: formData.job_level, + } + + if (formData.storage_name) { + payload.storage_name = formData.storage_name + } + if (formData.pool_name) { + payload.pool_name = formData.pool_name + } + + createJobMutation.mutate(payload) + } + + return ( +
+
+ {/* Header */} +
+

Create Backup Job

+ +
+ + {/* Form */} +
+ {error && ( +
+ {error} +
+ )} + + {/* Job Name */} +
+ + setFormData({ ...formData, job_name: e.target.value })} + className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + placeholder="e.g., DailyBackup" + /> +
+ + {/* Client Name */} +
+ + setFormData({ ...formData, client_name: e.target.value })} + className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + placeholder="e.g., filesrv-02" + /> +
+ + {/* Job Type & Level */} +
+
+ + +
+ +
+ + +
+
+ + {/* Storage Name */} +
+ + setFormData({ ...formData, storage_name: e.target.value })} + className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + placeholder="e.g., backup-srv-01" + /> +
+ + {/* Pool Name */} +
+ + setFormData({ ...formData, pool_name: e.target.value })} + className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + placeholder="e.g., Default" + /> +
+ + {/* Actions */} +
+ + +
+
+
+
+ ) +} + diff --git a/frontend/src/pages/Dashboard.tsx b/frontend/src/pages/Dashboard.tsx index 7c4b1fa..9db78a2 100644 --- a/frontend/src/pages/Dashboard.tsx +++ b/frontend/src/pages/Dashboard.tsx @@ -25,6 +25,34 @@ import { ResponsiveContainer, } from 'recharts' +// Mock data - moved outside component to prevent re-creation on every render +const MOCK_ACTIVE_JOBS = [ + { + id: '1', + name: 'Daily Backup: VM-Cluster-01', + type: 'Replication', + progress: 45, + speed: '145 MB/s', + status: 'running', + eta: '1h 12m', + }, + { + id: '2', + name: 'ZFS Scrub: Pool-01', + type: 'Maintenance', + progress: 78, + speed: '1.2 GB/s', + status: 'running', + }, +] + +const MOCK_SYSTEM_LOGS = [ + { time: '10:45:22', level: 'INFO', source: 'systemd', message: 'Started User Manager for UID 1000.' }, + { time: '10:45:15', level: 'WARN', source: 'smartd', message: 'Device: /dev/ada5, SMART Usage Attribute: 194 Temperature_Celsius changed from 38 to 41' }, + { time: '10:44:58', level: 'INFO', source: 'kernel', message: 'ix0: link state changed to UP' }, + { time: '10:42:10', level: 'INFO', source: 'zfs', message: 'zfs_arc_reclaim_thread: reclaiming 157286400 bytes ...' }, +] + export default function Dashboard() { const [activeTab, setActiveTab] = useState<'jobs' | 'logs' | 'alerts'>('jobs') const [networkDataPoints, setNetworkDataPoints] = useState>([]) @@ -37,63 +65,83 @@ export default function Dashboard() { return response.data }, refetchInterval: refreshInterval * 1000, + staleTime: refreshInterval * 1000 * 2, // Consider data fresh for 2x the interval + refetchOnWindowFocus: false, // Don't refetch on window focus + refetchOnMount: false, // Don't refetch on mount if data is fresh + notifyOnChangeProps: ['data', 'error'], + structuralSharing: (oldData, newData) => { + // Only update if data actually changed + if (JSON.stringify(oldData) === JSON.stringify(newData)) { + return oldData + } + return newData + }, }) const { data: metrics } = useQuery({ queryKey: ['metrics'], queryFn: monitoringApi.getMetrics, refetchInterval: refreshInterval * 1000, + staleTime: refreshInterval * 1000 * 2, + refetchOnWindowFocus: false, + refetchOnMount: false, + notifyOnChangeProps: ['data', 'error'], + structuralSharing: (oldData, newData) => { + if (JSON.stringify(oldData) === JSON.stringify(newData)) { + return oldData + } + return newData + }, }) const { data: alerts } = useQuery({ queryKey: ['alerts', 'dashboard'], queryFn: () => monitoringApi.listAlerts({ is_acknowledged: false, limit: 10 }), refetchInterval: refreshInterval * 1000, + staleTime: refreshInterval * 1000 * 2, + refetchOnWindowFocus: false, + refetchOnMount: false, + notifyOnChangeProps: ['data', 'error'], + structuralSharing: (oldData, newData) => { + if (JSON.stringify(oldData) === JSON.stringify(newData)) { + return oldData + } + return newData + }, }) const { data: repositories = [] } = useQuery({ queryKey: ['storage', 'repositories'], queryFn: storageApi.listRepositories, + staleTime: 60 * 1000, // Consider repositories fresh for 60 seconds + refetchOnWindowFocus: false, + refetchOnMount: false, + notifyOnChangeProps: ['data', 'error'], + structuralSharing: (oldData, newData) => { + if (JSON.stringify(oldData) === JSON.stringify(newData)) { + return oldData + } + return newData + }, }) - // Calculate uptime (mock for now, would come from metrics) - const uptime = metrics?.system?.uptime_seconds || 0 - const days = Math.floor(uptime / 86400) - const hours = Math.floor((uptime % 86400) / 3600) - const minutes = Math.floor((uptime % 3600) / 60) + 
// Memoize uptime calculations to prevent recalculation on every render + const { days, hours, minutes } = useMemo(() => { + const uptimeValue = metrics?.system?.uptime_seconds || 0 + return { + days: Math.floor(uptimeValue / 86400), + hours: Math.floor((uptimeValue % 86400) / 3600), + minutes: Math.floor((uptimeValue % 3600) / 60), + } + }, [metrics?.system?.uptime_seconds]) - // Mock active jobs (would come from tasks API) - const activeJobs = [ - { - id: '1', - name: 'Daily Backup: VM-Cluster-01', - type: 'Replication', - progress: 45, - speed: '145 MB/s', - status: 'running', - eta: '1h 12m', - }, - { - id: '2', - name: 'ZFS Scrub: Pool-01', - type: 'Maintenance', - progress: 78, - speed: '1.2 GB/s', - status: 'running', - }, - ] - - // Mock system logs - const systemLogs = [ - { time: '10:45:22', level: 'INFO', source: 'systemd', message: 'Started User Manager for UID 1000.' }, - { time: '10:45:15', level: 'WARN', source: 'smartd', message: 'Device: /dev/ada5, SMART Usage Attribute: 194 Temperature_Celsius changed from 38 to 41' }, - { time: '10:44:58', level: 'INFO', source: 'kernel', message: 'ix0: link state changed to UP' }, - { time: '10:42:10', level: 'INFO', source: 'zfs', message: 'zfs_arc_reclaim_thread: reclaiming 157286400 bytes ...' }, - ] - - const totalStorage = Array.isArray(repositories) ? repositories.reduce((sum, repo) => sum + (repo?.size_bytes || 0), 0) : 0 - const usedStorage = Array.isArray(repositories) ? repositories.reduce((sum, repo) => sum + (repo?.used_bytes || 0), 0) : 0 - const storagePercent = totalStorage > 0 ? (usedStorage / totalStorage) * 100 : 0 + // Use memoized storage calculations to prevent unnecessary recalculations + const { totalStorage, usedStorage, storagePercent } = useMemo(() => { + const total = Array.isArray(repositories) ? repositories.reduce((sum, repo) => sum + (repo?.size_bytes || 0), 0) : 0 + const used = Array.isArray(repositories) ? repositories.reduce((sum, repo) => sum + (repo?.used_bytes || 0), 0) : 0 + const percent = total > 0 ? (used / total) * 100 : 0 + return { totalStorage: total, usedStorage: used, storagePercent: percent } + }, [repositories]) // Initialize network data useEffect(() => { @@ -157,11 +205,15 @@ export default function Dashboard() { return Math.max(...networkDataPoints.map((d) => d.inbound + d.outbound)) }, [networkDataPoints]) - const systemStatus = health?.status === 'healthy' ? 'System Healthy' : 'System Degraded' - const isHealthy = health?.status === 'healthy' + // Memoize system status to prevent recalculation + const { systemStatus, isHealthy } = useMemo(() => { + const status = health?.status === 'healthy' ? 'System Healthy' : 'System Degraded' + const healthy = health?.status === 'healthy' + return { systemStatus: status, isHealthy: healthy } + }, [health?.status]) return ( -
+
{/* Header */}
@@ -420,9 +472,9 @@ export default function Dashboard() { }`} > Active Jobs{' '} - {activeJobs.length > 0 && ( + {MOCK_ACTIVE_JOBS.length > 0 && ( - {activeJobs.length} + {MOCK_ACTIVE_JOBS.length} )} @@ -473,7 +525,7 @@ export default function Dashboard() { - {activeJobs.map((job) => ( + {MOCK_ACTIVE_JOBS.map((job) => ( {job.name} {job.type} @@ -519,7 +571,7 @@ export default function Dashboard() {
- {systemLogs.map((log, idx) => ( + {MOCK_SYSTEM_LOGS.map((log, idx) => (
{log.time} diff --git a/frontend/src/pages/IAM.tsx b/frontend/src/pages/IAM.tsx new file mode 100644 index 0000000..c287ba5 --- /dev/null +++ b/frontend/src/pages/IAM.tsx @@ -0,0 +1,2021 @@ +import { useState, useEffect, useRef } from 'react' +import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query' +import { ChevronRight, Search, Filter, UserPlus, History, ChevronLeft, MoreVertical, Lock, Verified, Wrench, Eye, HardDrive, Shield, ArrowRight, Network, ChevronRight as ChevronRightIcon, X, Edit, Trash2, User as UserIcon, Plus } from 'lucide-react' +import { Button } from '@/components/ui/button' +import { iamApi, type User, type Group } from '@/api/iam' + +export default function IAM() { + const [activeTab, setActiveTab] = useState('users') + const [searchQuery, setSearchQuery] = useState('') + const [showCreateUserForm, setShowCreateUserForm] = useState(false) + const [showEditUserForm, setShowEditUserForm] = useState(false) + const [selectedUser, setSelectedUser] = useState(null) + const [openActionMenu, setOpenActionMenu] = useState(null) + const queryClient = useQueryClient() + + const { data: users, isLoading, error } = useQuery({ + queryKey: ['iam-users'], + queryFn: iamApi.listUsers, + refetchOnWindowFocus: true, + }) + + if (error) { + console.error('Failed to load users:', error) + } + + const filteredUsers = (users || []).filter((user: User) => + user.username.toLowerCase().includes(searchQuery.toLowerCase()) || + (user.full_name && user.full_name.toLowerCase().includes(searchQuery.toLowerCase())) || + (user.email && user.email.toLowerCase().includes(searchQuery.toLowerCase())) || + (user.roles && user.roles.some((r: string) => r.toLowerCase().includes(searchQuery.toLowerCase()))) || + (user.groups && user.groups.some((g: string) => g.toLowerCase().includes(searchQuery.toLowerCase()))) + ) + + const getRoleBadge = (roles: string[] | undefined) => { + if (!roles || roles.length === 0) { + return { bg: 'bg-slate-700', text: 'text-slate-300', border: 'border-slate-600', icon: Shield, Icon: Shield, label: 'No Role' } + } + + // Use first role for display + const role = roles[0] + const roleConfig: Record = { + 'admin': { bg: 'bg-purple-500/10', text: 'text-purple-400', border: 'border-purple-500/20', icon: Verified, label: 'Admin' }, + 'operator': { bg: 'bg-blue-500/10', text: 'text-blue-400', border: 'border-blue-500/20', icon: Wrench, label: 'Operator' }, + 'auditor': { bg: 'bg-yellow-500/10', text: 'text-yellow-500', border: 'border-yellow-500/20', icon: Eye, label: 'Auditor' }, + 'storage_admin': { bg: 'bg-teal-500/10', text: 'text-teal-500', border: 'border-teal-500/20', icon: HardDrive, label: 'Storage Admin' }, + 'service': { bg: 'bg-slate-700', text: 'text-slate-300', border: 'border-slate-600', icon: Shield, label: 'Service' } + } + const config = roleConfig[role.toLowerCase()] || { bg: 'bg-slate-700', text: 'text-slate-300', border: 'border-slate-600', icon: Shield, label: role } + const Icon = config.icon + return { ...config, Icon } + } + + const getAvatarBg = (username: string) => { + if (username.toLowerCase() === 'admin') { + return 'bg-gradient-to-br from-blue-500 to-indigo-600' + } + return 'bg-slate-700' + } + + const deleteUserMutation = useMutation({ + mutationFn: iamApi.deleteUser, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['iam-users'] }) + queryClient.refetchQueries({ queryKey: ['iam-users'] }) + }, + onError: (error: any) => { + console.error('Failed to delete user:', error) + const errorMessage = 
error.response?.data?.error || error.message || 'Failed to delete user' + alert(errorMessage) + }, + }) + + const handleDeleteUser = (userId: string) => { + deleteUserMutation.mutate(userId) + } + + const formatLastLogin = (lastLoginAt: string | null) => { + if (!lastLoginAt) return 'Never' + + const date = new Date(lastLoginAt) + const now = new Date() + const diffMs = now.getTime() - date.getTime() + const diffMins = Math.floor(diffMs / 60000) + const diffHours = Math.floor(diffMs / 3600000) + const diffDays = Math.floor(diffMs / 86400000) + + if (diffMins < 1) return 'Just now' + if (diffMins < 60) return `${diffMins} minute${diffMins > 1 ? 's' : ''} ago` + if (diffHours < 24) return `${diffHours} hour${diffHours > 1 ? 's' : ''} ago` + if (diffDays < 7) return `${diffDays} day${diffDays > 1 ? 's' : ''} ago` + + return date.toLocaleDateString() + } + + return ( +
+
+ {/* Page Header */} +
+
+ +

User & Access Management

+

+ Manage local accounts, define RBAC roles, and configure directory services (LDAP/AD) integration. +

+
+
+ +
+
+ + {/* Content Container */} +
+ {/* Tabs */} +
+ + + + + +
+ + {/* Toolbar Area */} + {activeTab === 'users' && ( + <> +
+ {/* Search & Filter */} +
+
+ + setSearchQuery(e.target.value)} + className="w-full bg-card-dark border border-border-dark rounded-lg pl-10 pr-4 py-2.5 text-white placeholder-text-secondary/50 focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent text-sm" + /> +
+ +
+ {/* Primary Action */} + +
+ + {/* Users Table */} +
+
+ + + + + + + + + + + + + + {isLoading ? ( + + + + ) : error ? ( + + + + ) : filteredUsers.length > 0 ? ( + filteredUsers.map((user: User) => { + const roleBadge = getRoleBadge(user.roles) + const Icon = roleBadge.Icon + const avatarInitials = user.full_name + ? user.full_name.split(' ').map((n: string) => n[0]).join('').substring(0, 2).toUpperCase() + : user.username.substring(0, 2).toUpperCase() + + return ( + + + + + + + + + + ) + }) + ) : ( + + + + )} + +
Status | Username | Full Name | Role | Groups | Last Login | Actions
+ Loading users... +
+ Error loading users: {error instanceof Error ? error.message : 'Unknown error'} +
+ {user.is_active ? ( +
+ + Active +
+ ) : ( +
+ + Locked +
+ )} +
+
+
+ {avatarInitials} +
+ {user.username} +
+
{user.full_name || '-'} + {user.roles && user.roles.length > 0 ? ( + + + {roleBadge.label} + + ) : ( + No role + )} + + {user.groups && user.groups.length > 0 ? user.groups.join(', ') : '-'} + {formatLastLogin(user.last_login_at)} +
+ + {openActionMenu === user.id && ( + <> +
setOpenActionMenu(null)} + /> +
+
+ + + {!user.is_system && ( + + )} +
+
+ + )} +
+
+ No users found +
+
+ {/* Pagination */} +
+ + Showing 1-{filteredUsers.length} of{' '} + {filteredUsers.length} users + +
+ + +
+
+
+ + )} + + {/* Create User Form Modal */} + {showCreateUserForm && ( + setShowCreateUserForm(false)} + onSuccess={async () => { + setShowCreateUserForm(false) + // Invalidate and refetch users list immediately + queryClient.invalidateQueries({ queryKey: ['iam-users'] }) + await queryClient.refetchQueries({ queryKey: ['iam-users'] }) + }} + /> + )} + + {/* Edit User Form Modal */} + {showEditUserForm && selectedUser && ( + { + setShowEditUserForm(false) + setSelectedUser(null) + }} + onSuccess={async () => { + setShowEditUserForm(false) + setSelectedUser(null) + queryClient.invalidateQueries({ queryKey: ['iam-users'] }) + await queryClient.refetchQueries({ queryKey: ['iam-users'] }) + }} + /> + )} + + {/* Groups Tab */} + {activeTab === 'groups' && } + {/* Roles Tab */} + {activeTab === 'roles' && } + + {activeTab !== 'users' && activeTab !== 'groups' && ( +
+ {activeTab === 'directory' && 'Directory Services tab coming soon'} + {activeTab === 'auth' && 'Authentication & SSO tab coming soon'} +
+ )} + + {/* Info Cards */} +
+ {/* Directory Status */} +
+
+
+
+ +
+

Directory Service

+
+ + Inactive + +
+

+ No LDAP or Active Directory server is currently connected. Local authentication is being used. +

+
+ +
+
+ + {/* MFA Status */} +
+
+
+
+ +
+

Security Policy

+
+ + Good + +
+
+
+ Multi-Factor Auth + Enforced +
+
+ Password Rotation + 90 Days +
+
+
+ +
+
+
+
+
+
+  )
+}
+
+// Create User Form Component
+interface CreateUserFormProps {
+  onClose: () => void
+  onSuccess: () => void
+}
+
+function CreateUserForm({ onClose, onSuccess }: CreateUserFormProps) {
+  const [username, setUsername] = useState('')
+  const [email, setEmail] = useState('')
+  const [password, setPassword] = useState('')
+  const [fullName, setFullName] = useState('')
+
+  const createMutation = useMutation({
+    mutationFn: iamApi.createUser,
+    onSuccess: async () => {
+      // Wait a bit to ensure backend has processed the request
+      await new Promise(resolve => setTimeout(resolve, 300))
+      onSuccess()
+    },
+    onError: (error: any) => {
+      console.error('Failed to create user:', error)
+      const errorMessage = error.response?.data?.error || error.message || 'Failed to create user'
+      alert(errorMessage)
+    },
+  })
+
+  const handleSubmit = (e: React.FormEvent) => {
+    e.preventDefault()
+    if (!username.trim() || !email.trim() || !password.trim()) {
+      alert('Username, email, and password are required')
+      return
+    }
+
+    const userData = {
+      username: username.trim(),
+      email: email.trim(),
+      password: password,
+      full_name: fullName.trim() || undefined,
+    }
+
+    console.log('Creating user:', { ...userData, password: '***' })
+    createMutation.mutate(userData)
+  }
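+
+  // The 300 ms sleep in createMutation.onSuccess above is a timing workaround.
+  // Since react-query's refetchQueries returns a promise, a caller can await the
+  // cache refresh itself instead of sleeping. A sketch of that alternative
+  // (hypothetical and not wired up; handleCreated is not in the original file):
+  const createFormQueryClient = useQueryClient()
+  const handleCreated = async () => {
+    // Wait for the users list to actually refetch, then close the modal.
+    await createFormQueryClient.refetchQueries({ queryKey: ['iam-users'] })
+    onSuccess()
+  }
+
+  return (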
+
+ {/* Modal Header */} +
+
+

Create User

+

Create a new user account

+
+ +
+ + {/* Modal Content */} +
+
+ + setUsername(e.target.value)} + placeholder="johndoe" + className="w-full px-4 py-3 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm placeholder-text-secondary/50 focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent transition-colors" + required + /> +
+ +
+ + setEmail(e.target.value)} + placeholder="john.doe@example.com" + className="w-full px-4 py-3 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm placeholder-text-secondary/50 focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent transition-colors" + required + /> +
+ +
+ + setPassword(e.target.value)} + placeholder="Enter password" + className="w-full px-4 py-3 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm placeholder-text-secondary/50 focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent transition-colors" + required + minLength={8} + /> +

Minimum 8 characters

+
+ +
+ + setFullName(e.target.value)} + placeholder="John Doe" + className="w-full px-4 py-3 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm placeholder-text-secondary/50 focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent transition-colors" + /> +
+ + {/* Action Buttons */} +
+ + +
+
+
+
+  )
+}
+
+// Edit User Form Component
+interface EditUserFormProps {
+  user: User
+  onClose: () => void
+  onSuccess: () => void
+}
+
+function EditUserForm({ user, onClose, onSuccess }: EditUserFormProps) {
+  const [email, setEmail] = useState(user.email || '')
+  const [fullName, setFullName] = useState(user.full_name || '')
+  const [isActive, setIsActive] = useState(user.is_active)
+  const [userRoles, setUserRoles] = useState(user.roles || [])
+  const [userGroups, setUserGroups] = useState(user.groups || [])
+  const [selectedRole, setSelectedRole] = useState('')
+  const [selectedGroup, setSelectedGroup] = useState('')
+  const queryClient = useQueryClient()
+
+  // Use refs to always get the latest state values
+  const userRolesRef = useRef(userRoles)
+  const userGroupsRef = useRef(userGroups)
+
+  // Update refs whenever state changes
+  useEffect(() => {
+    userRolesRef.current = userRoles
+    console.log('useEffect userRoles - state updated:', userRoles)
+  }, [userRoles])
+
+  useEffect(() => {
+    userGroupsRef.current = userGroups
+    console.log('useEffect userGroups - state updated:', userGroups)
+  }, [userGroups])
+
+  // Debug: log state changes
+  useEffect(() => {
+    console.log('EditUserForm - userRoles state changed:', userRoles)
+  }, [userRoles])
+
+  useEffect(() => {
+    console.log('EditUserForm - userGroups state changed:', userGroups)
+  }, [userGroups])
+
+  // Fetch available roles and groups
+  const { data: availableRoles = [] } = useQuery({
+    queryKey: ['iam-roles'],
+    queryFn: iamApi.listRoles,
+  })
+
+  const { data: availableGroups = [] } = useQuery({
+    queryKey: ['iam-groups'],
+    queryFn: iamApi.listGroups,
+  })
+
+  // Filter out already assigned roles/groups
+  const unassignedRoles = availableRoles.filter(r => !userRoles.includes(r.name))
+  const unassignedGroups = availableGroups.filter(g => !userGroups.includes(g.name))
+
+  const updateMutation = useMutation({
+    mutationFn: (data: { email?: string; full_name?: string; is_active?: boolean; roles?: string[]; groups?: string[] }) =>
+      iamApi.updateUser(user.id, data),
+    onSuccess: async () => {
+      onSuccess()
+      queryClient.invalidateQueries({ queryKey: ['iam-users'] })
+      await queryClient.refetchQueries({ queryKey: ['iam-users'] })
+      queryClient.invalidateQueries({ queryKey: ['iam-user', user.id] })
+      await queryClient.refetchQueries({ queryKey: ['iam-user', user.id] })
+    },
+    onError: (error: any) => {
+      console.error('Failed to update user:', error)
+      const errorMessage = error.response?.data?.error || error.message || 'Failed to update user'
+      alert(errorMessage)
+    },
+  })
+
+  const assignRoleMutation = useMutation({
+    mutationFn: (roleName: string) => iamApi.assignRoleToUser(user.id, roleName),
+    onMutate: async (roleName: string) => {
+      // Optimistic update: add role to state immediately
+      console.log('assignRoleMutation onMutate - BEFORE update, current userRoles:', userRolesRef.current)
+      setUserRoles(prev => {
+        const newRoles = prev.includes(roleName) ? prev : [...prev, roleName]
+        console.log('assignRoleMutation onMutate - prev:', prev, 'roleName:', roleName, 'newRoles:', newRoles)
+        // Also update ref immediately
+        userRolesRef.current = newRoles
+        return newRoles
+      })
+      setSelectedRole('')
+      console.log('assignRoleMutation onMutate - AFTER update, ref should be:', userRolesRef.current)
+    },
+    onSuccess: async (_, roleName: string) => {
+      // Don't overwrite state with server data - keep optimistic update
+      // Just invalidate queries for other components
+      queryClient.invalidateQueries({ queryKey: ['iam-users'] })
+      queryClient.invalidateQueries({ queryKey: ['iam-user', user.id] })
+      // Use functional update to get current state
+      setUserRoles(current => {
+        console.log('assignRoleMutation onSuccess - roleName:', roleName, 'current userRoles:', current)
+        return current
+      })
+    },
+    onError: (error: any, roleName: string) => {
+      console.error('Failed to assign role:', error, roleName)
+      // Rollback: remove role from state if API call failed
+      setUserRoles(prev => prev.filter(r => r !== roleName))
+      alert(error.response?.data?.error || error.message || 'Failed to assign role')
+    },
+  })
+
+  const removeRoleMutation = useMutation({
+    mutationFn: (roleName: string) => iamApi.removeRoleFromUser(user.id, roleName),
+    onMutate: async (roleName: string) => {
+      // Store previous state for rollback
+      const previousRoles = userRoles
+      // Optimistic update: remove role from state immediately
+      setUserRoles(prev => prev.filter(r => r !== roleName))
+      return { previousRoles }
+    },
+    onSuccess: async (_, roleName: string) => {
+      // Don't overwrite state with server data - keep optimistic update
+      // Just invalidate queries for other components
+      queryClient.invalidateQueries({ queryKey: ['iam-users'] })
+      queryClient.invalidateQueries({ queryKey: ['iam-user', user.id] })
+      console.log('Role removed successfully:', roleName, 'Current userRoles:', userRoles)
+    },
+    onError: (error: any, _roleName: string, context: any) => {
+      console.error('Failed to remove role:', error)
+      // Rollback: restore previous state if API call failed
+      if (context?.previousRoles) {
+        setUserRoles(context.previousRoles)
+      }
+      alert(error.response?.data?.error || error.message || 'Failed to remove role')
+    },
+  })
+
+  const assignGroupMutation = useMutation({
+    mutationFn: (groupName: string) => iamApi.assignGroupToUser(user.id, groupName),
+    onMutate: async (groupName: string) => {
+      // Optimistic update: add group to state immediately
+      console.log('assignGroupMutation onMutate - BEFORE update, current userGroups:', userGroupsRef.current)
+      setUserGroups(prev => {
+        const newGroups = prev.includes(groupName) ? prev : [...prev, groupName]
+        console.log('assignGroupMutation onMutate - prev:', prev, 'groupName:', groupName, 'newGroups:', newGroups)
+        // Also update ref immediately
+        userGroupsRef.current = newGroups
+        return newGroups
+      })
+      setSelectedGroup('')
+      console.log('assignGroupMutation onMutate - AFTER update, ref should be:', userGroupsRef.current)
+    },
+    onSuccess: async (_, groupName: string) => {
+      // Don't overwrite state with server data - keep optimistic update
+      // Just invalidate queries for other components
+      queryClient.invalidateQueries({ queryKey: ['iam-users'] })
+      queryClient.invalidateQueries({ queryKey: ['iam-user', user.id] })
+      // Use functional update to get current state
+      setUserGroups(current => {
+        console.log('assignGroupMutation onSuccess - groupName:', groupName, 'current userGroups:', current)
+        return current
+      })
+    },
+    onError: (error: any, groupName: string) => {
+      console.error('Failed to assign group:', error, groupName)
+      // Rollback: remove group from state if API call failed
+      setUserGroups(prev => prev.filter(g => g !== groupName))
+      alert(error.response?.data?.error || error.message || 'Failed to assign group')
+    },
+  })
+
+  const removeGroupMutation = useMutation({
+    mutationFn: (groupName: string) => iamApi.removeGroupFromUser(user.id, groupName),
+    onMutate: async (groupName: string) => {
+      // Store previous state for rollback
+      const previousGroups = userGroups
+      // Optimistic update: remove group from state immediately
+      setUserGroups(prev => prev.filter(g => g !== groupName))
+      return { previousGroups }
+    },
+    onSuccess: async (_, groupName: string) => {
+      // Don't overwrite state with server data - keep optimistic update
+      // Just invalidate queries for other components
+      queryClient.invalidateQueries({ queryKey: ['iam-users'] })
+      queryClient.invalidateQueries({ queryKey: ['iam-user', user.id] })
+      console.log('Group removed successfully:', groupName, 'Current userGroups:', userGroups)
+    },
+    onError: (error: any, _groupName: string, context: any) => {
+      console.error('Failed to remove group:', error)
+      // Rollback: restore previous state if API call failed
+      if (context?.previousGroups) {
+        setUserGroups(context.previousGroups)
+      }
+      alert(error.response?.data?.error || error.message || 'Failed to remove group')
+    },
+  })
+
+  const handleSubmit = (e: React.FormEvent) => {
+    e.preventDefault()
+    // Use refs to get the latest state values (avoid closure issues)
+    const currentRoles = userRolesRef.current
+    const currentGroups = userGroupsRef.current
+    const payload = {
+      email: email.trim(),
+      full_name: fullName.trim() || undefined,
+      is_active: isActive,
+      roles: currentRoles,
+      groups: currentGroups,
+    }
+    console.log('EditUserForm - Submitting payload:', payload)
+    console.log('EditUserForm - currentRoles from ref:', currentRoles)
+    console.log('EditUserForm - currentGroups from ref:', currentGroups)
+    console.log('EditUserForm - userRoles from state:', userRoles)
+    console.log('EditUserForm - userGroups from state:', userGroups)
+    updateMutation.mutate(payload)
+  }
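+
+  // The four assign/remove mutations above share one optimistic shape: apply the
+  // change to local state in onMutate, keep the optimistic value on success while
+  // invalidating sibling caches, and roll back from saved context in onError.
+  // A minimal sketch of the onMutate/onError core, reusing this component's
+  // ref-mirroring trick (useOptimisticAssign is hypothetical and not wired up):
+  const useOptimisticAssign = (
+    apiCall: (name: string) => Promise<unknown>,
+    listRef: React.MutableRefObject<string[]>,
+    setList: React.Dispatch<React.SetStateAction<string[]>>,
+  ) =>
+    useMutation({
+      mutationFn: apiCall,
+      onMutate: (name: string) => {
+        const previous = listRef.current // snapshot for rollback
+        setList(prev => (prev.includes(name) ? prev : [...prev, name]))
+        return { previous }
+      },
+      onError: (_err: unknown, _name: string, context?: { previous: string[] }) => {
+        if (context) setList(context.previous) // restore the pre-mutation list
+      },
+    })
+
+  return (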
+
+ {/* Modal Header */} +
+
+

Edit User

+

Edit user account: {user.username}

+
+ +
+ + {/* Modal Content */} +
+
+ + +

Username cannot be changed

+
+ +
+ + setEmail(e.target.value)} + placeholder="john.doe@example.com" + className="w-full px-4 py-3 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm placeholder-text-secondary/50 focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent transition-colors" + required + /> +
+ +
+ + setFullName(e.target.value)} + placeholder="John Doe" + className="w-full px-4 py-3 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm placeholder-text-secondary/50 focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent transition-colors" + /> +
+ + {/* Roles Section */} +
+ +
+ + +
+
+ {userRoles.length > 0 ? ( + userRoles.map((role) => ( +
+ {role} + +
+ )) + ) : ( +

No roles assigned

+ )} +
+
+ + {/* Groups Section */} +
+ +
+ + +
+
+ {userGroups.length > 0 ? ( + userGroups.map((group) => ( +
+ {group} + +
+ )) + ) : ( +

No groups assigned

+ )} +
+
+ +
+ +

+ {isActive ? 'User can log in and access the system' : 'User account is disabled'} +

+
+ + {/* Action Buttons */} +
+ + +
+
+
+
+  )
+}
+
+// Groups Tab Component
+function GroupsTab() {
+  const queryClient = useQueryClient()
+  const [searchQuery, setSearchQuery] = useState('')
+  const [showCreateForm, setShowCreateForm] = useState(false)
+  const [showEditForm, setShowEditForm] = useState(false)
+  const [selectedGroup, setSelectedGroup] = useState<Group | null>(null)
+  const [openActionMenu, setOpenActionMenu] = useState<string | null>(null)
+
+  const { data: groups, isLoading } = useQuery({
+    queryKey: ['iam-groups'],
+    queryFn: iamApi.listGroups,
+  })
+
+  const filteredGroups = groups?.filter(group =>
+    group.name.toLowerCase().includes(searchQuery.toLowerCase()) ||
+    (group.description && group.description.toLowerCase().includes(searchQuery.toLowerCase()))
+  ) || []
+
+  const deleteGroupMutation = useMutation({
+    mutationFn: iamApi.deleteGroup,
+    onSuccess: async () => {
+      queryClient.invalidateQueries({ queryKey: ['iam-groups'] })
+      await queryClient.refetchQueries({ queryKey: ['iam-groups'] })
+      queryClient.invalidateQueries({ queryKey: ['iam-users'] })
+      await queryClient.refetchQueries({ queryKey: ['iam-users'] })
+      alert('Group deleted successfully!')
+    },
+    onError: (error: any) => {
+      console.error('Failed to delete group:', error)
+      alert(error.response?.data?.error || error.message || 'Failed to delete group')
+    },
+  })
+
+  const handleDeleteGroup = (groupId: string, groupName: string) => {
+    if (confirm(`Are you sure you want to delete group "${groupName}"? This action cannot be undone.`)) {
+      deleteGroupMutation.mutate(groupId)
+    }
+  }
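+
+  // The invalidate-then-refetch pair over ['iam-groups'] and ['iam-users'] recurs
+  // throughout this file. A small helper could consolidate it, e.g.
+  // await refreshIamCaches([['iam-groups'], ['iam-users']]). This is a sketch;
+  // refreshIamCaches is a hypothetical name, not part of the original file.
+  const refreshIamCaches = async (keys: string[][]) => {
+    for (const queryKey of keys) {
+      queryClient.invalidateQueries({ queryKey })    // mark cached data stale
+      await queryClient.refetchQueries({ queryKey }) // and refetch it right away
+    }
+  }
+
+  return (
+    <>
+      {/* Toolbar */}
+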
+
+
+ + setSearchQuery(e.target.value)} + className="w-full bg-card-dark border border-border-dark rounded-lg pl-10 pr-4 py-2.5 text-white placeholder-text-secondary/50 focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent text-sm" + /> +
+ +
+ +
+ + {/* Groups Table */} +
+
+ + + + + + + + + + + + + {isLoading ? ( + + + + ) : filteredGroups.length > 0 ? ( + filteredGroups.map((group) => ( + + + + + + + + + )) + ) : ( + + + + )} + +
NameDescriptionUsersRolesTypeActions
+ Loading groups... +
+ {group.name} + + {group.description || '-'} + + {group.user_count} + + {group.role_count} + + {group.is_system ? ( + + + System + + ) : ( + Custom + )} + +
+ + {openActionMenu === group.id && ( + <> +
setOpenActionMenu(null)} + /> +
+
+ + +
+
+ + )} +
+
+ No groups found +
+
+ {/* Pagination */} +
+ + Showing 1-{filteredGroups.length} of{' '} + {filteredGroups.length} groups + +
+ + +
+
+
+ + {/* Create Group Form Modal */} + {showCreateForm && ( + setShowCreateForm(false)} + onSuccess={async () => { + setShowCreateForm(false) + queryClient.invalidateQueries({ queryKey: ['iam-groups'] }) + await queryClient.refetchQueries({ queryKey: ['iam-groups'] }) + }} + /> + )} + + {/* Edit Group Form Modal */} + {showEditForm && selectedGroup && ( + { + setShowEditForm(false) + setSelectedGroup(null) + }} + onSuccess={async () => { + setShowEditForm(false) + setSelectedGroup(null) + queryClient.invalidateQueries({ queryKey: ['iam-groups'] }) + await queryClient.refetchQueries({ queryKey: ['iam-groups'] }) + queryClient.invalidateQueries({ queryKey: ['iam-users'] }) + await queryClient.refetchQueries({ queryKey: ['iam-users'] }) + }} + /> + )} + + ) +} + +interface CreateGroupFormProps { + onClose: () => void + onSuccess: () => void +} + +function CreateGroupForm({ onClose, onSuccess }: CreateGroupFormProps) { + const [name, setName] = useState('') + const [description, setDescription] = useState('') + + const createMutation = useMutation({ + mutationFn: iamApi.createGroup, + onSuccess: () => { + onSuccess() + }, + onError: (error: any) => { + console.error('Failed to create group:', error) + const errorMessage = error.response?.data?.error || error.message || 'Failed to create group' + alert(errorMessage) + }, + }) + + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault() + if (!name.trim()) { + alert('Name is required') + return + } + + const groupData = { + name: name.trim(), + description: description.trim() || '', + } + + console.log('Creating group:', groupData) + createMutation.mutate(groupData) + } + + return ( +
+
+ {/* Modal Header */} +
+
+

Create Group

+

Create a new user group

+
+ +
+ + {/* Modal Content */} +
+
+ + setName(e.target.value)} + placeholder="operators" + className="w-full px-4 py-3 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm placeholder-text-secondary/50 focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent transition-colors" + required + /> +
+ +
+ +