diff --git a/backend/bin/calypso-api b/backend/bin/calypso-api index 216dcf9..7e42af4 100755 Binary files a/backend/bin/calypso-api and b/backend/bin/calypso-api differ diff --git a/backend/internal/backup/handler.go b/backend/internal/backup/handler.go new file mode 100644 index 0000000..c3e7709 --- /dev/null +++ b/backend/internal/backup/handler.go @@ -0,0 +1,118 @@ +package backup + +import ( + "fmt" + "net/http" + + "github.com/atlasos/calypso/internal/common/logger" + "github.com/gin-gonic/gin" +) + +// Handler handles backup-related API requests +type Handler struct { + service *Service + logger *logger.Logger +} + +// NewHandler creates a new backup handler +func NewHandler(service *Service, log *logger.Logger) *Handler { + return &Handler{ + service: service, + logger: log, + } +} + +// ListJobs lists backup jobs with optional filters +func (h *Handler) ListJobs(c *gin.Context) { + opts := ListJobsOptions{ + Status: c.Query("status"), + JobType: c.Query("job_type"), + ClientName: c.Query("client_name"), + JobName: c.Query("job_name"), + } + + // Parse pagination + var limit, offset int + if limitStr := c.Query("limit"); limitStr != "" { + if _, err := fmt.Sscanf(limitStr, "%d", &limit); err == nil { + opts.Limit = limit + } + } + if offsetStr := c.Query("offset"); offsetStr != "" { + if _, err := fmt.Sscanf(offsetStr, "%d", &offset); err == nil { + opts.Offset = offset + } + } + + jobs, totalCount, err := h.service.ListJobs(c.Request.Context(), opts) + if err != nil { + h.logger.Error("Failed to list jobs", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list jobs"}) + return + } + + if jobs == nil { + jobs = []Job{} + } + + c.JSON(http.StatusOK, gin.H{ + "jobs": jobs, + "total": totalCount, + "limit": opts.Limit, + "offset": opts.Offset, + }) +} + +// GetJob retrieves a job by ID +func (h *Handler) GetJob(c *gin.Context) { + id := c.Param("id") + + job, err := h.service.GetJob(c.Request.Context(), id) + if err != nil { + if 
err.Error() == "job not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "job not found"}) + return + } + h.logger.Error("Failed to get job", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get job"}) + return + } + + c.JSON(http.StatusOK, job) +} + +// CreateJob creates a new backup job +func (h *Handler) CreateJob(c *gin.Context) { + var req CreateJobRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + // Validate job type + validJobTypes := map[string]bool{ + "Backup": true, "Restore": true, "Verify": true, "Copy": true, "Migrate": true, + } + if !validJobTypes[req.JobType] { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid job_type"}) + return + } + + // Validate job level + validJobLevels := map[string]bool{ + "Full": true, "Incremental": true, "Differential": true, "Since": true, + } + if !validJobLevels[req.JobLevel] { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid job_level"}) + return + } + + job, err := h.service.CreateJob(c.Request.Context(), req) + if err != nil { + h.logger.Error("Failed to create job", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create job"}) + return + } + + c.JSON(http.StatusCreated, job) +} diff --git a/backend/internal/backup/service.go b/backend/internal/backup/service.go new file mode 100644 index 0000000..45e1052 --- /dev/null +++ b/backend/internal/backup/service.go @@ -0,0 +1,962 @@ +package backup + +import ( + "context" + "database/sql" + "fmt" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/atlasos/calypso/internal/common/database" + "github.com/atlasos/calypso/internal/common/logger" +) + +// Service handles backup job operations +type Service struct { + db *database.DB + baculaDB *database.DB // Optional: separate connection to Bacula database + logger *logger.Logger + baculaDBName string // Bacula database name (bacula, bareos, etc.) 
+ dbPassword string // Database password for dblink (optional, will try without if empty) +} + +// NewService creates a new backup service +func NewService(db *database.DB, log *logger.Logger) *Service { + return &Service{ + db: db, + logger: log, + baculaDBName: "bacula", // Default Bacula database name + } +} + +// SetDatabasePassword sets the database password for dblink connections +func (s *Service) SetDatabasePassword(password string) { + s.dbPassword = password + s.logger.Debug("Database password set for dblink", "has_password", password != "", "password_length", len(password)) +} + +// Job represents a backup job +type Job struct { + ID string `json:"id"` + JobID int `json:"job_id"` + JobName string `json:"job_name"` + ClientName string `json:"client_name"` + JobType string `json:"job_type"` + JobLevel string `json:"job_level"` + Status string `json:"status"` + BytesWritten int64 `json:"bytes_written"` + FilesWritten int `json:"files_written"` + DurationSeconds *int `json:"duration_seconds,omitempty"` + StartedAt *time.Time `json:"started_at,omitempty"` + EndedAt *time.Time `json:"ended_at,omitempty"` + ErrorMessage *string `json:"error_message,omitempty"` + StorageName *string `json:"storage_name,omitempty"` + PoolName *string `json:"pool_name,omitempty"` + VolumeName *string `json:"volume_name,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// ListJobsOptions represents filtering and pagination options +type ListJobsOptions struct { + Status string // Filter by status: "Running", "Completed", "Failed", etc. + JobType string // Filter by job type: "Backup", "Restore", etc. 
+ ClientName string // Filter by client name + JobName string // Filter by job name + Limit int // Number of results to return + Offset int // Offset for pagination +} + +// SyncJobsFromBacula syncs jobs from Bacula/Bareos to the database +// Tries to query Bacula database directly first, falls back to bconsole if database access fails +func (s *Service) SyncJobsFromBacula(ctx context.Context) error { + // Try to query Bacula database directly (if user has access) + jobs, err := s.queryBaculaDatabase(ctx) + if err != nil { + s.logger.Debug("Failed to query Bacula database directly, trying bconsole", "error", err) + // Fallback to bconsole + return s.syncFromBconsole(ctx) + } + + if len(jobs) == 0 { + s.logger.Debug("No jobs found in Bacula database") + return nil + } + + // Upsert jobs to Calypso database + successCount := 0 + for _, job := range jobs { + err := s.upsertJob(ctx, job) + if err != nil { + s.logger.Error("Failed to upsert job", "job_id", job.JobID, "error", err) + continue + } + successCount++ + } + + s.logger.Info("Synced jobs from Bacula database", "total", len(jobs), "success", successCount) + return nil +} + +// getBaculaConnection gets or creates a connection to Bacula database +// Tries to create connection using same host/port/user but different database name +func (s *Service) getBaculaConnection(ctx context.Context) (*database.DB, error) { + if s.baculaDB != nil { + // Test if connection is still alive + if err := s.baculaDB.Ping(); err == nil { + return s.baculaDB, nil + } + // Connection is dead, close it + s.baculaDB.Close() + s.baculaDB = nil + } + + // Try to get connection info from current database connection + // We'll query the current database to get connection parameters + var currentDB, currentUser, currentHost string + var currentPort int + + // Get current database connection info + query := `SELECT current_database(), current_user, inet_server_addr(), inet_server_port()` + err := s.db.QueryRowContext(ctx, query).Scan(¤tDB, 
¤tUser, ¤tHost, ¤tPort) + if err != nil { + return nil, fmt.Errorf("failed to get current database info: %w", err) + } + + // If host is null, it's a local connection (Unix socket) + if currentHost == "" { + currentHost = "localhost" + } + if currentPort == 0 { + currentPort = 5432 // Default PostgreSQL port + } + + // Try common Bacula database names + databases := []string{"bacula", "bareos", s.baculaDBName} + + for _, dbName := range databases { + if dbName == "" { + continue + } + + // Try to create connection to Bacula database + // We'll use the same connection parameters but different database name + // Note: This assumes same host/port/user/password + // For production, you'd want to configure this separately + + // We can't create a new connection without password + // So we'll try to query using dblink or assume same connection can access Bacula DB + // For now, return nil and let queryBaculaDatabase handle it via dblink or direct query + } + + return nil, fmt.Errorf("Bacula database connection not configured - will use dblink or direct query") +} + +// queryBaculaDatabase queries Bacula database directly +// Following Bacularis approach: query Job table directly from Bacula database +// Since Bacula is in separate database, prioritize dblink over direct query +func (s *Service) queryBaculaDatabase(ctx context.Context) ([]Job, error) { + // Method 1: Try using dblink extension for cross-database query (preferred for separate databases) + checkDblink := `SELECT EXISTS(SELECT 1 FROM pg_extension WHERE extname = 'dblink')` + var dblinkExists bool + err := s.db.QueryRowContext(ctx, checkDblink).Scan(&dblinkExists) + if err == nil && dblinkExists { + jobs, err := s.queryBaculaViaDblink(ctx) + if err == nil && len(jobs) > 0 { + return jobs, nil + } + s.logger.Debug("dblink query failed, trying direct query", "error", err) + } else { + s.logger.Debug("dblink extension not found, trying direct query") + } + + // Method 2: Try querying Job table directly (if 
Bacula is in same database) + jobs, err := s.queryBaculaDirect(ctx) + if err == nil && len(jobs) > 0 { + return jobs, nil + } + s.logger.Debug("Direct query also failed", "error", err) + + return nil, fmt.Errorf("failed to query Bacula database: dblink and direct query both failed") +} + +// queryBaculaDirect queries Job table directly (Bacularis approach) +// Assumes Bacula tables are in same database or accessible via search_path +func (s *Service) queryBaculaDirect(ctx context.Context) ([]Job, error) { + // Bacularis-style query: direct query to Job table with JOIN to Client + // This is the standard way Bacularis queries Bacula database + query := ` + SELECT + j.JobId as job_id, + j.Name as job_name, + COALESCE(c.Name, 'unknown') as client_name, + CASE + WHEN j.Type = 'B' THEN 'Backup' + WHEN j.Type = 'R' THEN 'Restore' + WHEN j.Type = 'V' THEN 'Verify' + WHEN j.Type = 'C' THEN 'Copy' + WHEN j.Type = 'M' THEN 'Migrate' + ELSE 'Backup' + END as job_type, + CASE + WHEN j.Level = 'F' THEN 'Full' + WHEN j.Level = 'I' THEN 'Incremental' + WHEN j.Level = 'D' THEN 'Differential' + WHEN j.Level = 'S' THEN 'Since' + ELSE 'Full' + END as job_level, + CASE + WHEN j.JobStatus = 'T' THEN 'Running' + WHEN j.JobStatus = 'C' THEN 'Completed' + WHEN j.JobStatus = 'f' OR j.JobStatus = 'F' THEN 'Failed' + WHEN j.JobStatus = 'A' THEN 'Canceled' + WHEN j.JobStatus = 'W' THEN 'Waiting' + ELSE 'Waiting' + END as status, + COALESCE(j.JobBytes, 0) as bytes_written, + COALESCE(j.JobFiles, 0) as files_written, + j.StartTime as started_at, + j.EndTime as ended_at + FROM Job j + LEFT JOIN Client c ON j.ClientId = c.ClientId + ORDER BY j.StartTime DESC + LIMIT 1000 + ` + + rows, err := s.db.QueryContext(ctx, query) + if err != nil { + return nil, fmt.Errorf("Job table not found or not accessible: %w", err) + } + defer rows.Close() + + var jobs []Job + for rows.Next() { + var job Job + var startedAt, endedAt sql.NullTime + + err := rows.Scan( + &job.JobID, &job.JobName, &job.ClientName, + 
&job.JobType, &job.JobLevel, &job.Status, + &job.BytesWritten, &job.FilesWritten, &startedAt, &endedAt, + ) + if err != nil { + s.logger.Error("Failed to scan Bacula job", "error", err) + continue + } + + if startedAt.Valid { + job.StartedAt = &startedAt.Time + } + if endedAt.Valid { + job.EndedAt = &endedAt.Time + // Calculate duration if both start and end times are available + if job.StartedAt != nil { + duration := int(endedAt.Time.Sub(*job.StartedAt).Seconds()) + job.DurationSeconds = &duration + } + } + + jobs = append(jobs, job) + } + + if err := rows.Err(); err != nil { + return nil, err + } + + if len(jobs) > 0 { + s.logger.Info("Successfully queried Bacula database (direct)", "count", len(jobs)) + return jobs, nil + } + + return jobs, nil // Return empty list, not an error +} + +// queryBaculaViaDblink queries Bacula database using dblink extension +// Assumes dblink is installed and user has access to bacula database +func (s *Service) queryBaculaViaDblink(ctx context.Context) ([]Job, error) { + // Get current user and connection info for dblink + var currentUser, currentHost string + var currentPort int + + // Get current connection info + err := s.db.QueryRowContext(ctx, + `SELECT current_user, COALESCE(inet_server_addr()::text, ''), COALESCE(inet_server_port(), 5432)`).Scan( + ¤tUser, ¤tHost, ¤tPort) + if err != nil { + return nil, fmt.Errorf("failed to get connection info: %w", err) + } + + // Log connection info (without password) + s.logger.Debug("Preparing dblink connection", "user", currentUser, "host", currentHost, "port", currentPort, "has_password", s.dbPassword != "") + + // Try common Bacula database names + databases := []string{"bacula", "bareos", s.baculaDBName} + + for _, dbName := range databases { + if dbName == "" { + continue + } + + // Build dblink connection string + // Format: 'dbname=database_name user=username password=password' + // dblink requires password even for local connections + connStr := fmt.Sprintf("dbname=%s 
user=%s", dbName, currentUser) + + // Add password if available (required for dblink) + if s.dbPassword != "" { + // Escape special characters in password for connection string + // Replace single quotes with \' and backslashes with \\ + escapedPassword := strings.ReplaceAll(s.dbPassword, "\\", "\\\\") + escapedPassword = strings.ReplaceAll(escapedPassword, "'", "\\'") + connStr += fmt.Sprintf(" password='%s'", escapedPassword) + } + + // Add host/port for remote connections + if currentHost != "" { + connStr += fmt.Sprintf(" host=%s port=%d", currentHost, currentPort) + } + + // Query using dblink - get all data in one query with JOIN + // Escape single quotes in SQL string for dblink (double them) + innerQuery := `SELECT + j.JobId, + j.Name, + j.Type, + j.Level, + j.JobStatus, + j.JobBytes, + j.JobFiles, + j.StartTime, + j.EndTime, + COALESCE(c.Name, 'unknown') as ClientName + FROM Job j + LEFT JOIN Client c ON j.ClientId = c.ClientId + ORDER BY j.StartTime DESC + LIMIT 1000` + + // Escape single quotes in inner query for dblink (double them) + escapedQuery := strings.ReplaceAll(innerQuery, "'", "''") + + query := fmt.Sprintf(` + SELECT + JobId as job_id, + Name as job_name, + ClientName as client_name, + CASE + WHEN Type = 'B' THEN 'Backup' + WHEN Type = 'R' THEN 'Restore' + WHEN Type = 'V' THEN 'Verify' + WHEN Type = 'C' THEN 'Copy' + WHEN Type = 'M' THEN 'Migrate' + ELSE 'Backup' + END as job_type, + CASE + WHEN Level = 'F' THEN 'Full' + WHEN Level = 'I' THEN 'Incremental' + WHEN Level = 'D' THEN 'Differential' + WHEN Level = 'S' THEN 'Since' + ELSE 'Full' + END as job_level, + CASE + WHEN JobStatus = 'T' THEN 'Running' + WHEN JobStatus = 'C' THEN 'Completed' + WHEN JobStatus = 'f' OR JobStatus = 'F' THEN 'Failed' + WHEN JobStatus = 'A' THEN 'Canceled' + WHEN JobStatus = 'W' THEN 'Waiting' + ELSE 'Waiting' + END as status, + COALESCE(JobBytes, 0) as bytes_written, + COALESCE(JobFiles, 0) as files_written, + StartTime as started_at, + EndTime as ended_at + FROM 
dblink('%s', '%s') AS t(JobId int, Name text, Type char, Level char, JobStatus char, JobBytes bigint, JobFiles int, StartTime timestamp, EndTime timestamp, ClientName text) + `, connStr, escapedQuery) + + rows, err := s.db.QueryContext(ctx, query) + if err != nil { + s.logger.Error("Failed to query Bacula via dblink", "database", dbName, "connection", connStr, "error", err) + continue + } + defer rows.Close() + + var jobs []Job + for rows.Next() { + var job Job + var startedAt, endedAt sql.NullTime + + err := rows.Scan( + &job.JobID, &job.JobName, &job.ClientName, + &job.JobType, &job.JobLevel, &job.Status, + &job.BytesWritten, &job.FilesWritten, &startedAt, &endedAt, + ) + if err != nil { + s.logger.Error("Failed to scan Bacula job from dblink", "error", err) + continue + } + + if startedAt.Valid { + job.StartedAt = &startedAt.Time + } + if endedAt.Valid { + job.EndedAt = &endedAt.Time + // Calculate duration + if job.StartedAt != nil { + duration := int(endedAt.Time.Sub(*job.StartedAt).Seconds()) + job.DurationSeconds = &duration + } + } + + jobs = append(jobs, job) + } + + if err := rows.Err(); err != nil { + s.logger.Debug("Error iterating dblink results", "database", dbName, "error", err) + continue + } + + if len(jobs) > 0 { + s.logger.Info("Successfully queried Bacula database via dblink", "database", dbName, "count", len(jobs)) + return jobs, nil + } + } + + return nil, fmt.Errorf("failed to query Bacula database via dblink from any database") +} + +// syncFromBconsole syncs jobs using bconsole command (fallback method) +func (s *Service) syncFromBconsole(ctx context.Context) error { + // Execute bconsole command to list jobs + cmd := exec.CommandContext(ctx, "sh", "-c", "echo -e 'list jobs\nquit' | bconsole") + + output, err := cmd.CombinedOutput() + if err != nil { + s.logger.Debug("Failed to execute bconsole", "error", err, "output", string(output)) + return nil // Don't fail, just return empty + } + + if len(output) == 0 { + s.logger.Debug("bconsole 
returned empty output") + return nil + } + + // Parse bconsole output + jobs := s.parseBconsoleOutput(ctx, string(output)) + + if len(jobs) == 0 { + s.logger.Debug("No jobs found in bconsole output") + return nil + } + + // Upsert jobs to database + successCount := 0 + for _, job := range jobs { + err := s.upsertJob(ctx, job) + if err != nil { + s.logger.Error("Failed to upsert job", "job_id", job.JobID, "error", err) + continue + } + successCount++ + } + + s.logger.Info("Synced jobs from bconsole", "total", len(jobs), "success", successCount) + return nil +} + +// parseBconsoleOutput parses bconsole "list jobs" output +func (s *Service) parseBconsoleOutput(ctx context.Context, output string) []Job { + var jobs []Job + lines := strings.Split(output, "\n") + + // Skip header lines until we find the data rows + inDataSection := false + for _, line := range lines { + line = strings.TrimSpace(line) + + // Skip empty lines and separators + if line == "" || strings.HasPrefix(line, "+") { + continue + } + + // Start data section when we see header + if strings.HasPrefix(line, "| jobid") { + inDataSection = true + continue + } + + // Stop at footer separator + if strings.HasPrefix(line, "*") { + break + } + + if !inDataSection { + continue + } + + // Parse data row: | jobid | name | starttime | type | level | jobfiles | jobbytes | jobstatus | + if strings.HasPrefix(line, "|") { + parts := strings.Split(line, "|") + if len(parts) < 9 { + continue + } + + // Extract fields (skip first empty part) + jobIDStr := strings.TrimSpace(parts[1]) + jobName := strings.TrimSpace(parts[2]) + startTimeStr := strings.TrimSpace(parts[3]) + jobTypeChar := strings.TrimSpace(parts[4]) + jobLevelChar := strings.TrimSpace(parts[5]) + jobFilesStr := strings.TrimSpace(parts[6]) + jobBytesStr := strings.TrimSpace(parts[7]) + jobStatusChar := strings.TrimSpace(parts[8]) + + // Parse job ID + jobID, err := strconv.Atoi(jobIDStr) + if err != nil { + s.logger.Warn("Failed to parse job ID", "value", 
jobIDStr, "error", err) + continue + } + + // Parse start time + var startedAt *time.Time + if startTimeStr != "" && startTimeStr != "-" { + // Format: 2025-12-27 23:05:02 + parsedTime, err := time.Parse("2006-01-02 15:04:05", startTimeStr) + if err == nil { + startedAt = &parsedTime + } + } + + // Map job type + jobType := "Backup" + switch jobTypeChar { + case "B": + jobType = "Backup" + case "R": + jobType = "Restore" + case "V": + jobType = "Verify" + case "C": + jobType = "Copy" + case "M": + jobType = "Migrate" + } + + // Map job level + jobLevel := "Full" + switch jobLevelChar { + case "F": + jobLevel = "Full" + case "I": + jobLevel = "Incremental" + case "D": + jobLevel = "Differential" + case "S": + jobLevel = "Since" + } + + // Parse files and bytes + filesWritten := 0 + if jobFilesStr != "" && jobFilesStr != "-" { + if f, err := strconv.Atoi(jobFilesStr); err == nil { + filesWritten = f + } + } + + bytesWritten := int64(0) + if jobBytesStr != "" && jobBytesStr != "-" { + if b, err := strconv.ParseInt(jobBytesStr, 10, 64); err == nil { + bytesWritten = b + } + } + + // Map job status + status := "Waiting" + switch strings.ToLower(jobStatusChar) { + case "t", "T": + status = "Running" + case "c", "C": + status = "Completed" + case "f", "F": + status = "Failed" + case "A": + status = "Canceled" + case "W": + status = "Waiting" + } + + // Try to extract client name from job name (common pattern: JobName-ClientName) + clientName := "unknown" + // For now, use job name as client name if it looks like a client name + // In real implementation, we'd query job details from Bacula + if jobName != "" { + // Try to get client name from job details + clientNameFromJob := s.getClientNameFromJob(ctx, jobID) + if clientNameFromJob != "" { + clientName = clientNameFromJob + } else { + // Fallback: use job name as client name + clientName = jobName + } + } + + job := Job{ + JobID: jobID, + JobName: jobName, + ClientName: clientName, + JobType: jobType, + JobLevel: 
jobLevel, + Status: status, + BytesWritten: bytesWritten, + FilesWritten: filesWritten, + StartedAt: startedAt, + } + + jobs = append(jobs, job) + } + } + + return jobs +} + +// getClientNameFromJob gets client name from job details using bconsole +func (s *Service) getClientNameFromJob(ctx context.Context, jobID int) string { + // Execute bconsole to get job details + cmd := exec.CommandContext(ctx, "sh", "-c", fmt.Sprintf("echo -e 'list job jobid=%d\nquit' | bconsole", jobID)) + + output, err := cmd.CombinedOutput() + if err != nil { + s.logger.Debug("Failed to get job details", "job_id", jobID, "error", err) + return "" + } + + // Parse output to find Client line + lines := strings.Split(string(output), "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "Client:") { + parts := strings.Split(line, ":") + if len(parts) >= 2 { + return strings.TrimSpace(parts[1]) + } + } + } + + return "" +} + +// upsertJob inserts or updates a job in the database +func (s *Service) upsertJob(ctx context.Context, job Job) error { + query := ` + INSERT INTO backup_jobs ( + job_id, job_name, client_name, job_type, job_level, status, + bytes_written, files_written, started_at, updated_at + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, NOW()) + ON CONFLICT (job_id) DO UPDATE SET + job_name = EXCLUDED.job_name, + job_type = EXCLUDED.job_type, + job_level = EXCLUDED.job_level, + status = EXCLUDED.status, + bytes_written = EXCLUDED.bytes_written, + files_written = EXCLUDED.files_written, + started_at = EXCLUDED.started_at, + updated_at = NOW() + ` + + // Use job name as client name if client_name is empty (we'll improve this later) + clientName := job.ClientName + if clientName == "" { + clientName = "unknown" + } + + _, err := s.db.ExecContext(ctx, query, + job.JobID, job.JobName, clientName, job.JobType, job.JobLevel, job.Status, + job.BytesWritten, job.FilesWritten, job.StartedAt, + ) + + return err +} + +// ListJobs lists backup jobs 
with optional filters +func (s *Service) ListJobs(ctx context.Context, opts ListJobsOptions) ([]Job, int, error) { + // Try to sync jobs from Bacula first (non-blocking - if it fails, continue with database) + // Don't return error if sync fails, just log it and continue + // This allows the API to work even if bconsole is not available + syncErr := s.SyncJobsFromBacula(ctx) + if syncErr != nil { + s.logger.Debug("Failed to sync jobs from Bacula, using database only", "error", syncErr) + // Continue anyway - we'll use whatever is in the database + } + + // Build WHERE clause + whereClauses := []string{"1=1"} + args := []interface{}{} + argIndex := 1 + + if opts.Status != "" { + whereClauses = append(whereClauses, fmt.Sprintf("status = $%d", argIndex)) + args = append(args, opts.Status) + argIndex++ + } + + if opts.JobType != "" { + whereClauses = append(whereClauses, fmt.Sprintf("job_type = $%d", argIndex)) + args = append(args, opts.JobType) + argIndex++ + } + + if opts.ClientName != "" { + whereClauses = append(whereClauses, fmt.Sprintf("client_name ILIKE $%d", argIndex)) + args = append(args, "%"+opts.ClientName+"%") + argIndex++ + } + + if opts.JobName != "" { + whereClauses = append(whereClauses, fmt.Sprintf("job_name ILIKE $%d", argIndex)) + args = append(args, "%"+opts.JobName+"%") + argIndex++ + } + + whereClause := "" + if len(whereClauses) > 0 { + whereClause = "WHERE " + whereClauses[0] + for i := 1; i < len(whereClauses); i++ { + whereClause += " AND " + whereClauses[i] + } + } + + // Get total count + countQuery := fmt.Sprintf("SELECT COUNT(*) FROM backup_jobs %s", whereClause) + var totalCount int + err := s.db.QueryRowContext(ctx, countQuery, args...).Scan(&totalCount) + if err != nil { + return nil, 0, fmt.Errorf("failed to count jobs: %w", err) + } + + // Set default limit + limit := opts.Limit + if limit <= 0 { + limit = 50 + } + if limit > 100 { + limit = 100 + } + + // Build query with pagination + query := fmt.Sprintf(` + SELECT id, job_id, 
job_name, client_name, job_type, job_level, status, + bytes_written, files_written, duration_seconds, + started_at, ended_at, error_message, + storage_name, pool_name, volume_name, + created_at, updated_at + FROM backup_jobs + %s + ORDER BY started_at DESC NULLS LAST, created_at DESC + LIMIT $%d OFFSET $%d + `, whereClause, argIndex, argIndex+1) + + args = append(args, limit, opts.Offset) + + rows, err := s.db.QueryContext(ctx, query, args...) + if err != nil { + return nil, 0, fmt.Errorf("failed to query jobs: %w", err) + } + defer rows.Close() + + var jobs []Job + for rows.Next() { + var job Job + var durationSeconds sql.NullInt64 + var startedAt, endedAt sql.NullTime + var errorMessage, storageName, poolName, volumeName sql.NullString + + err := rows.Scan( + &job.ID, &job.JobID, &job.JobName, &job.ClientName, + &job.JobType, &job.JobLevel, &job.Status, + &job.BytesWritten, &job.FilesWritten, &durationSeconds, + &startedAt, &endedAt, &errorMessage, + &storageName, &poolName, &volumeName, + &job.CreatedAt, &job.UpdatedAt, + ) + if err != nil { + s.logger.Error("Failed to scan job", "error", err) + continue + } + + if durationSeconds.Valid { + dur := int(durationSeconds.Int64) + job.DurationSeconds = &dur + } + if startedAt.Valid { + job.StartedAt = &startedAt.Time + } + if endedAt.Valid { + job.EndedAt = &endedAt.Time + } + if errorMessage.Valid { + job.ErrorMessage = &errorMessage.String + } + if storageName.Valid { + job.StorageName = &storageName.String + } + if poolName.Valid { + job.PoolName = &poolName.String + } + if volumeName.Valid { + job.VolumeName = &volumeName.String + } + + jobs = append(jobs, job) + } + + return jobs, totalCount, rows.Err() +} + +// GetJob retrieves a job by ID +func (s *Service) GetJob(ctx context.Context, id string) (*Job, error) { + query := ` + SELECT id, job_id, job_name, client_name, job_type, job_level, status, + bytes_written, files_written, duration_seconds, + started_at, ended_at, error_message, + storage_name, pool_name, 
volume_name, + created_at, updated_at + FROM backup_jobs + WHERE id = $1 + ` + + var job Job + var durationSeconds sql.NullInt64 + var startedAt, endedAt sql.NullTime + var errorMessage, storageName, poolName, volumeName sql.NullString + + err := s.db.QueryRowContext(ctx, query, id).Scan( + &job.ID, &job.JobID, &job.JobName, &job.ClientName, + &job.JobType, &job.JobLevel, &job.Status, + &job.BytesWritten, &job.FilesWritten, &durationSeconds, + &startedAt, &endedAt, &errorMessage, + &storageName, &poolName, &volumeName, + &job.CreatedAt, &job.UpdatedAt, + ) + if err != nil { + if err == sql.ErrNoRows { + return nil, fmt.Errorf("job not found") + } + return nil, fmt.Errorf("failed to get job: %w", err) + } + + if durationSeconds.Valid { + dur := int(durationSeconds.Int64) + job.DurationSeconds = &dur + } + if startedAt.Valid { + job.StartedAt = &startedAt.Time + } + if endedAt.Valid { + job.EndedAt = &endedAt.Time + } + if errorMessage.Valid { + job.ErrorMessage = &errorMessage.String + } + if storageName.Valid { + job.StorageName = &storageName.String + } + if poolName.Valid { + job.PoolName = &poolName.String + } + if volumeName.Valid { + job.VolumeName = &volumeName.String + } + + return &job, nil +} + +// CreateJobRequest represents a request to create a new backup job +type CreateJobRequest struct { + JobName string `json:"job_name" binding:"required"` + ClientName string `json:"client_name" binding:"required"` + JobType string `json:"job_type" binding:"required"` // 'Backup', 'Restore', 'Verify', 'Copy', 'Migrate' + JobLevel string `json:"job_level" binding:"required"` // 'Full', 'Incremental', 'Differential', 'Since' + StorageName *string `json:"storage_name,omitempty"` + PoolName *string `json:"pool_name,omitempty"` +} + +// CreateJob creates a new backup job +func (s *Service) CreateJob(ctx context.Context, req CreateJobRequest) (*Job, error) { + // Generate a unique job ID (in real implementation, this would come from Bareos) + // For now, we'll use a 
simple incrementing approach or timestamp-based ID + var jobID int + err := s.db.QueryRowContext(ctx, ` + SELECT COALESCE(MAX(job_id), 0) + 1 FROM backup_jobs + `).Scan(&jobID) + if err != nil { + return nil, fmt.Errorf("failed to generate job ID: %w", err) + } + + // Insert the job into database + query := ` + INSERT INTO backup_jobs ( + job_id, job_name, client_name, job_type, job_level, + status, bytes_written, files_written, + storage_name, pool_name, started_at + ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, NOW()) + RETURNING id, job_id, job_name, client_name, job_type, job_level, status, + bytes_written, files_written, duration_seconds, + started_at, ended_at, error_message, + storage_name, pool_name, volume_name, + created_at, updated_at + ` + + var job Job + var durationSeconds sql.NullInt64 + var startedAt, endedAt sql.NullTime + var errorMessage, storageName, poolName, volumeName sql.NullString + + err = s.db.QueryRowContext(ctx, query, + jobID, req.JobName, req.ClientName, req.JobType, req.JobLevel, + "Waiting", 0, 0, + req.StorageName, req.PoolName, + ).Scan( + &job.ID, &job.JobID, &job.JobName, &job.ClientName, + &job.JobType, &job.JobLevel, &job.Status, + &job.BytesWritten, &job.FilesWritten, &durationSeconds, + &startedAt, &endedAt, &errorMessage, + &storageName, &poolName, &volumeName, + &job.CreatedAt, &job.UpdatedAt, + ) + if err != nil { + return nil, fmt.Errorf("failed to create job: %w", err) + } + + if durationSeconds.Valid { + dur := int(durationSeconds.Int64) + job.DurationSeconds = &dur + } + if startedAt.Valid { + job.StartedAt = &startedAt.Time + } + if endedAt.Valid { + job.EndedAt = &endedAt.Time + } + if errorMessage.Valid { + job.ErrorMessage = &errorMessage.String + } + if storageName.Valid { + job.StorageName = &storageName.String + } + if poolName.Valid { + job.PoolName = &poolName.String + } + if volumeName.Valid { + job.VolumeName = &volumeName.String + } + + s.logger.Info("Backup job created", + "job_id", job.JobID, + 
"job_name", job.JobName, + "client_name", job.ClientName, + "job_type", job.JobType, + ) + + return &job, nil +} diff --git a/backend/internal/common/database/migrations.go b/backend/internal/common/database/migrations.go index 82a831a..54353af 100644 --- a/backend/internal/common/database/migrations.go +++ b/backend/internal/common/database/migrations.go @@ -59,7 +59,7 @@ func RunMigrations(ctx context.Context, db *DB) error { if _, err := tx.ExecContext(ctx, string(sql)); err != nil { tx.Rollback() - return fmt.Errorf("failed to execute migration %s: %w", migration.Version, err) + return fmt.Errorf("failed to execute migration %d: %w", migration.Version, err) } // Record migration @@ -68,11 +68,11 @@ func RunMigrations(ctx context.Context, db *DB) error { migration.Version, ); err != nil { tx.Rollback() - return fmt.Errorf("failed to record migration %s: %w", migration.Version, err) + return fmt.Errorf("failed to record migration %d: %w", migration.Version, err) } if err := tx.Commit(); err != nil { - return fmt.Errorf("failed to commit migration %s: %w", migration.Version, err) + return fmt.Errorf("failed to commit migration %d: %w", migration.Version, err) } log.Info("Migration applied successfully", "version", migration.Version) diff --git a/backend/internal/common/database/migrations/009_backup_jobs_schema.sql b/backend/internal/common/database/migrations/009_backup_jobs_schema.sql new file mode 100644 index 0000000..b4b6dc6 --- /dev/null +++ b/backend/internal/common/database/migrations/009_backup_jobs_schema.sql @@ -0,0 +1,34 @@ +-- AtlasOS - Calypso +-- Backup Jobs Schema +-- Version: 9.0 + +-- Backup jobs table +CREATE TABLE IF NOT EXISTS backup_jobs ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + job_id INTEGER NOT NULL UNIQUE, -- Bareos job ID + job_name VARCHAR(255) NOT NULL, + client_name VARCHAR(255) NOT NULL, + job_type VARCHAR(50) NOT NULL, -- 'Backup', 'Restore', 'Verify', 'Copy', 'Migrate' + job_level VARCHAR(50) NOT NULL, -- 'Full', 
'Incremental', 'Differential', 'Since' + status VARCHAR(50) NOT NULL, -- 'Running', 'Completed', 'Failed', 'Canceled', 'Waiting' + bytes_written BIGINT NOT NULL DEFAULT 0, + files_written INTEGER NOT NULL DEFAULT 0, + duration_seconds INTEGER, + started_at TIMESTAMP, + ended_at TIMESTAMP, + error_message TEXT, + storage_name VARCHAR(255), + pool_name VARCHAR(255), + volume_name VARCHAR(255), + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() +); + +-- Indexes for performance +CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_id ON backup_jobs(job_id); +CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_name ON backup_jobs(job_name); +CREATE INDEX IF NOT EXISTS idx_backup_jobs_client_name ON backup_jobs(client_name); +CREATE INDEX IF NOT EXISTS idx_backup_jobs_status ON backup_jobs(status); +CREATE INDEX IF NOT EXISTS idx_backup_jobs_started_at ON backup_jobs(started_at DESC); +CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_type ON backup_jobs(job_type); + diff --git a/backend/internal/common/database/migrations/010_add_backup_permissions.sql b/backend/internal/common/database/migrations/010_add_backup_permissions.sql new file mode 100644 index 0000000..6c8eb5d --- /dev/null +++ b/backend/internal/common/database/migrations/010_add_backup_permissions.sql @@ -0,0 +1,39 @@ +-- AtlasOS - Calypso +-- Add Backup Permissions +-- Version: 10.0 + +-- Insert backup permissions +INSERT INTO permissions (name, resource, action, description) VALUES + ('backup:read', 'backup', 'read', 'View backup jobs and history'), + ('backup:write', 'backup', 'write', 'Create and manage backup jobs'), + ('backup:manage', 'backup', 'manage', 'Full backup management') +ON CONFLICT (name) DO NOTHING; + +-- Assign backup permissions to roles + +-- Admin gets all backup permissions (explicitly assign since admin query in 001 only runs once) +INSERT INTO role_permissions (role_id, permission_id) +SELECT r.id, p.id +FROM roles r, permissions p +WHERE r.name = 
'admin' + AND p.resource = 'backup' +ON CONFLICT DO NOTHING; + +-- Operator gets read and write permissions for backup +INSERT INTO role_permissions (role_id, permission_id) +SELECT r.id, p.id +FROM roles r, permissions p +WHERE r.name = 'operator' + AND p.resource = 'backup' + AND p.action IN ('read', 'write') +ON CONFLICT DO NOTHING; + +-- ReadOnly gets only read permission for backup +INSERT INTO role_permissions (role_id, permission_id) +SELECT r.id, p.id +FROM roles r, permissions p +WHERE r.name = 'readonly' + AND p.resource = 'backup' + AND p.action = 'read' +ON CONFLICT DO NOTHING; + diff --git a/backend/internal/common/router/router.go b/backend/internal/common/router/router.go index 8e5a1a9..889c34c 100644 --- a/backend/internal/common/router/router.go +++ b/backend/internal/common/router/router.go @@ -6,6 +6,7 @@ import ( "github.com/atlasos/calypso/internal/audit" "github.com/atlasos/calypso/internal/auth" + "github.com/atlasos/calypso/internal/backup" "github.com/atlasos/calypso/internal/common/cache" "github.com/atlasos/calypso/internal/common/config" "github.com/atlasos/calypso/internal/common/database" @@ -207,8 +208,21 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng scstGroup.POST("/targets", scstHandler.CreateTarget) scstGroup.POST("/targets/:id/luns", scstHandler.AddLUN) scstGroup.POST("/targets/:id/initiators", scstHandler.AddInitiator) + scstGroup.POST("/targets/:id/enable", scstHandler.EnableTarget) + scstGroup.POST("/targets/:id/disable", scstHandler.DisableTarget) + scstGroup.GET("/initiators", scstHandler.ListAllInitiators) + scstGroup.GET("/initiators/:id", scstHandler.GetInitiator) + scstGroup.DELETE("/initiators/:id", scstHandler.RemoveInitiator) + scstGroup.GET("/extents", scstHandler.ListExtents) + scstGroup.POST("/extents", scstHandler.CreateExtent) + scstGroup.DELETE("/extents/:device", scstHandler.DeleteExtent) scstGroup.POST("/config/apply", scstHandler.ApplyConfig) scstGroup.GET("/handlers", 
scstHandler.ListHandlers) + scstGroup.GET("/portals", scstHandler.ListPortals) + scstGroup.GET("/portals/:id", scstHandler.GetPortal) + scstGroup.POST("/portals", scstHandler.CreatePortal) + scstGroup.PUT("/portals/:id", scstHandler.UpdatePortal) + scstGroup.DELETE("/portals/:id", scstHandler.DeletePortal) } // Physical Tape Libraries @@ -299,6 +313,19 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng iamGroup.DELETE("/groups/:id/users/:user_id", iamHandler.RemoveUserFromGroup) } + // Backup Jobs + backupService := backup.NewService(db, log) + // Set database password for dblink connections + backupService.SetDatabasePassword(cfg.Database.Password) + backupHandler := backup.NewHandler(backupService, log) + backupGroup := protected.Group("/backup") + backupGroup.Use(requirePermission("backup", "read")) + { + backupGroup.GET("/jobs", backupHandler.ListJobs) + backupGroup.GET("/jobs/:id", backupHandler.GetJob) + backupGroup.POST("/jobs", requirePermission("backup", "write"), backupHandler.CreateJob) + } + // Monitoring monitoringHandler := monitoring.NewHandler(db, log, alertService, metricsService, eventHub) monitoringGroup := protected.Group("/monitoring") diff --git a/backend/internal/iam/handler.go b/backend/internal/iam/handler.go index fe02958..7ca67dc 100644 --- a/backend/internal/iam/handler.go +++ b/backend/internal/iam/handler.go @@ -260,118 +260,158 @@ func (h *Handler) UpdateUser(c *gin.Context) { // Update roles if provided if req.Roles != nil { h.logger.Info("Updating user roles", "user_id", userID, "roles", *req.Roles) - // Get current roles currentRoles, err := GetUserRoles(h.db, userID) if err != nil { - h.logger.Error("Failed to get current roles", "error", err) - } - h.logger.Info("Current roles", "user_id", userID, "current_roles", currentRoles) - - // Remove roles that are not in the new list - for _, role := range currentRoles { - found := false - for _, newRole := range *req.Roles { - if role == newRole { - 
found = true - break - } - } - if !found { - roleID, err := GetRoleIDByName(h.db, role) - if err == nil { - err = RemoveUserRole(h.db, userID, roleID) - if err != nil { - h.logger.Error("Failed to remove role", "error", err, "role", role) - } else { - h.logger.Info("Role removed", "user_id", userID, "role", role) - } - } else { - h.logger.Error("Failed to get role ID", "error", err, "role", role) - } - } + h.logger.Error("Failed to get current roles for user", "user_id", userID, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process user roles"}) + return } - // Add new roles that are not in the current list - for _, roleName := range *req.Roles { + rolesToAdd := []string{} + rolesToRemove := []string{} + + // Find roles to add + for _, newRole := range *req.Roles { found := false for _, currentRole := range currentRoles { - if roleName == currentRole { + if newRole == currentRole { found = true break } } if !found { - roleID, err := GetRoleIDByName(h.db, roleName) - if err == nil { - err = AddUserRole(h.db, userID, roleID, currentUser.ID) - if err != nil { - h.logger.Error("Failed to add role", "error", err, "role", roleName) - } else { - h.logger.Info("Role added", "user_id", userID, "role", roleName) - } - } else { - h.logger.Error("Failed to get role ID", "error", err, "role", roleName) + rolesToAdd = append(rolesToAdd, newRole) + } + } + + // Find roles to remove + for _, currentRole := range currentRoles { + found := false + for _, newRole := range *req.Roles { + if currentRole == newRole { + found = true + break } } + if !found { + rolesToRemove = append(rolesToRemove, currentRole) + } + } + + // Add new roles + for _, roleName := range rolesToAdd { + roleID, err := GetRoleIDByName(h.db, roleName) + if err != nil { + if err == sql.ErrNoRows { + h.logger.Warn("Attempted to add non-existent role to user", "user_id", userID, "role_name", roleName) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("role '%s' not found", 
roleName)}) + return + } + h.logger.Error("Failed to get role ID by name", "role_name", roleName, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process roles"}) + return + } + if err := AddUserRole(h.db, userID, roleID, currentUser.ID); err != nil { + h.logger.Error("Failed to add role to user", "user_id", userID, "role_id", roleID, "error", err) + // Don't return early, continue with other roles + continue + } + h.logger.Info("Role added to user", "user_id", userID, "role_name", roleName) + } + + // Remove old roles + for _, roleName := range rolesToRemove { + roleID, err := GetRoleIDByName(h.db, roleName) + if err != nil { + // This case should be rare, but handle it defensively + h.logger.Error("Failed to get role ID for role to be removed", "role_name", roleName, "error", err) + continue + } + if err := RemoveUserRole(h.db, userID, roleID); err != nil { + h.logger.Error("Failed to remove role from user", "user_id", userID, "role_id", roleID, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove role"}) + return + } + h.logger.Info("Role removed from user", "user_id", userID, "role_name", roleName) } } // Update groups if provided if req.Groups != nil { h.logger.Info("Updating user groups", "user_id", userID, "groups", *req.Groups) - // Get current groups currentGroups, err := GetUserGroups(h.db, userID) if err != nil { - h.logger.Error("Failed to get current groups", "error", err) - } - h.logger.Info("Current groups", "user_id", userID, "current_groups", currentGroups) - - // Remove groups that are not in the new list - for _, group := range currentGroups { - found := false - for _, newGroup := range *req.Groups { - if group == newGroup { - found = true - break - } - } - if !found { - groupObj, err := GetGroupByName(h.db, group) - if err == nil { - err = RemoveUserFromGroup(h.db, userID, groupObj.ID) - if err != nil { - h.logger.Error("Failed to remove group", "error", err, "group", group) - } 
else { - h.logger.Info("Group removed", "user_id", userID, "group", group) - } - } else { - h.logger.Error("Failed to get group", "error", err, "group", group) - } - } + h.logger.Error("Failed to get current groups for user", "user_id", userID, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process user groups"}) + return } - // Add new groups that are not in the current list - for _, groupName := range *req.Groups { + groupsToAdd := []string{} + groupsToRemove := []string{} + + // Find groups to add + for _, newGroup := range *req.Groups { found := false for _, currentGroup := range currentGroups { - if groupName == currentGroup { + if newGroup == currentGroup { found = true break } } if !found { - groupObj, err := GetGroupByName(h.db, groupName) - if err == nil { - err = AddUserToGroup(h.db, userID, groupObj.ID, currentUser.ID) - if err != nil { - h.logger.Error("Failed to add group", "error", err, "group", groupName) - } else { - h.logger.Info("Group added", "user_id", userID, "group", groupName) - } - } else { - h.logger.Error("Failed to get group", "error", err, "group", groupName) + groupsToAdd = append(groupsToAdd, newGroup) + } + } + + // Find groups to remove + for _, currentGroup := range currentGroups { + found := false + for _, newGroup := range *req.Groups { + if currentGroup == newGroup { + found = true + break } } + if !found { + groupsToRemove = append(groupsToRemove, currentGroup) + } + } + + // Add new groups + for _, groupName := range groupsToAdd { + group, err := GetGroupByName(h.db, groupName) + if err != nil { + if err == sql.ErrNoRows { + h.logger.Warn("Attempted to add user to non-existent group", "user_id", userID, "group_name", groupName) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("group '%s' not found", groupName)}) + return + } + h.logger.Error("Failed to get group by name", "group_name", groupName, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process 
groups"}) + return + } + if err := AddUserToGroup(h.db, userID, group.ID, currentUser.ID); err != nil { + h.logger.Error("Failed to add user to group", "user_id", userID, "group_id", group.ID, "error", err) + // Don't return early, continue with other groups + continue + } + h.logger.Info("User added to group", "user_id", userID, "group_name", groupName) + } + + // Remove old groups + for _, groupName := range groupsToRemove { + group, err := GetGroupByName(h.db, groupName) + if err != nil { + // This case should be rare, but handle it defensively + h.logger.Error("Failed to get group ID for group to be removed", "group_name", groupName, "error", err) + continue + } + if err := RemoveUserFromGroup(h.db, userID, group.ID); err != nil { + h.logger.Error("Failed to remove user from group", "user_id", userID, "group_id", group.ID, "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove user from group"}) + return + } + h.logger.Info("User removed from group", "user_id", userID, "group_name", groupName) } } diff --git a/backend/internal/monitoring/metrics.go b/backend/internal/monitoring/metrics.go index 075c28e..4df10e5 100644 --- a/backend/internal/monitoring/metrics.go +++ b/backend/internal/monitoring/metrics.go @@ -1,10 +1,14 @@ package monitoring import ( + "bufio" "context" "database/sql" "fmt" + "os" "runtime" + "strconv" + "strings" "time" "github.com/atlasos/calypso/internal/common/database" @@ -13,14 +17,14 @@ import ( // Metrics represents system metrics type Metrics struct { - System SystemMetrics `json:"system"` - Storage StorageMetrics `json:"storage"` - SCST SCSTMetrics `json:"scst"` - Tape TapeMetrics `json:"tape"` - VTL VTLMetrics `json:"vtl"` - Tasks TaskMetrics `json:"tasks"` - API APIMetrics `json:"api"` - CollectedAt time.Time `json:"collected_at"` + System SystemMetrics `json:"system"` + Storage StorageMetrics `json:"storage"` + SCST SCSTMetrics `json:"scst"` + Tape TapeMetrics `json:"tape"` + VTL VTLMetrics 
`json:"vtl"` + Tasks TaskMetrics `json:"tasks"` + API APIMetrics `json:"api"` + CollectedAt time.Time `json:"collected_at"` } // SystemMetrics represents system-level metrics @@ -37,11 +41,11 @@ type SystemMetrics struct { // StorageMetrics represents storage metrics type StorageMetrics struct { - TotalDisks int `json:"total_disks"` - TotalRepositories int `json:"total_repositories"` - TotalCapacityBytes int64 `json:"total_capacity_bytes"` - UsedCapacityBytes int64 `json:"used_capacity_bytes"` - AvailableBytes int64 `json:"available_bytes"` + TotalDisks int `json:"total_disks"` + TotalRepositories int `json:"total_repositories"` + TotalCapacityBytes int64 `json:"total_capacity_bytes"` + UsedCapacityBytes int64 `json:"used_capacity_bytes"` + AvailableBytes int64 `json:"available_bytes"` UsagePercent float64 `json:"usage_percent"` } @@ -72,28 +76,43 @@ type VTLMetrics struct { // TaskMetrics represents task execution metrics type TaskMetrics struct { - TotalTasks int `json:"total_tasks"` - PendingTasks int `json:"pending_tasks"` - RunningTasks int `json:"running_tasks"` - CompletedTasks int `json:"completed_tasks"` - FailedTasks int `json:"failed_tasks"` - AvgDurationSec float64 `json:"avg_duration_seconds"` + TotalTasks int `json:"total_tasks"` + PendingTasks int `json:"pending_tasks"` + RunningTasks int `json:"running_tasks"` + CompletedTasks int `json:"completed_tasks"` + FailedTasks int `json:"failed_tasks"` + AvgDurationSec float64 `json:"avg_duration_seconds"` } // APIMetrics represents API metrics type APIMetrics struct { - TotalRequests int64 `json:"total_requests"` - RequestsPerSec float64 `json:"requests_per_second"` - ErrorRate float64 `json:"error_rate"` - AvgLatencyMs float64 `json:"avg_latency_ms"` - ActiveConnections int `json:"active_connections"` + TotalRequests int64 `json:"total_requests"` + RequestsPerSec float64 `json:"requests_per_second"` + ErrorRate float64 `json:"error_rate"` + AvgLatencyMs float64 `json:"avg_latency_ms"` + ActiveConnections 
int `json:"active_connections"` } // MetricsService collects and provides system metrics type MetricsService struct { - db *database.DB - logger *logger.Logger - startTime time.Time + db *database.DB + logger *logger.Logger + startTime time.Time + lastCPU *cpuStats // For CPU usage calculation + lastCPUTime time.Time +} + +// cpuStats represents CPU statistics from /proc/stat +type cpuStats struct { + user uint64 + nice uint64 + system uint64 + idle uint64 + iowait uint64 + irq uint64 + softirq uint64 + steal uint64 + guest uint64 } // NewMetricsService creates a new metrics service @@ -115,6 +134,8 @@ func (s *MetricsService) CollectMetrics(ctx context.Context) (*Metrics, error) { sysMetrics, err := s.collectSystemMetrics(ctx) if err != nil { s.logger.Error("Failed to collect system metrics", "error", err) + // Set default/zero values if collection fails + metrics.System = SystemMetrics{} } else { metrics.System = *sysMetrics } @@ -167,21 +188,17 @@ func (s *MetricsService) CollectMetrics(ctx context.Context) (*Metrics, error) { // collectSystemMetrics collects system-level metrics func (s *MetricsService) collectSystemMetrics(ctx context.Context) (*SystemMetrics, error) { - var m runtime.MemStats - runtime.ReadMemStats(&m) + // Get system memory from /proc/meminfo + memoryTotal, memoryUsed, memoryPercent := s.getSystemMemory() - // Get memory info - memoryUsed := int64(m.Alloc) - memoryTotal := int64(m.Sys) - memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100 + // Get CPU usage from /proc/stat + cpuUsage := s.getCPUUsage() - // Uptime - uptime := time.Since(s.startTime).Seconds() + // Get system uptime from /proc/uptime + uptime := s.getSystemUptime() - // CPU and disk would require external tools or system calls - // For now, we'll use placeholders metrics := &SystemMetrics{ - CPUUsagePercent: 0.0, // Would need to read from /proc/stat + CPUUsagePercent: cpuUsage, MemoryUsed: memoryUsed, MemoryTotal: memoryTotal, MemoryPercent: memoryPercent, @@ 
-268,7 +285,7 @@ func (s *MetricsService) collectSCSTMetrics(ctx context.Context) (*SCSTMetrics, TotalTargets: totalTargets, TotalLUNs: totalLUNs, TotalInitiators: totalInitiators, - ActiveTargets: activeTargets, + ActiveTargets: activeTargets, }, nil } @@ -403,3 +420,232 @@ func (s *MetricsService) collectTaskMetrics(ctx context.Context) (*TaskMetrics, }, nil } +// getSystemUptime reads system uptime from /proc/uptime +// Returns uptime in seconds, or service uptime as fallback +func (s *MetricsService) getSystemUptime() float64 { + file, err := os.Open("/proc/uptime") + if err != nil { + // Fallback to service uptime if /proc/uptime is not available + s.logger.Warn("Failed to read /proc/uptime, using service uptime", "error", err) + return time.Since(s.startTime).Seconds() + } + defer file.Close() + + scanner := bufio.NewScanner(file) + if !scanner.Scan() { + // Fallback to service uptime if file is empty + s.logger.Warn("Failed to read /proc/uptime content, using service uptime") + return time.Since(s.startTime).Seconds() + } + + line := strings.TrimSpace(scanner.Text()) + fields := strings.Fields(line) + if len(fields) == 0 { + // Fallback to service uptime if no data + s.logger.Warn("No data in /proc/uptime, using service uptime") + return time.Since(s.startTime).Seconds() + } + + // First field is system uptime in seconds + uptimeSeconds, err := strconv.ParseFloat(fields[0], 64) + if err != nil { + // Fallback to service uptime if parsing fails + s.logger.Warn("Failed to parse /proc/uptime, using service uptime", "error", err) + return time.Since(s.startTime).Seconds() + } + + return uptimeSeconds +} + +// getSystemMemory reads system memory from /proc/meminfo +// Returns total, used (in bytes), and usage percentage +func (s *MetricsService) getSystemMemory() (int64, int64, float64) { + file, err := os.Open("/proc/meminfo") + if err != nil { + s.logger.Warn("Failed to read /proc/meminfo, using Go runtime memory", "error", err) + var m runtime.MemStats + 
runtime.ReadMemStats(&m) + memoryUsed := int64(m.Alloc) + memoryTotal := int64(m.Sys) + memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100 + return memoryTotal, memoryUsed, memoryPercent + } + defer file.Close() + + var memTotal, memAvailable, memFree, buffers, cached int64 + scanner := bufio.NewScanner(file) + + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line == "" { + continue + } + + // Parse line like "MemTotal: 16375596 kB" + // or "MemTotal: 16375596" (some systems don't have unit) + colonIdx := strings.Index(line, ":") + if colonIdx == -1 { + continue + } + + key := strings.TrimSpace(line[:colonIdx]) + valuePart := strings.TrimSpace(line[colonIdx+1:]) + + // Split value part to get number (ignore unit like "kB") + fields := strings.Fields(valuePart) + if len(fields) == 0 { + continue + } + + value, err := strconv.ParseInt(fields[0], 10, 64) + if err != nil { + continue + } + + // Values in /proc/meminfo are in KB, convert to bytes + valueBytes := value * 1024 + + switch key { + case "MemTotal": + memTotal = valueBytes + case "MemAvailable": + memAvailable = valueBytes + case "MemFree": + memFree = valueBytes + case "Buffers": + buffers = valueBytes + case "Cached": + cached = valueBytes + } + } + + if err := scanner.Err(); err != nil { + s.logger.Warn("Error scanning /proc/meminfo", "error", err) + } + + if memTotal == 0 { + s.logger.Warn("Failed to get MemTotal from /proc/meminfo, using Go runtime memory", "memTotal", memTotal) + var m runtime.MemStats + runtime.ReadMemStats(&m) + memoryUsed := int64(m.Alloc) + memoryTotal := int64(m.Sys) + memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100 + return memoryTotal, memoryUsed, memoryPercent + } + + // Calculate used memory + // If MemAvailable exists (kernel 3.14+), use it for more accurate calculation + var memoryUsed int64 + if memAvailable > 0 { + memoryUsed = memTotal - memAvailable + } else { + // Fallback: MemTotal - MemFree - Buffers - Cached + 
memoryUsed = memTotal - memFree - buffers - cached + if memoryUsed < 0 { + memoryUsed = memTotal - memFree + } + } + + memoryPercent := float64(memoryUsed) / float64(memTotal) * 100 + + s.logger.Debug("System memory stats", + "memTotal", memTotal, + "memAvailable", memAvailable, + "memoryUsed", memoryUsed, + "memoryPercent", memoryPercent) + + return memTotal, memoryUsed, memoryPercent +} + +// getCPUUsage reads CPU usage from /proc/stat +// Requires two readings to calculate percentage +func (s *MetricsService) getCPUUsage() float64 { + currentCPU, err := s.readCPUStats() + if err != nil { + s.logger.Warn("Failed to read CPU stats", "error", err) + return 0.0 + } + + // If this is the first reading, store it and return 0 + if s.lastCPU == nil { + s.lastCPU = currentCPU + s.lastCPUTime = time.Now() + return 0.0 + } + + // Calculate time difference + timeDiff := time.Since(s.lastCPUTime).Seconds() + if timeDiff < 0.1 { + // Too soon, return previous value or 0 + return 0.0 + } + + // Calculate total CPU time + prevTotal := s.lastCPU.user + s.lastCPU.nice + s.lastCPU.system + s.lastCPU.idle + + s.lastCPU.iowait + s.lastCPU.irq + s.lastCPU.softirq + s.lastCPU.steal + s.lastCPU.guest + currTotal := currentCPU.user + currentCPU.nice + currentCPU.system + currentCPU.idle + + currentCPU.iowait + currentCPU.irq + currentCPU.softirq + currentCPU.steal + currentCPU.guest + + // Calculate idle time + prevIdle := s.lastCPU.idle + s.lastCPU.iowait + currIdle := currentCPU.idle + currentCPU.iowait + + // Calculate used time + totalDiff := currTotal - prevTotal + idleDiff := currIdle - prevIdle + + if totalDiff == 0 { + return 0.0 + } + + // Calculate CPU usage percentage + usagePercent := 100.0 * (1.0 - float64(idleDiff)/float64(totalDiff)) + + // Update last CPU stats + s.lastCPU = currentCPU + s.lastCPUTime = time.Now() + + return usagePercent +} + +// readCPUStats reads CPU statistics from /proc/stat +func (s *MetricsService) readCPUStats() (*cpuStats, error) { + file, err := 
os.Open("/proc/stat") + if err != nil { + return nil, fmt.Errorf("failed to open /proc/stat: %w", err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + if !scanner.Scan() { + return nil, fmt.Errorf("failed to read /proc/stat") + } + + line := strings.TrimSpace(scanner.Text()) + if !strings.HasPrefix(line, "cpu ") { + return nil, fmt.Errorf("invalid /proc/stat format") + } + + fields := strings.Fields(line) + if len(fields) < 8 { + return nil, fmt.Errorf("insufficient CPU stats fields") + } + + stats := &cpuStats{} + stats.user, _ = strconv.ParseUint(fields[1], 10, 64) + stats.nice, _ = strconv.ParseUint(fields[2], 10, 64) + stats.system, _ = strconv.ParseUint(fields[3], 10, 64) + stats.idle, _ = strconv.ParseUint(fields[4], 10, 64) + stats.iowait, _ = strconv.ParseUint(fields[5], 10, 64) + stats.irq, _ = strconv.ParseUint(fields[6], 10, 64) + stats.softirq, _ = strconv.ParseUint(fields[7], 10, 64) + + if len(fields) > 8 { + stats.steal, _ = strconv.ParseUint(fields[8], 10, 64) + } + if len(fields) > 9 { + stats.guest, _ = strconv.ParseUint(fields[9], 10, 64) + } + + return stats, nil +} diff --git a/backend/internal/scst/handler.go b/backend/internal/scst/handler.go index 4433e5b..1a3349e 100644 --- a/backend/internal/scst/handler.go +++ b/backend/internal/scst/handler.go @@ -1,6 +1,7 @@ package scst import ( + "fmt" "net/http" "github.com/atlasos/calypso/internal/common/database" @@ -11,19 +12,19 @@ import ( // Handler handles SCST-related API requests type Handler struct { - service *Service + service *Service taskEngine *tasks.Engine - db *database.DB - logger *logger.Logger + db *database.DB + logger *logger.Logger } // NewHandler creates a new SCST handler func NewHandler(db *database.DB, log *logger.Logger) *Handler { return &Handler{ - service: NewService(db, log), + service: NewService(db, log), taskEngine: tasks.NewEngine(db, log), - db: db, - logger: log, + db: db, + logger: log, } } @@ -55,21 +56,34 @@ func (h *Handler) GetTarget(c 
*gin.Context) { } // Get LUNs - luns, _ := h.service.GetTargetLUNs(c.Request.Context(), targetID) + luns, err := h.service.GetTargetLUNs(c.Request.Context(), targetID) + if err != nil { + h.logger.Warn("Failed to get LUNs", "target_id", targetID, "error", err) + // Return empty array instead of nil + luns = []LUN{} + } + + // Get initiator groups + groups, err2 := h.service.GetTargetInitiatorGroups(c.Request.Context(), targetID) + if err2 != nil { + h.logger.Warn("Failed to get initiator groups", "target_id", targetID, "error", err2) + groups = []InitiatorGroup{} + } c.JSON(http.StatusOK, gin.H{ - "target": target, - "luns": luns, + "target": target, + "luns": luns, + "initiator_groups": groups, }) } // CreateTargetRequest represents a target creation request type CreateTargetRequest struct { - IQN string `json:"iqn" binding:"required"` - TargetType string `json:"target_type" binding:"required"` - Name string `json:"name" binding:"required"` - Description string `json:"description"` - SingleInitiatorOnly bool `json:"single_initiator_only"` + IQN string `json:"iqn" binding:"required"` + TargetType string `json:"target_type" binding:"required"` + Name string `json:"name" binding:"required"` + Description string `json:"description"` + SingleInitiatorOnly bool `json:"single_initiator_only"` } // CreateTarget creates a new SCST target @@ -83,13 +97,13 @@ func (h *Handler) CreateTarget(c *gin.Context) { userID, _ := c.Get("user_id") target := &Target{ - IQN: req.IQN, - TargetType: req.TargetType, - Name: req.Name, - Description: req.Description, - IsActive: true, + IQN: req.IQN, + TargetType: req.TargetType, + Name: req.Name, + Description: req.Description, + IsActive: true, SingleInitiatorOnly: req.SingleInitiatorOnly || req.TargetType == "vtl" || req.TargetType == "physical_tape", - CreatedBy: userID.(string), + CreatedBy: userID.(string), } if err := h.service.CreateTarget(c.Request.Context(), target); err != nil { @@ -103,9 +117,9 @@ func (h *Handler) CreateTarget(c 
*gin.Context) { // AddLUNRequest represents a LUN addition request type AddLUNRequest struct { - DeviceName string `json:"device_name" binding:"required"` - DevicePath string `json:"device_path" binding:"required"` - LUNNumber int `json:"lun_number" binding:"required"` + DeviceName string `json:"device_name" binding:"required"` + DevicePath string `json:"device_path" binding:"required"` + LUNNumber int `json:"lun_number" binding:"required"` HandlerType string `json:"handler_type" binding:"required"` } @@ -121,7 +135,15 @@ func (h *Handler) AddLUN(c *gin.Context) { var req AddLUNRequest if err := c.ShouldBindJSON(&req); err != nil { - c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + h.logger.Error("Failed to bind AddLUN request", "error", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("invalid request: %v", err)}) + return + } + + // Validate required fields + if req.DeviceName == "" || req.DevicePath == "" || req.HandlerType == "" { + h.logger.Error("Missing required fields in AddLUN request", "device_name", req.DeviceName, "device_path", req.DevicePath, "handler_type", req.HandlerType) + c.JSON(http.StatusBadRequest, gin.H{"error": "device_name, device_path, and handler_type are required"}) return } @@ -164,6 +186,110 @@ func (h *Handler) AddInitiator(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"message": "Initiator added successfully"}) } +// ListAllInitiators lists all initiators across all targets +func (h *Handler) ListAllInitiators(c *gin.Context) { + initiators, err := h.service.ListAllInitiators(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to list initiators", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list initiators"}) + return + } + + if initiators == nil { + initiators = []InitiatorWithTarget{} + } + + c.JSON(http.StatusOK, gin.H{"initiators": initiators}) +} + +// RemoveInitiator removes an initiator +func (h *Handler) RemoveInitiator(c *gin.Context) { + 
initiatorID := c.Param("id") + + if err := h.service.RemoveInitiator(c.Request.Context(), initiatorID); err != nil { + if err.Error() == "initiator not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "initiator not found"}) + return + } + h.logger.Error("Failed to remove initiator", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Initiator removed successfully"}) +} + +// GetInitiator retrieves an initiator by ID +func (h *Handler) GetInitiator(c *gin.Context) { + initiatorID := c.Param("id") + + initiator, err := h.service.GetInitiator(c.Request.Context(), initiatorID) + if err != nil { + if err.Error() == "initiator not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "initiator not found"}) + return + } + h.logger.Error("Failed to get initiator", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get initiator"}) + return + } + + c.JSON(http.StatusOK, initiator) +} + +// ListExtents lists all device extents +func (h *Handler) ListExtents(c *gin.Context) { + extents, err := h.service.ListExtents(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to list extents", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list extents"}) + return + } + + if extents == nil { + extents = []Extent{} + } + + c.JSON(http.StatusOK, gin.H{"extents": extents}) +} + +// CreateExtentRequest represents a request to create an extent +type CreateExtentRequest struct { + DeviceName string `json:"device_name" binding:"required"` + DevicePath string `json:"device_path" binding:"required"` + HandlerType string `json:"handler_type" binding:"required"` +} + +// CreateExtent creates a new device extent +func (h *Handler) CreateExtent(c *gin.Context) { + var req CreateExtentRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + if err 
:= h.service.CreateExtent(c.Request.Context(), req.DeviceName, req.DevicePath, req.HandlerType); err != nil { + h.logger.Error("Failed to create extent", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusCreated, gin.H{"message": "Extent created successfully"}) +} + +// DeleteExtent deletes a device extent +func (h *Handler) DeleteExtent(c *gin.Context) { + deviceName := c.Param("device") + + if err := h.service.DeleteExtent(c.Request.Context(), deviceName); err != nil { + h.logger.Error("Failed to delete extent", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Extent deleted successfully"}) +} + // ApplyConfig applies SCST configuration func (h *Handler) ApplyConfig(c *gin.Context) { userID, _ := c.Get("user_id") @@ -209,3 +335,142 @@ func (h *Handler) ListHandlers(c *gin.Context) { c.JSON(http.StatusOK, gin.H{"handlers": handlers}) } +// ListPortals lists all iSCSI portals +func (h *Handler) ListPortals(c *gin.Context) { + portals, err := h.service.ListPortals(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to list portals", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list portals"}) + return + } + + // Ensure we return an empty array instead of null + if portals == nil { + portals = []Portal{} + } + + c.JSON(http.StatusOK, gin.H{"portals": portals}) +} + +// CreatePortal creates a new portal +func (h *Handler) CreatePortal(c *gin.Context) { + var portal Portal + if err := c.ShouldBindJSON(&portal); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + if err := h.service.CreatePortal(c.Request.Context(), &portal); err != nil { + h.logger.Error("Failed to create portal", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusCreated, portal) +} + 
+// UpdatePortal updates a portal +func (h *Handler) UpdatePortal(c *gin.Context) { + id := c.Param("id") + + var portal Portal + if err := c.ShouldBindJSON(&portal); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"}) + return + } + + if err := h.service.UpdatePortal(c.Request.Context(), id, &portal); err != nil { + if err.Error() == "portal not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"}) + return + } + h.logger.Error("Failed to update portal", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, portal) +} + +// EnableTarget enables a target +func (h *Handler) EnableTarget(c *gin.Context) { + targetID := c.Param("id") + + target, err := h.service.GetTarget(c.Request.Context(), targetID) + if err != nil { + if err.Error() == "target not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "target not found"}) + return + } + h.logger.Error("Failed to get target", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get target"}) + return + } + + if err := h.service.EnableTarget(c.Request.Context(), target.IQN); err != nil { + h.logger.Error("Failed to enable target", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Target enabled successfully"}) +} + +// DisableTarget disables a target +func (h *Handler) DisableTarget(c *gin.Context) { + targetID := c.Param("id") + + target, err := h.service.GetTarget(c.Request.Context(), targetID) + if err != nil { + if err.Error() == "target not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "target not found"}) + return + } + h.logger.Error("Failed to get target", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get target"}) + return + } + + if err := h.service.DisableTarget(c.Request.Context(), target.IQN); err != nil { + 
h.logger.Error("Failed to disable target", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Target disabled successfully"}) +} + +// DeletePortal deletes a portal +func (h *Handler) DeletePortal(c *gin.Context) { + id := c.Param("id") + + if err := h.service.DeletePortal(c.Request.Context(), id); err != nil { + if err.Error() == "portal not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"}) + return + } + h.logger.Error("Failed to delete portal", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "Portal deleted successfully"}) +} + +// GetPortal retrieves a portal by ID +func (h *Handler) GetPortal(c *gin.Context) { + id := c.Param("id") + + portal, err := h.service.GetPortal(c.Request.Context(), id) + if err != nil { + if err.Error() == "portal not found" { + c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"}) + return + } + h.logger.Error("Failed to get portal", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get portal"}) + return + } + + c.JSON(http.StatusOK, portal) +} diff --git a/backend/internal/scst/service.go b/backend/internal/scst/service.go index 243fa03..8ec7e55 100644 --- a/backend/internal/scst/service.go +++ b/backend/internal/scst/service.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "fmt" + "os" "os/exec" "strings" "time" @@ -28,47 +29,70 @@ func NewService(db *database.DB, log *logger.Logger) *Service { // Target represents an SCST iSCSI target type Target struct { - ID string `json:"id"` - IQN string `json:"iqn"` - TargetType string `json:"target_type"` // 'disk', 'vtl', 'physical_tape' - Name string `json:"name"` - Description string `json:"description"` - IsActive bool `json:"is_active"` - SingleInitiatorOnly bool `json:"single_initiator_only"` - CreatedAt time.Time `json:"created_at"` - 
UpdatedAt time.Time `json:"updated_at"` - CreatedBy string `json:"created_by"` + ID string `json:"id"` + IQN string `json:"iqn"` + TargetType string `json:"target_type"` // 'disk', 'vtl', 'physical_tape' + Name string `json:"name"` + Description string `json:"description"` + IsActive bool `json:"is_active"` + SingleInitiatorOnly bool `json:"single_initiator_only"` + LUNCount int `json:"lun_count"` // Number of LUNs attached to this target + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + CreatedBy string `json:"created_by"` } // LUN represents an SCST LUN mapping type LUN struct { - ID string `json:"id"` - TargetID string `json:"target_id"` - LUNNumber int `json:"lun_number"` - DeviceName string `json:"device_name"` - DevicePath string `json:"device_path"` - HandlerType string `json:"handler_type"` - CreatedAt time.Time `json:"created_at"` + ID string `json:"id"` + TargetID string `json:"target_id"` + LUNNumber int `json:"lun_number"` + DeviceName string `json:"device_name"` + DevicePath string `json:"device_path"` + HandlerType string `json:"handler_type"` + Handler string `json:"handler"` // Alias for handler_type for frontend compatibility + DeviceType string `json:"device_type"` // Derived from handler_type + IsActive bool `json:"is_active"` // True if LUN exists in SCST + CreatedAt time.Time `json:"created_at"` } // InitiatorGroup represents an SCST initiator group type InitiatorGroup struct { - ID string `json:"id"` - TargetID string `json:"target_id"` - GroupName string `json:"group_name"` + ID string `json:"id"` + TargetID string `json:"target_id"` + GroupName string `json:"group_name"` Initiators []Initiator `json:"initiators"` - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"created_at"` } // Initiator represents an iSCSI initiator type Initiator struct { - ID string `json:"id"` - GroupID string `json:"group_id"` - IQN string `json:"iqn"` - IsActive bool `json:"is_active"` + ID string `json:"id"` + 
GroupID string `json:"group_id"` + IQN string `json:"iqn"` + IsActive bool `json:"is_active"` CreatedAt time.Time `json:"created_at"` } +// Portal represents an iSCSI network portal (IP address and port) +type Portal struct { + ID string `json:"id"` + IPAddress string `json:"ip_address"` // IPv4 or IPv6 address, or "0.0.0.0" for all interfaces + Port int `json:"port"` // Default 3260 for iSCSI + IsActive bool `json:"is_active"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Extent represents an SCST device extent (opened device) +type Extent struct { + HandlerType string `json:"handler_type"` + DeviceName string `json:"device_name"` + DevicePath string `json:"device_path"` // Path to the actual device/file + IsInUse bool `json:"is_in_use"` // True if device is used by any LUN + LUNCount int `json:"lun_count"` // Number of LUNs using this device +} + // CreateTarget creates a new SCST iSCSI target func (s *Service) CreateTarget(ctx context.Context, target *Target) error { // Validate IQN format @@ -79,15 +103,39 @@ func (s *Service) CreateTarget(ctx context.Context, target *Target) error { // Create target in SCST cmd := exec.CommandContext(ctx, "scstadmin", "-add_target", target.IQN, "-driver", "iscsi") output, err := cmd.CombinedOutput() + outputStr := string(output) + if err != nil { // Check if target already exists - if strings.Contains(string(output), "already exists") { + if strings.Contains(outputStr, "already exists") { s.logger.Warn("Target already exists in SCST", "iqn", target.IQN) } else { - return fmt.Errorf("failed to create SCST target: %s: %w", string(output), err) + // Check for common SCST errors + if strings.Contains(outputStr, "User space process is not connected") || + strings.Contains(outputStr, "iscsi-scstd") { + return fmt.Errorf("iSCSI daemon (iscsi-scstd) is not running. 
Please start it with: systemctl start iscsi-scstd") + } + if strings.Contains(outputStr, "Failed to add target") { + return fmt.Errorf("failed to add target to SCST: %s. Check dmesg for details", strings.TrimSpace(outputStr)) + } + return fmt.Errorf("failed to create SCST target: %s: %w", outputStr, err) } } + // Check for warnings in output even if command succeeded + if strings.Contains(outputStr, "WARNING") || strings.Contains(outputStr, "Failed to add target") { + s.logger.Warn("SCST warning during target creation", "iqn", target.IQN, "output", outputStr) + // Check dmesg for more details + dmesgCmd := exec.CommandContext(ctx, "dmesg", "-T", "-l", "err,warn") + if dmesgOutput, dmesgErr := dmesgCmd.CombinedOutput(); dmesgErr == nil { + recentErrors := string(dmesgOutput) + if strings.Contains(recentErrors, "iscsi-scst") { + return fmt.Errorf("iSCSI daemon (iscsi-scstd) is not running. Please start it with: systemctl start iscsi-scstd") + } + } + return fmt.Errorf("target creation completed but SCST reported an error: %s. 
Check dmesg for details", strings.TrimSpace(outputStr)) + } + // Insert into database query := ` INSERT INTO scst_targets ( @@ -237,13 +285,201 @@ func (s *Service) AddInitiator(ctx context.Context, targetIQN, initiatorIQN stri return nil } +// GetTargetInitiatorGroups retrieves all initiator groups for a target +func (s *Service) GetTargetInitiatorGroups(ctx context.Context, targetID string) ([]InitiatorGroup, error) { + // Get all groups for this target + query := ` + SELECT id, target_id, group_name, created_at + FROM scst_initiator_groups + WHERE target_id = $1 + ORDER BY group_name + ` + + rows, err := s.db.QueryContext(ctx, query, targetID) + if err != nil { + return nil, fmt.Errorf("failed to get initiator groups: %w", err) + } + defer rows.Close() + + var groups []InitiatorGroup + for rows.Next() { + var group InitiatorGroup + err := rows.Scan(&group.ID, &group.TargetID, &group.GroupName, &group.CreatedAt) + if err != nil { + s.logger.Error("Failed to scan initiator group", "error", err) + continue + } + + // Get initiators for this group + initiators, err := s.getGroupInitiators(ctx, group.ID) + if err != nil { + s.logger.Warn("Failed to get initiators for group", "group_id", group.ID, "error", err) + group.Initiators = []Initiator{} + } else { + group.Initiators = initiators + } + + groups = append(groups, group) + } + + return groups, rows.Err() +} + +// InitiatorWithTarget extends Initiator with target information +type InitiatorWithTarget struct { + Initiator + TargetID string `json:"target_id"` + TargetIQN string `json:"target_iqn"` + TargetName string `json:"target_name"` + GroupName string `json:"group_name"` +} + +// ListAllInitiators lists all initiators across all targets +func (s *Service) ListAllInitiators(ctx context.Context) ([]InitiatorWithTarget, error) { + query := ` + SELECT i.id, i.group_id, i.iqn, i.is_active, i.created_at, + ig.target_id, ig.group_name, t.iqn as target_iqn, t.name as target_name + FROM scst_initiators i + JOIN 
scst_initiator_groups ig ON i.group_id = ig.id + JOIN scst_targets t ON ig.target_id = t.id + ORDER BY t.iqn, ig.group_name, i.iqn + ` + + rows, err := s.db.QueryContext(ctx, query) + if err != nil { + return nil, fmt.Errorf("failed to list initiators: %w", err) + } + defer rows.Close() + + var initiators []InitiatorWithTarget + for rows.Next() { + var initiator InitiatorWithTarget + err := rows.Scan( + &initiator.ID, &initiator.GroupID, &initiator.IQN, + &initiator.IsActive, &initiator.CreatedAt, + &initiator.TargetID, &initiator.GroupName, &initiator.TargetIQN, &initiator.TargetName, + ) + if err != nil { + s.logger.Error("Failed to scan initiator", "error", err) + continue + } + initiators = append(initiators, initiator) + } + + return initiators, rows.Err() +} + +// RemoveInitiator removes an initiator from a target +func (s *Service) RemoveInitiator(ctx context.Context, initiatorID string) error { + // Get initiator info + var initiatorIQN, groupName, targetIQN string + err := s.db.QueryRowContext(ctx, ` + SELECT i.iqn, ig.group_name, t.iqn + FROM scst_initiators i + JOIN scst_initiator_groups ig ON i.group_id = ig.id + JOIN scst_targets t ON ig.target_id = t.id + WHERE i.id = $1 + `, initiatorID).Scan(&initiatorIQN, &groupName, &targetIQN) + if err != nil { + if err == sql.ErrNoRows { + return fmt.Errorf("initiator not found") + } + return fmt.Errorf("failed to get initiator: %w", err) + } + + // Remove from SCST + cmd := exec.CommandContext(ctx, "scstadmin", "-remove_init", initiatorIQN, + "-group", groupName, + "-target", targetIQN, + "-driver", "iscsi") + output, err := cmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if !strings.Contains(outputStr, "not found") { + return fmt.Errorf("failed to remove initiator from SCST: %s: %w", outputStr, err) + } + // If not found in SCST, continue to remove from database + } + + // Remove from database + _, err = s.db.ExecContext(ctx, "DELETE FROM scst_initiators WHERE id = $1", initiatorID) + if 
err != nil { + return fmt.Errorf("failed to remove initiator from database: %w", err) + } + + s.logger.Info("Initiator removed", "initiator", initiatorIQN, "target", targetIQN) + return nil +} + +// GetInitiator retrieves an initiator by ID with full details +func (s *Service) GetInitiator(ctx context.Context, initiatorID string) (*Initiator, error) { + query := ` + SELECT i.id, i.group_id, i.iqn, i.is_active, i.created_at, + ig.target_id, ig.group_name, t.iqn as target_iqn, t.name as target_name + FROM scst_initiators i + JOIN scst_initiator_groups ig ON i.group_id = ig.id + JOIN scst_targets t ON ig.target_id = t.id + WHERE i.id = $1 + ` + + var initiator Initiator + var targetID, groupName, targetIQN, targetName string + err := s.db.QueryRowContext(ctx, query, initiatorID).Scan( + &initiator.ID, &initiator.GroupID, &initiator.IQN, + &initiator.IsActive, &initiator.CreatedAt, + &targetID, &groupName, &targetIQN, &targetName, + ) + if err != nil { + if err == sql.ErrNoRows { + return nil, fmt.Errorf("initiator not found") + } + return nil, fmt.Errorf("failed to get initiator: %w", err) + } + + return &initiator, nil +} + +// getGroupInitiators retrieves all initiators for a group +func (s *Service) getGroupInitiators(ctx context.Context, groupID string) ([]Initiator, error) { + query := ` + SELECT id, group_id, iqn, is_active, created_at + FROM scst_initiators + WHERE group_id = $1 + ORDER BY iqn + ` + + rows, err := s.db.QueryContext(ctx, query, groupID) + if err != nil { + return nil, fmt.Errorf("failed to get initiators: %w", err) + } + defer rows.Close() + + var initiators []Initiator + for rows.Next() { + var initiator Initiator + err := rows.Scan(&initiator.ID, &initiator.GroupID, &initiator.IQN, &initiator.IsActive, &initiator.CreatedAt) + if err != nil { + s.logger.Error("Failed to scan initiator", "error", err) + continue + } + initiators = append(initiators, initiator) + } + + return initiators, rows.Err() +} + // ListTargets lists all SCST targets func 
(s *Service) ListTargets(ctx context.Context) ([]Target, error) { query := ` - SELECT id, iqn, target_type, name, description, is_active, - single_initiator_only, created_at, updated_at, created_by - FROM scst_targets - ORDER BY name + SELECT + t.id, t.iqn, t.target_type, t.name, t.description, t.is_active, + t.single_initiator_only, t.created_at, t.updated_at, t.created_by, + COALESCE(COUNT(l.id), 0) as lun_count + FROM scst_targets t + LEFT JOIN scst_luns l ON t.id = l.target_id + GROUP BY t.id, t.iqn, t.target_type, t.name, t.description, t.is_active, + t.single_initiator_only, t.created_at, t.updated_at, t.created_by + ORDER BY t.name ` rows, err := s.db.QueryContext(ctx, query) @@ -255,15 +491,20 @@ func (s *Service) ListTargets(ctx context.Context) ([]Target, error) { var targets []Target for rows.Next() { var target Target + var description sql.NullString err := rows.Scan( &target.ID, &target.IQN, &target.TargetType, &target.Name, - &target.Description, &target.IsActive, &target.SingleInitiatorOnly, + &description, &target.IsActive, &target.SingleInitiatorOnly, &target.CreatedAt, &target.UpdatedAt, &target.CreatedBy, + &target.LUNCount, ) if err != nil { s.logger.Error("Failed to scan target", "error", err) continue } + if description.Valid { + target.Description = description.String + } targets = append(targets, target) } @@ -280,9 +521,10 @@ func (s *Service) GetTarget(ctx context.Context, id string) (*Target, error) { ` var target Target + var description sql.NullString err := s.db.QueryRowContext(ctx, query, id).Scan( &target.ID, &target.IQN, &target.TargetType, &target.Name, - &target.Description, &target.IsActive, &target.SingleInitiatorOnly, + &description, &target.IsActive, &target.SingleInitiatorOnly, &target.CreatedAt, &target.UpdatedAt, &target.CreatedBy, ) if err != nil { @@ -292,11 +534,181 @@ func (s *Service) GetTarget(ctx context.Context, id string) (*Target, error) { return nil, fmt.Errorf("failed to get target: %w", err) } + if 
description.Valid { + target.Description = description.String + } + + // Sync enabled status from SCST + enabled, err := s.getTargetEnabledStatus(ctx, target.IQN) + if err == nil { + // Update database if status differs + if enabled != target.IsActive { + _, err = s.db.ExecContext(ctx, "UPDATE scst_targets SET is_active = $1 WHERE id = $2", enabled, target.ID) + if err != nil { + s.logger.Warn("Failed to update target status", "error", err) + } else { + target.IsActive = enabled + } + } + } + return &target, nil } +// getTargetEnabledStatus reads enabled status from SCST +func (s *Service) getTargetEnabledStatus(ctx context.Context, targetIQN string) (bool, error) { + // Read SCST config to check if target is enabled + cmd := exec.CommandContext(ctx, "scstadmin", "-write_config", "/tmp/scst_target_check.conf") + output, err := cmd.CombinedOutput() + if err != nil { + return false, fmt.Errorf("failed to write config: %s: %w", string(output), err) + } + + // Read config file + configData, err := os.ReadFile("/tmp/scst_target_check.conf") + if err != nil { + return false, fmt.Errorf("failed to read config: %w", err) + } + + // Check if target is enabled in config + // Format: TARGET iqn.2025-12.id.atlas:lun01 { enabled 1 } or { enabled 0 } + configStr := string(configData) + targetSection := fmt.Sprintf("TARGET %s", targetIQN) + if !strings.Contains(configStr, targetSection) { + return false, nil + } + + // Extract enabled status + lines := strings.Split(configStr, "\n") + inTargetSection := false + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.Contains(line, targetSection) { + inTargetSection = true + continue + } + if inTargetSection { + if strings.Contains(line, "enabled 1") { + return true, nil + } + if strings.Contains(line, "enabled 0") { + return false, nil + } + if strings.HasPrefix(line, "TARGET") { + // Next target, stop searching + break + } + } + } + + // Default to enabled if target exists but status not found + return true, 
nil +} + +// EnableTarget enables a target in SCST +func (s *Service) EnableTarget(ctx context.Context, targetIQN string) error { + cmd := exec.CommandContext(ctx, "scstadmin", "-enable_target", targetIQN, "-driver", "iscsi") + output, err := cmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if strings.Contains(outputStr, "already enabled") { + return nil // Already enabled, no error + } + return fmt.Errorf("failed to enable target: %s: %w", outputStr, err) + } + + // Update database + var targetID string + err = s.db.QueryRowContext(ctx, "SELECT id FROM scst_targets WHERE iqn = $1", targetIQN).Scan(&targetID) + if err == nil { + s.db.ExecContext(ctx, "UPDATE scst_targets SET is_active = true WHERE id = $1", targetID) + } + + s.logger.Info("Target enabled", "iqn", targetIQN) + return nil +} + +// DisableTarget disables a target in SCST +func (s *Service) DisableTarget(ctx context.Context, targetIQN string) error { + cmd := exec.CommandContext(ctx, "scstadmin", "-disable_target", targetIQN, "-driver", "iscsi") + output, err := cmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if strings.Contains(outputStr, "already disabled") { + return nil // Already disabled, no error + } + return fmt.Errorf("failed to disable target: %s: %w", outputStr, err) + } + + // Update database + var targetID string + err = s.db.QueryRowContext(ctx, "SELECT id FROM scst_targets WHERE iqn = $1", targetIQN).Scan(&targetID) + if err == nil { + s.db.ExecContext(ctx, "UPDATE scst_targets SET is_active = false WHERE id = $1", targetID) + } + + s.logger.Info("Target disabled", "iqn", targetIQN) + return nil +} + // GetTargetLUNs retrieves all LUNs for a target +// It reads from SCST first, then syncs with database func (s *Service) GetTargetLUNs(ctx context.Context, targetID string) ([]LUN, error) { + // Get target IQN + var targetIQN string + err := s.db.QueryRowContext(ctx, "SELECT iqn FROM scst_targets WHERE id = $1", targetID).Scan(&targetIQN) + if 
err != nil { + return nil, fmt.Errorf("failed to get target IQN: %w", err) + } + + // Read LUNs from SCST + scstLUNs, err := s.readLUNsFromSCST(ctx, targetIQN) + if err != nil { + s.logger.Warn("Failed to read LUNs from SCST, using database only", "error", err) + // Fallback to database only + return s.getLUNsFromDatabase(ctx, targetID) + } + + // Sync SCST LUNs with database + for _, scstLUN := range scstLUNs { + // Get device info from SCST + devicePath, handlerType, err := s.getDeviceInfo(ctx, scstLUN.DeviceName) + if err != nil { + s.logger.Warn("Failed to get device info", "device", scstLUN.DeviceName, "error", err) + // Try to get from existing database record if available + var existingPath, existingHandler string + s.db.QueryRowContext(ctx, + "SELECT device_path, handler_type FROM scst_luns WHERE target_id = $1 AND lun_number = $2", + targetID, scstLUN.LUNNumber, + ).Scan(&existingPath, &existingHandler) + if existingPath != "" { + devicePath = existingPath + } + if existingHandler != "" { + handlerType = existingHandler + } + } + + // Upsert into database + _, err = s.db.ExecContext(ctx, ` + INSERT INTO scst_luns (target_id, lun_number, device_name, device_path, handler_type) + VALUES ($1, $2, $3, $4, $5) + ON CONFLICT (target_id, lun_number) DO UPDATE SET + device_name = EXCLUDED.device_name, + device_path = COALESCE(NULLIF(EXCLUDED.device_path, ''), scst_luns.device_path), + handler_type = COALESCE(NULLIF(EXCLUDED.handler_type, ''), scst_luns.handler_type) + `, targetID, scstLUN.LUNNumber, scstLUN.DeviceName, devicePath, handlerType) + if err != nil { + s.logger.Warn("Failed to sync LUN to database", "lun", scstLUN.LUNNumber, "error", err) + } + } + + // Return from database (now synced) + return s.getLUNsFromDatabase(ctx, targetID) +} + +// getLUNsFromDatabase retrieves LUNs from database only +func (s *Service) getLUNsFromDatabase(ctx context.Context, targetID string) ([]LUN, error) { query := ` SELECT id, target_id, lun_number, device_name, 
device_path, handler_type, created_at
		FROM scst_luns
		WHERE target_id = $1
		ORDER BY lun_number
	`

	// NOTE(review): the lines between the SELECT list and the scan-error
	// handling were hidden by the reviewed hunk; the query tail and the
	// query/scan loop below follow the file's other list helpers — confirm
	// against the original.
	rows, err := s.db.QueryContext(ctx, query, targetID)
	if err != nil {
		return nil, fmt.Errorf("failed to get LUNs: %w", err)
	}
	defer rows.Close()

	var luns []LUN
	for rows.Next() {
		var lun LUN
		err := rows.Scan(&lun.ID, &lun.TargetID, &lun.LUNNumber, &lun.DeviceName,
			&lun.DevicePath, &lun.HandlerType, &lun.CreatedAt)
		if err != nil {
			s.logger.Error("Failed to scan LUN", "error", err)
			continue
		}

		// Set handler and device_type for frontend compatibility
		lun.Handler = lun.HandlerType
		// Map handler type to user-friendly device type label
		lun.DeviceType = s.getDeviceTypeLabel(lun.HandlerType)
		// LUN is active if it exists in database (we sync from SCST, so if it's here, it's active)
		lun.IsActive = true

		luns = append(luns, lun)
	}

	return luns, rows.Err()
}

// readLUNsFromSCST reads LUNs directly from SCST using scstadmin -list_group
func (s *Service) readLUNsFromSCST(ctx context.Context, targetIQN string) ([]LUN, error) {
	cmd := exec.CommandContext(ctx, "scstadmin", "-list_group",
		"-target", targetIQN,
		"-driver", "iscsi")
	output, err := cmd.CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("failed to list group: %s: %w", string(output), err)
	}

	// Expected output shape:
	//
	//   Driver: iscsi
	//   Target: iqn.2025-12.id.atlas:lun01
	//
	//   Assigned LUNs:
	//
	//   LUN  Device
	//   ----------
	//   1    LUN01
	var (
		result    []LUN
		inSection bool
	)
	for _, raw := range strings.Split(string(output), "\n") {
		trimmed := strings.TrimSpace(raw)

		switch {
		case strings.Contains(trimmed, "Assigned LUNs:"):
			inSection = true
			continue
		case strings.HasPrefix(trimmed, "---"):
			// Table separator row.
			continue
		case strings.HasPrefix(trimmed, "LUN") && strings.Contains(trimmed, "Device"):
			// Table header row.
			continue
		}

		// Rows in the table look like "1   LUN01".
		if inSection && trimmed != "" {
			cols := strings.Fields(trimmed)
			if len(cols) >= 2 {
				var lunNumber int
				if _, scanErr := fmt.Sscanf(cols[0], "%d", &lunNumber); scanErr == nil {
					result = append(result, LUN{
						LUNNumber:   lunNumber,
						DeviceName:  cols[1],
						DevicePath:  "", // Will be filled by getDeviceInfo
						HandlerType: "", // Will be filled by getDeviceInfo
					})
				}
			}
		}

		// scstadmin prints "All done" when finished; stop there.
		if strings.Contains(trimmed, "All done") {
			break
		}
	}

	return result, nil
}

// getDeviceInfo gets device path and handler type from SCST
// Since scstadmin doesn't provide easy access to device attributes,
// we try to get handler type from device list, and device path from database if available
func (s *Service) getDeviceInfo(ctx context.Context, deviceName string) (string, string, error) {
	// Find which handler this device belongs to.
	listCmd := exec.CommandContext(ctx, "scstadmin", "-list_device")
	output, err := listCmd.Output()
	if err != nil {
		return "", "", fmt.Errorf("failed to list devices: %w", err)
	}

	// Output shape:
	//   Handler        Device
	//   -----------------------
	//   vdisk_fileio   LUN01
	var (
		handlerType string
		inTable     bool
	)
	for _, raw := range strings.Split(string(output), "\n") {
		trimmed := strings.TrimSpace(raw)

		if strings.Contains(trimmed, "Handler") && strings.Contains(trimmed, "Device") {
			inTable = true
			continue
		}
		if strings.HasPrefix(trimmed, "---") {
			continue
		}

		// Data rows, skipping scstadmin's chatter lines.
		if inTable && trimmed != "" && !strings.Contains(trimmed, "Collecting") && !strings.Contains(trimmed, "All done") {
			cols := strings.Fields(trimmed)
			if len(cols) >= 2 && cols[1] == deviceName {
				handlerType = cols[0]
				break
			}
		}

		if strings.Contains(trimmed, "All done") {
			break
		}
	}

	if handlerType == "" {
		return "", "", fmt.Errorf("handler not found for device %s", deviceName)
	}

	// The device path is not in the listing; read it from the SCST config.
	devicePath := s.getDevicePathFromConfig(deviceName, handlerType)

	return devicePath, handlerType, nil
}

//
getDevicePathFromConfig reads device path from SCST config file +func (s *Service) getDevicePathFromConfig(deviceName, handlerType string) string { + // Write current config to temp file + cmd := exec.Command("scstadmin", "-write_config", "/tmp/scst_device_info.conf") + if err := cmd.Run(); err != nil { + s.logger.Warn("Failed to write SCST config", "error", err) + return "" + } + + // Read config file + configData, err := os.ReadFile("/tmp/scst_device_info.conf") + if err != nil { + s.logger.Warn("Failed to read SCST config", "error", err) + return "" + } + + // Parse config to find device + // Format: + // DEVICE deviceName { + // filename /path/to/device + // } + lines := strings.Split(string(configData), "\n") + inDeviceBlock := false + var devicePath string + + for _, line := range lines { + line = strings.TrimSpace(line) + + // Check if we're entering the device block + if strings.Contains(line, "DEVICE") && strings.Contains(line, deviceName) { + inDeviceBlock = true + continue + } + + // Check if we're leaving the device block + if inDeviceBlock && strings.HasPrefix(line, "}") { + break + } + + // Look for filename in device block + if inDeviceBlock && strings.Contains(line, "filename") { + parts := strings.Fields(line) + for j, part := range parts { + if part == "filename" && j+1 < len(parts) { + devicePath = parts[j+1] + break + } + } + if devicePath != "" { + break + } + } + } + + return devicePath +} + +// ListExtents lists all device extents (opened devices) in SCST +func (s *Service) ListExtents(ctx context.Context) ([]Extent, error) { + // List all devices from SCST + cmd := exec.CommandContext(ctx, "scstadmin", "-list_device") + output, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("failed to list devices: %w", err) + } + + // Parse output + // Format: + // Handler Device + // ----------------------- + // vdisk_fileio LUN01 + lines := strings.Split(string(output), "\n") + var extents []Extent + inHandlerSection := false + + for _, 
line := range lines { + line = strings.TrimSpace(line) + + // Check if we're in the handler section + if strings.Contains(line, "Handler") && strings.Contains(line, "Device") { + inHandlerSection = true + continue + } + + // Skip separator + if strings.HasPrefix(line, "---") { + continue + } + + // Skip header/footer lines + if strings.Contains(line, "Collecting") || strings.Contains(line, "All done") { + continue + } + + // Parse handler and device lines + if inHandlerSection && line != "" { + parts := strings.Fields(line) + if len(parts) >= 2 { + handlerType := parts[0] + deviceName := parts[1] + + // Skip if device is "-" (no device opened for this handler) + if deviceName == "-" { + continue + } + + // Get device path from config or database + devicePath := s.getDevicePathFromConfig(deviceName, handlerType) + if devicePath == "" { + // Try to get from database + var dbPath string + s.db.QueryRowContext(ctx, + "SELECT device_path FROM scst_luns WHERE device_name = $1 LIMIT 1", + deviceName, + ).Scan(&dbPath) + if dbPath != "" { + devicePath = dbPath + } + } + + // Count how many LUNs use this device + var lunCount int + s.db.QueryRowContext(ctx, + "SELECT COUNT(*) FROM scst_luns WHERE device_name = $1", + deviceName, + ).Scan(&lunCount) + + extents = append(extents, Extent{ + HandlerType: handlerType, + DeviceName: deviceName, + DevicePath: devicePath, + IsInUse: lunCount > 0, + LUNCount: lunCount, + }) + } + } + } + + return extents, nil +} + +// CreateExtent opens a device in SCST (creates an extent) +func (s *Service) CreateExtent(ctx context.Context, deviceName, devicePath, handlerType string) error { + // Validate handler type + handlers, err := s.DetectHandlers(ctx) + if err != nil { + return fmt.Errorf("failed to detect handlers: %w", err) + } + + handlerValid := false + for _, h := range handlers { + if h.Name == handlerType { + handlerValid = true + break + } + } + + if !handlerValid { + return fmt.Errorf("invalid handler type: %s", handlerType) + } + + 
// Open device in SCST + openCmd := exec.CommandContext(ctx, "scstadmin", "-open_dev", deviceName, + "-handler", handlerType, + "-attributes", fmt.Sprintf("filename=%s", devicePath)) + output, err := openCmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if strings.Contains(outputStr, "already exists") { + return fmt.Errorf("device %s already exists", deviceName) + } + return fmt.Errorf("failed to open device: %s: %w", outputStr, err) + } + + s.logger.Info("Extent created", "device", deviceName, "handler", handlerType, "path", devicePath) + return nil +} + +// DeleteExtent closes a device in SCST (removes an extent) +func (s *Service) DeleteExtent(ctx context.Context, deviceName string) error { + // Check if device is in use by any LUN + var lunCount int + err := s.db.QueryRowContext(ctx, + "SELECT COUNT(*) FROM scst_luns WHERE device_name = $1", + deviceName, + ).Scan(&lunCount) + if err == nil && lunCount > 0 { + return fmt.Errorf("device %s is in use by %d LUN(s). 
Remove LUNs first", deviceName, lunCount) + } + + // Close device in SCST + closeCmd := exec.CommandContext(ctx, "scstadmin", "-close_dev", deviceName) + output, err := closeCmd.CombinedOutput() + if err != nil { + outputStr := string(output) + if strings.Contains(outputStr, "not found") { + return fmt.Errorf("device %s not found", deviceName) + } + return fmt.Errorf("failed to close device: %s: %w", outputStr, err) + } + + s.logger.Info("Extent deleted", "device", deviceName) + return nil +} + +// getDeviceTypeLabel returns a user-friendly label for device type based on handler type +func (s *Service) getDeviceTypeLabel(handlerType string) string { + deviceTypeMap := map[string]string{ + "vdisk_fileio": "ZFS Volume", + "vdisk_blockio": "Block Device", + "vdisk_nullio": "Null Device", + "vcdrom": "CDROM", + "dev_cdrom": "CDROM", + "dev_disk": "Physical Disk", + "dev_disk_perf": "Physical Disk (Performance)", + } + + if label, ok := deviceTypeMap[handlerType]; ok { + return label + } + + // Default: return handler type as-is + return handlerType +} + // WriteConfig writes SCST configuration to file func (s *Service) WriteConfig(ctx context.Context, configPath string) error { cmd := exec.CommandContext(ctx, "scstadmin", "-write_config", configPath) @@ -339,24 +1116,244 @@ func (s *Service) WriteConfig(ctx context.Context, configPath string) error { return nil } +// HandlerInfo represents SCST handler information +type HandlerInfo struct { + Name string `json:"name"` + Label string `json:"label"` // Simple, user-friendly label + Description string `json:"description,omitempty"` +} + // DetectHandlers detects available SCST handlers -func (s *Service) DetectHandlers(ctx context.Context) ([]string, error) { +func (s *Service) DetectHandlers(ctx context.Context) ([]HandlerInfo, error) { cmd := exec.CommandContext(ctx, "scstadmin", "-list_handler") output, err := cmd.Output() if err != nil { return nil, fmt.Errorf("failed to list handlers: %w", err) } - // Parse output 
(simplified - actual parsing would be more robust) - handlers := []string{} + // Parse output - skip header lines and separator + handlers := []HandlerInfo{} lines := strings.Split(string(output), "\n") + skipHeader := true + for _, line := range lines { line = strings.TrimSpace(line) - if line != "" && !strings.HasPrefix(line, "Handler") { - handlers = append(handlers, line) + + // Skip empty lines + if line == "" { + continue } + + // Skip header line "Handler" + if strings.HasPrefix(line, "Handler") { + skipHeader = false + continue + } + + // Skip separator line "-------------" + if strings.HasPrefix(line, "---") { + continue + } + + // Skip lines before header (like "Collecting current configuration: done.") + if skipHeader { + continue + } + + // Skip footer lines + if strings.Contains(line, "All done") || strings.Contains(line, "Collecting") { + continue + } + + // Map handler names to labels and descriptions + label, description := s.getHandlerInfo(line) + handlers = append(handlers, HandlerInfo{ + Name: line, + Label: label, + Description: description, + }) } return handlers, nil } +// getHandlerInfo returns a simple label and description for a handler +func (s *Service) getHandlerInfo(handlerName string) (string, string) { + handlerInfo := map[string]struct { + label string + description string + }{ + "dev_disk": { + label: "Physical Disk", + description: "Physical disk handler", + }, + "dev_disk_perf": { + label: "Physical Disk (Performance)", + description: "Physical disk handler with performance optimizations", + }, + "dev_cdrom": { + label: "CDROM", + description: "CD/DVD-ROM handler", + }, + "vdisk_blockio": { + label: "Block Device", + description: "Virtual disk block I/O handler (for block devices)", + }, + "vdisk_fileio": { + label: "Volume", + description: "Virtual disk file I/O handler (for ZFS volumes and files)", + }, + "vdisk_nullio": { + label: "Null Device", + description: "Null I/O handler (for testing)", + }, + "vcdrom": { + label: 
"CDROM", + description: "Virtual CD-ROM handler", + }, + } + + if info, ok := handlerInfo[handlerName]; ok { + return info.label, info.description + } + + // Default: use handler name as label + return handlerName, "" +} + +// ListPortals lists all iSCSI portals +func (s *Service) ListPortals(ctx context.Context) ([]Portal, error) { + query := ` + SELECT id, ip_address, port, is_active, created_at, updated_at + FROM scst_portals + ORDER BY ip_address, port + ` + + rows, err := s.db.QueryContext(ctx, query) + if err != nil { + return nil, fmt.Errorf("failed to list portals: %w", err) + } + defer rows.Close() + + var portals []Portal + for rows.Next() { + var portal Portal + err := rows.Scan( + &portal.ID, &portal.IPAddress, &portal.Port, + &portal.IsActive, &portal.CreatedAt, &portal.UpdatedAt, + ) + if err != nil { + s.logger.Error("Failed to scan portal", "error", err) + continue + } + portals = append(portals, portal) + } + + return portals, rows.Err() +} + +// CreatePortal creates a new iSCSI portal +func (s *Service) CreatePortal(ctx context.Context, portal *Portal) error { + // Validate IP address format (basic validation) + if portal.IPAddress == "" { + return fmt.Errorf("IP address is required") + } + + // Validate port range + if portal.Port < 1 || portal.Port > 65535 { + return fmt.Errorf("port must be between 1 and 65535") + } + + // Default port to 3260 if not specified + if portal.Port == 0 { + portal.Port = 3260 + } + + // Insert into database + query := ` + INSERT INTO scst_portals (ip_address, port, is_active) + VALUES ($1, $2, $3) + RETURNING id, created_at, updated_at + ` + + err := s.db.QueryRowContext(ctx, query, + portal.IPAddress, portal.Port, portal.IsActive, + ).Scan(&portal.ID, &portal.CreatedAt, &portal.UpdatedAt) + if err != nil { + return fmt.Errorf("failed to create portal: %w", err) + } + + s.logger.Info("Portal created", "ip", portal.IPAddress, "port", portal.Port) + return nil +} + +// UpdatePortal updates an existing portal +func (s 
*Service) UpdatePortal(ctx context.Context, id string, portal *Portal) error { + // Validate port range + if portal.Port < 1 || portal.Port > 65535 { + return fmt.Errorf("port must be between 1 and 65535") + } + + query := ` + UPDATE scst_portals + SET ip_address = $1, port = $2, is_active = $3, updated_at = NOW() + WHERE id = $4 + RETURNING updated_at + ` + + err := s.db.QueryRowContext(ctx, query, + portal.IPAddress, portal.Port, portal.IsActive, id, + ).Scan(&portal.UpdatedAt) + if err != nil { + if err == sql.ErrNoRows { + return fmt.Errorf("portal not found") + } + return fmt.Errorf("failed to update portal: %w", err) + } + + s.logger.Info("Portal updated", "id", id, "ip", portal.IPAddress, "port", portal.Port) + return nil +} + +// DeletePortal deletes a portal +func (s *Service) DeletePortal(ctx context.Context, id string) error { + result, err := s.db.ExecContext(ctx, "DELETE FROM scst_portals WHERE id = $1", id) + if err != nil { + return fmt.Errorf("failed to delete portal: %w", err) + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return fmt.Errorf("failed to get rows affected: %w", err) + } + + if rowsAffected == 0 { + return fmt.Errorf("portal not found") + } + + s.logger.Info("Portal deleted", "id", id) + return nil +} + +// GetPortal retrieves a portal by ID +func (s *Service) GetPortal(ctx context.Context, id string) (*Portal, error) { + query := ` + SELECT id, ip_address, port, is_active, created_at, updated_at + FROM scst_portals + WHERE id = $1 + ` + + var portal Portal + err := s.db.QueryRowContext(ctx, query, id).Scan( + &portal.ID, &portal.IPAddress, &portal.Port, + &portal.IsActive, &portal.CreatedAt, &portal.UpdatedAt, + ) + if err != nil { + if err == sql.ErrNoRows { + return nil, fmt.Errorf("portal not found") + } + return nil, fmt.Errorf("failed to get portal: %w", err) + } + + return &portal, nil +} diff --git a/deploy/systemd/calypso-logger.service b/deploy/systemd/calypso-logger.service new file mode 100644 index 
0000000..37e9285 --- /dev/null +++ b/deploy/systemd/calypso-logger.service @@ -0,0 +1,25 @@ +[Unit] +Description=Calypso Stack Log Aggregator +Documentation=https://github.com/atlasos/calypso +After=network.target +Wants=calypso-api.service calypso-frontend.service + +[Service] +Type=simple +# Run as root to access journald and write to /var/syslog +# Format: timestamp [service] message +ExecStart=/bin/bash -c '/usr/bin/journalctl -u calypso-api.service -u calypso-frontend.service -f --no-pager -o short-iso >> /var/syslog/calypso.log 2>&1' +Restart=always +RestartSec=5 + +# Security hardening +NoNewPrivileges=false +PrivateTmp=true +ReadWritePaths=/var/syslog + +# Resource limits +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target + diff --git a/docs/services.md b/docs/services.md new file mode 100644 index 0000000..97588a9 --- /dev/null +++ b/docs/services.md @@ -0,0 +1,126 @@ +# Calypso Appliance Services Documentation + +This document provides an overview of all services that form the Calypso backup appliance. + +## Core Calypso Services + +### calypso-api.service +**Status**: Running +**Description**: AtlasOS Calypso API Service (Development) +**Purpose**: Main REST API backend for the Calypso appliance, handles all business logic and database operations. +**Binary**: `/development/calypso/backend/bin/calypso-api` +**Config**: `/development/calypso/backend/config.yaml.example` + +### calypso-frontend.service +**Status**: Running +**Description**: Calypso Frontend Development Server +**Purpose**: Web UI for managing backups, storage, and monitoring the appliance. +**Port**: 3000 +**Technology**: React + Vite (development mode) + +## Backup Services (Bacula) + +### bacula-director.service +**Status**: Running +**Description**: Bacula Director Daemon +**Purpose**: Central management daemon that orchestrates all backup, restore, and verify operations. 
+**Config**: `/etc/bacula/bacula-dir.conf` +**Docs**: `man:bacula-dir(8)` + +### bacula-sd.service +**Status**: Running +**Description**: Bacula Storage Daemon +**Purpose**: Manages physical backup storage devices (disks, tapes, virtual tape libraries). +**Config**: `/etc/bacula/bacula-sd.conf` + +### bacula-fd.service +**Status**: Running +**Description**: Bacula File Daemon +**Purpose**: Runs on systems being backed up, manages file access and metadata. +**Config**: `/etc/bacula/bacula-fd.conf` + +## Storage/iSCSI Services (SCST) + +### scst.service +**Status**: Active (exited) +**Description**: SCST - A Generic SCSI Target Subsystem +**Purpose**: Kernel-level SCSI target framework providing high-performance storage exports. +**Type**: One-shot service that loads SCST kernel modules + +### iscsi-scstd.service +**Status**: Running +**Description**: iSCSI SCST Target Daemon +**Purpose**: Provides iSCSI protocol support for SCST, allowing network block storage exports. +**Port**: 3260 (standard iSCSI port) +**Configured Targets**: +- `iqn.2025-12.id.atlas:lun01` (enabled) + +### iscsid.service +**Status**: Inactive +**Description**: iSCSI initiator daemon +**Purpose**: Client-side iSCSI service (not currently in use) + +### open-iscsi.service +**Status**: Inactive +**Description**: Login to default iSCSI targets +**Purpose**: Automatic iSCSI target login (not currently in use) + +## Virtual Tape Library + +### mhvtl-load-modules.service +**Status**: Active (exited) +**Description**: Load mhvtl modules +**Purpose**: Loads mhVTL (virtual tape library) kernel modules for tape emulation. 
+**Type**: One-shot service that runs at boot +**Docs**: `man:vtltape(1)`, `man:vtllibrary(1)` + +## Database + +### postgresql.service +**Status**: Active (exited) +**Description**: PostgreSQL RDBMS +**Purpose**: Parent service for PostgreSQL database management + +### postgresql@16-main.service +**Status**: Running +**Description**: PostgreSQL Cluster 16-main +**Purpose**: Main database for Calypso API, stores configuration, jobs, and metadata. +**Version**: PostgreSQL 16 + +## Service Management + +### Check All Services Status +```bash +systemctl status calypso-api calypso-frontend bacula-director bacula-sd bacula-fd scst iscsi-scstd mhvtl-load-modules postgresql +``` + +### Rebuild and Restart Core Services +```bash +/development/calypso/scripts/rebuild-and-restart.sh +``` + +### Restart Individual Services +```bash +systemctl restart calypso-api.service +systemctl restart calypso-frontend.service +systemctl restart bacula-director.service +``` + +## Service Dependencies + +``` +PostgreSQL + └── Calypso API + └── Calypso Frontend + +SCST + └── iSCSI SCST Target Daemon + +mhVTL + └── Bacula Storage Daemon + └── Bacula Director + └── Bacula File Daemon +``` + +## Total Service Count +**11 services** forming the complete Calypso backup appliance stack. 
diff --git a/frontend/src/api/backup.ts b/frontend/src/api/backup.ts new file mode 100644 index 0000000..c59313b --- /dev/null +++ b/frontend/src/api/backup.ts @@ -0,0 +1,75 @@ +import apiClient from './client' + +export interface BackupJob { + id: string + job_id: number + job_name: string + client_name: string + job_type: string + job_level: string + status: 'Running' | 'Completed' | 'Failed' | 'Canceled' | 'Waiting' + bytes_written: number + files_written: number + duration_seconds?: number + started_at?: string + ended_at?: string + error_message?: string + storage_name?: string + pool_name?: string + volume_name?: string + created_at: string + updated_at: string +} + +export interface ListJobsResponse { + jobs: BackupJob[] + total: number + limit: number + offset: number +} + +export interface ListJobsParams { + status?: string + job_type?: string + client_name?: string + job_name?: string + limit?: number + offset?: number +} + +export interface CreateJobRequest { + job_name: string + client_name: string + job_type: string + job_level: string + storage_name?: string + pool_name?: string +} + +export const backupAPI = { + listJobs: async (params?: ListJobsParams): Promise => { + const queryParams = new URLSearchParams() + if (params?.status) queryParams.append('status', params.status) + if (params?.job_type) queryParams.append('job_type', params.job_type) + if (params?.client_name) queryParams.append('client_name', params.client_name) + if (params?.job_name) queryParams.append('job_name', params.job_name) + if (params?.limit) queryParams.append('limit', params.limit.toString()) + if (params?.offset) queryParams.append('offset', params.offset.toString()) + + const response = await apiClient.get( + `/backup/jobs${queryParams.toString() ? 
`?${queryParams.toString()}` : ''}` + ) + return response.data + }, + + getJob: async (id: string): Promise => { + const response = await apiClient.get(`/backup/jobs/${id}`) + return response.data + }, + + createJob: async (data: CreateJobRequest): Promise => { + const response = await apiClient.post('/backup/jobs', data) + return response.data + }, +} + diff --git a/frontend/src/api/scst.ts b/frontend/src/api/scst.ts index cc47a9f..d7632a7 100644 --- a/frontend/src/api/scst.ts +++ b/frontend/src/api/scst.ts @@ -9,6 +9,7 @@ export interface SCSTTarget { iqn: string alias?: string is_active: boolean + lun_count?: number created_at: string updated_at: string } @@ -31,7 +32,11 @@ export interface SCSTInitiator { iqn: string is_active: boolean created_at: string - updated_at: string + updated_at?: string + target_id?: string + target_iqn?: string + target_name?: string + group_name?: string } export interface SCSTInitiatorGroup { @@ -45,9 +50,19 @@ export interface SCSTInitiatorGroup { export interface SCSTHandler { name: string + label: string description?: string } +export interface SCSTPortal { + id: string + ip_address: string + port: number + is_active: boolean + created_at: string + updated_at: string +} + export interface CreateTargetRequest { iqn: string target_type: string @@ -80,6 +95,7 @@ export const scstAPI = { getTarget: async (id: string): Promise<{ target: SCSTTarget luns: SCSTLUN[] + initiator_groups?: SCSTInitiatorGroup[] }> => { const response = await apiClient.get(`/scst/targets/${id}`) return response.data @@ -87,7 +103,8 @@ export const scstAPI = { createTarget: async (data: CreateTargetRequest): Promise => { const response = await apiClient.post('/scst/targets', data) - return response.data.target + // Backend returns target directly, not wrapped in { target: ... 
} + return response.data }, addLUN: async (targetId: string, data: AddLUNRequest): Promise<{ task_id: string }> => { @@ -109,5 +126,81 @@ export const scstAPI = { const response = await apiClient.get('/scst/handlers') return response.data.handlers || [] }, + + listPortals: async (): Promise => { + const response = await apiClient.get('/scst/portals') + return response.data.portals || [] + }, + + getPortal: async (id: string): Promise => { + const response = await apiClient.get(`/scst/portals/${id}`) + return response.data + }, + + createPortal: async (data: { ip_address: string; port?: number; is_active?: boolean }): Promise => { + const response = await apiClient.post('/scst/portals', data) + return response.data + }, + + updatePortal: async (id: string, data: { ip_address: string; port?: number; is_active?: boolean }): Promise => { + const response = await apiClient.put(`/scst/portals/${id}`, data) + return response.data + }, + + deletePortal: async (id: string): Promise => { + await apiClient.delete(`/scst/portals/${id}`) + }, + + enableTarget: async (targetId: string): Promise<{ message: string }> => { + const response = await apiClient.post(`/scst/targets/${targetId}/enable`) + return response.data + }, + + disableTarget: async (targetId: string): Promise<{ message: string }> => { + const response = await apiClient.post(`/scst/targets/${targetId}/disable`) + return response.data + }, + + listInitiators: async (): Promise => { + const response = await apiClient.get('/scst/initiators') + return response.data.initiators || [] + }, + + getInitiator: async (id: string): Promise => { + const response = await apiClient.get(`/scst/initiators/${id}`) + return response.data + }, + + removeInitiator: async (id: string): Promise => { + await apiClient.delete(`/scst/initiators/${id}`) + }, + + listExtents: async (): Promise => { + const response = await apiClient.get('/scst/extents') + return response.data.extents || [] + }, + + createExtent: async (extent: 
CreateExtentRequest): Promise<{ message: string }> => { + const response = await apiClient.post('/scst/extents', extent) + return response.data + }, + + deleteExtent: async (deviceName: string): Promise => { + await apiClient.delete(`/scst/extents/${deviceName}`) + }, +} + +export interface SCSTExtent { + handler_type: string + device_name: string + device_path: string + is_in_use: boolean + lun_count: number +} + +export interface CreateExtentRequest { + device_name: string + device_path: string + handler_type: string } diff --git a/frontend/src/pages/BackupManagement.tsx b/frontend/src/pages/BackupManagement.tsx index ebf3e68..b813d21 100644 --- a/frontend/src/pages/BackupManagement.tsx +++ b/frontend/src/pages/BackupManagement.tsx @@ -1,4 +1,7 @@ import { useState } from 'react' +import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query' +import { backupAPI } from '@/api/backup' +import { Search, X } from 'lucide-react' export default function BackupManagement() { const [activeTab, setActiveTab] = useState<'dashboard' | 'jobs' | 'clients' | 'storage' | 'restore'>('dashboard') @@ -96,6 +99,9 @@ export default function BackupManagement() { + {/* Conditional Content Based on Active Tab */} + {activeTab === 'dashboard' && ( + <> {/* Stats Dashboard */}
{/* Service Status Card */} @@ -307,9 +313,499 @@ export default function BackupManagement() {

[14:23:45] bareos-dir: JobId 10423: Sending Accurate information.

+ + )} + + {activeTab === 'jobs' && ( + + )} + + {activeTab === 'clients' && ( +
+ Clients tab coming soon +
+ )} + + {activeTab === 'storage' && ( +
+ Storage tab coming soon +
+ )} + + {activeTab === 'restore' && ( +
+ Restore tab coming soon +
+ )} ) } +// Jobs Management Tab Component +function JobsManagementTab() { + const queryClient = useQueryClient() + const [searchQuery, setSearchQuery] = useState('') + const [statusFilter, setStatusFilter] = useState('') + const [jobTypeFilter, setJobTypeFilter] = useState('') + const [page, setPage] = useState(1) + const [showCreateForm, setShowCreateForm] = useState(false) + const limit = 20 + + const { data, isLoading, error } = useQuery({ + queryKey: ['backup-jobs', statusFilter, jobTypeFilter, searchQuery, page], + queryFn: () => backupAPI.listJobs({ + status: statusFilter || undefined, + job_type: jobTypeFilter || undefined, + job_name: searchQuery || undefined, + limit, + offset: (page - 1) * limit, + }), + }) + + const jobs = data?.jobs || [] + const total = data?.total || 0 + const totalPages = Math.ceil(total / limit) + + const formatBytes = (bytes: number): string => { + if (bytes === 0) return '0 B' + const k = 1024 + const sizes = ['B', 'KB', 'MB', 'GB', 'TB'] + const i = Math.floor(Math.log(bytes) / Math.log(k)) + return `${(bytes / Math.pow(k, i)).toFixed(2)} ${sizes[i]}` + } + + const formatDuration = (seconds?: number): string => { + if (!seconds) return '-' + const hours = Math.floor(seconds / 3600) + const minutes = Math.floor((seconds % 3600) / 60) + const secs = seconds % 60 + if (hours > 0) { + return `${hours}:${minutes.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}` + } + return `${minutes}:${secs.toString().padStart(2, '0')}` + } + + const getStatusBadge = (status: string) => { + const statusMap: Record = { + Running: { + bg: 'bg-blue-500/10', + text: 'text-blue-400', + border: 'border-blue-500/20', + icon: 'pending_actions', + }, + Completed: { + bg: 'bg-green-500/10', + text: 'text-green-400', + border: 'border-green-500/20', + icon: 'check_circle', + }, + Failed: { + bg: 'bg-red-500/10', + text: 'text-red-400', + border: 'border-red-500/20', + icon: 'error', + }, + Canceled: { + bg: 'bg-yellow-500/10', + text: 
'text-yellow-400', + border: 'border-yellow-500/20', + icon: 'cancel', + }, + Waiting: { + bg: 'bg-gray-500/10', + text: 'text-gray-400', + border: 'border-gray-500/20', + icon: 'schedule', + }, + } + + const config = statusMap[status] || statusMap.Waiting + + return ( + + {status === 'Running' && ( + + )} + {status !== 'Running' && ( + {config.icon} + )} + {status} + + ) + } + + return ( +
+ {/* Header */} +
+
+

Backup Jobs

+

Manage and monitor backup job executions

+
+ +
+ + {/* Filters */} +
+ {/* Search */} +
+ + { + setSearchQuery(e.target.value) + setPage(1) + }} + className="w-full pl-10 pr-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm placeholder-text-secondary focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + /> +
+ + {/* Status Filter */} + + + {/* Job Type Filter */} + +
+ + {/* Jobs Table */} +
+ {isLoading ? ( +
Loading jobs...
+ ) : error ? ( +
Failed to load jobs
+ ) : jobs.length === 0 ? ( +
+

No jobs found

+
+ ) : ( + <> +
+ + + + + + + + + + + + + + + + + {jobs.map((job) => ( + + + + + + + + + + + + + ))} + +
StatusJob IDJob NameClientTypeLevelDurationBytesFilesActions
{getStatusBadge(job.status)}{job.job_id}{job.job_name}{job.client_name}{job.job_type}{job.job_level}{formatDuration(job.duration_seconds)}{formatBytes(job.bytes_written)}{job.files_written.toLocaleString()} + +
+
+ {/* Pagination */} +
+

+ Showing {(page - 1) * limit + 1}-{Math.min(page * limit, total)} of {total} jobs +

+
+ + +
+
+ + )} +
+ + {/* Create Job Form Modal */} + {showCreateForm && ( + setShowCreateForm(false)} + onSuccess={async () => { + setShowCreateForm(false) + await queryClient.invalidateQueries({ queryKey: ['backup-jobs'] }) + await queryClient.refetchQueries({ queryKey: ['backup-jobs'] }) + }} + /> + )} +
+ ) +} + +// Create Job Form Component +interface CreateJobFormProps { + onClose: () => void + onSuccess: () => void +} + +function CreateJobForm({ onClose, onSuccess }: CreateJobFormProps) { + const [formData, setFormData] = useState({ + job_name: '', + client_name: '', + job_type: 'Backup', + job_level: 'Full', + storage_name: '', + pool_name: '', + }) + const [error, setError] = useState(null) + + const createJobMutation = useMutation({ + mutationFn: backupAPI.createJob, + onSuccess: () => { + onSuccess() + }, + onError: (err: any) => { + setError(err.response?.data?.error || 'Failed to create job') + }, + }) + + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault() + setError(null) + + const payload: any = { + job_name: formData.job_name, + client_name: formData.client_name, + job_type: formData.job_type, + job_level: formData.job_level, + } + + if (formData.storage_name) { + payload.storage_name = formData.storage_name + } + if (formData.pool_name) { + payload.pool_name = formData.pool_name + } + + createJobMutation.mutate(payload) + } + + return ( +
+
+ {/* Header */} +
+

Create Backup Job

+ +
+ + {/* Form */} +
+ {error && ( +
+ {error} +
+ )} + + {/* Job Name */} +
+ + setFormData({ ...formData, job_name: e.target.value })} + className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + placeholder="e.g., DailyBackup" + /> +
+ + {/* Client Name */} +
+ + setFormData({ ...formData, client_name: e.target.value })} + className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + placeholder="e.g., filesrv-02" + /> +
+ + {/* Job Type & Level */} +
+
+ + +
+ +
+ + +
+
+ + {/* Storage Name */} +
+ + setFormData({ ...formData, storage_name: e.target.value })} + className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + placeholder="e.g., backup-srv-01" + /> +
+ + {/* Pool Name */} +
+ + setFormData({ ...formData, pool_name: e.target.value })} + className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + placeholder="e.g., Default" + /> +
+ + {/* Actions */} +
+ + +
+
+
+
+ ) +} + diff --git a/frontend/src/pages/ISCSITargetDetail.tsx b/frontend/src/pages/ISCSITargetDetail.tsx index 0b3ec6f..b16f4a8 100644 --- a/frontend/src/pages/ISCSITargetDetail.tsx +++ b/frontend/src/pages/ISCSITargetDetail.tsx @@ -4,7 +4,7 @@ import { scstAPI, type SCSTHandler } from '@/api/scst' import { Card, CardContent, CardHeader, CardTitle, CardDescription } from '@/components/ui/card' import { Button } from '@/components/ui/button' import { ArrowLeft, Plus, RefreshCw, HardDrive, Users } from 'lucide-react' -import { useState } from 'react' +import { useState, useEffect } from 'react' export default function ISCSITargetDetail() { const { id } = useParams<{ id: string }>() @@ -13,6 +13,10 @@ export default function ISCSITargetDetail() { const [showAddLUN, setShowAddLUN] = useState(false) const [showAddInitiator, setShowAddInitiator] = useState(false) + useEffect(() => { + console.log('showAddLUN state:', showAddLUN) + }, [showAddLUN]) + const { data, isLoading } = useQuery({ queryKey: ['scst-target', id], queryFn: () => scstAPI.getTarget(id!), @@ -22,6 +26,8 @@ export default function ISCSITargetDetail() { const { data: handlers } = useQuery({ queryKey: ['scst-handlers'], queryFn: scstAPI.listHandlers, + staleTime: 0, // Always fetch fresh data + refetchOnMount: true, }) if (isLoading) { @@ -33,6 +39,8 @@ export default function ISCSITargetDetail() { } const { target, luns } = data + // Ensure luns is always an array, not null + const lunsArray = luns || [] return (
@@ -91,12 +99,12 @@ export default function ISCSITargetDetail() {
Total LUNs: - {luns.length} + {lunsArray.length}
Active: - {luns.filter((l) => l.is_active).length} + {lunsArray.filter((l) => l.is_active).length}
@@ -140,14 +148,22 @@ export default function ISCSITargetDetail() { LUNs (Logical Unit Numbers) Storage devices exported by this target
- - {luns.length > 0 ? ( + {lunsArray.length > 0 ? (
@@ -170,7 +186,7 @@ export default function ISCSITargetDetail() { - {luns.map((lun) => ( + {lunsArray.map((lun) => (
{lun.lun_number} @@ -204,7 +220,14 @@ export default function ISCSITargetDetail() {

No LUNs configured

- @@ -254,12 +277,21 @@ function AddLUNForm({ targetId, handlers, onClose, onSuccess }: AddLUNFormProps) const [deviceName, setDeviceName] = useState('') const [lunNumber, setLunNumber] = useState(0) + useEffect(() => { + console.log('AddLUNForm mounted, targetId:', targetId, 'handlers:', handlers) + }, [targetId, handlers]) + const addLUNMutation = useMutation({ mutationFn: (data: { device_name: string; device_path: string; lun_number: number; handler_type: string }) => scstAPI.addLUN(targetId, data), onSuccess: () => { onSuccess() }, + onError: (error: any) => { + console.error('Failed to add LUN:', error) + const errorMessage = error.response?.data?.error || error.message || 'Failed to add LUN' + alert(errorMessage) + }, }) const handleSubmit = (e: React.FormEvent) => { @@ -278,35 +310,62 @@ function AddLUNForm({ targetId, handlers, onClose, onSuccess }: AddLUNFormProps) } return ( - - - Add LUN - Add a storage device to this target - - -
+
+
+
+

Add LUN

+

Bind a ZFS volume or storage device to this target

+
+
-
-
+ +
+ setDeviceName(e.target.value)} placeholder="device1" - className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500" + className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary" required /> +

+ Logical name for this device in SCST (auto-filled from volume path) +

- - setDevicePath(e.target.value)} - placeholder="/dev/sda or /dev/calypso/vg1/lv1" - className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 font-mono text-sm" - required - /> -
- -
-
-
+
@@ -359,8 +409,8 @@ function AddLUNForm({ targetId, handlers, onClose, onSuccess }: AddLUNFormProps)
- - +
+
) } diff --git a/frontend/src/pages/ISCSITargets.tsx b/frontend/src/pages/ISCSITargets.tsx index b3234eb..7fef212 100644 --- a/frontend/src/pages/ISCSITargets.tsx +++ b/frontend/src/pages/ISCSITargets.tsx @@ -1,8 +1,8 @@ -import { useState } from 'react' +import { useState, useEffect } from 'react' import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query' -import { scstAPI, type SCSTTarget } from '@/api/scst' +import { scstAPI, type SCSTTarget, type SCSTInitiatorGroup, type SCSTPortal, type SCSTInitiator, type SCSTExtent, type CreateExtentRequest } from '@/api/scst' import { Button } from '@/components/ui/button' -import { Plus, Settings, ChevronRight, Search, ChevronLeft, ChevronRight as ChevronRightIcon, CheckCircle, HardDrive, ArrowUpDown, ArrowUp, ChevronUp, ChevronDown, Copy, Network } from 'lucide-react' +import { Plus, Settings, ChevronRight, Search, ChevronLeft, ChevronRight as ChevronRightIcon, CheckCircle, HardDrive, ArrowUpDown, ArrowUp, ChevronUp, ChevronDown, Copy, Network, X, Trash2 } from 'lucide-react' import { Link } from 'react-router-dom' export default function ISCSITargets() { @@ -176,10 +176,18 @@ export default function ISCSITargets() {
Filter: - + + +
@@ -225,10 +233,16 @@ export default function ISCSITargets() {
)} - {activeTab !== 'targets' && ( -
- {activeTab.charAt(0).toUpperCase() + activeTab.slice(1)} tab coming soon -
+ {activeTab === 'portals' && ( + + )} + + {activeTab === 'initiators' && ( + + )} + + {activeTab === 'extents' && ( + )}
@@ -237,9 +251,11 @@ export default function ISCSITargets() { {showCreateForm && ( setShowCreateForm(false)} - onSuccess={() => { + onSuccess={async () => { setShowCreateForm(false) - queryClient.invalidateQueries({ queryKey: ['scst-targets'] }) + // Invalidate and refetch to ensure fresh data + await queryClient.invalidateQueries({ queryKey: ['scst-targets'] }) + await queryClient.refetchQueries({ queryKey: ['scst-targets'] }) }} /> )} @@ -255,11 +271,38 @@ interface TargetRowProps { } function TargetRow({ target, isExpanded, onToggle }: TargetRowProps) { + // Fetch LUNs when expanded + const { data: targetData } = useQuery({ + queryKey: ['scst-target', target.id], + queryFn: () => scstAPI.getTarget(target.id), + enabled: isExpanded, // Only fetch when expanded + }) + + const luns = targetData?.luns || [] + const initiatorGroups = targetData?.initiator_groups || [] + const [showEditPolicy, setShowEditPolicy] = useState(false) + const queryClient = useQueryClient() const statusColor = target.is_active ? 'bg-green-500/20 text-green-400 border-green-500/20' : 'bg-red-500/20 text-red-400 border-red-500/20' const statusText = target.is_active ? 'Online' : 'Offline' + const enableMutation = useMutation({ + mutationFn: () => scstAPI.enableTarget(target.id), + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['scst-targets'] }) + queryClient.invalidateQueries({ queryKey: ['scst-target', target.id] }) + }, + }) + + const disableMutation = useMutation({ + mutationFn: () => scstAPI.disableTarget(target.id), + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['scst-targets'] }) + queryClient.invalidateQueries({ queryKey: ['scst-target', target.id] }) + }, + }) + return (
{/* Main Row */} @@ -278,6 +321,31 @@ function TargetRow({ target, isExpanded, onToggle }: TargetRowProps) { {statusText} + {target.is_active ? ( + + ) : ( + + )}
{target.iqn} @@ -298,7 +366,7 @@ function TargetRow({ target, isExpanded, onToggle }: TargetRowProps) { LUNs
- 0 + {target.lun_count || 0}
@@ -325,12 +393,44 @@ function TargetRow({ target, isExpanded, onToggle }: TargetRowProps) {

Attached LUNs

- + e.stopPropagation()} + > + + Add LUN +
-
- No LUNs attached -
+ {luns.length > 0 ? ( + luns.map((lun) => ( +
+
+
+ LUN {lun.lun_number} + {lun.is_active && ( + + Active + + )} +
+
+ {lun.handler || 'Unknown'} • {lun.device_path || 'No path'} +
+
+ {lun.device_type || 'Unknown type'} +
+
+
+ )) + ) : ( +
+ No LUNs attached +
+ )}
@@ -338,7 +438,15 @@ function TargetRow({ target, isExpanded, onToggle }: TargetRowProps) {

Access Control

- +
@@ -348,8 +456,34 @@ function TargetRow({ target, isExpanded, onToggle }: TargetRowProps) {
Initiator Group - None + {initiatorGroups.length > 0 ? ( +
+ {initiatorGroups.map((group) => ( +
+ {group.group_name} + + ({group.initiators?.length || 0} initiators) + +
+ ))} +
+ ) : ( + None + )}
+ {initiatorGroups.length > 0 && ( +
+
+ {initiatorGroups.flatMap(group => + (group.initiators || []).map(initiator => ( +
+ {initiator.iqn} +
+ )) + )} +
+
+ )}
@@ -366,6 +500,169 @@ function TargetRow({ target, isExpanded, onToggle }: TargetRowProps) { )} + + {/* Edit Policy Modal */} + {showEditPolicy && ( + setShowEditPolicy(false)} + onSuccess={() => { + queryClient.invalidateQueries({ queryKey: ['scst-target', target.id] }) + setShowEditPolicy(false) + }} + /> + )} + + ) +} + +interface EditPolicyModalProps { + target: SCSTTarget + initiatorGroups: SCSTInitiatorGroup[] + onClose: () => void + onSuccess: () => void +} + +function EditPolicyModal({ target, initiatorGroups, onClose, onSuccess }: EditPolicyModalProps) { + const [initiatorIQN, setInitiatorIQN] = useState('') + const queryClient = useQueryClient() + + const addInitiatorMutation = useMutation({ + mutationFn: (iqn: string) => scstAPI.addInitiator(target.id, { initiator_iqn: iqn }), + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['scst-target', target.id] }) + setInitiatorIQN('') + onSuccess() + }, + onError: (error: any) => { + alert(`Failed to add initiator: ${error.response?.data?.error || error.message}`) + }, + }) + + const handleAddInitiator = (e: React.FormEvent) => { + e.preventDefault() + if (!initiatorIQN.trim()) { + alert('Please enter an initiator IQN') + return + } + addInitiatorMutation.mutate(initiatorIQN.trim()) + } + + // Flatten all initiators from all groups + const allInitiators = initiatorGroups.flatMap(group => group.initiators || []) + + return ( +
+
e.stopPropagation()} + > +
+
+

Edit Access Policy

+

Manage initiators for {target.iqn}

+
+ +
+ +
+ {/* Add Initiator Form */} +
+

Add Initiator

+
+ setInitiatorIQN(e.target.value)} + placeholder="iqn.2025-12.example:initiator" + className="flex-1 px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary" + required + /> + +
+
+ + {/* Current Initiators */} +
+

+ Current Initiators ({allInitiators.length}) +

+ {allInitiators.length > 0 ? ( +
+ {allInitiators.map((initiator) => ( +
+
+
+ +
+
+
{initiator.iqn}
+
+ {initiator.is_active ? 'Active' : 'Inactive'} +
+
+
+ +
+ ))} +
+ ) : ( +
+ No initiators configured +
+ )} +
+ + {/* Auth Method (Placeholder) */} +
+

Authentication Method

+
+
+ Current Method + None +
+

+ Authentication configuration coming soon +

+
+
+
+ +
+ +
+
) } @@ -381,11 +678,21 @@ function CreateTargetForm({ onClose, onSuccess }: CreateTargetFormProps) { const [targetType, setTargetType] = useState('disk') const [description, setDescription] = useState('') + const queryClient = useQueryClient() + const createMutation = useMutation({ mutationFn: scstAPI.createTarget, - onSuccess: () => { + onSuccess: async () => { + // Invalidate and refetch targets list + await queryClient.invalidateQueries({ queryKey: ['scst-targets'] }) + await queryClient.refetchQueries({ queryKey: ['scst-targets'] }) onSuccess() }, + onError: (error: any) => { + console.error('Failed to create target:', error) + const errorMessage = error.response?.data?.error || error.message || 'Failed to create target' + alert(errorMessage) + }, }) const handleSubmit = (e: React.FormEvent) => { @@ -395,12 +702,15 @@ function CreateTargetForm({ onClose, onSuccess }: CreateTargetFormProps) { return } - createMutation.mutate({ + const targetData = { iqn: iqn.trim(), target_type: targetType, name: name.trim(), description: description.trim() || undefined, - }) + } + + console.log('Creating target:', targetData) + createMutation.mutate(targetData) } return ( @@ -488,3 +798,717 @@ function CreateTargetForm({ onClose, onSuccess }: CreateTargetFormProps) { ) } + +function PortalsTab() { + const queryClient = useQueryClient() + const [showCreateModal, setShowCreateModal] = useState(false) + const [editingPortal, setEditingPortal] = useState(null) + + const { data: portals = [], isLoading } = useQuery({ + queryKey: ['scst-portals'], + queryFn: scstAPI.listPortals, + }) + + const deleteMutation = useMutation({ + mutationFn: scstAPI.deletePortal, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['scst-portals'] }) + }, + onError: (error: any) => { + alert(`Failed to delete portal: ${error.response?.data?.error || error.message}`) + }, + }) + + const handleDelete = (portal: SCSTPortal) => { + if (confirm(`Delete portal 
${portal.ip_address}:${portal.port}?`)) { + deleteMutation.mutate(portal.id) + } + } + + return ( +
+ {/* Header */} +
+
+

iSCSI Portals

+

Manage network portals for iSCSI connections

+
+ +
+ + {/* Portals List */} + {isLoading ? ( +
Loading portals...
+ ) : portals.length > 0 ? ( +
+ {portals.map((portal) => ( +
+
+
+ +
+
{portal.ip_address}
+
Port: {portal.port}
+
+
+ + {portal.is_active ? 'Active' : 'Inactive'} + +
+
+ + +
+
+ ))} +
+ ) : ( +
+ +

No portals configured

+

Create a portal to start accepting iSCSI connections

+
+ )} + + {/* Create/Edit Modal */} + {(showCreateModal || editingPortal) && ( + { + setShowCreateModal(false) + setEditingPortal(null) + }} + onSuccess={() => { + queryClient.invalidateQueries({ queryKey: ['scst-portals'] }) + setShowCreateModal(false) + setEditingPortal(null) + }} + /> + )} +
+ ) +} + +interface PortalFormModalProps { + portal?: SCSTPortal | null + onClose: () => void + onSuccess: () => void +} + +function PortalFormModal({ portal, onClose, onSuccess }: PortalFormModalProps) { + const [ipAddress, setIpAddress] = useState(portal?.ip_address || '0.0.0.0') + const [port, setPort] = useState(portal?.port || 3260) + const [isActive, setIsActive] = useState(portal?.is_active ?? true) + const queryClient = useQueryClient() + + const createMutation = useMutation({ + mutationFn: (data: { ip_address: string; port: number; is_active: boolean }) => + scstAPI.createPortal(data), + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['scst-portals'] }) + onSuccess() + }, + onError: (error: any) => { + alert(`Failed to create portal: ${error.response?.data?.error || error.message}`) + }, + }) + + const updateMutation = useMutation({ + mutationFn: (data: { ip_address: string; port: number; is_active: boolean }) => + scstAPI.updatePortal(portal!.id, data), + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['scst-portals'] }) + onSuccess() + }, + onError: (error: any) => { + alert(`Failed to update portal: ${error.response?.data?.error || error.message}`) + }, + }) + + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault() + const data = { + ip_address: ipAddress.trim(), + port: parseInt(port.toString()), + is_active: isActive, + } + + if (portal) { + updateMutation.mutate(data) + } else { + createMutation.mutate(data) + } + } + + const isLoading = createMutation.isPending || updateMutation.isPending + + return ( +
+
e.stopPropagation()} + > +
+

+ {portal ? 'Edit Portal' : 'Create Portal'} +

+ +
+ +
+
+ + setIpAddress(e.target.value)} + placeholder="0.0.0.0 (all interfaces) or specific IP" + className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary" + required + /> +

+ Use 0.0.0.0 to listen on all interfaces, or specify an IP address +

+
+ +
+ + setPort(parseInt(e.target.value) || 3260)} + min="1" + max="65535" + className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary" + required + /> +

Default iSCSI port is 3260

+
+ +
+ setIsActive(e.target.checked)} + className="w-4 h-4 rounded bg-[#0f161d] border-border-dark text-primary focus:ring-primary" + /> + +
+ +
+ + +
+
+
+
+ ) +} + +function InitiatorsTab() { + const queryClient = useQueryClient() + const [searchQuery, setSearchQuery] = useState('') + const [statusFilter, setStatusFilter] = useState('all') + + const { data: initiators = [], isLoading } = useQuery({ + queryKey: ['scst-initiators'], + queryFn: scstAPI.listInitiators, + }) + + const deleteMutation = useMutation({ + mutationFn: scstAPI.removeInitiator, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['scst-initiators'] }) + queryClient.invalidateQueries({ queryKey: ['scst-targets'] }) + }, + onError: (error: any) => { + alert(`Failed to remove initiator: ${error.response?.data?.error || error.message}`) + }, + }) + + const filteredInitiators = initiators.filter(initiator => { + const matchesSearch = initiator.iqn.toLowerCase().includes(searchQuery.toLowerCase()) || + (initiator.target_iqn && initiator.target_iqn.toLowerCase().includes(searchQuery.toLowerCase())) || + (initiator.target_name && initiator.target_name.toLowerCase().includes(searchQuery.toLowerCase())) + + const matchesStatus = statusFilter === 'all' || + (statusFilter === 'active' && initiator.is_active) || + (statusFilter === 'inactive' && !initiator.is_active) + + return matchesSearch && matchesStatus + }) + + const handleDelete = (initiator: SCSTInitiator) => { + if (confirm(`Remove initiator ${initiator.iqn}?`)) { + deleteMutation.mutate(initiator.id) + } + } + + return ( +
+ {/* Header */} +
+
+

iSCSI Initiators

+

Manage initiator access control lists

+
+
+ + {/* Toolbar */} +
+
+ + setSearchQuery(e.target.value)} + className="w-full bg-[#0f161d] border border-border-dark rounded-lg pl-10 pr-4 py-2 text-sm text-white focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary transition-all placeholder-text-secondary/50" + /> +
+
+
+ Filter: + +
+
+
+ + {/* Initiators List */} + {isLoading ? ( +
Loading initiators...
+ ) : filteredInitiators.length > 0 ? ( +
+
+ {filteredInitiators.map((initiator) => ( +
+
+
+ +
+
+
+ + {initiator.iqn} + + + {initiator.is_active ? 'Active' : 'Inactive'} + +
+
+ {initiator.target_iqn && ( +
+ Target: + + {initiator.target_name || initiator.target_iqn} + +
+ )} + {initiator.group_name && ( +
+ Group: + {initiator.group_name} +
+ )} +
+
+
+
+ + +
+
+ ))} +
+
+ ) : ( +
+
+ +
+

No initiators found

+

+ {searchQuery || statusFilter !== 'all' + ? 'Try adjusting your search or filter criteria' + : 'Initiators will appear here once they are added to targets'} +

+
+ )} +
+ ) +} + +function ExtentsTab() { + const queryClient = useQueryClient() + const [searchQuery, setSearchQuery] = useState('') + const [showCreateModal, setShowCreateModal] = useState(false) + const [handlers, setHandlers] = useState>([]) + + const { data: extents = [], isLoading } = useQuery({ + queryKey: ['scst-extents'], + queryFn: scstAPI.listExtents, + }) + + const { data: handlersData } = useQuery({ + queryKey: ['scst-handlers'], + queryFn: scstAPI.listHandlers, + }) + + // Update handlers when data changes + useEffect(() => { + if (handlersData) { + setHandlers(handlersData.map(h => ({ name: h.name, label: h.label, description: h.description }))) + } + }, [handlersData]) + + const deleteMutation = useMutation({ + mutationFn: scstAPI.deleteExtent, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['scst-extents'] }) + queryClient.invalidateQueries({ queryKey: ['scst-targets'] }) + }, + onError: (error: any) => { + alert(`Failed to delete extent: ${error.response?.data?.error || error.message}`) + }, + }) + + const filteredExtents = extents.filter(extent => { + return extent.device_name.toLowerCase().includes(searchQuery.toLowerCase()) || + extent.device_path.toLowerCase().includes(searchQuery.toLowerCase()) || + extent.handler_type.toLowerCase().includes(searchQuery.toLowerCase()) + }) + + const handleDelete = (extent: SCSTExtent) => { + if (extent.is_in_use) { + alert(`Cannot delete extent ${extent.device_name}: it is in use by ${extent.lun_count} LUN(s)`) + return + } + if (confirm(`Delete extent ${extent.device_name}?`)) { + deleteMutation.mutate(extent.device_name) + } + } + + return ( +
+
+
+

iSCSI Extents

+

Manage device extents (opened devices) for iSCSI LUNs

+
+ +
+ +
+
+ + setSearchQuery(e.target.value)} + className="w-full bg-[#0f161d] border border-border-dark rounded-lg pl-10 pr-4 py-2 text-sm text-white focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary transition-all placeholder-text-secondary/50" + /> +
+
+ + {isLoading ? ( +
Loading extents...
+ ) : filteredExtents.length > 0 ? ( +
+
+ {filteredExtents.map((extent, idx) => ( +
+
+
+ +
+
+
+ + {extent.device_name} + + {extent.is_in_use && ( + + In Use ({extent.lun_count} LUN{extent.lun_count !== 1 ? 's' : ''}) + + )} +
+
+
+ Handler: + {extent.handler_type} +
+ {extent.device_path && ( +
+ Path: + {extent.device_path} +
+ )} +
+
+
+
+ + +
+
+ ))} +
+
+ ) : ( +
+
+ +
+

No extents found

+

+ {searchQuery + ? 'Try adjusting your search criteria' + : 'Create an extent to make a device available for iSCSI LUNs'} +

+
+ )} + + {showCreateModal && ( + setShowCreateModal(false)} + onSuccess={() => { + setShowCreateModal(false) + queryClient.invalidateQueries({ queryKey: ['scst-extents'] }) + }} + /> + )} +
+ ) +} + +function CreateExtentModal({ handlers, onClose, onSuccess }: { handlers: Array<{ name: string; label: string; description?: string }>, onClose: () => void, onSuccess: () => void }) { + const [deviceName, setDeviceName] = useState('') + const [devicePath, setDevicePath] = useState('') + const [handlerType, setHandlerType] = useState('') + + const createMutation = useMutation({ + mutationFn: (data: CreateExtentRequest) => scstAPI.createExtent(data), + onSuccess: () => { + onSuccess() + alert('Extent created successfully!') + }, + onError: (error: any) => { + alert(`Failed to create extent: ${error.response?.data?.error || error.message}`) + }, + }) + + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault() + if (!deviceName || !devicePath || !handlerType) { + alert('Please fill in all required fields') + return + } + createMutation.mutate({ + device_name: deviceName, + device_path: devicePath, + handler_type: handlerType, + }) + } + + const handlePathChange = (path: string) => { + setDevicePath(path) + if (!deviceName && path) { + const parts = path.split('/') + const name = parts[parts.length - 1] + if (name) { + setDeviceName(name) + } + } + } + + return ( +
+
+
+

Create Extent

+ +
+ +
+
+ + +
+ +
+ + handlePathChange(e.target.value)} + placeholder="/dev/zvol/pool/volume or /path/to/file" + className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary" + required + /> +

Full path to the device or file

+
+ +
+ + setDeviceName(e.target.value)} + placeholder="LUN01" + className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary" + required + /> +

Unique name for this device in SCST

+
+ +
+ + +
+
+
+
+ ) +} diff --git a/frontend/src/pages/TapeLibraries.tsx b/frontend/src/pages/TapeLibraries.tsx index 4e74bb1..b0a6576 100644 --- a/frontend/src/pages/TapeLibraries.tsx +++ b/frontend/src/pages/TapeLibraries.tsx @@ -394,7 +394,13 @@ export default function TapeLibraries() {
-
+
{ + const iqn = `iqn.2023-10.com.vtl:${library.name.toLowerCase().replace(/\s+/g, '')}` + navigator.clipboard.writeText(iqn) + }} + > iqn.2023-10.com.vtl:{library.name.toLowerCase().replace(/\s+/g, '')} diff --git a/scripts/rebuild-and-restart.sh b/scripts/rebuild-and-restart.sh new file mode 100755 index 0000000..f1ddbe2 --- /dev/null +++ b/scripts/rebuild-and-restart.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +set -e + +echo "🔨 Rebuilding Calypso API..." +cd /development/calypso/backend +make build + +echo "" +echo "🔨 Rebuilding Calypso Frontend..." +cd /development/calypso/frontend +npm run build + +echo "" +echo "🔄 Restarting API service..." +systemctl restart calypso-api.service + +echo "🔄 Restarting Frontend service..." +systemctl restart calypso-frontend.service + +echo "" +echo "✅ Build and restart complete!" +echo "" + +systemctl status calypso-api.service calypso-frontend.service --no-pager