working on the backup management parts
This commit is contained in:
Binary file not shown.
118
backend/internal/backup/handler.go
Normal file
118
backend/internal/backup/handler.go
Normal file
@@ -0,0 +1,118 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/atlasos/calypso/internal/common/logger"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// Handler handles backup-related API requests
type Handler struct {
	service *Service       // backup service the HTTP endpoints delegate to
	logger  *logger.Logger // used to report server-side errors before returning a 500
}
|
||||
|
||||
// NewHandler creates a new backup handler
|
||||
func NewHandler(service *Service, log *logger.Logger) *Handler {
|
||||
return &Handler{
|
||||
service: service,
|
||||
logger: log,
|
||||
}
|
||||
}
|
||||
|
||||
// ListJobs lists backup jobs with optional filters
|
||||
func (h *Handler) ListJobs(c *gin.Context) {
|
||||
opts := ListJobsOptions{
|
||||
Status: c.Query("status"),
|
||||
JobType: c.Query("job_type"),
|
||||
ClientName: c.Query("client_name"),
|
||||
JobName: c.Query("job_name"),
|
||||
}
|
||||
|
||||
// Parse pagination
|
||||
var limit, offset int
|
||||
if limitStr := c.Query("limit"); limitStr != "" {
|
||||
if _, err := fmt.Sscanf(limitStr, "%d", &limit); err == nil {
|
||||
opts.Limit = limit
|
||||
}
|
||||
}
|
||||
if offsetStr := c.Query("offset"); offsetStr != "" {
|
||||
if _, err := fmt.Sscanf(offsetStr, "%d", &offset); err == nil {
|
||||
opts.Offset = offset
|
||||
}
|
||||
}
|
||||
|
||||
jobs, totalCount, err := h.service.ListJobs(c.Request.Context(), opts)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to list jobs", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list jobs"})
|
||||
return
|
||||
}
|
||||
|
||||
if jobs == nil {
|
||||
jobs = []Job{}
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"jobs": jobs,
|
||||
"total": totalCount,
|
||||
"limit": opts.Limit,
|
||||
"offset": opts.Offset,
|
||||
})
|
||||
}
|
||||
|
||||
// GetJob retrieves a job by ID
|
||||
func (h *Handler) GetJob(c *gin.Context) {
|
||||
id := c.Param("id")
|
||||
|
||||
job, err := h.service.GetJob(c.Request.Context(), id)
|
||||
if err != nil {
|
||||
if err.Error() == "job not found" {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "job not found"})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to get job", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get job"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, job)
|
||||
}
|
||||
|
||||
// CreateJob creates a new backup job
|
||||
func (h *Handler) CreateJob(c *gin.Context) {
|
||||
var req CreateJobRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
// Validate job type
|
||||
validJobTypes := map[string]bool{
|
||||
"Backup": true, "Restore": true, "Verify": true, "Copy": true, "Migrate": true,
|
||||
}
|
||||
if !validJobTypes[req.JobType] {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid job_type"})
|
||||
return
|
||||
}
|
||||
|
||||
// Validate job level
|
||||
validJobLevels := map[string]bool{
|
||||
"Full": true, "Incremental": true, "Differential": true, "Since": true,
|
||||
}
|
||||
if !validJobLevels[req.JobLevel] {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid job_level"})
|
||||
return
|
||||
}
|
||||
|
||||
job, err := h.service.CreateJob(c.Request.Context(), req)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to create job", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create job"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusCreated, job)
|
||||
}
|
||||
962
backend/internal/backup/service.go
Normal file
962
backend/internal/backup/service.go
Normal file
@@ -0,0 +1,962 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/atlasos/calypso/internal/common/database"
|
||||
"github.com/atlasos/calypso/internal/common/logger"
|
||||
)
|
||||
|
||||
// Service handles backup job operations
type Service struct {
	db           *database.DB   // Calypso application database (holds the backup_jobs table)
	baculaDB     *database.DB   // Optional: separate connection to Bacula database
	logger       *logger.Logger // structured logger for sync/query diagnostics
	baculaDBName string         // Bacula database name (bacula, bareos, etc.)
	dbPassword   string         // Database password for dblink (optional, will try without if empty)
}
|
||||
|
||||
// NewService creates a new backup service
|
||||
func NewService(db *database.DB, log *logger.Logger) *Service {
|
||||
return &Service{
|
||||
db: db,
|
||||
logger: log,
|
||||
baculaDBName: "bacula", // Default Bacula database name
|
||||
}
|
||||
}
|
||||
|
||||
// SetDatabasePassword sets the database password for dblink connections
|
||||
func (s *Service) SetDatabasePassword(password string) {
|
||||
s.dbPassword = password
|
||||
s.logger.Debug("Database password set for dblink", "has_password", password != "", "password_length", len(password))
|
||||
}
|
||||
|
||||
// Job represents a backup job
type Job struct {
	// ID is the Calypso-side row identifier; JobID is the numeric
	// Bacula/Bareos JobId this row was synced from.
	ID           string `json:"id"`
	JobID        int    `json:"job_id"`
	JobName      string `json:"job_name"`
	ClientName   string `json:"client_name"`
	JobType      string `json:"job_type"`  // "Backup", "Restore", "Verify", "Copy", "Migrate"
	JobLevel     string `json:"job_level"` // "Full", "Incremental", "Differential", "Since"
	Status       string `json:"status"`    // "Running", "Completed", "Failed", "Canceled", "Waiting"
	BytesWritten int64  `json:"bytes_written"`
	FilesWritten int    `json:"files_written"`
	// Pointer fields are nil when the underlying column is NULL and are
	// omitted from the JSON encoding.
	DurationSeconds *int       `json:"duration_seconds,omitempty"`
	StartedAt       *time.Time `json:"started_at,omitempty"`
	EndedAt         *time.Time `json:"ended_at,omitempty"`
	ErrorMessage    *string    `json:"error_message,omitempty"`
	StorageName     *string    `json:"storage_name,omitempty"`
	PoolName        *string    `json:"pool_name,omitempty"`
	VolumeName      *string    `json:"volume_name,omitempty"`
	CreatedAt       time.Time  `json:"created_at"`
	UpdatedAt       time.Time  `json:"updated_at"`
}
|
||||
|
||||
// ListJobsOptions represents filtering and pagination options.
// Zero values mean "no filter"; Limit/Offset defaults are applied by the service.
type ListJobsOptions struct {
	Status     string // Filter by status: "Running", "Completed", "Failed", etc.
	JobType    string // Filter by job type: "Backup", "Restore", etc.
	ClientName string // Filter by client name (case-insensitive substring match)
	JobName    string // Filter by job name (case-insensitive substring match)
	Limit      int    // Number of results to return
	Offset     int    // Offset for pagination
}
|
||||
|
||||
// SyncJobsFromBacula syncs jobs from Bacula/Bareos to the database
|
||||
// Tries to query Bacula database directly first, falls back to bconsole if database access fails
|
||||
func (s *Service) SyncJobsFromBacula(ctx context.Context) error {
|
||||
// Try to query Bacula database directly (if user has access)
|
||||
jobs, err := s.queryBaculaDatabase(ctx)
|
||||
if err != nil {
|
||||
s.logger.Debug("Failed to query Bacula database directly, trying bconsole", "error", err)
|
||||
// Fallback to bconsole
|
||||
return s.syncFromBconsole(ctx)
|
||||
}
|
||||
|
||||
if len(jobs) == 0 {
|
||||
s.logger.Debug("No jobs found in Bacula database")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Upsert jobs to Calypso database
|
||||
successCount := 0
|
||||
for _, job := range jobs {
|
||||
err := s.upsertJob(ctx, job)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to upsert job", "job_id", job.JobID, "error", err)
|
||||
continue
|
||||
}
|
||||
successCount++
|
||||
}
|
||||
|
||||
s.logger.Info("Synced jobs from Bacula database", "total", len(jobs), "success", successCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getBaculaConnection gets or creates a connection to Bacula database
|
||||
// Tries to create connection using same host/port/user but different database name
|
||||
func (s *Service) getBaculaConnection(ctx context.Context) (*database.DB, error) {
|
||||
if s.baculaDB != nil {
|
||||
// Test if connection is still alive
|
||||
if err := s.baculaDB.Ping(); err == nil {
|
||||
return s.baculaDB, nil
|
||||
}
|
||||
// Connection is dead, close it
|
||||
s.baculaDB.Close()
|
||||
s.baculaDB = nil
|
||||
}
|
||||
|
||||
// Try to get connection info from current database connection
|
||||
// We'll query the current database to get connection parameters
|
||||
var currentDB, currentUser, currentHost string
|
||||
var currentPort int
|
||||
|
||||
// Get current database connection info
|
||||
query := `SELECT current_database(), current_user, inet_server_addr(), inet_server_port()`
|
||||
err := s.db.QueryRowContext(ctx, query).Scan(¤tDB, ¤tUser, ¤tHost, ¤tPort)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get current database info: %w", err)
|
||||
}
|
||||
|
||||
// If host is null, it's a local connection (Unix socket)
|
||||
if currentHost == "" {
|
||||
currentHost = "localhost"
|
||||
}
|
||||
if currentPort == 0 {
|
||||
currentPort = 5432 // Default PostgreSQL port
|
||||
}
|
||||
|
||||
// Try common Bacula database names
|
||||
databases := []string{"bacula", "bareos", s.baculaDBName}
|
||||
|
||||
for _, dbName := range databases {
|
||||
if dbName == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Try to create connection to Bacula database
|
||||
// We'll use the same connection parameters but different database name
|
||||
// Note: This assumes same host/port/user/password
|
||||
// For production, you'd want to configure this separately
|
||||
|
||||
// We can't create a new connection without password
|
||||
// So we'll try to query using dblink or assume same connection can access Bacula DB
|
||||
// For now, return nil and let queryBaculaDatabase handle it via dblink or direct query
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Bacula database connection not configured - will use dblink or direct query")
|
||||
}
|
||||
|
||||
// queryBaculaDatabase queries Bacula database directly
|
||||
// Following Bacularis approach: query Job table directly from Bacula database
|
||||
// Since Bacula is in separate database, prioritize dblink over direct query
|
||||
func (s *Service) queryBaculaDatabase(ctx context.Context) ([]Job, error) {
|
||||
// Method 1: Try using dblink extension for cross-database query (preferred for separate databases)
|
||||
checkDblink := `SELECT EXISTS(SELECT 1 FROM pg_extension WHERE extname = 'dblink')`
|
||||
var dblinkExists bool
|
||||
err := s.db.QueryRowContext(ctx, checkDblink).Scan(&dblinkExists)
|
||||
if err == nil && dblinkExists {
|
||||
jobs, err := s.queryBaculaViaDblink(ctx)
|
||||
if err == nil && len(jobs) > 0 {
|
||||
return jobs, nil
|
||||
}
|
||||
s.logger.Debug("dblink query failed, trying direct query", "error", err)
|
||||
} else {
|
||||
s.logger.Debug("dblink extension not found, trying direct query")
|
||||
}
|
||||
|
||||
// Method 2: Try querying Job table directly (if Bacula is in same database)
|
||||
jobs, err := s.queryBaculaDirect(ctx)
|
||||
if err == nil && len(jobs) > 0 {
|
||||
return jobs, nil
|
||||
}
|
||||
s.logger.Debug("Direct query also failed", "error", err)
|
||||
|
||||
return nil, fmt.Errorf("failed to query Bacula database: dblink and direct query both failed")
|
||||
}
|
||||
|
||||
// queryBaculaDirect queries Job table directly (Bacularis approach)
// Assumes Bacula tables are in same database or accessible via search_path
func (s *Service) queryBaculaDirect(ctx context.Context) ([]Job, error) {
	// Bacularis-style query: direct query to Job table with JOIN to Client.
	// The CASE expressions translate Bacula's single-character Type/Level/
	// JobStatus codes into the human-readable strings used by the API; the
	// column aliases match the Scan order below.
	query := `
		SELECT
			j.JobId as job_id,
			j.Name as job_name,
			COALESCE(c.Name, 'unknown') as client_name,
			CASE
				WHEN j.Type = 'B' THEN 'Backup'
				WHEN j.Type = 'R' THEN 'Restore'
				WHEN j.Type = 'V' THEN 'Verify'
				WHEN j.Type = 'C' THEN 'Copy'
				WHEN j.Type = 'M' THEN 'Migrate'
				ELSE 'Backup'
			END as job_type,
			CASE
				WHEN j.Level = 'F' THEN 'Full'
				WHEN j.Level = 'I' THEN 'Incremental'
				WHEN j.Level = 'D' THEN 'Differential'
				WHEN j.Level = 'S' THEN 'Since'
				ELSE 'Full'
			END as job_level,
			CASE
				WHEN j.JobStatus = 'T' THEN 'Running'
				WHEN j.JobStatus = 'C' THEN 'Completed'
				WHEN j.JobStatus = 'f' OR j.JobStatus = 'F' THEN 'Failed'
				WHEN j.JobStatus = 'A' THEN 'Canceled'
				WHEN j.JobStatus = 'W' THEN 'Waiting'
				ELSE 'Waiting'
			END as status,
			COALESCE(j.JobBytes, 0) as bytes_written,
			COALESCE(j.JobFiles, 0) as files_written,
			j.StartTime as started_at,
			j.EndTime as ended_at
		FROM Job j
		LEFT JOIN Client c ON j.ClientId = c.ClientId
		ORDER BY j.StartTime DESC
		LIMIT 1000
	`

	rows, err := s.db.QueryContext(ctx, query)
	if err != nil {
		// Most likely the Bacula schema is not in this database/search_path.
		return nil, fmt.Errorf("Job table not found or not accessible: %w", err)
	}
	defer rows.Close()

	var jobs []Job
	for rows.Next() {
		var job Job
		// StartTime/EndTime may be NULL for queued or running jobs.
		var startedAt, endedAt sql.NullTime

		err := rows.Scan(
			&job.JobID, &job.JobName, &job.ClientName,
			&job.JobType, &job.JobLevel, &job.Status,
			&job.BytesWritten, &job.FilesWritten, &startedAt, &endedAt,
		)
		if err != nil {
			// Skip malformed rows rather than aborting the whole sync.
			s.logger.Error("Failed to scan Bacula job", "error", err)
			continue
		}

		if startedAt.Valid {
			job.StartedAt = &startedAt.Time
		}
		if endedAt.Valid {
			job.EndedAt = &endedAt.Time
			// Calculate duration if both start and end times are available
			if job.StartedAt != nil {
				duration := int(endedAt.Time.Sub(*job.StartedAt).Seconds())
				job.DurationSeconds = &duration
			}
		}

		jobs = append(jobs, job)
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	if len(jobs) > 0 {
		s.logger.Info("Successfully queried Bacula database (direct)", "count", len(jobs))
		return jobs, nil
	}

	return jobs, nil // Return empty list, not an error
}
|
||||
|
||||
// queryBaculaViaDblink queries Bacula database using dblink extension
|
||||
// Assumes dblink is installed and user has access to bacula database
|
||||
func (s *Service) queryBaculaViaDblink(ctx context.Context) ([]Job, error) {
|
||||
// Get current user and connection info for dblink
|
||||
var currentUser, currentHost string
|
||||
var currentPort int
|
||||
|
||||
// Get current connection info
|
||||
err := s.db.QueryRowContext(ctx,
|
||||
`SELECT current_user, COALESCE(inet_server_addr()::text, ''), COALESCE(inet_server_port(), 5432)`).Scan(
|
||||
¤tUser, ¤tHost, ¤tPort)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get connection info: %w", err)
|
||||
}
|
||||
|
||||
// Log connection info (without password)
|
||||
s.logger.Debug("Preparing dblink connection", "user", currentUser, "host", currentHost, "port", currentPort, "has_password", s.dbPassword != "")
|
||||
|
||||
// Try common Bacula database names
|
||||
databases := []string{"bacula", "bareos", s.baculaDBName}
|
||||
|
||||
for _, dbName := range databases {
|
||||
if dbName == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Build dblink connection string
|
||||
// Format: 'dbname=database_name user=username password=password'
|
||||
// dblink requires password even for local connections
|
||||
connStr := fmt.Sprintf("dbname=%s user=%s", dbName, currentUser)
|
||||
|
||||
// Add password if available (required for dblink)
|
||||
if s.dbPassword != "" {
|
||||
// Escape special characters in password for connection string
|
||||
// Replace single quotes with \' and backslashes with \\
|
||||
escapedPassword := strings.ReplaceAll(s.dbPassword, "\\", "\\\\")
|
||||
escapedPassword = strings.ReplaceAll(escapedPassword, "'", "\\'")
|
||||
connStr += fmt.Sprintf(" password='%s'", escapedPassword)
|
||||
}
|
||||
|
||||
// Add host/port for remote connections
|
||||
if currentHost != "" {
|
||||
connStr += fmt.Sprintf(" host=%s port=%d", currentHost, currentPort)
|
||||
}
|
||||
|
||||
// Query using dblink - get all data in one query with JOIN
|
||||
// Escape single quotes in SQL string for dblink (double them)
|
||||
innerQuery := `SELECT
|
||||
j.JobId,
|
||||
j.Name,
|
||||
j.Type,
|
||||
j.Level,
|
||||
j.JobStatus,
|
||||
j.JobBytes,
|
||||
j.JobFiles,
|
||||
j.StartTime,
|
||||
j.EndTime,
|
||||
COALESCE(c.Name, 'unknown') as ClientName
|
||||
FROM Job j
|
||||
LEFT JOIN Client c ON j.ClientId = c.ClientId
|
||||
ORDER BY j.StartTime DESC
|
||||
LIMIT 1000`
|
||||
|
||||
// Escape single quotes in inner query for dblink (double them)
|
||||
escapedQuery := strings.ReplaceAll(innerQuery, "'", "''")
|
||||
|
||||
query := fmt.Sprintf(`
|
||||
SELECT
|
||||
JobId as job_id,
|
||||
Name as job_name,
|
||||
ClientName as client_name,
|
||||
CASE
|
||||
WHEN Type = 'B' THEN 'Backup'
|
||||
WHEN Type = 'R' THEN 'Restore'
|
||||
WHEN Type = 'V' THEN 'Verify'
|
||||
WHEN Type = 'C' THEN 'Copy'
|
||||
WHEN Type = 'M' THEN 'Migrate'
|
||||
ELSE 'Backup'
|
||||
END as job_type,
|
||||
CASE
|
||||
WHEN Level = 'F' THEN 'Full'
|
||||
WHEN Level = 'I' THEN 'Incremental'
|
||||
WHEN Level = 'D' THEN 'Differential'
|
||||
WHEN Level = 'S' THEN 'Since'
|
||||
ELSE 'Full'
|
||||
END as job_level,
|
||||
CASE
|
||||
WHEN JobStatus = 'T' THEN 'Running'
|
||||
WHEN JobStatus = 'C' THEN 'Completed'
|
||||
WHEN JobStatus = 'f' OR JobStatus = 'F' THEN 'Failed'
|
||||
WHEN JobStatus = 'A' THEN 'Canceled'
|
||||
WHEN JobStatus = 'W' THEN 'Waiting'
|
||||
ELSE 'Waiting'
|
||||
END as status,
|
||||
COALESCE(JobBytes, 0) as bytes_written,
|
||||
COALESCE(JobFiles, 0) as files_written,
|
||||
StartTime as started_at,
|
||||
EndTime as ended_at
|
||||
FROM dblink('%s', '%s') AS t(JobId int, Name text, Type char, Level char, JobStatus char, JobBytes bigint, JobFiles int, StartTime timestamp, EndTime timestamp, ClientName text)
|
||||
`, connStr, escapedQuery)
|
||||
|
||||
rows, err := s.db.QueryContext(ctx, query)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to query Bacula via dblink", "database", dbName, "connection", connStr, "error", err)
|
||||
continue
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var jobs []Job
|
||||
for rows.Next() {
|
||||
var job Job
|
||||
var startedAt, endedAt sql.NullTime
|
||||
|
||||
err := rows.Scan(
|
||||
&job.JobID, &job.JobName, &job.ClientName,
|
||||
&job.JobType, &job.JobLevel, &job.Status,
|
||||
&job.BytesWritten, &job.FilesWritten, &startedAt, &endedAt,
|
||||
)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to scan Bacula job from dblink", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if startedAt.Valid {
|
||||
job.StartedAt = &startedAt.Time
|
||||
}
|
||||
if endedAt.Valid {
|
||||
job.EndedAt = &endedAt.Time
|
||||
// Calculate duration
|
||||
if job.StartedAt != nil {
|
||||
duration := int(endedAt.Time.Sub(*job.StartedAt).Seconds())
|
||||
job.DurationSeconds = &duration
|
||||
}
|
||||
}
|
||||
|
||||
jobs = append(jobs, job)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
s.logger.Debug("Error iterating dblink results", "database", dbName, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(jobs) > 0 {
|
||||
s.logger.Info("Successfully queried Bacula database via dblink", "database", dbName, "count", len(jobs))
|
||||
return jobs, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to query Bacula database via dblink from any database")
|
||||
}
|
||||
|
||||
// syncFromBconsole syncs jobs using bconsole command (fallback method)
|
||||
func (s *Service) syncFromBconsole(ctx context.Context) error {
|
||||
// Execute bconsole command to list jobs
|
||||
cmd := exec.CommandContext(ctx, "sh", "-c", "echo -e 'list jobs\nquit' | bconsole")
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
s.logger.Debug("Failed to execute bconsole", "error", err, "output", string(output))
|
||||
return nil // Don't fail, just return empty
|
||||
}
|
||||
|
||||
if len(output) == 0 {
|
||||
s.logger.Debug("bconsole returned empty output")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse bconsole output
|
||||
jobs := s.parseBconsoleOutput(ctx, string(output))
|
||||
|
||||
if len(jobs) == 0 {
|
||||
s.logger.Debug("No jobs found in bconsole output")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Upsert jobs to database
|
||||
successCount := 0
|
||||
for _, job := range jobs {
|
||||
err := s.upsertJob(ctx, job)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to upsert job", "job_id", job.JobID, "error", err)
|
||||
continue
|
||||
}
|
||||
successCount++
|
||||
}
|
||||
|
||||
s.logger.Info("Synced jobs from bconsole", "total", len(jobs), "success", successCount)
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseBconsoleOutput parses bconsole "list jobs" output
|
||||
func (s *Service) parseBconsoleOutput(ctx context.Context, output string) []Job {
|
||||
var jobs []Job
|
||||
lines := strings.Split(output, "\n")
|
||||
|
||||
// Skip header lines until we find the data rows
|
||||
inDataSection := false
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
|
||||
// Skip empty lines and separators
|
||||
if line == "" || strings.HasPrefix(line, "+") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Start data section when we see header
|
||||
if strings.HasPrefix(line, "| jobid") {
|
||||
inDataSection = true
|
||||
continue
|
||||
}
|
||||
|
||||
// Stop at footer separator
|
||||
if strings.HasPrefix(line, "*") {
|
||||
break
|
||||
}
|
||||
|
||||
if !inDataSection {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse data row: | jobid | name | starttime | type | level | jobfiles | jobbytes | jobstatus |
|
||||
if strings.HasPrefix(line, "|") {
|
||||
parts := strings.Split(line, "|")
|
||||
if len(parts) < 9 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract fields (skip first empty part)
|
||||
jobIDStr := strings.TrimSpace(parts[1])
|
||||
jobName := strings.TrimSpace(parts[2])
|
||||
startTimeStr := strings.TrimSpace(parts[3])
|
||||
jobTypeChar := strings.TrimSpace(parts[4])
|
||||
jobLevelChar := strings.TrimSpace(parts[5])
|
||||
jobFilesStr := strings.TrimSpace(parts[6])
|
||||
jobBytesStr := strings.TrimSpace(parts[7])
|
||||
jobStatusChar := strings.TrimSpace(parts[8])
|
||||
|
||||
// Parse job ID
|
||||
jobID, err := strconv.Atoi(jobIDStr)
|
||||
if err != nil {
|
||||
s.logger.Warn("Failed to parse job ID", "value", jobIDStr, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse start time
|
||||
var startedAt *time.Time
|
||||
if startTimeStr != "" && startTimeStr != "-" {
|
||||
// Format: 2025-12-27 23:05:02
|
||||
parsedTime, err := time.Parse("2006-01-02 15:04:05", startTimeStr)
|
||||
if err == nil {
|
||||
startedAt = &parsedTime
|
||||
}
|
||||
}
|
||||
|
||||
// Map job type
|
||||
jobType := "Backup"
|
||||
switch jobTypeChar {
|
||||
case "B":
|
||||
jobType = "Backup"
|
||||
case "R":
|
||||
jobType = "Restore"
|
||||
case "V":
|
||||
jobType = "Verify"
|
||||
case "C":
|
||||
jobType = "Copy"
|
||||
case "M":
|
||||
jobType = "Migrate"
|
||||
}
|
||||
|
||||
// Map job level
|
||||
jobLevel := "Full"
|
||||
switch jobLevelChar {
|
||||
case "F":
|
||||
jobLevel = "Full"
|
||||
case "I":
|
||||
jobLevel = "Incremental"
|
||||
case "D":
|
||||
jobLevel = "Differential"
|
||||
case "S":
|
||||
jobLevel = "Since"
|
||||
}
|
||||
|
||||
// Parse files and bytes
|
||||
filesWritten := 0
|
||||
if jobFilesStr != "" && jobFilesStr != "-" {
|
||||
if f, err := strconv.Atoi(jobFilesStr); err == nil {
|
||||
filesWritten = f
|
||||
}
|
||||
}
|
||||
|
||||
bytesWritten := int64(0)
|
||||
if jobBytesStr != "" && jobBytesStr != "-" {
|
||||
if b, err := strconv.ParseInt(jobBytesStr, 10, 64); err == nil {
|
||||
bytesWritten = b
|
||||
}
|
||||
}
|
||||
|
||||
// Map job status
|
||||
status := "Waiting"
|
||||
switch strings.ToLower(jobStatusChar) {
|
||||
case "t", "T":
|
||||
status = "Running"
|
||||
case "c", "C":
|
||||
status = "Completed"
|
||||
case "f", "F":
|
||||
status = "Failed"
|
||||
case "A":
|
||||
status = "Canceled"
|
||||
case "W":
|
||||
status = "Waiting"
|
||||
}
|
||||
|
||||
// Try to extract client name from job name (common pattern: JobName-ClientName)
|
||||
clientName := "unknown"
|
||||
// For now, use job name as client name if it looks like a client name
|
||||
// In real implementation, we'd query job details from Bacula
|
||||
if jobName != "" {
|
||||
// Try to get client name from job details
|
||||
clientNameFromJob := s.getClientNameFromJob(ctx, jobID)
|
||||
if clientNameFromJob != "" {
|
||||
clientName = clientNameFromJob
|
||||
} else {
|
||||
// Fallback: use job name as client name
|
||||
clientName = jobName
|
||||
}
|
||||
}
|
||||
|
||||
job := Job{
|
||||
JobID: jobID,
|
||||
JobName: jobName,
|
||||
ClientName: clientName,
|
||||
JobType: jobType,
|
||||
JobLevel: jobLevel,
|
||||
Status: status,
|
||||
BytesWritten: bytesWritten,
|
||||
FilesWritten: filesWritten,
|
||||
StartedAt: startedAt,
|
||||
}
|
||||
|
||||
jobs = append(jobs, job)
|
||||
}
|
||||
}
|
||||
|
||||
return jobs
|
||||
}
|
||||
|
||||
// getClientNameFromJob gets client name from job details using bconsole
|
||||
func (s *Service) getClientNameFromJob(ctx context.Context, jobID int) string {
|
||||
// Execute bconsole to get job details
|
||||
cmd := exec.CommandContext(ctx, "sh", "-c", fmt.Sprintf("echo -e 'list job jobid=%d\nquit' | bconsole", jobID))
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
s.logger.Debug("Failed to get job details", "job_id", jobID, "error", err)
|
||||
return ""
|
||||
}
|
||||
|
||||
// Parse output to find Client line
|
||||
lines := strings.Split(string(output), "\n")
|
||||
for _, line := range lines {
|
||||
line = strings.TrimSpace(line)
|
||||
if strings.HasPrefix(line, "Client:") {
|
||||
parts := strings.Split(line, ":")
|
||||
if len(parts) >= 2 {
|
||||
return strings.TrimSpace(parts[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// upsertJob inserts or updates a job in the database
func (s *Service) upsertJob(ctx context.Context, job Job) error {
	// INSERT ... ON CONFLICT keyed on the Bacula job_id: re-syncing the same
	// job refreshes its mutable fields instead of duplicating rows. Note that
	// client_name is intentionally NOT in the UPDATE list, so the value from
	// the first sync sticks.
	query := `
		INSERT INTO backup_jobs (
			job_id, job_name, client_name, job_type, job_level, status,
			bytes_written, files_written, started_at, updated_at
		) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, NOW())
		ON CONFLICT (job_id) DO UPDATE SET
			job_name = EXCLUDED.job_name,
			job_type = EXCLUDED.job_type,
			job_level = EXCLUDED.job_level,
			status = EXCLUDED.status,
			bytes_written = EXCLUDED.bytes_written,
			files_written = EXCLUDED.files_written,
			started_at = EXCLUDED.started_at,
			updated_at = NOW()
	`

	// Fall back to the placeholder "unknown" when no client name was resolved.
	clientName := job.ClientName
	if clientName == "" {
		clientName = "unknown"
	}

	_, err := s.db.ExecContext(ctx, query,
		job.JobID, job.JobName, clientName, job.JobType, job.JobLevel, job.Status,
		job.BytesWritten, job.FilesWritten, job.StartedAt,
	)

	return err
}
|
||||
|
||||
// ListJobs lists backup jobs with optional filters
|
||||
func (s *Service) ListJobs(ctx context.Context, opts ListJobsOptions) ([]Job, int, error) {
|
||||
// Try to sync jobs from Bacula first (non-blocking - if it fails, continue with database)
|
||||
// Don't return error if sync fails, just log it and continue
|
||||
// This allows the API to work even if bconsole is not available
|
||||
syncErr := s.SyncJobsFromBacula(ctx)
|
||||
if syncErr != nil {
|
||||
s.logger.Debug("Failed to sync jobs from Bacula, using database only", "error", syncErr)
|
||||
// Continue anyway - we'll use whatever is in the database
|
||||
}
|
||||
|
||||
// Build WHERE clause
|
||||
whereClauses := []string{"1=1"}
|
||||
args := []interface{}{}
|
||||
argIndex := 1
|
||||
|
||||
if opts.Status != "" {
|
||||
whereClauses = append(whereClauses, fmt.Sprintf("status = $%d", argIndex))
|
||||
args = append(args, opts.Status)
|
||||
argIndex++
|
||||
}
|
||||
|
||||
if opts.JobType != "" {
|
||||
whereClauses = append(whereClauses, fmt.Sprintf("job_type = $%d", argIndex))
|
||||
args = append(args, opts.JobType)
|
||||
argIndex++
|
||||
}
|
||||
|
||||
if opts.ClientName != "" {
|
||||
whereClauses = append(whereClauses, fmt.Sprintf("client_name ILIKE $%d", argIndex))
|
||||
args = append(args, "%"+opts.ClientName+"%")
|
||||
argIndex++
|
||||
}
|
||||
|
||||
if opts.JobName != "" {
|
||||
whereClauses = append(whereClauses, fmt.Sprintf("job_name ILIKE $%d", argIndex))
|
||||
args = append(args, "%"+opts.JobName+"%")
|
||||
argIndex++
|
||||
}
|
||||
|
||||
whereClause := ""
|
||||
if len(whereClauses) > 0 {
|
||||
whereClause = "WHERE " + whereClauses[0]
|
||||
for i := 1; i < len(whereClauses); i++ {
|
||||
whereClause += " AND " + whereClauses[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Get total count
|
||||
countQuery := fmt.Sprintf("SELECT COUNT(*) FROM backup_jobs %s", whereClause)
|
||||
var totalCount int
|
||||
err := s.db.QueryRowContext(ctx, countQuery, args...).Scan(&totalCount)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to count jobs: %w", err)
|
||||
}
|
||||
|
||||
// Set default limit
|
||||
limit := opts.Limit
|
||||
if limit <= 0 {
|
||||
limit = 50
|
||||
}
|
||||
if limit > 100 {
|
||||
limit = 100
|
||||
}
|
||||
|
||||
// Build query with pagination
|
||||
query := fmt.Sprintf(`
|
||||
SELECT id, job_id, job_name, client_name, job_type, job_level, status,
|
||||
bytes_written, files_written, duration_seconds,
|
||||
started_at, ended_at, error_message,
|
||||
storage_name, pool_name, volume_name,
|
||||
created_at, updated_at
|
||||
FROM backup_jobs
|
||||
%s
|
||||
ORDER BY started_at DESC NULLS LAST, created_at DESC
|
||||
LIMIT $%d OFFSET $%d
|
||||
`, whereClause, argIndex, argIndex+1)
|
||||
|
||||
args = append(args, limit, opts.Offset)
|
||||
|
||||
rows, err := s.db.QueryContext(ctx, query, args...)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("failed to query jobs: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var jobs []Job
|
||||
for rows.Next() {
|
||||
var job Job
|
||||
var durationSeconds sql.NullInt64
|
||||
var startedAt, endedAt sql.NullTime
|
||||
var errorMessage, storageName, poolName, volumeName sql.NullString
|
||||
|
||||
err := rows.Scan(
|
||||
&job.ID, &job.JobID, &job.JobName, &job.ClientName,
|
||||
&job.JobType, &job.JobLevel, &job.Status,
|
||||
&job.BytesWritten, &job.FilesWritten, &durationSeconds,
|
||||
&startedAt, &endedAt, &errorMessage,
|
||||
&storageName, &poolName, &volumeName,
|
||||
&job.CreatedAt, &job.UpdatedAt,
|
||||
)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to scan job", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if durationSeconds.Valid {
|
||||
dur := int(durationSeconds.Int64)
|
||||
job.DurationSeconds = &dur
|
||||
}
|
||||
if startedAt.Valid {
|
||||
job.StartedAt = &startedAt.Time
|
||||
}
|
||||
if endedAt.Valid {
|
||||
job.EndedAt = &endedAt.Time
|
||||
}
|
||||
if errorMessage.Valid {
|
||||
job.ErrorMessage = &errorMessage.String
|
||||
}
|
||||
if storageName.Valid {
|
||||
job.StorageName = &storageName.String
|
||||
}
|
||||
if poolName.Valid {
|
||||
job.PoolName = &poolName.String
|
||||
}
|
||||
if volumeName.Valid {
|
||||
job.VolumeName = &volumeName.String
|
||||
}
|
||||
|
||||
jobs = append(jobs, job)
|
||||
}
|
||||
|
||||
return jobs, totalCount, rows.Err()
|
||||
}
|
||||
|
||||
// GetJob retrieves a job by ID
// Returns an error whose message is exactly "job not found" when no row
// exists (the HTTP handler matches on that string to emit a 404).
func (s *Service) GetJob(ctx context.Context, id string) (*Job, error) {
	query := `
		SELECT id, job_id, job_name, client_name, job_type, job_level, status,
		       bytes_written, files_written, duration_seconds,
		       started_at, ended_at, error_message,
		       storage_name, pool_name, volume_name,
		       created_at, updated_at
		FROM backup_jobs
		WHERE id = $1
	`

	var job Job
	// Nullable columns are scanned into sql.Null* and copied onto the
	// corresponding pointer fields only when present.
	var durationSeconds sql.NullInt64
	var startedAt, endedAt sql.NullTime
	var errorMessage, storageName, poolName, volumeName sql.NullString

	err := s.db.QueryRowContext(ctx, query, id).Scan(
		&job.ID, &job.JobID, &job.JobName, &job.ClientName,
		&job.JobType, &job.JobLevel, &job.Status,
		&job.BytesWritten, &job.FilesWritten, &durationSeconds,
		&startedAt, &endedAt, &errorMessage,
		&storageName, &poolName, &volumeName,
		&job.CreatedAt, &job.UpdatedAt,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil, fmt.Errorf("job not found")
		}
		return nil, fmt.Errorf("failed to get job: %w", err)
	}

	if durationSeconds.Valid {
		dur := int(durationSeconds.Int64)
		job.DurationSeconds = &dur
	}
	if startedAt.Valid {
		job.StartedAt = &startedAt.Time
	}
	if endedAt.Valid {
		job.EndedAt = &endedAt.Time
	}
	if errorMessage.Valid {
		job.ErrorMessage = &errorMessage.String
	}
	if storageName.Valid {
		job.StorageName = &storageName.String
	}
	if poolName.Valid {
		job.PoolName = &poolName.String
	}
	if volumeName.Valid {
		job.VolumeName = &volumeName.String
	}

	return &job, nil
}
|
||||
|
||||
// CreateJobRequest represents a request to create a new backup job.
// JobType and JobLevel are only checked for presence by the binding layer;
// the expected values mirror the backup_jobs schema comments below.
type CreateJobRequest struct {
	JobName    string `json:"job_name" binding:"required"`
	ClientName string `json:"client_name" binding:"required"`
	JobType    string `json:"job_type" binding:"required"`  // 'Backup', 'Restore', 'Verify', 'Copy', 'Migrate'
	JobLevel   string `json:"job_level" binding:"required"` // 'Full', 'Incremental', 'Differential', 'Since'
	// Optional placement hints; stored as NULL when omitted.
	StorageName *string `json:"storage_name,omitempty"`
	PoolName    *string `json:"pool_name,omitempty"`
}
|
||||
|
||||
// CreateJob creates a new backup job
|
||||
func (s *Service) CreateJob(ctx context.Context, req CreateJobRequest) (*Job, error) {
|
||||
// Generate a unique job ID (in real implementation, this would come from Bareos)
|
||||
// For now, we'll use a simple incrementing approach or timestamp-based ID
|
||||
var jobID int
|
||||
err := s.db.QueryRowContext(ctx, `
|
||||
SELECT COALESCE(MAX(job_id), 0) + 1 FROM backup_jobs
|
||||
`).Scan(&jobID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate job ID: %w", err)
|
||||
}
|
||||
|
||||
// Insert the job into database
|
||||
query := `
|
||||
INSERT INTO backup_jobs (
|
||||
job_id, job_name, client_name, job_type, job_level,
|
||||
status, bytes_written, files_written,
|
||||
storage_name, pool_name, started_at
|
||||
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, NOW())
|
||||
RETURNING id, job_id, job_name, client_name, job_type, job_level, status,
|
||||
bytes_written, files_written, duration_seconds,
|
||||
started_at, ended_at, error_message,
|
||||
storage_name, pool_name, volume_name,
|
||||
created_at, updated_at
|
||||
`
|
||||
|
||||
var job Job
|
||||
var durationSeconds sql.NullInt64
|
||||
var startedAt, endedAt sql.NullTime
|
||||
var errorMessage, storageName, poolName, volumeName sql.NullString
|
||||
|
||||
err = s.db.QueryRowContext(ctx, query,
|
||||
jobID, req.JobName, req.ClientName, req.JobType, req.JobLevel,
|
||||
"Waiting", 0, 0,
|
||||
req.StorageName, req.PoolName,
|
||||
).Scan(
|
||||
&job.ID, &job.JobID, &job.JobName, &job.ClientName,
|
||||
&job.JobType, &job.JobLevel, &job.Status,
|
||||
&job.BytesWritten, &job.FilesWritten, &durationSeconds,
|
||||
&startedAt, &endedAt, &errorMessage,
|
||||
&storageName, &poolName, &volumeName,
|
||||
&job.CreatedAt, &job.UpdatedAt,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create job: %w", err)
|
||||
}
|
||||
|
||||
if durationSeconds.Valid {
|
||||
dur := int(durationSeconds.Int64)
|
||||
job.DurationSeconds = &dur
|
||||
}
|
||||
if startedAt.Valid {
|
||||
job.StartedAt = &startedAt.Time
|
||||
}
|
||||
if endedAt.Valid {
|
||||
job.EndedAt = &endedAt.Time
|
||||
}
|
||||
if errorMessage.Valid {
|
||||
job.ErrorMessage = &errorMessage.String
|
||||
}
|
||||
if storageName.Valid {
|
||||
job.StorageName = &storageName.String
|
||||
}
|
||||
if poolName.Valid {
|
||||
job.PoolName = &poolName.String
|
||||
}
|
||||
if volumeName.Valid {
|
||||
job.VolumeName = &volumeName.String
|
||||
}
|
||||
|
||||
s.logger.Info("Backup job created",
|
||||
"job_id", job.JobID,
|
||||
"job_name", job.JobName,
|
||||
"client_name", job.ClientName,
|
||||
"job_type", job.JobType,
|
||||
)
|
||||
|
||||
return &job, nil
|
||||
}
|
||||
@@ -59,7 +59,7 @@ func RunMigrations(ctx context.Context, db *DB) error {
|
||||
|
||||
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
|
||||
tx.Rollback()
|
||||
return fmt.Errorf("failed to execute migration %s: %w", migration.Version, err)
|
||||
return fmt.Errorf("failed to execute migration %d: %w", migration.Version, err)
|
||||
}
|
||||
|
||||
// Record migration
|
||||
@@ -68,11 +68,11 @@ func RunMigrations(ctx context.Context, db *DB) error {
|
||||
migration.Version,
|
||||
); err != nil {
|
||||
tx.Rollback()
|
||||
return fmt.Errorf("failed to record migration %s: %w", migration.Version, err)
|
||||
return fmt.Errorf("failed to record migration %d: %w", migration.Version, err)
|
||||
}
|
||||
|
||||
if err := tx.Commit(); err != nil {
|
||||
return fmt.Errorf("failed to commit migration %s: %w", migration.Version, err)
|
||||
return fmt.Errorf("failed to commit migration %d: %w", migration.Version, err)
|
||||
}
|
||||
|
||||
log.Info("Migration applied successfully", "version", migration.Version)
|
||||
|
||||
@@ -0,0 +1,34 @@
|
||||
-- AtlasOS - Calypso
-- Backup Jobs Schema
-- Version: 9.0

-- Backup jobs table: one row per backup job tracked by Calypso.
CREATE TABLE IF NOT EXISTS backup_jobs (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    job_id INTEGER NOT NULL UNIQUE, -- Bareos job ID
    job_name VARCHAR(255) NOT NULL,
    client_name VARCHAR(255) NOT NULL,
    job_type VARCHAR(50) NOT NULL, -- 'Backup', 'Restore', 'Verify', 'Copy', 'Migrate'
    job_level VARCHAR(50) NOT NULL, -- 'Full', 'Incremental', 'Differential', 'Since'
    status VARCHAR(50) NOT NULL, -- 'Running', 'Completed', 'Failed', 'Canceled', 'Waiting'
    bytes_written BIGINT NOT NULL DEFAULT 0,
    files_written INTEGER NOT NULL DEFAULT 0,
    duration_seconds INTEGER, -- NULL until the job has finished
    started_at TIMESTAMP,
    ended_at TIMESTAMP,
    error_message TEXT,
    storage_name VARCHAR(255),
    pool_name VARCHAR(255),
    volume_name VARCHAR(255),
    created_at TIMESTAMP NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);

-- Indexes for performance (list/filter/sort queries used by the jobs API)
CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_id ON backup_jobs(job_id);
CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_name ON backup_jobs(job_name);
CREATE INDEX IF NOT EXISTS idx_backup_jobs_client_name ON backup_jobs(client_name);
CREATE INDEX IF NOT EXISTS idx_backup_jobs_status ON backup_jobs(status);
CREATE INDEX IF NOT EXISTS idx_backup_jobs_started_at ON backup_jobs(started_at DESC);
CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_type ON backup_jobs(job_type);
|
||||
|
||||
@@ -0,0 +1,39 @@
|
||||
-- AtlasOS - Calypso
-- Add Backup Permissions
-- Version: 10.0

-- Insert backup permissions (idempotent via ON CONFLICT)
INSERT INTO permissions (name, resource, action, description) VALUES
('backup:read', 'backup', 'read', 'View backup jobs and history'),
('backup:write', 'backup', 'write', 'Create and manage backup jobs'),
('backup:manage', 'backup', 'manage', 'Full backup management')
ON CONFLICT (name) DO NOTHING;

-- Assign backup permissions to roles

-- Admin gets all backup permissions (explicitly assign since admin query in 001 only runs once)
INSERT INTO role_permissions (role_id, permission_id)
SELECT r.id, p.id
FROM roles r, permissions p
WHERE r.name = 'admin'
AND p.resource = 'backup'
ON CONFLICT DO NOTHING;

-- Operator gets read and write permissions for backup
INSERT INTO role_permissions (role_id, permission_id)
SELECT r.id, p.id
FROM roles r, permissions p
WHERE r.name = 'operator'
AND p.resource = 'backup'
AND p.action IN ('read', 'write')
ON CONFLICT DO NOTHING;

-- ReadOnly gets only read permission for backup
INSERT INTO role_permissions (role_id, permission_id)
SELECT r.id, p.id
FROM roles r, permissions p
WHERE r.name = 'readonly'
AND p.resource = 'backup'
AND p.action = 'read'
ON CONFLICT DO NOTHING;
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
"github.com/atlasos/calypso/internal/audit"
|
||||
"github.com/atlasos/calypso/internal/auth"
|
||||
"github.com/atlasos/calypso/internal/backup"
|
||||
"github.com/atlasos/calypso/internal/common/cache"
|
||||
"github.com/atlasos/calypso/internal/common/config"
|
||||
"github.com/atlasos/calypso/internal/common/database"
|
||||
@@ -207,8 +208,21 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng
|
||||
scstGroup.POST("/targets", scstHandler.CreateTarget)
|
||||
scstGroup.POST("/targets/:id/luns", scstHandler.AddLUN)
|
||||
scstGroup.POST("/targets/:id/initiators", scstHandler.AddInitiator)
|
||||
scstGroup.POST("/targets/:id/enable", scstHandler.EnableTarget)
|
||||
scstGroup.POST("/targets/:id/disable", scstHandler.DisableTarget)
|
||||
scstGroup.GET("/initiators", scstHandler.ListAllInitiators)
|
||||
scstGroup.GET("/initiators/:id", scstHandler.GetInitiator)
|
||||
scstGroup.DELETE("/initiators/:id", scstHandler.RemoveInitiator)
|
||||
scstGroup.GET("/extents", scstHandler.ListExtents)
|
||||
scstGroup.POST("/extents", scstHandler.CreateExtent)
|
||||
scstGroup.DELETE("/extents/:device", scstHandler.DeleteExtent)
|
||||
scstGroup.POST("/config/apply", scstHandler.ApplyConfig)
|
||||
scstGroup.GET("/handlers", scstHandler.ListHandlers)
|
||||
scstGroup.GET("/portals", scstHandler.ListPortals)
|
||||
scstGroup.GET("/portals/:id", scstHandler.GetPortal)
|
||||
scstGroup.POST("/portals", scstHandler.CreatePortal)
|
||||
scstGroup.PUT("/portals/:id", scstHandler.UpdatePortal)
|
||||
scstGroup.DELETE("/portals/:id", scstHandler.DeletePortal)
|
||||
}
|
||||
|
||||
// Physical Tape Libraries
|
||||
@@ -299,6 +313,19 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng
|
||||
iamGroup.DELETE("/groups/:id/users/:user_id", iamHandler.RemoveUserFromGroup)
|
||||
}
|
||||
|
||||
// Backup Jobs
|
||||
backupService := backup.NewService(db, log)
|
||||
// Set database password for dblink connections
|
||||
backupService.SetDatabasePassword(cfg.Database.Password)
|
||||
backupHandler := backup.NewHandler(backupService, log)
|
||||
backupGroup := protected.Group("/backup")
|
||||
backupGroup.Use(requirePermission("backup", "read"))
|
||||
{
|
||||
backupGroup.GET("/jobs", backupHandler.ListJobs)
|
||||
backupGroup.GET("/jobs/:id", backupHandler.GetJob)
|
||||
backupGroup.POST("/jobs", requirePermission("backup", "write"), backupHandler.CreateJob)
|
||||
}
|
||||
|
||||
// Monitoring
|
||||
monitoringHandler := monitoring.NewHandler(db, log, alertService, metricsService, eventHub)
|
||||
monitoringGroup := protected.Group("/monitoring")
|
||||
|
||||
@@ -260,118 +260,158 @@ func (h *Handler) UpdateUser(c *gin.Context) {
|
||||
// Update roles if provided
|
||||
if req.Roles != nil {
|
||||
h.logger.Info("Updating user roles", "user_id", userID, "roles", *req.Roles)
|
||||
// Get current roles
|
||||
currentRoles, err := GetUserRoles(h.db, userID)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to get current roles", "error", err)
|
||||
}
|
||||
h.logger.Info("Current roles", "user_id", userID, "current_roles", currentRoles)
|
||||
|
||||
// Remove roles that are not in the new list
|
||||
for _, role := range currentRoles {
|
||||
found := false
|
||||
for _, newRole := range *req.Roles {
|
||||
if role == newRole {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
roleID, err := GetRoleIDByName(h.db, role)
|
||||
if err == nil {
|
||||
err = RemoveUserRole(h.db, userID, roleID)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to remove role", "error", err, "role", role)
|
||||
} else {
|
||||
h.logger.Info("Role removed", "user_id", userID, "role", role)
|
||||
}
|
||||
} else {
|
||||
h.logger.Error("Failed to get role ID", "error", err, "role", role)
|
||||
}
|
||||
}
|
||||
h.logger.Error("Failed to get current roles for user", "user_id", userID, "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process user roles"})
|
||||
return
|
||||
}
|
||||
|
||||
// Add new roles that are not in the current list
|
||||
for _, roleName := range *req.Roles {
|
||||
rolesToAdd := []string{}
|
||||
rolesToRemove := []string{}
|
||||
|
||||
// Find roles to add
|
||||
for _, newRole := range *req.Roles {
|
||||
found := false
|
||||
for _, currentRole := range currentRoles {
|
||||
if roleName == currentRole {
|
||||
if newRole == currentRole {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
roleID, err := GetRoleIDByName(h.db, roleName)
|
||||
if err == nil {
|
||||
err = AddUserRole(h.db, userID, roleID, currentUser.ID)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to add role", "error", err, "role", roleName)
|
||||
} else {
|
||||
h.logger.Info("Role added", "user_id", userID, "role", roleName)
|
||||
}
|
||||
} else {
|
||||
h.logger.Error("Failed to get role ID", "error", err, "role", roleName)
|
||||
rolesToAdd = append(rolesToAdd, newRole)
|
||||
}
|
||||
}
|
||||
|
||||
// Find roles to remove
|
||||
for _, currentRole := range currentRoles {
|
||||
found := false
|
||||
for _, newRole := range *req.Roles {
|
||||
if currentRole == newRole {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
rolesToRemove = append(rolesToRemove, currentRole)
|
||||
}
|
||||
}
|
||||
|
||||
// Add new roles
|
||||
for _, roleName := range rolesToAdd {
|
||||
roleID, err := GetRoleIDByName(h.db, roleName)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
h.logger.Warn("Attempted to add non-existent role to user", "user_id", userID, "role_name", roleName)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("role '%s' not found", roleName)})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to get role ID by name", "role_name", roleName, "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process roles"})
|
||||
return
|
||||
}
|
||||
if err := AddUserRole(h.db, userID, roleID, currentUser.ID); err != nil {
|
||||
h.logger.Error("Failed to add role to user", "user_id", userID, "role_id", roleID, "error", err)
|
||||
// Don't return early, continue with other roles
|
||||
continue
|
||||
}
|
||||
h.logger.Info("Role added to user", "user_id", userID, "role_name", roleName)
|
||||
}
|
||||
|
||||
// Remove old roles
|
||||
for _, roleName := range rolesToRemove {
|
||||
roleID, err := GetRoleIDByName(h.db, roleName)
|
||||
if err != nil {
|
||||
// This case should be rare, but handle it defensively
|
||||
h.logger.Error("Failed to get role ID for role to be removed", "role_name", roleName, "error", err)
|
||||
continue
|
||||
}
|
||||
if err := RemoveUserRole(h.db, userID, roleID); err != nil {
|
||||
h.logger.Error("Failed to remove role from user", "user_id", userID, "role_id", roleID, "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove role"})
|
||||
return
|
||||
}
|
||||
h.logger.Info("Role removed from user", "user_id", userID, "role_name", roleName)
|
||||
}
|
||||
}
|
||||
|
||||
// Update groups if provided
|
||||
if req.Groups != nil {
|
||||
h.logger.Info("Updating user groups", "user_id", userID, "groups", *req.Groups)
|
||||
// Get current groups
|
||||
currentGroups, err := GetUserGroups(h.db, userID)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to get current groups", "error", err)
|
||||
}
|
||||
h.logger.Info("Current groups", "user_id", userID, "current_groups", currentGroups)
|
||||
|
||||
// Remove groups that are not in the new list
|
||||
for _, group := range currentGroups {
|
||||
found := false
|
||||
for _, newGroup := range *req.Groups {
|
||||
if group == newGroup {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
groupObj, err := GetGroupByName(h.db, group)
|
||||
if err == nil {
|
||||
err = RemoveUserFromGroup(h.db, userID, groupObj.ID)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to remove group", "error", err, "group", group)
|
||||
} else {
|
||||
h.logger.Info("Group removed", "user_id", userID, "group", group)
|
||||
}
|
||||
} else {
|
||||
h.logger.Error("Failed to get group", "error", err, "group", group)
|
||||
}
|
||||
}
|
||||
h.logger.Error("Failed to get current groups for user", "user_id", userID, "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process user groups"})
|
||||
return
|
||||
}
|
||||
|
||||
// Add new groups that are not in the current list
|
||||
for _, groupName := range *req.Groups {
|
||||
groupsToAdd := []string{}
|
||||
groupsToRemove := []string{}
|
||||
|
||||
// Find groups to add
|
||||
for _, newGroup := range *req.Groups {
|
||||
found := false
|
||||
for _, currentGroup := range currentGroups {
|
||||
if groupName == currentGroup {
|
||||
if newGroup == currentGroup {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
groupObj, err := GetGroupByName(h.db, groupName)
|
||||
if err == nil {
|
||||
err = AddUserToGroup(h.db, userID, groupObj.ID, currentUser.ID)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to add group", "error", err, "group", groupName)
|
||||
} else {
|
||||
h.logger.Info("Group added", "user_id", userID, "group", groupName)
|
||||
}
|
||||
} else {
|
||||
h.logger.Error("Failed to get group", "error", err, "group", groupName)
|
||||
groupsToAdd = append(groupsToAdd, newGroup)
|
||||
}
|
||||
}
|
||||
|
||||
// Find groups to remove
|
||||
for _, currentGroup := range currentGroups {
|
||||
found := false
|
||||
for _, newGroup := range *req.Groups {
|
||||
if currentGroup == newGroup {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
groupsToRemove = append(groupsToRemove, currentGroup)
|
||||
}
|
||||
}
|
||||
|
||||
// Add new groups
|
||||
for _, groupName := range groupsToAdd {
|
||||
group, err := GetGroupByName(h.db, groupName)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
h.logger.Warn("Attempted to add user to non-existent group", "user_id", userID, "group_name", groupName)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("group '%s' not found", groupName)})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to get group by name", "group_name", groupName, "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process groups"})
|
||||
return
|
||||
}
|
||||
if err := AddUserToGroup(h.db, userID, group.ID, currentUser.ID); err != nil {
|
||||
h.logger.Error("Failed to add user to group", "user_id", userID, "group_id", group.ID, "error", err)
|
||||
// Don't return early, continue with other groups
|
||||
continue
|
||||
}
|
||||
h.logger.Info("User added to group", "user_id", userID, "group_name", groupName)
|
||||
}
|
||||
|
||||
// Remove old groups
|
||||
for _, groupName := range groupsToRemove {
|
||||
group, err := GetGroupByName(h.db, groupName)
|
||||
if err != nil {
|
||||
// This case should be rare, but handle it defensively
|
||||
h.logger.Error("Failed to get group ID for group to be removed", "group_name", groupName, "error", err)
|
||||
continue
|
||||
}
|
||||
if err := RemoveUserFromGroup(h.db, userID, group.ID); err != nil {
|
||||
h.logger.Error("Failed to remove user from group", "user_id", userID, "group_id", group.ID, "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to remove user from group"})
|
||||
return
|
||||
}
|
||||
h.logger.Info("User removed from group", "user_id", userID, "group_name", groupName)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,14 @@
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/atlasos/calypso/internal/common/database"
|
||||
@@ -13,14 +17,14 @@ import (
|
||||
|
||||
// Metrics represents one collected snapshot of system metrics, grouped by
// subsystem, together with the time the snapshot was taken.
type Metrics struct {
	System      SystemMetrics  `json:"system"`
	Storage     StorageMetrics `json:"storage"`
	SCST        SCSTMetrics    `json:"scst"`
	Tape        TapeMetrics    `json:"tape"`
	VTL         VTLMetrics     `json:"vtl"`
	Tasks       TaskMetrics    `json:"tasks"`
	API         APIMetrics     `json:"api"`
	CollectedAt time.Time      `json:"collected_at"` // when this snapshot was collected
}
|
||||
|
||||
// SystemMetrics represents system-level metrics
|
||||
@@ -37,11 +41,11 @@ type SystemMetrics struct {
|
||||
|
||||
// StorageMetrics represents storage metrics: disk/repository counts and
// aggregate capacity figures (byte counts plus a usage percentage).
type StorageMetrics struct {
	TotalDisks         int     `json:"total_disks"`
	TotalRepositories  int     `json:"total_repositories"`
	TotalCapacityBytes int64   `json:"total_capacity_bytes"`
	UsedCapacityBytes  int64   `json:"used_capacity_bytes"`
	AvailableBytes     int64   `json:"available_bytes"`
	UsagePercent       float64 `json:"usage_percent"`
}
|
||||
|
||||
@@ -72,28 +76,43 @@ type VTLMetrics struct {
|
||||
|
||||
// TaskMetrics represents task execution metrics: task counts per state and
// an average duration in seconds (aggregation query not visible here —
// TODO confirm it covers completed tasks only).
type TaskMetrics struct {
	TotalTasks     int     `json:"total_tasks"`
	PendingTasks   int     `json:"pending_tasks"`
	RunningTasks   int     `json:"running_tasks"`
	CompletedTasks int     `json:"completed_tasks"`
	FailedTasks    int     `json:"failed_tasks"`
	AvgDurationSec float64 `json:"avg_duration_seconds"`
}
|
||||
|
||||
// APIMetrics represents HTTP API metrics: request volume and rate, error
// rate, average latency, and the current number of active connections.
type APIMetrics struct {
	TotalRequests     int64   `json:"total_requests"`
	RequestsPerSec    float64 `json:"requests_per_second"`
	ErrorRate         float64 `json:"error_rate"`
	AvgLatencyMs      float64 `json:"avg_latency_ms"`
	ActiveConnections int     `json:"active_connections"`
}
|
||||
|
||||
// MetricsService collects and provides system metrics.
// It caches the previous /proc/stat sample so CPU usage can be computed as
// a delta between two readings; see getCPUUsage.
type MetricsService struct {
	db        *database.DB   // source for storage/SCST/task counters
	logger    *logger.Logger // structured logger for collection warnings
	startTime time.Time      // service start time; uptime fallback
	lastCPU     *cpuStats // For CPU usage calculation (nil until first sample)
	lastCPUTime time.Time // when lastCPU was sampled
}
|
||||
|
||||
// cpuStats represents the aggregate CPU counters from the first ("cpu ")
// line of /proc/stat, i.e. jiffies spent in each state since boot.
// NOTE(review): the tenth /proc/stat field (guest_nice) is not captured
// here — confirm whether it should be included in the totals.
type cpuStats struct {
	user    uint64
	nice    uint64
	system  uint64
	idle    uint64
	iowait  uint64
	irq     uint64
	softirq uint64
	steal   uint64
	guest   uint64
}
|
||||
|
||||
// NewMetricsService creates a new metrics service
|
||||
@@ -115,6 +134,8 @@ func (s *MetricsService) CollectMetrics(ctx context.Context) (*Metrics, error) {
|
||||
sysMetrics, err := s.collectSystemMetrics(ctx)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to collect system metrics", "error", err)
|
||||
// Set default/zero values if collection fails
|
||||
metrics.System = SystemMetrics{}
|
||||
} else {
|
||||
metrics.System = *sysMetrics
|
||||
}
|
||||
@@ -167,21 +188,17 @@ func (s *MetricsService) CollectMetrics(ctx context.Context) (*Metrics, error) {
|
||||
|
||||
// collectSystemMetrics collects system-level metrics
|
||||
func (s *MetricsService) collectSystemMetrics(ctx context.Context) (*SystemMetrics, error) {
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
// Get system memory from /proc/meminfo
|
||||
memoryTotal, memoryUsed, memoryPercent := s.getSystemMemory()
|
||||
|
||||
// Get memory info
|
||||
memoryUsed := int64(m.Alloc)
|
||||
memoryTotal := int64(m.Sys)
|
||||
memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100
|
||||
// Get CPU usage from /proc/stat
|
||||
cpuUsage := s.getCPUUsage()
|
||||
|
||||
// Uptime
|
||||
uptime := time.Since(s.startTime).Seconds()
|
||||
// Get system uptime from /proc/uptime
|
||||
uptime := s.getSystemUptime()
|
||||
|
||||
// CPU and disk would require external tools or system calls
|
||||
// For now, we'll use placeholders
|
||||
metrics := &SystemMetrics{
|
||||
CPUUsagePercent: 0.0, // Would need to read from /proc/stat
|
||||
CPUUsagePercent: cpuUsage,
|
||||
MemoryUsed: memoryUsed,
|
||||
MemoryTotal: memoryTotal,
|
||||
MemoryPercent: memoryPercent,
|
||||
@@ -268,7 +285,7 @@ func (s *MetricsService) collectSCSTMetrics(ctx context.Context) (*SCSTMetrics,
|
||||
TotalTargets: totalTargets,
|
||||
TotalLUNs: totalLUNs,
|
||||
TotalInitiators: totalInitiators,
|
||||
ActiveTargets: activeTargets,
|
||||
ActiveTargets: activeTargets,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -403,3 +420,232 @@ func (s *MetricsService) collectTaskMetrics(ctx context.Context) (*TaskMetrics,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// getSystemUptime reads system uptime from /proc/uptime
|
||||
// Returns uptime in seconds, or service uptime as fallback
|
||||
func (s *MetricsService) getSystemUptime() float64 {
|
||||
file, err := os.Open("/proc/uptime")
|
||||
if err != nil {
|
||||
// Fallback to service uptime if /proc/uptime is not available
|
||||
s.logger.Warn("Failed to read /proc/uptime, using service uptime", "error", err)
|
||||
return time.Since(s.startTime).Seconds()
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
if !scanner.Scan() {
|
||||
// Fallback to service uptime if file is empty
|
||||
s.logger.Warn("Failed to read /proc/uptime content, using service uptime")
|
||||
return time.Since(s.startTime).Seconds()
|
||||
}
|
||||
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) == 0 {
|
||||
// Fallback to service uptime if no data
|
||||
s.logger.Warn("No data in /proc/uptime, using service uptime")
|
||||
return time.Since(s.startTime).Seconds()
|
||||
}
|
||||
|
||||
// First field is system uptime in seconds
|
||||
uptimeSeconds, err := strconv.ParseFloat(fields[0], 64)
|
||||
if err != nil {
|
||||
// Fallback to service uptime if parsing fails
|
||||
s.logger.Warn("Failed to parse /proc/uptime, using service uptime", "error", err)
|
||||
return time.Since(s.startTime).Seconds()
|
||||
}
|
||||
|
||||
return uptimeSeconds
|
||||
}
|
||||
|
||||
// getSystemMemory reads system memory from /proc/meminfo
|
||||
// Returns total, used (in bytes), and usage percentage
|
||||
func (s *MetricsService) getSystemMemory() (int64, int64, float64) {
|
||||
file, err := os.Open("/proc/meminfo")
|
||||
if err != nil {
|
||||
s.logger.Warn("Failed to read /proc/meminfo, using Go runtime memory", "error", err)
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
memoryUsed := int64(m.Alloc)
|
||||
memoryTotal := int64(m.Sys)
|
||||
memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100
|
||||
return memoryTotal, memoryUsed, memoryPercent
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
var memTotal, memAvailable, memFree, buffers, cached int64
|
||||
scanner := bufio.NewScanner(file)
|
||||
|
||||
for scanner.Scan() {
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse line like "MemTotal: 16375596 kB"
|
||||
// or "MemTotal: 16375596" (some systems don't have unit)
|
||||
colonIdx := strings.Index(line, ":")
|
||||
if colonIdx == -1 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSpace(line[:colonIdx])
|
||||
valuePart := strings.TrimSpace(line[colonIdx+1:])
|
||||
|
||||
// Split value part to get number (ignore unit like "kB")
|
||||
fields := strings.Fields(valuePart)
|
||||
if len(fields) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
value, err := strconv.ParseInt(fields[0], 10, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Values in /proc/meminfo are in KB, convert to bytes
|
||||
valueBytes := value * 1024
|
||||
|
||||
switch key {
|
||||
case "MemTotal":
|
||||
memTotal = valueBytes
|
||||
case "MemAvailable":
|
||||
memAvailable = valueBytes
|
||||
case "MemFree":
|
||||
memFree = valueBytes
|
||||
case "Buffers":
|
||||
buffers = valueBytes
|
||||
case "Cached":
|
||||
cached = valueBytes
|
||||
}
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
s.logger.Warn("Error scanning /proc/meminfo", "error", err)
|
||||
}
|
||||
|
||||
if memTotal == 0 {
|
||||
s.logger.Warn("Failed to get MemTotal from /proc/meminfo, using Go runtime memory", "memTotal", memTotal)
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
memoryUsed := int64(m.Alloc)
|
||||
memoryTotal := int64(m.Sys)
|
||||
memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100
|
||||
return memoryTotal, memoryUsed, memoryPercent
|
||||
}
|
||||
|
||||
// Calculate used memory
|
||||
// If MemAvailable exists (kernel 3.14+), use it for more accurate calculation
|
||||
var memoryUsed int64
|
||||
if memAvailable > 0 {
|
||||
memoryUsed = memTotal - memAvailable
|
||||
} else {
|
||||
// Fallback: MemTotal - MemFree - Buffers - Cached
|
||||
memoryUsed = memTotal - memFree - buffers - cached
|
||||
if memoryUsed < 0 {
|
||||
memoryUsed = memTotal - memFree
|
||||
}
|
||||
}
|
||||
|
||||
memoryPercent := float64(memoryUsed) / float64(memTotal) * 100
|
||||
|
||||
s.logger.Debug("System memory stats",
|
||||
"memTotal", memTotal,
|
||||
"memAvailable", memAvailable,
|
||||
"memoryUsed", memoryUsed,
|
||||
"memoryPercent", memoryPercent)
|
||||
|
||||
return memTotal, memoryUsed, memoryPercent
|
||||
}
|
||||
|
||||
// getCPUUsage computes total CPU utilization (percent) from /proc/stat.
// It needs two samples to form a delta: the first call only primes the
// cached sample and returns 0.0. Not protected against concurrent callers
// (it mutates s.lastCPU / s.lastCPUTime without locking) — TODO confirm the
// metrics collector only runs from a single goroutine.
func (s *MetricsService) getCPUUsage() float64 {
	currentCPU, err := s.readCPUStats()
	if err != nil {
		s.logger.Warn("Failed to read CPU stats", "error", err)
		return 0.0
	}

	// If this is the first reading, store it and return 0
	if s.lastCPU == nil {
		s.lastCPU = currentCPU
		s.lastCPUTime = time.Now()
		return 0.0
	}

	// Require a minimum gap between samples so the delta is meaningful.
	timeDiff := time.Since(s.lastCPUTime).Seconds()
	if timeDiff < 0.1 {
		// Too soon, return previous value or 0
		return 0.0
	}

	// Total jiffies across all CPU states for each sample.
	// NOTE(review): on Linux, guest time is normally already included in
	// user time — adding guest here may slightly double-count; confirm.
	prevTotal := s.lastCPU.user + s.lastCPU.nice + s.lastCPU.system + s.lastCPU.idle +
		s.lastCPU.iowait + s.lastCPU.irq + s.lastCPU.softirq + s.lastCPU.steal + s.lastCPU.guest
	currTotal := currentCPU.user + currentCPU.nice + currentCPU.system + currentCPU.idle +
		currentCPU.iowait + currentCPU.irq + currentCPU.softirq + currentCPU.steal + currentCPU.guest

	// Idle time counts iowait too (the CPU was not doing useful work).
	prevIdle := s.lastCPU.idle + s.lastCPU.iowait
	currIdle := currentCPU.idle + currentCPU.iowait

	// Deltas since the previous sample.
	totalDiff := currTotal - prevTotal
	idleDiff := currIdle - prevIdle

	if totalDiff == 0 {
		return 0.0
	}

	// Usage = fraction of non-idle jiffies within the sampling interval.
	usagePercent := 100.0 * (1.0 - float64(idleDiff)/float64(totalDiff))

	// Remember this sample as the baseline for the next call.
	s.lastCPU = currentCPU
	s.lastCPUTime = time.Now()

	return usagePercent
}
|
||||
|
||||
// readCPUStats reads CPU statistics from /proc/stat
|
||||
func (s *MetricsService) readCPUStats() (*cpuStats, error) {
|
||||
file, err := os.Open("/proc/stat")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open /proc/stat: %w", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
if !scanner.Scan() {
|
||||
return nil, fmt.Errorf("failed to read /proc/stat")
|
||||
}
|
||||
|
||||
line := strings.TrimSpace(scanner.Text())
|
||||
if !strings.HasPrefix(line, "cpu ") {
|
||||
return nil, fmt.Errorf("invalid /proc/stat format")
|
||||
}
|
||||
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 8 {
|
||||
return nil, fmt.Errorf("insufficient CPU stats fields")
|
||||
}
|
||||
|
||||
stats := &cpuStats{}
|
||||
stats.user, _ = strconv.ParseUint(fields[1], 10, 64)
|
||||
stats.nice, _ = strconv.ParseUint(fields[2], 10, 64)
|
||||
stats.system, _ = strconv.ParseUint(fields[3], 10, 64)
|
||||
stats.idle, _ = strconv.ParseUint(fields[4], 10, 64)
|
||||
stats.iowait, _ = strconv.ParseUint(fields[5], 10, 64)
|
||||
stats.irq, _ = strconv.ParseUint(fields[6], 10, 64)
|
||||
stats.softirq, _ = strconv.ParseUint(fields[7], 10, 64)
|
||||
|
||||
if len(fields) > 8 {
|
||||
stats.steal, _ = strconv.ParseUint(fields[8], 10, 64)
|
||||
}
|
||||
if len(fields) > 9 {
|
||||
stats.guest, _ = strconv.ParseUint(fields[9], 10, 64)
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package scst
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/atlasos/calypso/internal/common/database"
|
||||
@@ -11,19 +12,19 @@ import (
|
||||
|
||||
// Handler handles SCST-related API requests
|
||||
type Handler struct {
|
||||
service *Service
|
||||
service *Service
|
||||
taskEngine *tasks.Engine
|
||||
db *database.DB
|
||||
logger *logger.Logger
|
||||
db *database.DB
|
||||
logger *logger.Logger
|
||||
}
|
||||
|
||||
// NewHandler creates a new SCST handler
|
||||
func NewHandler(db *database.DB, log *logger.Logger) *Handler {
|
||||
return &Handler{
|
||||
service: NewService(db, log),
|
||||
service: NewService(db, log),
|
||||
taskEngine: tasks.NewEngine(db, log),
|
||||
db: db,
|
||||
logger: log,
|
||||
db: db,
|
||||
logger: log,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -55,21 +56,34 @@ func (h *Handler) GetTarget(c *gin.Context) {
|
||||
}
|
||||
|
||||
// Get LUNs
|
||||
luns, _ := h.service.GetTargetLUNs(c.Request.Context(), targetID)
|
||||
luns, err := h.service.GetTargetLUNs(c.Request.Context(), targetID)
|
||||
if err != nil {
|
||||
h.logger.Warn("Failed to get LUNs", "target_id", targetID, "error", err)
|
||||
// Return empty array instead of nil
|
||||
luns = []LUN{}
|
||||
}
|
||||
|
||||
// Get initiator groups
|
||||
groups, err2 := h.service.GetTargetInitiatorGroups(c.Request.Context(), targetID)
|
||||
if err2 != nil {
|
||||
h.logger.Warn("Failed to get initiator groups", "target_id", targetID, "error", err2)
|
||||
groups = []InitiatorGroup{}
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{
|
||||
"target": target,
|
||||
"luns": luns,
|
||||
"target": target,
|
||||
"luns": luns,
|
||||
"initiator_groups": groups,
|
||||
})
|
||||
}
|
||||
|
||||
// CreateTargetRequest represents a target creation request
|
||||
type CreateTargetRequest struct {
|
||||
IQN string `json:"iqn" binding:"required"`
|
||||
TargetType string `json:"target_type" binding:"required"`
|
||||
Name string `json:"name" binding:"required"`
|
||||
Description string `json:"description"`
|
||||
SingleInitiatorOnly bool `json:"single_initiator_only"`
|
||||
IQN string `json:"iqn" binding:"required"`
|
||||
TargetType string `json:"target_type" binding:"required"`
|
||||
Name string `json:"name" binding:"required"`
|
||||
Description string `json:"description"`
|
||||
SingleInitiatorOnly bool `json:"single_initiator_only"`
|
||||
}
|
||||
|
||||
// CreateTarget creates a new SCST target
|
||||
@@ -83,13 +97,13 @@ func (h *Handler) CreateTarget(c *gin.Context) {
|
||||
userID, _ := c.Get("user_id")
|
||||
|
||||
target := &Target{
|
||||
IQN: req.IQN,
|
||||
TargetType: req.TargetType,
|
||||
Name: req.Name,
|
||||
Description: req.Description,
|
||||
IsActive: true,
|
||||
IQN: req.IQN,
|
||||
TargetType: req.TargetType,
|
||||
Name: req.Name,
|
||||
Description: req.Description,
|
||||
IsActive: true,
|
||||
SingleInitiatorOnly: req.SingleInitiatorOnly || req.TargetType == "vtl" || req.TargetType == "physical_tape",
|
||||
CreatedBy: userID.(string),
|
||||
CreatedBy: userID.(string),
|
||||
}
|
||||
|
||||
if err := h.service.CreateTarget(c.Request.Context(), target); err != nil {
|
||||
@@ -103,9 +117,9 @@ func (h *Handler) CreateTarget(c *gin.Context) {
|
||||
|
||||
// AddLUNRequest represents a LUN addition request
|
||||
type AddLUNRequest struct {
|
||||
DeviceName string `json:"device_name" binding:"required"`
|
||||
DevicePath string `json:"device_path" binding:"required"`
|
||||
LUNNumber int `json:"lun_number" binding:"required"`
|
||||
DeviceName string `json:"device_name" binding:"required"`
|
||||
DevicePath string `json:"device_path" binding:"required"`
|
||||
LUNNumber int `json:"lun_number" binding:"required"`
|
||||
HandlerType string `json:"handler_type" binding:"required"`
|
||||
}
|
||||
|
||||
@@ -121,7 +135,15 @@ func (h *Handler) AddLUN(c *gin.Context) {
|
||||
|
||||
var req AddLUNRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
||||
h.logger.Error("Failed to bind AddLUN request", "error", err)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("invalid request: %v", err)})
|
||||
return
|
||||
}
|
||||
|
||||
// Validate required fields
|
||||
if req.DeviceName == "" || req.DevicePath == "" || req.HandlerType == "" {
|
||||
h.logger.Error("Missing required fields in AddLUN request", "device_name", req.DeviceName, "device_path", req.DevicePath, "handler_type", req.HandlerType)
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "device_name, device_path, and handler_type are required"})
|
||||
return
|
||||
}
|
||||
|
||||
@@ -164,6 +186,110 @@ func (h *Handler) AddInitiator(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, gin.H{"message": "Initiator added successfully"})
|
||||
}
|
||||
|
||||
// ListAllInitiators lists all initiators across all targets
|
||||
func (h *Handler) ListAllInitiators(c *gin.Context) {
|
||||
initiators, err := h.service.ListAllInitiators(c.Request.Context())
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to list initiators", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list initiators"})
|
||||
return
|
||||
}
|
||||
|
||||
if initiators == nil {
|
||||
initiators = []InitiatorWithTarget{}
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"initiators": initiators})
|
||||
}
|
||||
|
||||
// RemoveInitiator removes an initiator
|
||||
func (h *Handler) RemoveInitiator(c *gin.Context) {
|
||||
initiatorID := c.Param("id")
|
||||
|
||||
if err := h.service.RemoveInitiator(c.Request.Context(), initiatorID); err != nil {
|
||||
if err.Error() == "initiator not found" {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "initiator not found"})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to remove initiator", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "Initiator removed successfully"})
|
||||
}
|
||||
|
||||
// GetInitiator retrieves an initiator by ID
|
||||
func (h *Handler) GetInitiator(c *gin.Context) {
|
||||
initiatorID := c.Param("id")
|
||||
|
||||
initiator, err := h.service.GetInitiator(c.Request.Context(), initiatorID)
|
||||
if err != nil {
|
||||
if err.Error() == "initiator not found" {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "initiator not found"})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to get initiator", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get initiator"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, initiator)
|
||||
}
|
||||
|
||||
// ListExtents lists all device extents
|
||||
func (h *Handler) ListExtents(c *gin.Context) {
|
||||
extents, err := h.service.ListExtents(c.Request.Context())
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to list extents", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list extents"})
|
||||
return
|
||||
}
|
||||
|
||||
if extents == nil {
|
||||
extents = []Extent{}
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"extents": extents})
|
||||
}
|
||||
|
||||
// CreateExtentRequest represents a request to create an extent, i.e. to
// register a backing device with SCST so it can be exported as a LUN.
type CreateExtentRequest struct {
	// DeviceName is the SCST device identifier to create.
	DeviceName string `json:"device_name" binding:"required"`
	// DevicePath is the backing device path — presumably a block device
	// such as /dev/...; confirm against the service implementation.
	DevicePath string `json:"device_path" binding:"required"`
	// HandlerType selects the SCST handler used to export the device.
	HandlerType string `json:"handler_type" binding:"required"`
}
|
||||
|
||||
// CreateExtent creates a new device extent
|
||||
func (h *Handler) CreateExtent(c *gin.Context) {
|
||||
var req CreateExtentRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.CreateExtent(c.Request.Context(), req.DeviceName, req.DevicePath, req.HandlerType); err != nil {
|
||||
h.logger.Error("Failed to create extent", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusCreated, gin.H{"message": "Extent created successfully"})
|
||||
}
|
||||
|
||||
// DeleteExtent deletes a device extent
|
||||
func (h *Handler) DeleteExtent(c *gin.Context) {
|
||||
deviceName := c.Param("device")
|
||||
|
||||
if err := h.service.DeleteExtent(c.Request.Context(), deviceName); err != nil {
|
||||
h.logger.Error("Failed to delete extent", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "Extent deleted successfully"})
|
||||
}
|
||||
|
||||
// ApplyConfig applies SCST configuration
|
||||
func (h *Handler) ApplyConfig(c *gin.Context) {
|
||||
userID, _ := c.Get("user_id")
|
||||
@@ -209,3 +335,142 @@ func (h *Handler) ListHandlers(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, gin.H{"handlers": handlers})
|
||||
}
|
||||
|
||||
// ListPortals lists all iSCSI portals
|
||||
func (h *Handler) ListPortals(c *gin.Context) {
|
||||
portals, err := h.service.ListPortals(c.Request.Context())
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to list portals", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list portals"})
|
||||
return
|
||||
}
|
||||
|
||||
// Ensure we return an empty array instead of null
|
||||
if portals == nil {
|
||||
portals = []Portal{}
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"portals": portals})
|
||||
}
|
||||
|
||||
// CreatePortal creates a new portal
|
||||
func (h *Handler) CreatePortal(c *gin.Context) {
|
||||
var portal Portal
|
||||
if err := c.ShouldBindJSON(&portal); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.CreatePortal(c.Request.Context(), &portal); err != nil {
|
||||
h.logger.Error("Failed to create portal", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusCreated, portal)
|
||||
}
|
||||
|
||||
// UpdatePortal updates a portal
|
||||
func (h *Handler) UpdatePortal(c *gin.Context) {
|
||||
id := c.Param("id")
|
||||
|
||||
var portal Portal
|
||||
if err := c.ShouldBindJSON(&portal); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.UpdatePortal(c.Request.Context(), id, &portal); err != nil {
|
||||
if err.Error() == "portal not found" {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to update portal", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, portal)
|
||||
}
|
||||
|
||||
// EnableTarget enables a target
|
||||
func (h *Handler) EnableTarget(c *gin.Context) {
|
||||
targetID := c.Param("id")
|
||||
|
||||
target, err := h.service.GetTarget(c.Request.Context(), targetID)
|
||||
if err != nil {
|
||||
if err.Error() == "target not found" {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "target not found"})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to get target", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get target"})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.EnableTarget(c.Request.Context(), target.IQN); err != nil {
|
||||
h.logger.Error("Failed to enable target", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "Target enabled successfully"})
|
||||
}
|
||||
|
||||
// DisableTarget disables a target
|
||||
func (h *Handler) DisableTarget(c *gin.Context) {
|
||||
targetID := c.Param("id")
|
||||
|
||||
target, err := h.service.GetTarget(c.Request.Context(), targetID)
|
||||
if err != nil {
|
||||
if err.Error() == "target not found" {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "target not found"})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to get target", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get target"})
|
||||
return
|
||||
}
|
||||
|
||||
if err := h.service.DisableTarget(c.Request.Context(), target.IQN); err != nil {
|
||||
h.logger.Error("Failed to disable target", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "Target disabled successfully"})
|
||||
}
|
||||
|
||||
// DeletePortal deletes a portal
|
||||
func (h *Handler) DeletePortal(c *gin.Context) {
|
||||
id := c.Param("id")
|
||||
|
||||
if err := h.service.DeletePortal(c.Request.Context(), id); err != nil {
|
||||
if err.Error() == "portal not found" {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to delete portal", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "Portal deleted successfully"})
|
||||
}
|
||||
|
||||
// GetPortal retrieves a portal by ID
|
||||
func (h *Handler) GetPortal(c *gin.Context) {
|
||||
id := c.Param("id")
|
||||
|
||||
portal, err := h.service.GetPortal(c.Request.Context(), id)
|
||||
if err != nil {
|
||||
if err.Error() == "portal not found" {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to get portal", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get portal"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, portal)
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
25
deploy/systemd/calypso-logger.service
Normal file
25
deploy/systemd/calypso-logger.service
Normal file
@@ -0,0 +1,25 @@
|
||||
[Unit]
|
||||
Description=Calypso Stack Log Aggregator
|
||||
Documentation=https://github.com/atlasos/calypso
|
||||
After=network.target
|
||||
Wants=calypso-api.service calypso-frontend.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
# Run as root to access journald and write to /var/syslog
|
||||
# Format: timestamp [service] message
|
||||
ExecStart=/bin/bash -c '/usr/bin/journalctl -u calypso-api.service -u calypso-frontend.service -f --no-pager -o short-iso >> /var/syslog/calypso.log 2>&1'
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
# Security hardening
|
||||
NoNewPrivileges=false
|
||||
PrivateTmp=true
|
||||
ReadWritePaths=/var/syslog
|
||||
|
||||
# Resource limits
|
||||
LimitNOFILE=65536
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
126
docs/services.md
Normal file
126
docs/services.md
Normal file
@@ -0,0 +1,126 @@
|
||||
# Calypso Appliance Services Documentation
|
||||
|
||||
This document provides an overview of all services that form the Calypso backup appliance.
|
||||
|
||||
## Core Calypso Services
|
||||
|
||||
### calypso-api.service
|
||||
**Status**: Running
|
||||
**Description**: AtlasOS Calypso API Service (Development)
|
||||
**Purpose**: Main REST API backend for the Calypso appliance, handles all business logic and database operations.
|
||||
**Binary**: `/development/calypso/backend/bin/calypso-api`
|
||||
**Config**: `/development/calypso/backend/config.yaml.example`
|
||||
|
||||
### calypso-frontend.service
|
||||
**Status**: Running
|
||||
**Description**: Calypso Frontend Development Server
|
||||
**Purpose**: Web UI for managing backups, storage, and monitoring the appliance.
|
||||
**Port**: 3000
|
||||
**Technology**: React + Vite (development mode)
|
||||
|
||||
## Backup Services (Bacula)
|
||||
|
||||
### bacula-director.service
|
||||
**Status**: Running
|
||||
**Description**: Bacula Director Daemon
|
||||
**Purpose**: Central management daemon that orchestrates all backup, restore, and verify operations.
|
||||
**Config**: `/etc/bacula/bacula-dir.conf`
|
||||
**Docs**: `man:bacula-dir(8)`
|
||||
|
||||
### bacula-sd.service
|
||||
**Status**: Running
|
||||
**Description**: Bacula Storage Daemon
|
||||
**Purpose**: Manages physical backup storage devices (disks, tapes, virtual tape libraries).
|
||||
**Config**: `/etc/bacula/bacula-sd.conf`
|
||||
|
||||
### bacula-fd.service
|
||||
**Status**: Running
|
||||
**Description**: Bacula File Daemon
|
||||
**Purpose**: Runs on systems being backed up, manages file access and metadata.
|
||||
**Config**: `/etc/bacula/bacula-fd.conf`
|
||||
|
||||
## Storage/iSCSI Services (SCST)
|
||||
|
||||
### scst.service
|
||||
**Status**: Active (exited)
|
||||
**Description**: SCST - A Generic SCSI Target Subsystem
|
||||
**Purpose**: Kernel-level SCSI target framework providing high-performance storage exports.
|
||||
**Type**: One-shot service that loads SCST kernel modules
|
||||
|
||||
### iscsi-scstd.service
|
||||
**Status**: Running
|
||||
**Description**: iSCSI SCST Target Daemon
|
||||
**Purpose**: Provides iSCSI protocol support for SCST, allowing network block storage exports.
|
||||
**Port**: 3260 (standard iSCSI port)
|
||||
**Configured Targets**:
|
||||
- `iqn.2025-12.id.atlas:lun01` (enabled)
|
||||
|
||||
### iscsid.service
|
||||
**Status**: Inactive
|
||||
**Description**: iSCSI initiator daemon
|
||||
**Purpose**: Client-side iSCSI service (not currently in use)
|
||||
|
||||
### open-iscsi.service
|
||||
**Status**: Inactive
|
||||
**Description**: Login to default iSCSI targets
|
||||
**Purpose**: Automatic iSCSI target login (not currently in use)
|
||||
|
||||
## Virtual Tape Library
|
||||
|
||||
### mhvtl-load-modules.service
|
||||
**Status**: Active (exited)
|
||||
**Description**: Load mhvtl modules
|
||||
**Purpose**: Loads mhVTL (virtual tape library) kernel modules for tape emulation.
|
||||
**Type**: One-shot service that runs at boot
|
||||
**Docs**: `man:vtltape(1)`, `man:vtllibrary(1)`
|
||||
|
||||
## Database
|
||||
|
||||
### postgresql.service
|
||||
**Status**: Active (exited)
|
||||
**Description**: PostgreSQL RDBMS
|
||||
**Purpose**: Parent service for PostgreSQL database management
|
||||
|
||||
### postgresql@16-main.service
|
||||
**Status**: Running
|
||||
**Description**: PostgreSQL Cluster 16-main
|
||||
**Purpose**: Main database for Calypso API, stores configuration, jobs, and metadata.
|
||||
**Version**: PostgreSQL 16
|
||||
|
||||
## Service Management
|
||||
|
||||
### Check All Services Status
|
||||
```bash
|
||||
systemctl status calypso-api calypso-frontend bacula-director bacula-sd bacula-fd scst iscsi-scstd mhvtl-load-modules postgresql
|
||||
```
|
||||
|
||||
### Rebuild and Restart Core Services
|
||||
```bash
|
||||
/development/calypso/scripts/rebuild-and-restart.sh
|
||||
```
|
||||
|
||||
### Restart Individual Services
|
||||
```bash
|
||||
systemctl restart calypso-api.service
|
||||
systemctl restart calypso-frontend.service
|
||||
systemctl restart bacula-director.service
|
||||
```
|
||||
|
||||
## Service Dependencies
|
||||
|
||||
```
|
||||
PostgreSQL
|
||||
└── Calypso API
|
||||
└── Calypso Frontend
|
||||
|
||||
SCST
|
||||
└── iSCSI SCST Target Daemon
|
||||
|
||||
mhVTL
|
||||
└── Bacula Storage Daemon
|
||||
└── Bacula Director
|
||||
└── Bacula File Daemon
|
||||
```
|
||||
|
||||
## Total Service Count
|
||||
**12 services** forming the complete Calypso backup appliance stack.
|
||||
75
frontend/src/api/backup.ts
Normal file
75
frontend/src/api/backup.ts
Normal file
@@ -0,0 +1,75 @@
|
||||
import apiClient from './client'
|
||||
|
||||
/**
 * A Bacula backup job record as returned by the backend backup API.
 * Field names mirror the JSON emitted by the Go handler.
 */
export interface BackupJob {
  /** Row identifier (backend primary key). */
  id: string
  /** Numeric Bacula job ID. */
  job_id: number
  job_name: string
  client_name: string
  job_type: string
  job_level: string
  /** Lifecycle state; the union lists the statuses the UI handles. */
  status: 'Running' | 'Completed' | 'Failed' | 'Canceled' | 'Waiting'
  /** Raw byte count written by the job. */
  bytes_written: number
  files_written: number
  /** Elapsed run time in seconds; absent while the job is pending/running. */
  duration_seconds?: number
  started_at?: string
  ended_at?: string
  /** Populated when the job failed — presumably; confirm against backend. */
  error_message?: string
  storage_name?: string
  pool_name?: string
  volume_name?: string
  created_at: string
  updated_at: string
}
|
||||
|
||||
/**
 * Paginated response envelope for the job-list endpoint. Matches the
 * backend handler, which returns jobs plus total/limit/offset.
 */
export interface ListJobsResponse {
  jobs: BackupJob[]
  /** Total matching rows across all pages. */
  total: number
  limit: number
  offset: number
}
|
||||
|
||||
/**
 * Optional filters and pagination for listing jobs. Each field maps
 * directly to a query-string parameter read by the backend ListJobs
 * handler (status, job_type, client_name, job_name, limit, offset).
 */
export interface ListJobsParams {
  status?: string
  job_type?: string
  client_name?: string
  job_name?: string
  limit?: number
  offset?: number
}
|
||||
|
||||
/** Payload for creating a backup job via POST /backup/jobs. */
export interface CreateJobRequest {
  job_name: string
  client_name: string
  job_type: string
  job_level: string
  /** Optional placement hints; backend defaults apply when omitted — confirm. */
  storage_name?: string
  pool_name?: string
}
|
||||
|
||||
export const backupAPI = {
|
||||
listJobs: async (params?: ListJobsParams): Promise<ListJobsResponse> => {
|
||||
const queryParams = new URLSearchParams()
|
||||
if (params?.status) queryParams.append('status', params.status)
|
||||
if (params?.job_type) queryParams.append('job_type', params.job_type)
|
||||
if (params?.client_name) queryParams.append('client_name', params.client_name)
|
||||
if (params?.job_name) queryParams.append('job_name', params.job_name)
|
||||
if (params?.limit) queryParams.append('limit', params.limit.toString())
|
||||
if (params?.offset) queryParams.append('offset', params.offset.toString())
|
||||
|
||||
const response = await apiClient.get<ListJobsResponse>(
|
||||
`/backup/jobs${queryParams.toString() ? `?${queryParams.toString()}` : ''}`
|
||||
)
|
||||
return response.data
|
||||
},
|
||||
|
||||
getJob: async (id: string): Promise<BackupJob> => {
|
||||
const response = await apiClient.get<BackupJob>(`/backup/jobs/${id}`)
|
||||
return response.data
|
||||
},
|
||||
|
||||
createJob: async (data: CreateJobRequest): Promise<BackupJob> => {
|
||||
const response = await apiClient.post<BackupJob>('/backup/jobs', data)
|
||||
return response.data
|
||||
},
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ export interface SCSTTarget {
|
||||
iqn: string
|
||||
alias?: string
|
||||
is_active: boolean
|
||||
lun_count?: number
|
||||
created_at: string
|
||||
updated_at: string
|
||||
}
|
||||
@@ -31,7 +32,11 @@ export interface SCSTInitiator {
|
||||
iqn: string
|
||||
is_active: boolean
|
||||
created_at: string
|
||||
updated_at: string
|
||||
updated_at?: string
|
||||
target_id?: string
|
||||
target_iqn?: string
|
||||
target_name?: string
|
||||
group_name?: string
|
||||
}
|
||||
|
||||
export interface SCSTInitiatorGroup {
|
||||
@@ -45,9 +50,19 @@ export interface SCSTInitiatorGroup {
|
||||
|
||||
export interface SCSTHandler {
|
||||
name: string
|
||||
label: string
|
||||
description?: string
|
||||
}
|
||||
|
||||
export interface SCSTPortal {
|
||||
id: string
|
||||
ip_address: string
|
||||
port: number
|
||||
is_active: boolean
|
||||
created_at: string
|
||||
updated_at: string
|
||||
}
|
||||
|
||||
export interface CreateTargetRequest {
|
||||
iqn: string
|
||||
target_type: string
|
||||
@@ -80,6 +95,7 @@ export const scstAPI = {
|
||||
getTarget: async (id: string): Promise<{
|
||||
target: SCSTTarget
|
||||
luns: SCSTLUN[]
|
||||
initiator_groups?: SCSTInitiatorGroup[]
|
||||
}> => {
|
||||
const response = await apiClient.get(`/scst/targets/${id}`)
|
||||
return response.data
|
||||
@@ -87,7 +103,8 @@ export const scstAPI = {
|
||||
|
||||
createTarget: async (data: CreateTargetRequest): Promise<SCSTTarget> => {
|
||||
const response = await apiClient.post('/scst/targets', data)
|
||||
return response.data.target
|
||||
// Backend returns target directly, not wrapped in { target: ... }
|
||||
return response.data
|
||||
},
|
||||
|
||||
addLUN: async (targetId: string, data: AddLUNRequest): Promise<{ task_id: string }> => {
|
||||
@@ -109,5 +126,81 @@ export const scstAPI = {
|
||||
const response = await apiClient.get('/scst/handlers')
|
||||
return response.data.handlers || []
|
||||
},
|
||||
|
||||
listPortals: async (): Promise<SCSTPortal[]> => {
|
||||
const response = await apiClient.get('/scst/portals')
|
||||
return response.data.portals || []
|
||||
},
|
||||
|
||||
getPortal: async (id: string): Promise<SCSTPortal> => {
|
||||
const response = await apiClient.get(`/scst/portals/${id}`)
|
||||
return response.data
|
||||
},
|
||||
|
||||
createPortal: async (data: { ip_address: string; port?: number; is_active?: boolean }): Promise<SCSTPortal> => {
|
||||
const response = await apiClient.post('/scst/portals', data)
|
||||
return response.data
|
||||
},
|
||||
|
||||
updatePortal: async (id: string, data: { ip_address: string; port?: number; is_active?: boolean }): Promise<SCSTPortal> => {
|
||||
const response = await apiClient.put(`/scst/portals/${id}`, data)
|
||||
return response.data
|
||||
},
|
||||
|
||||
deletePortal: async (id: string): Promise<void> => {
|
||||
await apiClient.delete(`/scst/portals/${id}`)
|
||||
},
|
||||
|
||||
enableTarget: async (targetId: string): Promise<{ message: string }> => {
|
||||
const response = await apiClient.post(`/scst/targets/${targetId}/enable`)
|
||||
return response.data
|
||||
},
|
||||
|
||||
disableTarget: async (targetId: string): Promise<{ message: string }> => {
|
||||
const response = await apiClient.post(`/scst/targets/${targetId}/disable`)
|
||||
return response.data
|
||||
},
|
||||
|
||||
listInitiators: async (): Promise<SCSTInitiator[]> => {
|
||||
const response = await apiClient.get('/scst/initiators')
|
||||
return response.data.initiators || []
|
||||
},
|
||||
|
||||
getInitiator: async (id: string): Promise<SCSTInitiator> => {
|
||||
const response = await apiClient.get(`/scst/initiators/${id}`)
|
||||
return response.data
|
||||
},
|
||||
|
||||
removeInitiator: async (id: string): Promise<void> => {
|
||||
await apiClient.delete(`/scst/initiators/${id}`)
|
||||
},
|
||||
|
||||
listExtents: async (): Promise<SCSTExtent[]> => {
|
||||
const response = await apiClient.get('/scst/extents')
|
||||
return response.data.extents || []
|
||||
},
|
||||
|
||||
createExtent: async (extent: CreateExtentRequest): Promise<{ message: string }> => {
|
||||
const response = await apiClient.post('/scst/extents', extent)
|
||||
return response.data
|
||||
},
|
||||
|
||||
deleteExtent: async (deviceName: string): Promise<void> => {
|
||||
await apiClient.delete(`/scst/extents/${deviceName}`)
|
||||
},
|
||||
}
|
||||
|
||||
/** A device extent as reported by the SCST extents endpoint. */
export interface SCSTExtent {
  /** SCST handler that exports the device. */
  handler_type: string
  device_name: string
  /** Backing device path. */
  device_path: string
  /** True when at least one LUN references this extent — presumably; confirm. */
  is_in_use: boolean
  lun_count: number
}
|
||||
|
||||
/**
 * Payload for POST /scst/extents. Mirrors the backend's
 * CreateExtentRequest (all three fields are required there).
 */
export interface CreateExtentRequest {
  device_name: string
  device_path: string
  handler_type: string
}
|
||||
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
import { useState } from 'react'
|
||||
import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query'
|
||||
import { backupAPI } from '@/api/backup'
|
||||
import { Search, X } from 'lucide-react'
|
||||
|
||||
export default function BackupManagement() {
|
||||
const [activeTab, setActiveTab] = useState<'dashboard' | 'jobs' | 'clients' | 'storage' | 'restore'>('dashboard')
|
||||
@@ -96,6 +99,9 @@ export default function BackupManagement() {
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Conditional Content Based on Active Tab */}
|
||||
{activeTab === 'dashboard' && (
|
||||
<>
|
||||
{/* Stats Dashboard */}
|
||||
<div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-4 gap-4">
|
||||
{/* Service Status Card */}
|
||||
@@ -307,9 +313,499 @@ export default function BackupManagement() {
|
||||
<p>[14:23:45] bareos-dir: JobId 10423: Sending Accurate information.</p>
|
||||
</div>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
|
||||
{activeTab === 'jobs' && (
|
||||
<JobsManagementTab />
|
||||
)}
|
||||
|
||||
{activeTab === 'clients' && (
|
||||
<div className="p-8 text-center text-text-secondary">
|
||||
Clients tab coming soon
|
||||
</div>
|
||||
)}
|
||||
|
||||
{activeTab === 'storage' && (
|
||||
<div className="p-8 text-center text-text-secondary">
|
||||
Storage tab coming soon
|
||||
</div>
|
||||
)}
|
||||
|
||||
{activeTab === 'restore' && (
|
||||
<div className="p-8 text-center text-text-secondary">
|
||||
Restore tab coming soon
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
// Jobs Management Tab Component
|
||||
function JobsManagementTab() {
|
||||
const queryClient = useQueryClient()
|
||||
const [searchQuery, setSearchQuery] = useState('')
|
||||
const [statusFilter, setStatusFilter] = useState<string>('')
|
||||
const [jobTypeFilter, setJobTypeFilter] = useState<string>('')
|
||||
const [page, setPage] = useState(1)
|
||||
const [showCreateForm, setShowCreateForm] = useState(false)
|
||||
const limit = 20
|
||||
|
||||
const { data, isLoading, error } = useQuery({
|
||||
queryKey: ['backup-jobs', statusFilter, jobTypeFilter, searchQuery, page],
|
||||
queryFn: () => backupAPI.listJobs({
|
||||
status: statusFilter || undefined,
|
||||
job_type: jobTypeFilter || undefined,
|
||||
job_name: searchQuery || undefined,
|
||||
limit,
|
||||
offset: (page - 1) * limit,
|
||||
}),
|
||||
})
|
||||
|
||||
const jobs = data?.jobs || []
|
||||
const total = data?.total || 0
|
||||
const totalPages = Math.ceil(total / limit)
|
||||
|
||||
const formatBytes = (bytes: number): string => {
|
||||
if (bytes === 0) return '0 B'
|
||||
const k = 1024
|
||||
const sizes = ['B', 'KB', 'MB', 'GB', 'TB']
|
||||
const i = Math.floor(Math.log(bytes) / Math.log(k))
|
||||
return `${(bytes / Math.pow(k, i)).toFixed(2)} ${sizes[i]}`
|
||||
}
|
||||
|
||||
const formatDuration = (seconds?: number): string => {
|
||||
if (!seconds) return '-'
|
||||
const hours = Math.floor(seconds / 3600)
|
||||
const minutes = Math.floor((seconds % 3600) / 60)
|
||||
const secs = seconds % 60
|
||||
if (hours > 0) {
|
||||
return `${hours}:${minutes.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`
|
||||
}
|
||||
return `${minutes}:${secs.toString().padStart(2, '0')}`
|
||||
}
|
||||
|
||||
const getStatusBadge = (status: string) => {
|
||||
const statusMap: Record<string, { bg: string; text: string; border: string; icon: string }> = {
|
||||
Running: {
|
||||
bg: 'bg-blue-500/10',
|
||||
text: 'text-blue-400',
|
||||
border: 'border-blue-500/20',
|
||||
icon: 'pending_actions',
|
||||
},
|
||||
Completed: {
|
||||
bg: 'bg-green-500/10',
|
||||
text: 'text-green-400',
|
||||
border: 'border-green-500/20',
|
||||
icon: 'check_circle',
|
||||
},
|
||||
Failed: {
|
||||
bg: 'bg-red-500/10',
|
||||
text: 'text-red-400',
|
||||
border: 'border-red-500/20',
|
||||
icon: 'error',
|
||||
},
|
||||
Canceled: {
|
||||
bg: 'bg-yellow-500/10',
|
||||
text: 'text-yellow-400',
|
||||
border: 'border-yellow-500/20',
|
||||
icon: 'cancel',
|
||||
},
|
||||
Waiting: {
|
||||
bg: 'bg-gray-500/10',
|
||||
text: 'text-gray-400',
|
||||
border: 'border-gray-500/20',
|
||||
icon: 'schedule',
|
||||
},
|
||||
}
|
||||
|
||||
const config = statusMap[status] || statusMap.Waiting
|
||||
|
||||
return (
|
||||
<span className={`inline-flex items-center gap-1.5 rounded px-2 py-1 text-xs font-medium ${config.bg} ${config.text} border ${config.border}`}>
|
||||
{status === 'Running' && (
|
||||
<span className="block h-1.5 w-1.5 rounded-full bg-blue-400 animate-pulse"></span>
|
||||
)}
|
||||
{status !== 'Running' && (
|
||||
<span className="material-symbols-outlined text-[14px]">{config.icon}</span>
|
||||
)}
|
||||
{status}
|
||||
</span>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="space-y-6">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between">
|
||||
<div>
|
||||
<h2 className="text-white text-2xl font-bold">Backup Jobs</h2>
|
||||
<p className="text-text-secondary text-sm mt-1">Manage and monitor backup job executions</p>
|
||||
</div>
|
||||
<button
|
||||
onClick={() => setShowCreateForm(true)}
|
||||
className="flex items-center gap-2 cursor-pointer justify-center rounded-lg h-10 px-4 bg-primary text-white text-sm font-bold shadow-lg shadow-primary/20 hover:bg-primary/90 transition-colors"
|
||||
>
|
||||
<span className="material-symbols-outlined text-base">add</span>
|
||||
<span>Create Job</span>
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Filters */}
|
||||
<div className="flex flex-wrap items-center gap-4 p-4 bg-[#1c2936] border border-border-dark rounded-lg">
|
||||
{/* Search */}
|
||||
<div className="flex-1 min-w-[200px] relative">
|
||||
<Search className="absolute left-3 top-1/2 transform -translate-y-1/2 text-text-secondary" size={18} />
|
||||
<input
|
||||
type="text"
|
||||
placeholder="Search by job name..."
|
||||
value={searchQuery}
|
||||
onChange={(e) => {
|
||||
setSearchQuery(e.target.value)
|
||||
setPage(1)
|
||||
}}
|
||||
className="w-full pl-10 pr-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm placeholder-text-secondary focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Status Filter */}
|
||||
<select
|
||||
value={statusFilter}
|
||||
onChange={(e) => {
|
||||
setStatusFilter(e.target.value)
|
||||
setPage(1)
|
||||
}}
|
||||
className="px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent appearance-none cursor-pointer"
|
||||
style={{
|
||||
backgroundImage: `url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 12 12'%3E%3Cpath fill='%23ffffff' d='M6 9L1 4h10z'/%3E%3C/svg%3E")`,
|
||||
backgroundRepeat: 'no-repeat',
|
||||
backgroundPosition: 'right 0.75rem center',
|
||||
paddingRight: '2.5rem',
|
||||
}}
|
||||
>
|
||||
<option value="">All Status</option>
|
||||
<option value="Running">Running</option>
|
||||
<option value="Completed">Completed</option>
|
||||
<option value="Failed">Failed</option>
|
||||
<option value="Canceled">Canceled</option>
|
||||
<option value="Waiting">Waiting</option>
|
||||
</select>
|
||||
|
||||
{/* Job Type Filter */}
|
||||
<select
|
||||
value={jobTypeFilter}
|
||||
onChange={(e) => {
|
||||
setJobTypeFilter(e.target.value)
|
||||
setPage(1)
|
||||
}}
|
||||
className="px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent appearance-none cursor-pointer"
|
||||
style={{
|
||||
backgroundImage: `url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 12 12'%3E%3Cpath fill='%23ffffff' d='M6 9L1 4h10z'/%3E%3C/svg%3E")`,
|
||||
backgroundRepeat: 'no-repeat',
|
||||
backgroundPosition: 'right 0.75rem center',
|
||||
paddingRight: '2.5rem',
|
||||
}}
|
||||
>
|
||||
<option value="">All Types</option>
|
||||
<option value="Backup">Backup</option>
|
||||
<option value="Restore">Restore</option>
|
||||
<option value="Verify">Verify</option>
|
||||
<option value="Copy">Copy</option>
|
||||
<option value="Migrate">Migrate</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
{/* Jobs Table */}
|
||||
<div className="rounded-lg border border-border-dark bg-[#1c2936] overflow-hidden shadow-sm">
|
||||
{isLoading ? (
|
||||
<div className="p-8 text-center text-text-secondary">Loading jobs...</div>
|
||||
) : error ? (
|
||||
<div className="p-8 text-center text-red-400">Failed to load jobs</div>
|
||||
) : jobs.length === 0 ? (
|
||||
<div className="p-12 text-center">
|
||||
<p className="text-text-secondary">No jobs found</p>
|
||||
</div>
|
||||
) : (
|
||||
<>
|
||||
<div className="overflow-x-auto">
|
||||
<table className="w-full text-left border-collapse">
|
||||
<thead>
|
||||
<tr className="bg-[#111a22] border-b border-border-dark text-text-secondary text-xs uppercase tracking-wider">
|
||||
<th className="px-6 py-4 font-semibold">Status</th>
|
||||
<th className="px-6 py-4 font-semibold">Job ID</th>
|
||||
<th className="px-6 py-4 font-semibold">Job Name</th>
|
||||
<th className="px-6 py-4 font-semibold">Client</th>
|
||||
<th className="px-6 py-4 font-semibold">Type</th>
|
||||
<th className="px-6 py-4 font-semibold">Level</th>
|
||||
<th className="px-6 py-4 font-semibold">Duration</th>
|
||||
<th className="px-6 py-4 font-semibold">Bytes</th>
|
||||
<th className="px-6 py-4 font-semibold">Files</th>
|
||||
<th className="px-6 py-4 font-semibold text-right">Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody className="divide-y divide-border-dark text-sm">
|
||||
{jobs.map((job) => (
|
||||
<tr key={job.id} className="hover:bg-[#111a22]/50 transition-colors">
|
||||
<td className="px-6 py-4">{getStatusBadge(job.status)}</td>
|
||||
<td className="px-6 py-4 text-text-secondary font-mono">{job.job_id}</td>
|
||||
<td className="px-6 py-4 text-white font-medium">{job.job_name}</td>
|
||||
<td className="px-6 py-4 text-text-secondary">{job.client_name}</td>
|
||||
<td className="px-6 py-4 text-text-secondary">{job.job_type}</td>
|
||||
<td className="px-6 py-4 text-text-secondary">{job.job_level}</td>
|
||||
<td className="px-6 py-4 text-text-secondary font-mono">{formatDuration(job.duration_seconds)}</td>
|
||||
<td className="px-6 py-4 text-text-secondary font-mono">{formatBytes(job.bytes_written)}</td>
|
||||
<td className="px-6 py-4 text-text-secondary">{job.files_written.toLocaleString()}</td>
|
||||
<td className="px-6 py-4 text-right">
|
||||
<button className="text-text-secondary hover:text-white p-1 rounded hover:bg-[#111a22] transition-colors">
|
||||
<span className="material-symbols-outlined text-[20px]">more_vert</span>
|
||||
</button>
|
||||
</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{/* Pagination */}
|
||||
<div className="bg-[#111a22] border-t border-border-dark px-6 py-3 flex items-center justify-between">
|
||||
<p className="text-text-secondary text-xs">
|
||||
Showing {(page - 1) * limit + 1}-{Math.min(page * limit, total)} of {total} jobs
|
||||
</p>
|
||||
<div className="flex gap-2">
|
||||
<button
|
||||
onClick={() => setPage(p => Math.max(1, p - 1))}
|
||||
disabled={page === 1}
|
||||
className="p-1 rounded text-text-secondary hover:text-white disabled:opacity-50 hover:bg-[#1c2936]"
|
||||
>
|
||||
<span className="material-symbols-outlined text-base">chevron_left</span>
|
||||
</button>
|
||||
<button
|
||||
onClick={() => setPage(p => Math.min(totalPages, p + 1))}
|
||||
disabled={page >= totalPages}
|
||||
className="p-1 rounded text-text-secondary hover:text-white disabled:opacity-50 hover:bg-[#1c2936]"
|
||||
>
|
||||
<span className="material-symbols-outlined text-base">chevron_right</span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Create Job Form Modal */}
|
||||
{showCreateForm && (
|
||||
<CreateJobForm
|
||||
onClose={() => setShowCreateForm(false)}
|
||||
onSuccess={async () => {
|
||||
setShowCreateForm(false)
|
||||
await queryClient.invalidateQueries({ queryKey: ['backup-jobs'] })
|
||||
await queryClient.refetchQueries({ queryKey: ['backup-jobs'] })
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
// Create Job Form Component
|
||||
interface CreateJobFormProps {
|
||||
onClose: () => void
|
||||
onSuccess: () => void
|
||||
}
|
||||
|
||||
function CreateJobForm({ onClose, onSuccess }: CreateJobFormProps) {
|
||||
const [formData, setFormData] = useState({
|
||||
job_name: '',
|
||||
client_name: '',
|
||||
job_type: 'Backup',
|
||||
job_level: 'Full',
|
||||
storage_name: '',
|
||||
pool_name: '',
|
||||
})
|
||||
const [error, setError] = useState<string | null>(null)
|
||||
|
||||
const createJobMutation = useMutation({
|
||||
mutationFn: backupAPI.createJob,
|
||||
onSuccess: () => {
|
||||
onSuccess()
|
||||
},
|
||||
onError: (err: any) => {
|
||||
setError(err.response?.data?.error || 'Failed to create job')
|
||||
},
|
||||
})
|
||||
|
||||
const handleSubmit = (e: React.FormEvent) => {
|
||||
e.preventDefault()
|
||||
setError(null)
|
||||
|
||||
const payload: any = {
|
||||
job_name: formData.job_name,
|
||||
client_name: formData.client_name,
|
||||
job_type: formData.job_type,
|
||||
job_level: formData.job_level,
|
||||
}
|
||||
|
||||
if (formData.storage_name) {
|
||||
payload.storage_name = formData.storage_name
|
||||
}
|
||||
if (formData.pool_name) {
|
||||
payload.pool_name = formData.pool_name
|
||||
}
|
||||
|
||||
createJobMutation.mutate(payload)
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="fixed inset-0 bg-black/50 backdrop-blur-sm z-50 flex items-center justify-center p-4">
|
||||
<div className="bg-[#1c2936] border border-border-dark rounded-lg shadow-xl w-full max-w-2xl max-h-[90vh] overflow-y-auto">
|
||||
{/* Header */}
|
||||
<div className="flex items-center justify-between p-6 border-b border-border-dark">
|
||||
<h2 className="text-white text-xl font-bold">Create Backup Job</h2>
|
||||
<button
|
||||
onClick={onClose}
|
||||
className="text-text-secondary hover:text-white transition-colors"
|
||||
>
|
||||
<X size={20} />
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Form */}
|
||||
<form onSubmit={handleSubmit} className="p-6 space-y-4">
|
||||
{error && (
|
||||
<div className="p-3 bg-red-500/10 border border-red-500/20 rounded-lg text-red-400 text-sm">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Job Name */}
|
||||
<div>
|
||||
<label className="block text-white text-sm font-semibold mb-2">
|
||||
Job Name <span className="text-red-400">*</span>
|
||||
</label>
|
||||
<input
|
||||
type="text"
|
||||
required
|
||||
value={formData.job_name}
|
||||
onChange={(e) => setFormData({ ...formData, job_name: e.target.value })}
|
||||
className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent"
|
||||
placeholder="e.g., DailyBackup"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Client Name */}
|
||||
<div>
|
||||
<label className="block text-white text-sm font-semibold mb-2">
|
||||
Client Name <span className="text-red-400">*</span>
|
||||
</label>
|
||||
<input
|
||||
type="text"
|
||||
required
|
||||
value={formData.client_name}
|
||||
onChange={(e) => setFormData({ ...formData, client_name: e.target.value })}
|
||||
className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent"
|
||||
placeholder="e.g., filesrv-02"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Job Type & Level */}
|
||||
<div className="grid grid-cols-2 gap-4">
|
||||
<div>
|
||||
<label className="block text-white text-sm font-semibold mb-2">
|
||||
Job Type <span className="text-red-400">*</span>
|
||||
</label>
|
||||
<select
|
||||
required
|
||||
value={formData.job_type}
|
||||
onChange={(e) => setFormData({ ...formData, job_type: e.target.value })}
|
||||
className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent appearance-none cursor-pointer"
|
||||
style={{
|
||||
backgroundImage: `url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 12 12'%3E%3Cpath fill='%23ffffff' d='M6 9L1 4h10z'/%3E%3C/svg%3E")`,
|
||||
backgroundRepeat: 'no-repeat',
|
||||
backgroundPosition: 'right 0.75rem center',
|
||||
paddingRight: '2.5rem',
|
||||
}}
|
||||
>
|
||||
<option value="Backup">Backup</option>
|
||||
<option value="Restore">Restore</option>
|
||||
<option value="Verify">Verify</option>
|
||||
<option value="Copy">Copy</option>
|
||||
<option value="Migrate">Migrate</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<label className="block text-white text-sm font-semibold mb-2">
|
||||
Job Level <span className="text-red-400">*</span>
|
||||
</label>
|
||||
<select
|
||||
required
|
||||
value={formData.job_level}
|
||||
onChange={(e) => setFormData({ ...formData, job_level: e.target.value })}
|
||||
className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent appearance-none cursor-pointer"
|
||||
style={{
|
||||
backgroundImage: `url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='0 0 12 12'%3E%3Cpath fill='%23ffffff' d='M6 9L1 4h10z'/%3E%3C/svg%3E")`,
|
||||
backgroundRepeat: 'no-repeat',
|
||||
backgroundPosition: 'right 0.75rem center',
|
||||
paddingRight: '2.5rem',
|
||||
}}
|
||||
>
|
||||
<option value="Full">Full</option>
|
||||
<option value="Incremental">Incremental</option>
|
||||
<option value="Differential">Differential</option>
|
||||
<option value="Since">Since</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Storage Name */}
|
||||
<div>
|
||||
<label className="block text-white text-sm font-semibold mb-2">
|
||||
Storage Name (Optional)
|
||||
</label>
|
||||
<input
|
||||
type="text"
|
||||
value={formData.storage_name}
|
||||
onChange={(e) => setFormData({ ...formData, storage_name: e.target.value })}
|
||||
className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent"
|
||||
placeholder="e.g., backup-srv-01"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Pool Name */}
|
||||
<div>
|
||||
<label className="block text-white text-sm font-semibold mb-2">
|
||||
Pool Name (Optional)
|
||||
</label>
|
||||
<input
|
||||
type="text"
|
||||
value={formData.pool_name}
|
||||
onChange={(e) => setFormData({ ...formData, pool_name: e.target.value })}
|
||||
className="w-full px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent"
|
||||
placeholder="e.g., Default"
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Actions */}
|
||||
<div className="flex items-center justify-end gap-3 pt-4 border-t border-border-dark">
|
||||
<button
|
||||
type="button"
|
||||
onClick={onClose}
|
||||
className="px-4 py-2 bg-[#111a22] border border-border-dark rounded-lg text-white text-sm font-semibold hover:bg-[#1c2936] transition-colors"
|
||||
>
|
||||
Cancel
|
||||
</button>
|
||||
<button
|
||||
type="submit"
|
||||
disabled={createJobMutation.isPending}
|
||||
className="px-4 py-2 bg-primary text-white rounded-lg text-sm font-semibold hover:bg-primary/90 transition-colors disabled:opacity-50 disabled:cursor-not-allowed"
|
||||
>
|
||||
{createJobMutation.isPending ? 'Creating...' : 'Create Job'}
|
||||
</button>
|
||||
</div>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ import { scstAPI, type SCSTHandler } from '@/api/scst'
|
||||
import { Card, CardContent, CardHeader, CardTitle, CardDescription } from '@/components/ui/card'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { ArrowLeft, Plus, RefreshCw, HardDrive, Users } from 'lucide-react'
|
||||
import { useState } from 'react'
|
||||
import { useState, useEffect } from 'react'
|
||||
|
||||
export default function ISCSITargetDetail() {
|
||||
const { id } = useParams<{ id: string }>()
|
||||
@@ -13,6 +13,10 @@ export default function ISCSITargetDetail() {
|
||||
const [showAddLUN, setShowAddLUN] = useState(false)
|
||||
const [showAddInitiator, setShowAddInitiator] = useState(false)
|
||||
|
||||
useEffect(() => {
|
||||
console.log('showAddLUN state:', showAddLUN)
|
||||
}, [showAddLUN])
|
||||
|
||||
const { data, isLoading } = useQuery({
|
||||
queryKey: ['scst-target', id],
|
||||
queryFn: () => scstAPI.getTarget(id!),
|
||||
@@ -22,6 +26,8 @@ export default function ISCSITargetDetail() {
|
||||
const { data: handlers } = useQuery<SCSTHandler[]>({
|
||||
queryKey: ['scst-handlers'],
|
||||
queryFn: scstAPI.listHandlers,
|
||||
staleTime: 0, // Always fetch fresh data
|
||||
refetchOnMount: true,
|
||||
})
|
||||
|
||||
if (isLoading) {
|
||||
@@ -33,6 +39,8 @@ export default function ISCSITargetDetail() {
|
||||
}
|
||||
|
||||
const { target, luns } = data
|
||||
// Ensure luns is always an array, not null
|
||||
const lunsArray = luns || []
|
||||
|
||||
return (
|
||||
<div className="space-y-6 min-h-screen bg-background-dark p-6">
|
||||
@@ -91,12 +99,12 @@ export default function ISCSITargetDetail() {
|
||||
<div className="space-y-2">
|
||||
<div className="flex justify-between">
|
||||
<span className="text-text-secondary">Total LUNs:</span>
|
||||
<span className="font-medium text-white">{luns.length}</span>
|
||||
<span className="font-medium text-white">{lunsArray.length}</span>
|
||||
</div>
|
||||
<div className="flex justify-between">
|
||||
<span className="text-text-secondary">Active:</span>
|
||||
<span className="font-medium text-white">
|
||||
{luns.filter((l) => l.is_active).length}
|
||||
{lunsArray.filter((l) => l.is_active).length}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
@@ -140,14 +148,22 @@ export default function ISCSITargetDetail() {
|
||||
<CardTitle>LUNs (Logical Unit Numbers)</CardTitle>
|
||||
<CardDescription>Storage devices exported by this target</CardDescription>
|
||||
</div>
|
||||
<Button variant="outline" size="sm" onClick={() => setShowAddLUN(true)}>
|
||||
<Button
|
||||
variant="outline"
|
||||
size="sm"
|
||||
onClick={(e) => {
|
||||
e.stopPropagation()
|
||||
console.log('Add LUN button clicked, setting showAddLUN to true')
|
||||
setShowAddLUN(true)
|
||||
}}
|
||||
>
|
||||
<Plus className="h-4 w-4 mr-2" />
|
||||
Add LUN
|
||||
</Button>
|
||||
</div>
|
||||
</CardHeader>
|
||||
<CardContent>
|
||||
{luns.length > 0 ? (
|
||||
{lunsArray.length > 0 ? (
|
||||
<div className="overflow-x-auto">
|
||||
<table className="min-w-full divide-y divide-gray-200">
|
||||
<thead className="bg-[#1a2632]">
|
||||
@@ -170,7 +186,7 @@ export default function ISCSITargetDetail() {
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody className="bg-card-dark divide-y divide-border-dark">
|
||||
{luns.map((lun) => (
|
||||
{lunsArray.map((lun) => (
|
||||
<tr key={lun.id} className="hover:bg-[#233648]">
|
||||
<td className="px-6 py-4 whitespace-nowrap text-sm font-medium text-white">
|
||||
{lun.lun_number}
|
||||
@@ -204,7 +220,14 @@ export default function ISCSITargetDetail() {
|
||||
<div className="text-center py-8">
|
||||
<HardDrive className="h-12 w-12 text-gray-400 mx-auto mb-4" />
|
||||
<p className="text-sm text-text-secondary mb-4">No LUNs configured</p>
|
||||
<Button variant="outline" onClick={() => setShowAddLUN(true)}>
|
||||
<Button
|
||||
variant="outline"
|
||||
onClick={(e) => {
|
||||
e.stopPropagation()
|
||||
console.log('Add First LUN button clicked, setting showAddLUN to true')
|
||||
setShowAddLUN(true)
|
||||
}}
|
||||
>
|
||||
<Plus className="h-4 w-4 mr-2" />
|
||||
Add First LUN
|
||||
</Button>
|
||||
@@ -254,12 +277,21 @@ function AddLUNForm({ targetId, handlers, onClose, onSuccess }: AddLUNFormProps)
|
||||
const [deviceName, setDeviceName] = useState('')
|
||||
const [lunNumber, setLunNumber] = useState(0)
|
||||
|
||||
useEffect(() => {
|
||||
console.log('AddLUNForm mounted, targetId:', targetId, 'handlers:', handlers)
|
||||
}, [targetId, handlers])
|
||||
|
||||
const addLUNMutation = useMutation({
|
||||
mutationFn: (data: { device_name: string; device_path: string; lun_number: number; handler_type: string }) =>
|
||||
scstAPI.addLUN(targetId, data),
|
||||
onSuccess: () => {
|
||||
onSuccess()
|
||||
},
|
||||
onError: (error: any) => {
|
||||
console.error('Failed to add LUN:', error)
|
||||
const errorMessage = error.response?.data?.error || error.message || 'Failed to add LUN'
|
||||
alert(errorMessage)
|
||||
},
|
||||
})
|
||||
|
||||
const handleSubmit = (e: React.FormEvent) => {
|
||||
@@ -278,35 +310,62 @@ function AddLUNForm({ targetId, handlers, onClose, onSuccess }: AddLUNFormProps)
|
||||
}
|
||||
|
||||
return (
|
||||
<Card>
|
||||
<CardHeader>
|
||||
<CardTitle>Add LUN</CardTitle>
|
||||
<CardDescription>Add a storage device to this target</CardDescription>
|
||||
</CardHeader>
|
||||
<CardContent>
|
||||
<form onSubmit={handleSubmit} className="space-y-4">
|
||||
<div className="fixed inset-0 bg-black/50 z-50 flex items-center justify-center p-4">
|
||||
<div className="bg-card-dark border border-border-dark rounded-xl max-w-2xl w-full max-h-[90vh] overflow-y-auto">
|
||||
<div className="p-6 border-b border-border-dark">
|
||||
<h2 className="text-xl font-bold text-white">Add LUN</h2>
|
||||
<p className="text-sm text-text-secondary mt-1">Bind a ZFS volume or storage device to this target</p>
|
||||
</div>
|
||||
<form onSubmit={handleSubmit} className="p-6 space-y-4">
|
||||
<div>
|
||||
<label htmlFor="handlerType" className="block text-sm font-medium text-gray-700 mb-1">
|
||||
<label htmlFor="handlerType" className="block text-sm font-medium text-white mb-1">
|
||||
Handler Type *
|
||||
</label>
|
||||
<select
|
||||
id="handlerType"
|
||||
value={handlerType}
|
||||
onChange={(e) => setHandlerType(e.target.value)}
|
||||
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500"
|
||||
className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary"
|
||||
required
|
||||
>
|
||||
<option value="">Select a handler</option>
|
||||
{handlers.map((h) => (
|
||||
<option key={h.name} value={h.name}>
|
||||
{h.name} {h.description && `- ${h.description}`}
|
||||
{h.label || h.name}
|
||||
</option>
|
||||
))}
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<label htmlFor="deviceName" className="block text-sm font-medium text-gray-700 mb-1">
|
||||
<label htmlFor="devicePath" className="block text-sm font-medium text-white mb-1">
|
||||
ZFS Volume Path *
|
||||
</label>
|
||||
<input
|
||||
id="devicePath"
|
||||
type="text"
|
||||
value={devicePath}
|
||||
onChange={(e) => {
|
||||
const path = e.target.value.trim()
|
||||
setDevicePath(path)
|
||||
// Auto-generate device name from path (e.g., /dev/zvol/pool/volume -> volume)
|
||||
if (path && !deviceName) {
|
||||
const parts = path.split('/')
|
||||
const name = parts[parts.length - 1] || parts[parts.length - 2] || 'device'
|
||||
setDeviceName(name)
|
||||
}
|
||||
}}
|
||||
placeholder="/dev/zvol/pool/volume or /dev/sda"
|
||||
className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary font-mono"
|
||||
required
|
||||
/>
|
||||
<p className="mt-1 text-xs text-text-secondary">
|
||||
Enter ZFS volume path (e.g., /dev/zvol/pool/volume) or block device path
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<label htmlFor="deviceName" className="block text-sm font-medium text-white mb-1">
|
||||
Device Name *
|
||||
</label>
|
||||
<input
|
||||
@@ -315,28 +374,16 @@ function AddLUNForm({ targetId, handlers, onClose, onSuccess }: AddLUNFormProps)
|
||||
value={deviceName}
|
||||
onChange={(e) => setDeviceName(e.target.value)}
|
||||
placeholder="device1"
|
||||
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500"
|
||||
className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary"
|
||||
required
|
||||
/>
|
||||
<p className="mt-1 text-xs text-text-secondary">
|
||||
Logical name for this device in SCST (auto-filled from volume path)
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<label htmlFor="devicePath" className="block text-sm font-medium text-gray-700 mb-1">
|
||||
Device Path *
|
||||
</label>
|
||||
<input
|
||||
id="devicePath"
|
||||
type="text"
|
||||
value={devicePath}
|
||||
onChange={(e) => setDevicePath(e.target.value)}
|
||||
placeholder="/dev/sda or /dev/calypso/vg1/lv1"
|
||||
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 font-mono text-sm"
|
||||
required
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<label htmlFor="lunNumber" className="block text-sm font-medium text-gray-700 mb-1">
|
||||
<label htmlFor="lunNumber" className="block text-sm font-medium text-white mb-1">
|
||||
LUN Number *
|
||||
</label>
|
||||
<input
|
||||
@@ -345,12 +392,15 @@ function AddLUNForm({ targetId, handlers, onClose, onSuccess }: AddLUNFormProps)
|
||||
value={lunNumber}
|
||||
onChange={(e) => setLunNumber(parseInt(e.target.value) || 0)}
|
||||
min="0"
|
||||
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500"
|
||||
className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary"
|
||||
required
|
||||
/>
|
||||
<p className="mt-1 text-xs text-text-secondary">
|
||||
Logical Unit Number (0-255, typically start from 0)
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div className="flex justify-end gap-2">
|
||||
<div className="flex justify-end gap-2 pt-4 border-t border-border-dark">
|
||||
<Button type="button" variant="outline" onClick={onClose}>
|
||||
Cancel
|
||||
</Button>
|
||||
@@ -359,8 +409,8 @@ function AddLUNForm({ targetId, handlers, onClose, onSuccess }: AddLUNFormProps)
|
||||
</Button>
|
||||
</div>
|
||||
</form>
|
||||
</CardContent>
|
||||
</Card>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -394,7 +394,13 @@ export default function TapeLibraries() {
|
||||
</div>
|
||||
</td>
|
||||
<td className="py-4 px-6">
|
||||
<div className="flex items-center gap-2 group/copy cursor-pointer">
|
||||
<div
|
||||
className="flex items-center gap-2 group/copy cursor-pointer"
|
||||
onClick={() => {
|
||||
const iqn = `iqn.2023-10.com.vtl:${library.name.toLowerCase().replace(/\s+/g, '')}`
|
||||
navigator.clipboard.writeText(iqn)
|
||||
}}
|
||||
>
|
||||
<code className="text-xs text-text-secondary font-mono bg-[#111a22] px-2 py-1 rounded border border-border-dark group-hover/copy:text-white transition-colors">
|
||||
iqn.2023-10.com.vtl:{library.name.toLowerCase().replace(/\s+/g, '')}
|
||||
</code>
|
||||
|
||||
25
scripts/rebuild-and-restart.sh
Executable file
25
scripts/rebuild-and-restart.sh
Executable file
@@ -0,0 +1,25 @@
|
||||
#!/bin/bash
#
# Rebuild the Calypso backend and frontend, then restart both systemd
# services and print their final status.
#
# -e: abort on the first failing command
# -u: treat expansion of unset variables as an error
# -o pipefail: a failure anywhere in a pipeline fails the pipeline
# (plain `set -e` alone misses pipeline failures and unset-variable typos)
set -euo pipefail

echo "🔨 Rebuilding Calypso API..."
cd /development/calypso/backend
make build

echo ""
echo "🔨 Rebuilding Calypso Frontend..."
cd /development/calypso/frontend
npm run build

echo ""
echo "🔄 Restarting API service..."
systemctl restart calypso-api.service

echo "🔄 Restarting Frontend service..."
systemctl restart calypso-frontend.service

echo ""
echo "✅ Build and restart complete!"
echo ""

# Show the final state of both services. `systemctl status` exits non-zero
# if either unit is inactive/failed, which `set -e` propagates as this
# script's exit status — useful for CI callers.
systemctl status calypso-api.service calypso-frontend.service --no-pager
|
||||
Reference in New Issue
Block a user