diff --git a/backend/bin/calypso-api b/backend/bin/calypso-api index b9d9acb..16a97bd 100755 Binary files a/backend/bin/calypso-api and b/backend/bin/calypso-api differ diff --git a/backend/internal/backup/handler.go b/backend/internal/backup/handler.go index 08d0ec1..982dadf 100644 --- a/backend/internal/backup/handler.go +++ b/backend/internal/backup/handler.go @@ -175,3 +175,209 @@ func (h *Handler) ListClients(c *gin.Context) { "total": len(clients), }) } + +// GetDashboardStats returns dashboard statistics +func (h *Handler) GetDashboardStats(c *gin.Context) { + stats, err := h.service.GetDashboardStats(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to get dashboard stats", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get dashboard stats"}) + return + } + + c.JSON(http.StatusOK, stats) +} + +// ListStoragePools lists all storage pools +func (h *Handler) ListStoragePools(c *gin.Context) { + pools, err := h.service.ListStoragePools(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to list storage pools", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list storage pools"}) + return + } + + if pools == nil { + pools = []StoragePool{} + } + + h.logger.Info("Listed storage pools", "count", len(pools)) + c.JSON(http.StatusOK, gin.H{ + "pools": pools, + "total": len(pools), + }) +} + +// ListStorageVolumes lists all storage volumes +func (h *Handler) ListStorageVolumes(c *gin.Context) { + poolName := c.Query("pool_name") + + volumes, err := h.service.ListStorageVolumes(c.Request.Context(), poolName) + if err != nil { + h.logger.Error("Failed to list storage volumes", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list storage volumes"}) + return + } + + if volumes == nil { + volumes = []StorageVolume{} + } + + c.JSON(http.StatusOK, gin.H{ + "volumes": volumes, + "total": len(volumes), + }) +} + +// ListStorageDaemons lists all storage daemons +func (h *Handler) ListStorageDaemons(c *gin.Context) { + daemons, err := h.service.ListStorageDaemons(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to list storage daemons", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list storage daemons"}) + return + } + + if daemons == nil { + daemons = []StorageDaemon{} + } + + c.JSON(http.StatusOK, gin.H{ + "daemons": daemons, + "total": len(daemons), + }) +} + +// CreateStoragePool creates a new storage pool +func (h *Handler) CreateStoragePool(c *gin.Context) { + var req CreatePoolRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + pool, err := h.service.CreateStoragePool(c.Request.Context(), req) + if err != nil { + h.logger.Error("Failed to create storage pool", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusCreated, pool) +} + +// DeleteStoragePool deletes a storage pool +func (h *Handler) DeleteStoragePool(c *gin.Context) { + idStr := c.Param("id") + if idStr == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "pool ID is required"}) + return + } + + var poolID int + if _, err := fmt.Sscanf(idStr, "%d", &poolID); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid pool ID"}) + return + } + + err := h.service.DeleteStoragePool(c.Request.Context(), poolID) + if err != nil { + h.logger.Error("Failed to delete storage pool", "error", err, "pool_id", 
poolID) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "pool deleted successfully"}) +} + +// CreateStorageVolume creates a new storage volume +func (h *Handler) CreateStorageVolume(c *gin.Context) { + var req CreateVolumeRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + volume, err := h.service.CreateStorageVolume(c.Request.Context(), req) + if err != nil { + h.logger.Error("Failed to create storage volume", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusCreated, volume) +} + +// UpdateStorageVolume updates a storage volume +func (h *Handler) UpdateStorageVolume(c *gin.Context) { + idStr := c.Param("id") + if idStr == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "volume ID is required"}) + return + } + + var volumeID int + if _, err := fmt.Sscanf(idStr, "%d", &volumeID); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid volume ID"}) + return + } + + var req UpdateVolumeRequest + if err := c.ShouldBindJSON(&req); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + volume, err := h.service.UpdateStorageVolume(c.Request.Context(), volumeID, req) + if err != nil { + h.logger.Error("Failed to update storage volume", "error", err, "volume_id", volumeID) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, volume) +} + +// DeleteStorageVolume deletes a storage volume +func (h *Handler) DeleteStorageVolume(c *gin.Context) { + idStr := c.Param("id") + if idStr == "" { + c.JSON(http.StatusBadRequest, gin.H{"error": "volume ID is required"}) + return + } + + var volumeID int + if _, err := fmt.Sscanf(idStr, "%d", &volumeID); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": "invalid volume ID"}) + return + } + + err := h.service.DeleteStorageVolume(c.Request.Context(), volumeID) + if err != nil { + h.logger.Error("Failed to delete storage volume", "error", err, "volume_id", volumeID) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + c.JSON(http.StatusOK, gin.H{"message": "volume deleted successfully"}) +} + +// ListMedia lists all media from bconsole "list media" command +func (h *Handler) ListMedia(c *gin.Context) { + media, err := h.service.ListMedia(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to list media", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + + if media == nil { + media = []Media{} + } + + h.logger.Info("Listed media", "count", len(media)) + c.JSON(http.StatusOK, gin.H{ + "media": media, + "total": len(media), + }) +} diff --git a/backend/internal/backup/service.go b/backend/internal/backup/service.go index 69d6119..d26eec7 100644 --- a/backend/internal/backup/service.go +++ b/backend/internal/backup/service.go @@ -99,6 +99,64 @@ type ListClientsOptions struct { Search string // Search by client name } +// DashboardStats represents statistics for the backup dashboard +type DashboardStats struct { + DirectorStatus string `json:"director_status"` // "Active" or "Inactive" + DirectorUptime string `json:"director_uptime"` // e.g., "14d 2h 12m" + LastJob *Job `json:"last_job,omitempty"` + ActiveJobsCount int `json:"active_jobs_count"` + DefaultPool *PoolStats `json:"default_pool,omitempty"` +} + +// PoolStats represents 
pool storage statistics +type PoolStats struct { + Name string `json:"name"` + UsedBytes int64 `json:"used_bytes"` + TotalBytes int64 `json:"total_bytes"` + UsagePercent float64 `json:"usage_percent"` +} + +// StoragePool represents a Bacula storage pool +type StoragePool struct { + PoolID int `json:"pool_id"` + Name string `json:"name"` + PoolType string `json:"pool_type"` + LabelFormat *string `json:"label_format,omitempty"` + Recycle *bool `json:"recycle,omitempty"` + AutoPrune *bool `json:"auto_prune,omitempty"` + VolumeCount int `json:"volume_count"` + UsedBytes int64 `json:"used_bytes"` + TotalBytes int64 `json:"total_bytes"` + UsagePercent float64 `json:"usage_percent"` +} + +// StorageVolume represents a Bacula storage volume +type StorageVolume struct { + VolumeID int `json:"volume_id"` + MediaID int `json:"media_id"` + VolumeName string `json:"volume_name"` + PoolName string `json:"pool_name"` + MediaType string `json:"media_type"` + VolStatus string `json:"vol_status"` // Full, Append, Used, Error, etc. + VolBytes int64 `json:"vol_bytes"` + MaxVolBytes int64 `json:"max_vol_bytes"` + VolFiles int `json:"vol_files"` + VolRetention *time.Time `json:"vol_retention,omitempty"` + LastWritten *time.Time `json:"last_written,omitempty"` + RecycleCount int `json:"recycle_count"` +} + +// StorageDaemon represents a Bacula storage daemon +type StorageDaemon struct { + StorageID int `json:"storage_id"` + Name string `json:"name"` + Address string `json:"address"` + Port int `json:"port"` + DeviceName string `json:"device_name"` + MediaType string `json:"media_type"` + Status string `json:"status"` // Online, Offline +} + // SyncJobsFromBacula syncs jobs from Bacula/Bareos to the database // Tries to query Bacula database directly first, falls back to bconsole if database access fails func (s *Service) SyncJobsFromBacula(ctx context.Context) error { @@ -1115,3 +1173,1150 @@ func (s *Service) CreateJob(ctx context.Context, req CreateJobRequest) (*Job, er return &job, nil } + +// GetDashboardStats returns statistics for the backup dashboard +func (s *Service) GetDashboardStats(ctx context.Context) (*DashboardStats, error) { + stats := &DashboardStats{ + DirectorStatus: "Active", // Default to active + ActiveJobsCount: 0, + } + + // Get director status and uptime from bconsole + output, err := s.ExecuteBconsoleCommand(ctx, "status director") + if err == nil && len(output) > 0 { + // If bconsole returns output, director is active + // Parse output to extract uptime + lines := strings.Split(output, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + // Look for "Daemon started" line which contains uptime info + if strings.Contains(line, "Daemon started") { + stats.DirectorStatus = "Active" + stats.DirectorUptime = s.parseUptimeFromStatus(line) + break + } + // Also check for version line as indicator of active director + if strings.Contains(line, "Version:") { + stats.DirectorStatus = "Active" + } + } + + // If we didn't find uptime yet, try to parse from any date in the output + if stats.DirectorUptime == "" { + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.Contains(line, "started") || strings.Contains(line, "since") { + uptime := s.parseUptimeFromStatus(line) + if uptime != "Unknown" { + stats.DirectorUptime = uptime + break + } + } + } + } + + // If still no uptime, set default + if stats.DirectorUptime == "" { + stats.DirectorUptime = "Active" + } + } else { + s.logger.Warn("Failed to get director status from bconsole", "error", err) + 
stats.DirectorStatus = "Inactive" + stats.DirectorUptime = "Unknown" + } + + // Get last completed job + lastJobQuery := ` + SELECT id, job_id, job_name, client_name, job_type, job_level, status, + bytes_written, files_written, started_at, ended_at, created_at, updated_at + FROM backup_jobs + WHERE status = 'Completed' + ORDER BY ended_at DESC NULLS LAST, started_at DESC + LIMIT 1 + ` + var lastJob Job + var startedAt, endedAt sql.NullTime + err = s.db.QueryRowContext(ctx, lastJobQuery).Scan( + &lastJob.ID, &lastJob.JobID, &lastJob.JobName, &lastJob.ClientName, + &lastJob.JobType, &lastJob.JobLevel, &lastJob.Status, + &lastJob.BytesWritten, &lastJob.FilesWritten, + &startedAt, &endedAt, &lastJob.CreatedAt, &lastJob.UpdatedAt, + ) + if err == nil { + if startedAt.Valid { + lastJob.StartedAt = &startedAt.Time + } + if endedAt.Valid { + lastJob.EndedAt = &endedAt.Time + } + // Calculate duration + if lastJob.StartedAt != nil && lastJob.EndedAt != nil { + duration := int(lastJob.EndedAt.Sub(*lastJob.StartedAt).Seconds()) + lastJob.DurationSeconds = &duration + } + stats.LastJob = &lastJob + } else if err != sql.ErrNoRows { + s.logger.Warn("Failed to get last job", "error", err) + } + + // Count active (running) jobs + activeJobsQuery := `SELECT COUNT(*) FROM backup_jobs WHERE status = 'Running'` + err = s.db.QueryRowContext(ctx, activeJobsQuery).Scan(&stats.ActiveJobsCount) + if err != nil { + s.logger.Warn("Failed to count active jobs", "error", err) + } + + // Get default pool stats from Bacula database + if s.baculaDB != nil { + poolStats, err := s.getDefaultPoolStats(ctx) + if err == nil { + stats.DefaultPool = poolStats + } else { + s.logger.Warn("Failed to get pool stats", "error", err) + } + } + + return stats, nil +} + +// parseUptimeFromStatus parses uptime from bconsole status output +func (s *Service) parseUptimeFromStatus(line string) string { + // Look for "Daemon started" pattern: "Daemon started 28-Dec-25 01:45" + // Bacula format: "28-Dec-25 01:45" or "30-Dec-2025 02:24:58" + + // Try to find date pattern in the line + // Format: "DD-MMM-YY HH:MM" or "DD-MMM-YYYY HH:MM:SS" + words := strings.Fields(line) + for i := 0; i < len(words); i++ { + word := words[i] + // Check if word looks like a date (contains "-" and month abbreviation) + if strings.Contains(word, "-") && (strings.Contains(word, "Jan") || + strings.Contains(word, "Feb") || strings.Contains(word, "Mar") || + strings.Contains(word, "Apr") || strings.Contains(word, "May") || + strings.Contains(word, "Jun") || strings.Contains(word, "Jul") || + strings.Contains(word, "Aug") || strings.Contains(word, "Sep") || + strings.Contains(word, "Oct") || strings.Contains(word, "Nov") || + strings.Contains(word, "Dec")) { + + // Try to parse date + time + var dateTimeStr string + if i+1 < len(words) { + // Date + time (2 words) + dateTimeStr = word + " " + words[i+1] + } else { + dateTimeStr = word + } + + // Try different date formats + formats := []string{ + "02-Jan-06 15:04", // "28-Dec-25 01:45" + "02-Jan-2006 15:04:05", // "30-Dec-2025 02:24:58" + "02-Jan-2006 15:04", // "30-Dec-2025 02:24" + "2006-01-02 15:04:05", // ISO format + "2006-01-02 15:04", // ISO format without seconds + } + + for _, format := range formats { + if t, err := time.Parse(format, dateTimeStr); err == nil { + duration := time.Since(t) + return s.formatUptime(duration) + } + } + } + } + + return "Unknown" +} + +// formatUptime formats a duration as "Xd Xh Xm" +func (s *Service) formatUptime(duration time.Duration) string { + days := int(duration.Hours() / 
24) + hours := int(duration.Hours()) % 24 + minutes := int(duration.Minutes()) % 60 + + if days > 0 { + return fmt.Sprintf("%dd %dh %dm", days, hours, minutes) + } + if hours > 0 { + return fmt.Sprintf("%dh %dm", hours, minutes) + } + return fmt.Sprintf("%dm", minutes) +} + +// getDefaultPoolStats gets statistics for the default pool from Bacula database +func (s *Service) getDefaultPoolStats(ctx context.Context) (*PoolStats, error) { + // Query Pool table for default pool (usually named "Default" or "Full") + query := ` + SELECT + p.Name, + COALESCE(SUM(v.VolBytes), 0) as used_bytes, + COALESCE(SUM(v.MaxVolBytes), 0) as total_bytes + FROM Pool p + LEFT JOIN Media m ON p.PoolId = m.PoolId + LEFT JOIN Volumes v ON m.MediaId = v.MediaId + WHERE p.Name = 'Default' OR p.Name = 'Full' + GROUP BY p.Name + ORDER BY p.Name + LIMIT 1 + ` + + var pool PoolStats + var name sql.NullString + var usedBytes, totalBytes sql.NullInt64 + + err := s.baculaDB.QueryRowContext(ctx, query).Scan(&name, &usedBytes, &totalBytes) + if err != nil { + if err == sql.ErrNoRows { + // Try alternative query - get pool with most volumes + altQuery := ` + SELECT + p.Name, + COALESCE(SUM(v.VolBytes), 0) as used_bytes, + COALESCE(SUM(v.MaxVolBytes), 0) as total_bytes + FROM Pool p + LEFT JOIN Media m ON p.PoolId = m.PoolId + LEFT JOIN Volumes v ON m.MediaId = v.MediaId + GROUP BY p.Name + ORDER BY COUNT(m.MediaId) DESC + LIMIT 1 + ` + err = s.baculaDB.QueryRowContext(ctx, altQuery).Scan(&name, &usedBytes, &totalBytes) + if err != nil { + return nil, fmt.Errorf("failed to query pool stats: %w", err) + } + } else { + return nil, fmt.Errorf("failed to query pool stats: %w", err) + } + } + + if name.Valid { + pool.Name = name.String + } + if usedBytes.Valid { + pool.UsedBytes = usedBytes.Int64 + } + if totalBytes.Valid { + pool.TotalBytes = totalBytes.Int64 + } + + // Calculate usage percent + if pool.TotalBytes > 0 { + pool.UsagePercent = float64(pool.UsedBytes) / float64(pool.TotalBytes) * 100 + } else { + // If total is 0, set total to used to show 100% if there's data + if pool.UsedBytes > 0 { + pool.TotalBytes = pool.UsedBytes + pool.UsagePercent = 100.0 + } + } + + return &pool, nil +} + +// ListStoragePools lists all storage pools from Bacula database +func (s *Service) ListStoragePools(ctx context.Context) ([]StoragePool, error) { + if s.baculaDB == nil { + return nil, fmt.Errorf("Bacula database connection not configured") + } + + query := ` + SELECT + p.PoolId, + p.Name, + COALESCE(p.PoolType, 'Backup') as PoolType, + p.LabelFormat, + p.Recycle, + p.AutoPrune, + COALESCE(COUNT(DISTINCT m.MediaId), 0) as volume_count, + COALESCE(SUM(m.VolBytes), 0) as used_bytes, + COALESCE(SUM(m.VolBytes), 0) as total_bytes + FROM Pool p + LEFT JOIN Media m ON p.PoolId = m.PoolId + GROUP BY p.PoolId, p.Name, p.PoolType, p.LabelFormat, p.Recycle, p.AutoPrune + ORDER BY p.Name + ` + + rows, err := s.baculaDB.QueryContext(ctx, query) + if err != nil { + return nil, fmt.Errorf("failed to query storage pools: %w", err) + } + defer rows.Close() + + var pools []StoragePool + for rows.Next() { + var pool StoragePool + var labelFormat, poolType sql.NullString + var recycle, autoPrune sql.NullBool + var usedBytes, totalBytes sql.NullInt64 + + err := rows.Scan( + &pool.PoolID, &pool.Name, &poolType, &labelFormat, + &recycle, &autoPrune, &pool.VolumeCount, + &usedBytes, &totalBytes, + ) + if err != nil { + s.logger.Error("Failed to scan pool row", "error", err) + continue + } + + // Set default pool type if null + if poolType.Valid && 
poolType.String != "" { + pool.PoolType = poolType.String + } else { + pool.PoolType = "Backup" // Default + } + + if labelFormat.Valid && labelFormat.String != "" { + pool.LabelFormat = &labelFormat.String + } + if recycle.Valid { + pool.Recycle = &recycle.Bool + } + if autoPrune.Valid { + pool.AutoPrune = &autoPrune.Bool + } + if usedBytes.Valid { + pool.UsedBytes = usedBytes.Int64 + } else { + pool.UsedBytes = 0 + } + if totalBytes.Valid { + pool.TotalBytes = totalBytes.Int64 + } else { + pool.TotalBytes = 0 + } + + // Calculate usage percent + if pool.TotalBytes > 0 { + pool.UsagePercent = float64(pool.UsedBytes) / float64(pool.TotalBytes) * 100 + } else if pool.UsedBytes > 0 { + pool.TotalBytes = pool.UsedBytes + pool.UsagePercent = 100.0 + } else { + pool.UsagePercent = 0.0 + } + + s.logger.Debug("Loaded pool", "pool_id", pool.PoolID, "name", pool.Name, "type", pool.PoolType, "volumes", pool.VolumeCount) + pools = append(pools, pool) + } + + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("error iterating pool rows: %w", err) + } + + return pools, nil +} + +// ListStorageVolumes lists all storage volumes from Bacula database +func (s *Service) ListStorageVolumes(ctx context.Context, poolName string) ([]StorageVolume, error) { + if s.baculaDB == nil { + return nil, fmt.Errorf("Bacula database connection not configured") + } + + query := ` + SELECT + v.VolumeId, + v.MediaId, + v.VolumeName, + COALESCE(p.Name, 'Unknown') as pool_name, + m.MediaType, + v.VolStatus, + COALESCE(v.VolBytes, 0) as vol_bytes, + COALESCE(v.MaxVolBytes, 0) as max_vol_bytes, + COALESCE(v.VolFiles, 0) as vol_files, + v.VolRetention, + v.LastWritten, + COALESCE(v.RecycleCount, 0) as recycle_count + FROM Volumes v + LEFT JOIN Media m ON v.MediaId = m.MediaId + LEFT JOIN Pool p ON m.PoolId = p.PoolId + WHERE 1=1 + ` + args := []interface{}{} + argIndex := 1 + + if poolName != "" { + query += fmt.Sprintf(" AND p.Name = $%d", argIndex) + args = append(args, poolName) + argIndex++ + } + + query += " ORDER BY v.LastWritten DESC NULLS LAST, v.VolumeName" + + rows, err := s.baculaDB.QueryContext(ctx, query, args...) 
+ if err != nil { + return nil, fmt.Errorf("failed to query storage volumes: %w", err) + } + defer rows.Close() + + var volumes []StorageVolume + for rows.Next() { + var vol StorageVolume + var volRetention, lastWritten sql.NullTime + + err := rows.Scan( + &vol.VolumeID, &vol.MediaID, &vol.VolumeName, &vol.PoolName, + &vol.MediaType, &vol.VolStatus, &vol.VolBytes, &vol.MaxVolBytes, + &vol.VolFiles, &volRetention, &lastWritten, &vol.RecycleCount, + ) + if err != nil { + s.logger.Error("Failed to scan volume row", "error", err) + continue + } + + if volRetention.Valid { + vol.VolRetention = &volRetention.Time + } + if lastWritten.Valid { + vol.LastWritten = &lastWritten.Time + } + + volumes = append(volumes, vol) + } + + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("error iterating volume rows: %w", err) + } + + return volumes, nil +} + +// ListStorageDaemons lists all storage daemons from Bacula database +func (s *Service) ListStorageDaemons(ctx context.Context) ([]StorageDaemon, error) { + if s.baculaDB == nil { + return nil, fmt.Errorf("Bacula database connection not configured") + } + + query := ` + SELECT + s.StorageId, + s.Name, + s.Address, + s.Port, + s.DeviceName, + s.MediaType + FROM Storage s + ORDER BY s.Name + ` + + rows, err := s.baculaDB.QueryContext(ctx, query) + if err != nil { + return nil, fmt.Errorf("failed to query storage daemons: %w", err) + } + defer rows.Close() + + var daemons []StorageDaemon + for rows.Next() { + var daemon StorageDaemon + var address, deviceName, mediaType sql.NullString + var port sql.NullInt64 + + err := rows.Scan( + &daemon.StorageID, &daemon.Name, &address, &port, + &deviceName, &mediaType, + ) + if err != nil { + s.logger.Error("Failed to scan storage daemon row", "error", err) + continue + } + + if address.Valid { + daemon.Address = address.String + } + if port.Valid { + daemon.Port = int(port.Int64) + } + if deviceName.Valid { + daemon.DeviceName = deviceName.String + } + if mediaType.Valid { + daemon.MediaType = mediaType.String + } + + // Default status to Online (could be enhanced with actual connection check) + daemon.Status = "Online" + + daemons = append(daemons, daemon) + } + + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("error iterating storage daemon rows: %w", err) + } + + return daemons, nil +} + +// CreatePoolRequest represents a request to create a new storage pool +type CreatePoolRequest struct { + Name string `json:"name" binding:"required"` + PoolType string `json:"pool_type"` // Backup, Scratch, Recycle + LabelFormat *string `json:"label_format,omitempty"` + Recycle *bool `json:"recycle,omitempty"` + AutoPrune *bool `json:"auto_prune,omitempty"` +} + +// CreateStoragePool creates a new storage pool in Bacula database +func (s *Service) CreateStoragePool(ctx context.Context, req CreatePoolRequest) (*StoragePool, error) { + if s.baculaDB == nil { + return nil, fmt.Errorf("Bacula database connection not configured") + } + + // Validate pool name + if req.Name == "" { + return nil, fmt.Errorf("pool name is required") + } + + // Check if pool already exists + var existingID int + err := s.baculaDB.QueryRowContext(ctx, "SELECT PoolId FROM Pool WHERE Name = $1", req.Name).Scan(&existingID) + if err == nil { + return nil, fmt.Errorf("pool with name %s already exists", req.Name) + } else if err != sql.ErrNoRows { + return nil, fmt.Errorf("failed to check existing pool: %w", err) + } + + // Set defaults + poolType := req.PoolType + if poolType == "" { + poolType = "Backup" // Default to Backup pool + } 
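+	// Note (descriptive comment): this inserts the pool row directly into the Bacula
+	// catalog. The Director's own Pool resources in bacula-dir.conf are not modified
+	// here, so a matching Pool resource is likely still needed before jobs can
+	// actually reference the new pool.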
+ + // Insert new pool + query := ` + INSERT INTO Pool (Name, PoolType, LabelFormat, Recycle, AutoPrune) + VALUES ($1, $2, $3, $4, $5) + RETURNING PoolId, Name, PoolType, LabelFormat, Recycle, AutoPrune + ` + + var pool StoragePool + var labelFormat sql.NullString + var recycle, autoPrune sql.NullBool + + err = s.baculaDB.QueryRowContext(ctx, query, + req.Name, poolType, req.LabelFormat, req.Recycle, req.AutoPrune, + ).Scan( + &pool.PoolID, &pool.Name, &pool.PoolType, &labelFormat, &recycle, &autoPrune, + ) + if err != nil { + return nil, fmt.Errorf("failed to create pool: %w", err) + } + + if labelFormat.Valid { + pool.LabelFormat = &labelFormat.String + } + if recycle.Valid { + pool.Recycle = &recycle.Bool + } + if autoPrune.Valid { + pool.AutoPrune = &autoPrune.Bool + } + + pool.VolumeCount = 0 + pool.UsedBytes = 0 + pool.TotalBytes = 0 + pool.UsagePercent = 0 + + s.logger.Info("Storage pool created", "pool_id", pool.PoolID, "name", pool.Name, "type", pool.PoolType) + return &pool, nil +} + +// DeleteStoragePool deletes a storage pool from Bacula database +func (s *Service) DeleteStoragePool(ctx context.Context, poolID int) error { + if s.baculaDB == nil { + return fmt.Errorf("Bacula database connection not configured") + } + + // Check if pool exists and get name + var poolName string + err := s.baculaDB.QueryRowContext(ctx, "SELECT Name FROM Pool WHERE PoolId = $1", poolID).Scan(&poolName) + if err == sql.ErrNoRows { + return fmt.Errorf("pool not found") + } else if err != nil { + return fmt.Errorf("failed to check pool: %w", err) + } + + // Check if pool has volumes + var volumeCount int + err = s.baculaDB.QueryRowContext(ctx, ` + SELECT COUNT(*) FROM Media m + INNER JOIN Pool p ON m.PoolId = p.PoolId + WHERE p.PoolId = $1 + `, poolID).Scan(&volumeCount) + if err != nil { + return fmt.Errorf("failed to check pool volumes: %w", err) + } + + if volumeCount > 0 { + return fmt.Errorf("cannot delete pool %s: pool contains %d volumes. Please remove or move volumes first", poolName, volumeCount) + } + + // Delete pool + _, err = s.baculaDB.ExecContext(ctx, "DELETE FROM Pool WHERE PoolId = $1", poolID) + if err != nil { + return fmt.Errorf("failed to delete pool: %w", err) + } + + s.logger.Info("Storage pool deleted", "pool_id", poolID, "name", poolName) + return nil +} + +// CreateVolumeRequest represents a request to create a new storage volume +type CreateVolumeRequest struct { + VolumeName string `json:"volume_name" binding:"required"` + PoolName string `json:"pool_name" binding:"required"` + MediaType string `json:"media_type"` // File, Tape, etc. 
+ MaxVolBytes *int64 `json:"max_vol_bytes,omitempty"` + VolRetention *int `json:"vol_retention,omitempty"` // Retention period in days +} + +// CreateStorageVolume creates a new storage volume in Bacula database +func (s *Service) CreateStorageVolume(ctx context.Context, req CreateVolumeRequest) (*StorageVolume, error) { + if s.baculaDB == nil { + return nil, fmt.Errorf("Bacula database connection not configured") + } + + // Validate volume name + if req.VolumeName == "" { + return nil, fmt.Errorf("volume name is required") + } + + // Get pool ID + var poolID int + err := s.baculaDB.QueryRowContext(ctx, "SELECT PoolId FROM Pool WHERE Name = $1", req.PoolName).Scan(&poolID) + if err == sql.ErrNoRows { + return nil, fmt.Errorf("pool %s not found", req.PoolName) + } else if err != nil { + return nil, fmt.Errorf("failed to get pool: %w", err) + } + + // Set defaults + mediaType := req.MediaType + if mediaType == "" { + mediaType = "File" // Default to File for disk volumes + } + + // Create Media entry first (Volumes table references Media) + var mediaID int + mediaQuery := ` + INSERT INTO Media (PoolId, MediaType, VolumeName, VolBytes, VolFiles, VolStatus, LastWritten) + VALUES ($1, $2, $3, 0, 0, 'Append', NOW()) + RETURNING MediaId + ` + err = s.baculaDB.QueryRowContext(ctx, mediaQuery, poolID, mediaType, req.VolumeName).Scan(&mediaID) + if err != nil { + return nil, fmt.Errorf("failed to create media entry: %w", err) + } + + // Create Volume entry + var maxVolBytes sql.NullInt64 + if req.MaxVolBytes != nil { + maxVolBytes = sql.NullInt64{Int64: *req.MaxVolBytes, Valid: true} + } + + var volRetention sql.NullTime + if req.VolRetention != nil { + retentionTime := time.Now().AddDate(0, 0, *req.VolRetention) + volRetention = sql.NullTime{Time: retentionTime, Valid: true} + } + + volumeQuery := ` + INSERT INTO Volumes (MediaId, VolumeName, VolBytes, MaxVolBytes, VolFiles, VolStatus, VolRetention, LastWritten) + VALUES ($1, $2, 0, $3, 0, 'Append', $4, NOW()) + RETURNING VolumeId, MediaId, VolumeName, VolBytes, MaxVolBytes, VolFiles, VolRetention, LastWritten + ` + + var vol StorageVolume + var lastWritten sql.NullTime + + err = s.baculaDB.QueryRowContext(ctx, volumeQuery, + mediaID, req.VolumeName, maxVolBytes, volRetention, + ).Scan( + &vol.VolumeID, &vol.MediaID, &vol.VolumeName, &vol.VolBytes, &vol.MaxVolBytes, + &vol.VolFiles, &volRetention, &lastWritten, + ) + if err != nil { + // Cleanup: delete media if volume creation fails + s.baculaDB.ExecContext(ctx, "DELETE FROM Media WHERE MediaId = $1", mediaID) + return nil, fmt.Errorf("failed to create volume: %w", err) + } + + vol.PoolName = req.PoolName + vol.MediaType = mediaType + vol.VolStatus = "Append" + vol.RecycleCount = 0 + + if volRetention.Valid { + vol.VolRetention = &volRetention.Time + } + if lastWritten.Valid { + vol.LastWritten = &lastWritten.Time + } + + s.logger.Info("Storage volume created", "volume_id", vol.VolumeID, "name", vol.VolumeName, "pool", req.PoolName) + return &vol, nil +} + +// UpdateVolumeRequest represents a request to update a storage volume +type UpdateVolumeRequest struct { + MaxVolBytes *int64 `json:"max_vol_bytes,omitempty"` + VolRetention *int `json:"vol_retention,omitempty"` // Retention period in days +} + +// UpdateStorageVolume updates a storage volume's meta-data in Bacula database +func (s *Service) UpdateStorageVolume(ctx context.Context, volumeID int, req UpdateVolumeRequest) (*StorageVolume, error) { + if s.baculaDB == nil { + return nil, fmt.Errorf("Bacula database connection not configured") + } 
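+	// Only MaxVolBytes and VolRetention can be changed through this request; a
+	// retention given in days is converted to an absolute time (now + N days)
+	// before being stored below.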
+ + // Check if volume exists + var volumeName string + err := s.baculaDB.QueryRowContext(ctx, "SELECT VolumeName FROM Volumes WHERE VolumeId = $1", volumeID).Scan(&volumeName) + if err == sql.ErrNoRows { + return nil, fmt.Errorf("volume not found") + } else if err != nil { + return nil, fmt.Errorf("failed to check volume: %w", err) + } + + // Build update query dynamically + updates := []string{} + args := []interface{}{} + argIndex := 1 + + if req.MaxVolBytes != nil { + updates = append(updates, fmt.Sprintf("MaxVolBytes = $%d", argIndex)) + args = append(args, *req.MaxVolBytes) + argIndex++ + } + + if req.VolRetention != nil { + retentionTime := time.Now().AddDate(0, 0, *req.VolRetention) + updates = append(updates, fmt.Sprintf("VolRetention = $%d", argIndex)) + args = append(args, retentionTime) + argIndex++ + } + + if len(updates) == 0 { + return nil, fmt.Errorf("no fields to update") + } + + args = append(args, volumeID) + query := fmt.Sprintf("UPDATE Volumes SET %s WHERE VolumeId = $%d", strings.Join(updates, ", "), argIndex) + + _, err = s.baculaDB.ExecContext(ctx, query, args...) + if err != nil { + return nil, fmt.Errorf("failed to update volume: %w", err) + } + + // Get updated volume + volumes, err := s.ListStorageVolumes(ctx, "") + if err != nil { + return nil, fmt.Errorf("failed to get updated volume: %w", err) + } + + for _, vol := range volumes { + if vol.VolumeID == volumeID { + s.logger.Info("Storage volume updated", "volume_id", volumeID, "name", volumeName) + return &vol, nil + } + } + + return nil, fmt.Errorf("updated volume not found") +} + +// DeleteStorageVolume deletes a storage volume from Bacula database +func (s *Service) DeleteStorageVolume(ctx context.Context, volumeID int) error { + if s.baculaDB == nil { + return fmt.Errorf("Bacula database connection not configured") + } + + // Check if volume exists and get name + var volumeName string + var mediaID int + err := s.baculaDB.QueryRowContext(ctx, "SELECT VolumeName, MediaId FROM Volumes WHERE VolumeId = $1", volumeID).Scan(&volumeName, &mediaID) + if err == sql.ErrNoRows { + return fmt.Errorf("volume not found") + } else if err != nil { + return fmt.Errorf("failed to check volume: %w", err) + } + + // Check if volume has data + var volBytes int64 + err = s.baculaDB.QueryRowContext(ctx, "SELECT VolBytes FROM Volumes WHERE VolumeId = $1", volumeID).Scan(&volBytes) + if err != nil { + return fmt.Errorf("failed to check volume data: %w", err) + } + + if volBytes > 0 { + return fmt.Errorf("cannot delete volume %s: volume contains data. 
Please purge or truncate first", volumeName) + } + + // Delete volume + _, err = s.baculaDB.ExecContext(ctx, "DELETE FROM Volumes WHERE VolumeId = $1", volumeID) + if err != nil { + return fmt.Errorf("failed to delete volume: %w", err) + } + + // Delete associated media entry + _, err = s.baculaDB.ExecContext(ctx, "DELETE FROM Media WHERE MediaId = $1", mediaID) + if err != nil { + s.logger.Warn("Failed to delete media entry", "media_id", mediaID, "error", err) + // Continue anyway, volume is deleted + } + + s.logger.Info("Storage volume deleted", "volume_id", volumeID, "name", volumeName) + return nil +} + +// Media represents a media entry from bconsole "list media" +type Media struct { + MediaID int `json:"media_id"` + VolumeName string `json:"volume_name"` + PoolName string `json:"pool_name"` + MediaType string `json:"media_type"` + Status string `json:"status"` + VolBytes int64 `json:"vol_bytes"` + MaxVolBytes int64 `json:"max_vol_bytes"` + VolFiles int `json:"vol_files"` + LastWritten string `json:"last_written,omitempty"` + RecycleCount int `json:"recycle_count"` + Slot int `json:"slot,omitempty"` // Slot number in library + InChanger int `json:"in_changer,omitempty"` // 1 if in changer, 0 if not + LibraryName string `json:"library_name,omitempty"` // Library name (for tape media) +} + +// ListMedia lists all media from bconsole "list media" command +func (s *Service) ListMedia(ctx context.Context) ([]Media, error) { + // Execute bconsole command to list media + s.logger.Debug("Executing bconsole list media command") + output, err := s.ExecuteBconsoleCommand(ctx, "list media") + if err != nil { + s.logger.Error("Failed to execute bconsole list media", "error", err) + return nil, fmt.Errorf("failed to execute bconsole list media: %w", err) + } + + previewLen := 500 + if len(output) < previewLen { + previewLen = len(output) + } + s.logger.Debug("bconsole list media output", "output_length", len(output), "output_preview", output[:previewLen]) + + // Parse bconsole output + media := s.parseBconsoleMediaOutput(output) + s.logger.Debug("Parsed media from bconsole", "count", len(media)) + + // Enrich with pool names from database + if s.baculaDB != nil && len(media) > 0 { + media = s.enrichMediaWithPoolNames(ctx, media) + } + + return media, nil +} + +// enrichMediaWithPoolNames enriches media list with pool names from database +func (s *Service) enrichMediaWithPoolNames(ctx context.Context, media []Media) []Media { + // Create maps of media_id to pool_name and library_name + poolMap := make(map[int]string) + libraryMap := make(map[int]string) + + if len(media) == 0 { + return media + } + + // Query database to get pool names for all media + mediaIDs := make([]interface{}, len(media)) + for i, m := range media { + mediaIDs[i] = m.MediaID + } + + // Build query with placeholders + placeholders := make([]string, len(mediaIDs)) + args := make([]interface{}, len(mediaIDs)) + for i := range mediaIDs { + placeholders[i] = fmt.Sprintf("$%d", i+1) + args[i] = mediaIDs[i] + } + + // First, get pool names + query := fmt.Sprintf(` + SELECT m.MediaId, COALESCE(p.Name, 'Unknown') as pool_name + FROM Media m + LEFT JOIN Pool p ON m.PoolId = p.PoolId + WHERE m.MediaId IN (%s) + `, strings.Join(placeholders, ",")) + + rows, err := s.baculaDB.QueryContext(ctx, query, args...) 
+ if err != nil { + s.logger.Warn("Failed to query pool names for media", "error", err) + } else { + defer rows.Close() + for rows.Next() { + var mediaID int + var poolName string + if err := rows.Scan(&mediaID, &poolName); err == nil { + poolMap[mediaID] = poolName + } + } + } + + // Get storage names for tape media + // Since Storage table doesn't have MediaType column, we'll use bconsole to match + // For each storage, check which media belong to it using "list volumes storage=" + storageQuery := ` + SELECT Name + FROM Storage + ORDER BY Name + ` + storageRows, err := s.baculaDB.QueryContext(ctx, storageQuery) + if err == nil { + defer storageRows.Close() + var storageNames []string + for storageRows.Next() { + var storageName string + if err := storageRows.Scan(&storageName); err == nil { + storageNames = append(storageNames, storageName) + } + } + + // For each storage, use bconsole to get list of media volumes + // This will tell us which media belong to which storage + for _, storageName := range storageNames { + // Skip file storages (File1, File2) + if strings.Contains(strings.ToLower(storageName), "file") { + continue + } + + // Use bconsole to list volumes for this storage + cmd := fmt.Sprintf("list volumes storage=%s", storageName) + output, err := s.ExecuteBconsoleCommand(ctx, cmd) + if err != nil { + s.logger.Debug("Failed to get volumes for storage", "storage", storageName, "error", err) + continue + } + + // Parse output to get media IDs + mediaIDs := s.parseMediaIDsFromBconsoleOutput(output) + for _, mediaID := range mediaIDs { + if mediaID > 0 { + libraryMap[mediaID] = storageName + } + } + } + } + + // Update media with pool names and library names + for i := range media { + if poolName, ok := poolMap[media[i].MediaID]; ok { + media[i].PoolName = poolName + } + // Set library name for tape media that are in changer + if media[i].MediaType != "" && (strings.Contains(strings.ToLower(media[i].MediaType), "lto") || strings.Contains(strings.ToLower(media[i].MediaType), "tape")) { + if libraryName, ok := libraryMap[media[i].MediaID]; ok && libraryName != "" { + media[i].LibraryName = libraryName + } else if media[i].InChanger > 0 { + // If in changer but no storage name, use generic name + media[i].LibraryName = "Unknown Library" + } + } + } + + return media +} + +// parseMediaIDsFromBconsoleOutput parses media IDs from bconsole "list volumes storage=..." 
output +func (s *Service) parseMediaIDsFromBconsoleOutput(output string) []int { + var mediaIDs []int + lines := strings.Split(output, "\n") + + inTable := false + headerFound := false + mediaIDColIndex := -1 + + for _, line := range lines { + line = strings.TrimSpace(line) + + // Skip connection messages + if strings.Contains(line, "Connecting to Director") || + strings.Contains(line, "Enter a period") || + strings.Contains(line, "list volumes") || + strings.Contains(line, "quit") || + strings.Contains(line, "You have messages") || + strings.Contains(line, "Automatically selected") || + strings.Contains(line, "Using Catalog") || + strings.Contains(line, "Pool:") { + continue + } + + // Detect table header + if !headerFound && strings.Contains(line, "|") && strings.Contains(strings.ToLower(line), "mediaid") { + parts := strings.Split(line, "|") + for i, part := range parts { + if strings.Contains(strings.ToLower(strings.TrimSpace(part)), "mediaid") { + mediaIDColIndex = i + break + } + } + headerFound = true + inTable = true + continue + } + + // Detect table separator + if strings.HasPrefix(line, "+") && strings.Contains(line, "-") { + continue + } + + // Skip empty lines + if line == "" { + continue + } + + // Parse table rows + if inTable && strings.Contains(line, "|") && mediaIDColIndex >= 0 { + parts := strings.Split(line, "|") + if mediaIDColIndex < len(parts) { + mediaIDStr := strings.TrimSpace(parts[mediaIDColIndex]) + // Remove commas + mediaIDStr = strings.ReplaceAll(mediaIDStr, ",", "") + if mediaID, err := strconv.Atoi(mediaIDStr); err == nil && mediaID > 0 { + // Skip header row + if mediaIDStr != "mediaid" && mediaIDStr != "MediaId" { + mediaIDs = append(mediaIDs, mediaID) + } + } + } + } + } + + return mediaIDs +} + +// parseBconsoleMediaOutput parses bconsole "list media" output +func (s *Service) parseBconsoleMediaOutput(output string) []Media { + var mediaList []Media + lines := strings.Split(output, "\n") + + inTable := false + headerFound := false + headerMap := make(map[string]int) // Map header name to column index + + for _, line := range lines { + line = strings.TrimSpace(line) + + // Skip connection messages and command echo + if strings.Contains(line, "Connecting to Director") || + strings.Contains(line, "Enter a period") || + strings.Contains(line, "list media") || + strings.Contains(line, "quit") || + strings.Contains(line, "You have messages") || + strings.Contains(line, "Automatically selected") || + strings.Contains(line, "Using Catalog") || + strings.Contains(line, "Pool:") { + continue + } + + // Detect table header - format: | mediaid | volumename | volstatus | ... 
+ if !headerFound && strings.Contains(line, "|") && (strings.Contains(strings.ToLower(line), "mediaid") || strings.Contains(strings.ToLower(line), "volumename")) { + // Parse header to get column positions + parts := strings.Split(line, "|") + for i, part := range parts { + headerName := strings.ToLower(strings.TrimSpace(part)) + if headerName != "" { + headerMap[headerName] = i + } + } + headerFound = true + inTable = true + s.logger.Debug("Found media table header", "headers", headerMap) + continue + } + + // Detect table separator + if strings.HasPrefix(line, "+") && strings.Contains(line, "-") { + continue + } + + // Skip empty lines + if line == "" { + continue + } + + // Parse table rows + if inTable && strings.Contains(line, "|") { + parts := strings.Split(line, "|") + if len(parts) > 1 { + // Helper to get string value safely + getString := func(colName string) string { + if idx, ok := headerMap[colName]; ok && idx < len(parts) { + return strings.TrimSpace(parts[idx]) + } + return "" + } + + // Helper to get int value safely + getInt := func(colName string) int { + valStr := getString(colName) + // Remove commas from numbers + valStr = strings.ReplaceAll(valStr, ",", "") + if val, e := strconv.Atoi(valStr); e == nil { + return val + } + return 0 + } + + // Helper to get int64 value safely + getInt64 := func(colName string) int64 { + valStr := getString(colName) + // Remove commas from numbers + valStr = strings.ReplaceAll(valStr, ",", "") + if val, e := strconv.ParseInt(valStr, 10, 64); e == nil { + return val + } + return 0 + } + + mediaID := getInt("mediaid") + volumeName := getString("volumename") + volStatus := getString("volstatus") + volBytes := getInt64("volbytes") + volFiles := getInt("volfiles") + mediaType := getString("mediatype") + lastWritten := getString("lastwritten") + recycleCount := getInt("recycle") + slot := getInt("slot") + inChanger := getInt("inchanger") + + // Skip header row or invalid rows + if mediaID == 0 || volumeName == "" || volumeName == "volumename" { + continue + } + + // Get pool name - it's not in the table, we'll need to get it from database or set default + // For now, we'll use "Default" as pool name since bconsole list media doesn't show pool + poolName := "Default" + + // MaxVolBytes is not in the output, we'll set to 0 for now + maxVolBytes := int64(0) + + media := Media{ + MediaID: mediaID, + VolumeName: volumeName, + PoolName: poolName, + MediaType: mediaType, + Status: volStatus, + VolBytes: volBytes, + MaxVolBytes: maxVolBytes, + VolFiles: volFiles, + LastWritten: lastWritten, + RecycleCount: recycleCount, + Slot: slot, + InChanger: inChanger, + } + + mediaList = append(mediaList, media) + } + } + } + + s.logger.Debug("Parsed media from bconsole", "count", len(mediaList)) + return mediaList +} diff --git a/backend/internal/common/router/router.go b/backend/internal/common/router/router.go index 57212f9..76b1508 100644 --- a/backend/internal/common/router/router.go +++ b/backend/internal/common/router/router.go @@ -346,10 +346,20 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng backupGroup := protected.Group("/backup") backupGroup.Use(requirePermission("backup", "read")) { + backupGroup.GET("/dashboard/stats", backupHandler.GetDashboardStats) backupGroup.GET("/jobs", backupHandler.ListJobs) backupGroup.GET("/jobs/:id", backupHandler.GetJob) backupGroup.POST("/jobs", requirePermission("backup", "write"), backupHandler.CreateJob) backupGroup.GET("/clients", backupHandler.ListClients) + 
backupGroup.GET("/storage/pools", backupHandler.ListStoragePools) + backupGroup.POST("/storage/pools", requirePermission("backup", "write"), backupHandler.CreateStoragePool) + backupGroup.DELETE("/storage/pools/:id", requirePermission("backup", "write"), backupHandler.DeleteStoragePool) + backupGroup.GET("/storage/volumes", backupHandler.ListStorageVolumes) + backupGroup.POST("/storage/volumes", requirePermission("backup", "write"), backupHandler.CreateStorageVolume) + backupGroup.PUT("/storage/volumes/:id", requirePermission("backup", "write"), backupHandler.UpdateStorageVolume) + backupGroup.DELETE("/storage/volumes/:id", requirePermission("backup", "write"), backupHandler.DeleteStorageVolume) + backupGroup.GET("/media", backupHandler.ListMedia) + backupGroup.GET("/storage/daemons", backupHandler.ListStorageDaemons) backupGroup.POST("/console/execute", requirePermission("backup", "write"), backupHandler.ExecuteBconsoleCommand) } diff --git a/bacula-config b/bacula-config new file mode 120000 index 0000000..bcd0f74 --- /dev/null +++ b/bacula-config @@ -0,0 +1 @@ +/etc/bacula \ No newline at end of file diff --git a/docs/bacula-vtl-troubleshooting.md b/docs/bacula-vtl-troubleshooting.md new file mode 100644 index 0000000..e1bbdab --- /dev/null +++ b/docs/bacula-vtl-troubleshooting.md @@ -0,0 +1,354 @@ +# Bacula VTL Integration - Root Cause Analysis & Troubleshooting + +## Issue Summary +Bacula Storage Daemon was unable to read slots from mhVTL (Virtual Tape Library) autochanger devices, reporting "Device has 0 slots" despite mtx-changer script working correctly when called manually. + +## Environment +- **OS**: Ubuntu Linux +- **Bacula Version**: 13.0.4 +- **VTL**: mhVTL (Virtual Tape Library) +- **Autochangers**: + - Quantum Scalar i500 (4 drives, 43 slots) + - Quantum Scalar i40 (4 drives, 44 slots) +- **Tape Drives**: 8x QUANTUM ULTRIUM-HH8 (LTO-8) + +## Root Cause Analysis + +### Primary Issues Identified + +#### 1. **Incorrect Tape Device Type** +**Problem**: Using rewinding tape devices (`/dev/st*`) instead of non-rewinding devices (`/dev/nst*`) + +**Impact**: Tape would rewind after each operation, causing data loss and operational failures + +**Solution**: Changed all Archive Device directives from `/dev/st*` to `/dev/nst*` + +```diff +Device { + Name = Drive-0 +- Archive Device = /dev/st0 ++ Archive Device = /dev/nst0 +} +``` + +#### 2. **Missing Drive Index Parameter** +**Problem**: Device configurations lacked Drive Index parameter + +**Impact**: Bacula couldn't properly identify which physical drive in the autochanger to use + +**Solution**: Added Drive Index (0-3) to each Device resource + +```diff +Device { + Name = Drive-0 ++ Drive Index = 0 + Archive Device = /dev/nst0 +} +``` + +#### 3. **Incorrect AlwaysOpen Setting** +**Problem**: AlwaysOpen was set to `no` + +**Impact**: Device wouldn't remain open, causing connection issues with VTL + +**Solution**: Changed AlwaysOpen to `yes` for all tape devices + +```diff +Device { + Name = Drive-0 +- AlwaysOpen = no ++ AlwaysOpen = yes +} +``` + +#### 4. 
**Wrong Changer Device Path** +**Problem**: Using `/dev/sch*` (medium changer device) instead of `/dev/sg*` (generic SCSI device) + +**Impact**: bacula user couldn't access the changer due to permission issues (cdrom group vs tape group) + +**Solution**: Changed Changer Device to use sg devices + +```diff +Autochanger { + Name = Scalar-i500 +- Changer Device = /dev/sch0 ++ Changer Device = /dev/sg7 +} +``` + +**Device Mapping**: +- `/dev/sch0` → `/dev/sg7` (Scalar i500) +- `/dev/sch1` → `/dev/sg8` (Scalar i40) + +#### 5. **Missing User Permissions** +**Problem**: bacula user not in required groups for device access + +**Impact**: "Permission denied" errors when accessing tape and changer devices + +**Solution**: Added bacula user to tape and cdrom groups + +```bash +usermod -a -G tape,cdrom bacula +systemctl restart bacula-sd +``` + +#### 6. **Incorrect Storage Resource Configuration** +**Problem**: Storage resource in Director config referenced autochanger name instead of individual drives + +**Impact**: Bacula couldn't properly communicate with individual tape drives + +**Solution**: Listed all drives explicitly in Storage resource + +```diff +Storage { + Name = Scalar-i500 +- Device = Scalar-i500 ++ Device = Drive-0 ++ Device = Drive-1 ++ Device = Drive-2 ++ Device = Drive-3 + Autochanger = Scalar-i500 +} +``` + +#### 7. **mtx-changer List Output Format** +**Problem**: Script output format didn't match Bacula's expected format + +**Impact**: "Invalid Slot number" errors, preventing volume labeling + +**Original Output**: `1 Full:VolumeTag=E01001L8` +**Expected Output**: `1:E01001L8` + +**Solution**: Fixed sed pattern in list command + +```bash +# Original (incorrect) +list) + ${MTX} -f $ctl status | grep "Storage Element" | grep "Full" | awk '{print $3 $4}' | sed 's/:/ /' + ;; + +# Fixed +list) + ${MTX} -f $ctl status | grep "Storage Element" | grep "Full" | awk '{print $3 $4}' | sed 's/:Full:VolumeTag=/:/' + ;; +``` + +## Troubleshooting Steps + +### Step 1: Verify mtx-changer Script Works Manually +```bash +# Test slots command +/usr/lib/bacula/scripts/mtx-changer /dev/sg7 slots +# Expected output: 43 + +# Test list command +/usr/lib/bacula/scripts/mtx-changer /dev/sg7 list +# Expected output: 1:E01001L8, 2:E01002L8, etc. 
+``` + +### Step 2: Test as bacula User +```bash +# Test if bacula user can access devices +su -s /bin/bash bacula -c "/usr/lib/bacula/scripts/mtx-changer /dev/sg7 slots" + +# If permission denied, check groups +groups bacula +# Should include: bacula tape cdrom +``` + +### Step 3: Verify Device Permissions +```bash +# Check changer devices +ls -l /dev/sch* /dev/sg7 /dev/sg8 +# sg devices should be in tape group + +# Check tape devices +ls -l /dev/nst* +# Should be in tape group with rw permissions +``` + +### Step 4: Test Bacula Storage Daemon Connection +```bash +# From bconsole +echo "status storage=Scalar-i500" | bconsole + +# Should show autochanger and drives +``` + +### Step 5: Update Slots +```bash +echo -e "update slots storage=Scalar-i500\n0\n" | bconsole + +# Should show: Device "Drive-0" has 43 slots +# NOT: Device has 0 slots +``` + +### Step 6: Label Tapes +```bash +echo -e "label barcodes storage=Scalar-i500 pool=Default\n0\nyes\n" | bconsole + +# Should successfully label tapes using barcodes +``` + +## Configuration Files + +### /etc/bacula/bacula-sd.conf (Storage Daemon) +```bash +Autochanger { + Name = Scalar-i500 + Device = Drive-0, Drive-1, Drive-2, Drive-3 + Changer Command = "/usr/lib/bacula/scripts/mtx-changer %c %o %S %a %d" + Changer Device = /dev/sg7 +} + +Device { + Name = Drive-0 + Drive Index = 0 + Changer Device = /dev/sg7 + Media Type = LTO-8 + Archive Device = /dev/nst0 + AutomaticMount = yes + AlwaysOpen = yes + RemovableMedia = yes + RandomAccess = no + AutoChanger = yes + Maximum Concurrent Jobs = 1 +} +``` + +### /etc/bacula/bacula-dir.conf (Director) +```bash +Storage { + Name = Scalar-i500 + Address = localhost + SDPort = 9103 + Password = "QJQPnZ5Q5p6D73RcvR7ksrOm9UG3mAhvV" + Device = Drive-0 + Device = Drive-1 + Device = Drive-2 + Device = Drive-3 + Media Type = LTO-8 + Autochanger = Scalar-i500 + Maximum Concurrent Jobs = 4 +} +``` + +### /usr/lib/bacula/scripts/mtx-changer +```bash +#!/bin/sh +MTX=/usr/sbin/mtx + +ctl=$1 +cmd="$2" +slot=$3 +device=$4 +drive=$5 + +case "$cmd" in + loaded) + ${MTX} -f $ctl status | grep "Data Transfer Element $slot:Full" >/dev/null 2>&1 + if [ $? 
-eq 0 ]; then + ${MTX} -f $ctl status | grep "Data Transfer Element $slot:Full" | awk '{print $7}' | sed 's/.*=//' + else + echo "0" + fi + ;; + + load) + ${MTX} -f $ctl load $slot $drive + ;; + + unload) + ${MTX} -f $ctl unload $slot $drive + ;; + + list) + ${MTX} -f $ctl status | grep "Storage Element" | grep "Full" | awk '{print $3 $4}' | sed 's/:Full:VolumeTag=/:/' + ;; + + slots) + ${MTX} -f $ctl status | grep "Storage Changer" | awk '{print $5}' + ;; + + *) + echo "Invalid command: $cmd" + exit 1 + ;; +esac + +exit 0 +``` + +## Verification Commands + +### Check Device Mapping +```bash +lsscsi -g | grep -E "mediumx|tape" +``` + +### Check VTL Services +```bash +systemctl list-units 'vtl*' +``` + +### Test Manual Tape Load +```bash +# Load tape to drive +mtx -f /dev/sg7 load 1 0 + +# Check drive status +mt -f /dev/nst0 status + +# Unload tape +mtx -f /dev/sg7 unload 1 0 +``` + +### List Labeled Volumes +```bash +echo "list volumes pool=Default" | bconsole +``` + +## Common Errors and Solutions + +### Error: "Device has 0 slots" +**Cause**: Wrong changer device or permission issues +**Solution**: Use /dev/sg* devices and verify bacula user in tape/cdrom groups + +### Error: "Permission denied" accessing /dev/sch0 +**Cause**: bacula user not in cdrom group +**Solution**: `usermod -a -G cdrom bacula && systemctl restart bacula-sd` + +### Error: "Invalid Slot number" +**Cause**: mtx-changer list output format incorrect +**Solution**: Fix sed pattern to output `slot:volumetag` format + +### Error: "No medium found" after successful load +**Cause**: Using rewinding devices (/dev/st*) or AlwaysOpen=no +**Solution**: Use /dev/nst* and set AlwaysOpen=yes + +### Error: "READ ELEMENT STATUS Command Failed" +**Cause**: Permission issue or VTL service problem +**Solution**: Check user permissions and restart vtllibrary service + +## Results + +### Scalar i500 (WORKING) +- ✅ 43 slots detected +- ✅ 20 tapes successfully labeled (E01001L8 - E01020L8) +- ✅ Autochanger operations functional +- ✅ Ready for backup jobs + +### Scalar i40 (ISSUE) +- ⚠️ 44 slots detected +- ❌ Hardware Error during tape load operations +- ❌ 0 tapes labeled +- **Status**: Requires mhVTL configuration investigation or system restart + +## References +- Bacula Documentation: https://www.bacula.org/ +- Article: "Using Bacula with mhVTL" - https://karellen.blogspot.com/2012/02/using-bacula-with-mhvtl.html +- mhVTL Project: https://github.com/markh794/mhvtl + +## Date +Created: 2025-12-31 +Author: Warp AI Agent diff --git a/docs/healthcheck-script.md b/docs/healthcheck-script.md new file mode 100644 index 0000000..1d9b581 --- /dev/null +++ b/docs/healthcheck-script.md @@ -0,0 +1,344 @@ +# Calypso Appliance Health Check Script + +## Overview +Comprehensive health check script for all Calypso Appliance components. Performs automated checks across system resources, services, network, storage, and backup infrastructure. 
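+
+The script's exit code (see Exit Codes below) summarizes overall health, which makes it easy to drive alerting from automation. A minimal wrapper — a sketch only, assuming the documented install path and a working local `mail` command — could look like this:
+
+```bash
+#!/bin/sh
+# Run the health check, capture its exit code, and mail the log only when the
+# appliance is not fully healthy. The recipient address is an example.
+LOG=$(mktemp)
+/usr/local/bin/calypso-healthcheck > "$LOG" 2>&1
+rc=$?
+if [ "$rc" -ne 0 ]; then
+    mail -s "Calypso health check: exit code $rc" admin@example.com < "$LOG"
+fi
+rm -f "$LOG"
+exit "$rc"
+```
+
+The cron and Nagios examples later in this document show fuller integrations.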
+ +## Installation +Script location: `/usr/local/bin/calypso-healthcheck` + +## Usage + +### Basic Usage +```bash +# Run health check (requires root) +calypso-healthcheck + +# Run and save to specific location +calypso-healthcheck 2>&1 | tee /root/healthcheck-$(date +%Y%m%d).log +``` + +### Exit Codes +- `0` - All checks passed (100% healthy) +- `1` - Healthy with warnings (some non-critical issues) +- `2` - Degraded (80%+ checks passed, some failures) +- `3` - Critical (less than 80% checks passed) + +### Automated Checks + +#### System Resources (4 checks) +- Root filesystem usage (threshold: 80%) +- /var filesystem usage (threshold: 80%) +- Memory usage (threshold: 90%) +- CPU load average + +#### Database Services (2 checks) +- PostgreSQL service status +- Database presence (calypso, bacula) + +#### Calypso Application (7 checks) +- calypso-api service +- calypso-frontend service +- calypso-logger service +- API port 8443 +- Frontend port 3000 +- API health endpoint +- Frontend health endpoint + +#### Backup Services - Bacula (8 checks) +- bacula-director service +- bacula-fd service +- bacula-sd service +- Director bconsole connectivity +- Storage (Scalar-i500) accessibility +- Director port 9101 +- FD port 9102 +- SD port 9103 + +#### Virtual Tape Library - mhVTL (4 checks) +- mhvtl.target status +- vtllibrary@10 (Scalar i500) +- vtllibrary@30 (Scalar i40) +- VTL device count (2 changers, 8 tape drives) +- Scalar i500 slots detection + +#### Storage Protocols (9 checks) +- NFS server service +- Samba (smbd) service +- NetBIOS (nmbd) service +- SCST service +- iSCSI target service +- NFS port 2049 +- SMB port 445 +- NetBIOS port 139 +- iSCSI port 3260 + +#### Monitoring & Management (2 checks) +- SNMP daemon +- SNMP port 161 + +#### Network Connectivity (2 checks) +- Internet connectivity (ping 8.8.8.8) +- Network manager status + +**Total: 39+ automated checks** + +## Output Format + +### Console Output +- Color-coded status indicators: + - ✓ Green = Passed + - ⚠ Yellow = Warning + - ✗ Red = Failed + +### Example Output +``` +========================================== + CALYPSO APPLIANCE HEALTH CHECK +========================================== +Date: 2025-12-31 01:46:27 +Hostname: calypso +Uptime: up 6 days, 2 hours, 50 minutes +Log file: /var/log/calypso-healthcheck-20251231-014627.log + +======================================== +SYSTEM RESOURCES +======================================== +✓ Root filesystem (18% used) +✓ Var filesystem (18% used) +✓ Memory usage (49% used, 8206MB available) +✓ CPU load average (2.18, 8 cores) + +... 
+
+========================================
+HEALTH CHECK SUMMARY
+========================================
+
+Total Checks: 39
+Passed: 35
+Warnings: 0
+Failed: 4
+
+⚠ OVERALL STATUS: DEGRADED (89%)
+```
+
+### Log Files
+All checks are logged to: `/var/log/calypso-healthcheck-YYYYMMDD-HHMMSS.log`
+
+Logs include:
+- Timestamp and system information
+- Detailed check results
+- Summary statistics
+- Overall health status
+
+## Scheduling
+
+### Manual Execution
+```bash
+# Run on demand
+sudo calypso-healthcheck
+```
+
+### Cron Job (Recommended)
+Add to crontab for automated checks:
+
+```bash
+# Daily health check at 2 AM
+0 2 * * * /usr/local/bin/calypso-healthcheck > /dev/null 2>&1
+
+# Weekly health check on Monday at 6 AM with email notification
+0 6 * * 1 /usr/local/bin/calypso-healthcheck 2>&1 | mail -s "Calypso Health Check" admin@example.com
+```
+
+### Systemd Timer (Alternative)
+Create `/etc/systemd/system/calypso-healthcheck.timer`:
+```ini
+[Unit]
+Description=Daily Calypso Health Check
+Requires=calypso-healthcheck.service
+
+[Timer]
+OnCalendar=daily
+Persistent=true
+
+[Install]
+WantedBy=timers.target
+```
+
+Create `/etc/systemd/system/calypso-healthcheck.service`:
+```ini
+[Unit]
+Description=Calypso Appliance Health Check
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/bin/calypso-healthcheck
+```
+
+Enable:
+```bash
+systemctl enable --now calypso-healthcheck.timer
+```
+
+## Troubleshooting
+
+### Common Failures
+
+#### API/Frontend Health Endpoints Failing
+```bash
+# Check if services are running
+systemctl status calypso-api calypso-frontend
+
+# Check service logs
+journalctl -u calypso-api -n 50
+journalctl -u calypso-frontend -n 50
+
+# Test manually
+curl -k https://localhost:8443/health
+curl -k https://localhost:3000/health
+```
+
+#### Bacula Director Not Responding
+```bash
+# Check service
+systemctl status bacula-director
+
+# Test bconsole
+echo "status director" | bconsole
+
+# Check logs
+tail -50 /var/log/bacula/bacula.log
+```
+
+#### VTL Slots Not Detected
+```bash
+# Check VTL services
+systemctl status mhvtl.target
+
+# Check devices
+lsscsi | grep -E "mediumx|tape"
+
+# Test manually
+mtx -f /dev/sg7 status
+echo "update slots storage=Scalar-i500" | bconsole
+```
+
+#### Storage Protocols Port Not Listening
+```bash
+# Check service status
+systemctl status nfs-server smbd nmbd scst iscsi-scstd
+
+# Check listening ports
+ss -tuln | grep -E "2049|445|139|3260"
+
+# Restart services if needed
+systemctl restart nfs-server
+systemctl restart smbd nmbd
+```
+
+## Customization
+
+### Modify Thresholds
+Edit `/usr/local/bin/calypso-healthcheck`:
+
+```bash
+# Disk usage threshold (default: 80%)
+check_disk "/" 80 "Root filesystem"
+
+# Memory usage threshold (default: 90%)
+if [ "$mem_percent" -lt 90 ]; then
+
+# Change expected VTL devices
+if [ "$changer_count" -ge 2 ] && [ "$tape_count" -ge 8 ]; then
+```
+
+### Add Custom Checks
+Add new check functions:
+
+```bash
+check_custom() {
+    TOTAL_CHECKS=$((TOTAL_CHECKS + 1))
+
+    if [[ condition ]]; then
+        echo -e "${GREEN}${CHECK}${NC} Custom check passed" | tee -a "$LOG_FILE"
+        PASSED_CHECKS=$((PASSED_CHECKS + 1))
+    else
+        echo -e "${RED}${CROSS}${NC} Custom check failed" | tee -a "$LOG_FILE"
+        FAILED_CHECKS=$((FAILED_CHECKS + 1))
+    fi
+}
+
+# Call in main script
+check_custom
+```
+
+## Integration
+
+### Monitoring Systems
+Export metrics for monitoring:
+
+```bash
+# Nagios/Icinga format
+calypso-healthcheck
+rc=$?
+if [ "$rc" -eq 0 ]; then
+  echo "OK - All checks passed"
+  exit 0
+elif [ "$rc" 
-eq 1 ]; then + echo "WARNING - Healthy with warnings" + exit 1 +else + echo "CRITICAL - System degraded" + exit 2 +fi +``` + +### API Integration +Parse JSON output: + +```bash +# Add JSON output option +calypso-healthcheck --json > /tmp/health.json +``` + +## Maintenance + +### Log Rotation +Logs are stored in `/var/log/calypso-healthcheck-*.log` + +Create `/etc/logrotate.d/calypso-healthcheck`: +``` +/var/log/calypso-healthcheck-*.log { + weekly + rotate 12 + compress + delaycompress + missingok + notifempty +} +``` + +### Cleanup Old Logs +```bash +# Remove logs older than 30 days +find /var/log -name "calypso-healthcheck-*.log" -mtime +30 -delete +``` + +## Best Practices + +1. **Run after reboot** - Verify all services started correctly +2. **Schedule regular checks** - Daily or weekly automated runs +3. **Monitor exit codes** - Alert on degraded/critical status +4. **Review logs periodically** - Identify patterns or recurring issues +5. **Update checks** - Add new components as system evolves +6. **Baseline health** - Establish normal operating parameters +7. **Document exceptions** - Note known warnings that are acceptable + +## See Also +- `pre-reboot-checklist.md` - Pre-reboot verification +- `bacula-vtl-troubleshooting.md` - VTL troubleshooting guide +- System logs: `/var/log/syslog`, `/var/log/bacula/` + +--- + +*Created: 2025-12-31* +*Script: `/usr/local/bin/calypso-healthcheck`* diff --git a/docs/pre-reboot-checklist.md b/docs/pre-reboot-checklist.md new file mode 100644 index 0000000..4d35eaf --- /dev/null +++ b/docs/pre-reboot-checklist.md @@ -0,0 +1,225 @@ +# Calypso Appliance - Pre-Reboot Checklist + +**Date:** 2025-12-31 +**Status:** ✅ READY FOR REBOOT + +--- + +## Enabled Services (Auto-start on boot) + +### Core Application Services +| Service | Status | Purpose | +|---------|--------|---------| +| postgresql.service | ✅ enabled | Database backend | +| calypso-api.service | ✅ enabled | REST API backend | +| calypso-frontend.service | ✅ enabled | Web UI (React) | +| calypso-logger.service | ✅ enabled | Application logging | + +### Backup Services (Bacula) +| Service | Status | Purpose | +|---------|--------|---------| +| bacula-director.service | ✅ enabled | Backup orchestration | +| bacula-fd.service | ✅ enabled | File daemon (client) | +| bacula-sd.service | ✅ enabled | Storage daemon (VTL) | + +### Virtual Tape Library (mhVTL) +| Service | Status | Purpose | +|---------|--------|---------| +| mhvtl.target | ✅ enabled | VTL master target | +| vtllibrary@10.service | ✅ enabled | Scalar i500 library | +| vtllibrary@30.service | ✅ enabled | Scalar i40 library | +| vtltape@11-14.service | ✅ enabled | i500 tape drives (4) | +| vtltape@31-34.service | ✅ enabled | i40 tape drives (4) | + +### Storage Protocols +| Service | Status | Purpose | +|---------|--------|---------| +| nfs-server.service | ✅ enabled | NFS file sharing | +| nfs-blkmap.service | ✅ enabled | NFS block mapping | +| smbd.service | ✅ enabled | Samba/CIFS server | +| nmbd.service | ✅ enabled | NetBIOS name service | +| scst.service | ✅ enabled | SCSI target subsystem | +| iscsi-scstd.service | ✅ enabled | iSCSI target daemon | + +### Monitoring & Management +| Service | Status | Purpose | +|---------|--------|---------| +| snmpd.service | ✅ enabled | SNMP monitoring | + +--- + +## Boot Order & Dependencies + +``` +1. Network (systemd-networkd) + ↓ +2. Storage Foundation + - NFS server + - Samba (smbd/nmbd) + - SCST/iSCSI + ↓ +3. PostgreSQL Database + ↓ +4. 
VTL Services (mhvtl.target) + - vtllibrary services + - vtltape services + ↓ +5. Bacula Services + - bacula-director (after postgresql) + - bacula-fd + - bacula-sd (after VTL) + ↓ +6. Calypso Application + - calypso-api (after postgresql) + - calypso-frontend (wants calypso-api) + - calypso-logger (wants api & frontend) +``` + +--- + +## Post-Reboot Verification + +### 1. Check System Boot +```bash +# Check boot time +systemd-analyze +systemd-analyze blame | head -20 +``` + +### 2. Check Core Services +```bash +# Calypso application +systemctl status calypso-api calypso-frontend calypso-logger + +# Database +systemctl status postgresql + +# Check API health +curl -k https://localhost:8443/health +curl -k https://localhost:3000/health +``` + +### 3. Check Backup Services +```bash +# Bacula status +systemctl status bacula-director bacula-fd bacula-sd + +# Test bconsole connection +echo "status director" | bconsole + +# Check VTL connection +echo "status storage=Scalar-i500" | bconsole +``` + +### 4. Check Storage Protocols +```bash +# NFS +systemctl status nfs-server +showmount -e localhost + +# Samba +systemctl status smbd nmbd +smbstatus + +# iSCSI/SCST +systemctl status scst iscsi-scstd +scstadmin -list_target +``` + +### 5. Check VTL Devices +```bash +# VTL services +systemctl status mhvtl.target + +# Check devices +lsscsi | grep -E "mediumx|tape" + +# Test autochanger +mtx -f /dev/sg7 status | head -10 +``` + +### 6. Check Monitoring +```bash +# SNMP +systemctl status snmpd +snmpwalk -v2c -c public localhost system +``` + +--- + +## Network Access Points + +| Service | URL/Port | Description | +|---------|----------|-------------| +| Web UI | https://[IP]:3000 | Calypso frontend | +| API | https://[IP]:8443 | REST API | +| Bacula Director | localhost:9101 | bconsole access | +| PostgreSQL | localhost:5432 | Database | +| NFS | tcp/2049 | NFS shares | +| Samba | tcp/445, tcp/139 | CIFS/SMB shares | +| iSCSI | tcp/3260 | iSCSI targets | +| SNMP | udp/161 | Monitoring | + +--- + +## Important Notes + +### Bacula VTL Configuration +- **Scalar i500**: 43 slots, 20 tapes labeled (E01001L8-E01020L8) ✅ +- **Scalar i40**: 44 slots, needs investigation after reboot ⚠️ +- Changer devices: /dev/sg7 (i500), /dev/sg8 (i40) +- Tape devices: /dev/nst0-7 (non-rewinding) +- User permissions: bacula in tape+cdrom groups + +### Storage Paths +- Calypso working directory: `/development/calypso` +- Bacula configs: `/etc/bacula/` +- VTL configs: `/etc/mhvtl/` +- PostgreSQL data: `/var/lib/postgresql/` + +### Known Issues +- Scalar i40 VTL: Hardware error during tape load (requires investigation) + +--- + +## Emergency Recovery + +If services fail to start after reboot: + +```bash +# Check failed services +systemctl --failed + +# View service logs +journalctl -xeu calypso-api +journalctl -xeu bacula-director +journalctl -xeu mhvtl.target + +# Manual service restart +systemctl restart calypso-api +systemctl restart bacula-sd +systemctl restart mhvtl.target +``` + +--- + +## Checklist Summary + +- [x] PostgreSQL database: enabled +- [x] Calypso services (api, frontend, logger): enabled +- [x] Bacula services (director, fd, sd): enabled +- [x] mhVTL services (libraries, tape drives): enabled +- [x] NFS server: enabled +- [x] Samba (smbd, nmbd): enabled +- [x] SCST/iSCSI: enabled +- [x] SNMP monitoring: enabled +- [x] Network services: configured +- [x] User permissions: configured +- [x] Service dependencies: verified + +**Status: SAFE TO REBOOT** ✅ + +--- + +*Generated: 2025-12-31* +*Documentation: 
/development/calypso/docs/* diff --git a/frontend/src/api/backup.ts b/frontend/src/api/backup.ts index 58df303..3d64b62 100644 --- a/frontend/src/api/backup.ts +++ b/frontend/src/api/backup.ts @@ -70,6 +70,21 @@ export interface ListClientsParams { search?: string } +export interface PoolStats { + name: string + used_bytes: number + total_bytes: number + usage_percent: number +} + +export interface DashboardStats { + director_status: string + director_uptime: string + last_job?: BackupJob + active_jobs_count: number + default_pool?: PoolStats +} + export const backupAPI = { listJobs: async (params?: ListJobsParams): Promise => { const queryParams = new URLSearchParams() @@ -111,5 +126,132 @@ export const backupAPI = { ) return response.data }, + + getDashboardStats: async (): Promise => { + const response = await apiClient.get('/backup/dashboard/stats') + return response.data + }, + + listStoragePools: async (): Promise<{ pools: StoragePool[]; total: number }> => { + const response = await apiClient.get<{ pools: StoragePool[]; total: number }>('/backup/storage/pools') + return response.data + }, + + listStorageVolumes: async (poolName?: string): Promise<{ volumes: StorageVolume[]; total: number }> => { + const queryParams = new URLSearchParams() + if (poolName) queryParams.append('pool_name', poolName) + const response = await apiClient.get<{ volumes: StorageVolume[]; total: number }>( + `/backup/storage/volumes${queryParams.toString() ? `?${queryParams.toString()}` : ''}` + ) + return response.data + }, + + listStorageDaemons: async (): Promise<{ daemons: StorageDaemon[]; total: number }> => { + const response = await apiClient.get<{ daemons: StorageDaemon[]; total: number }>('/backup/storage/daemons') + return response.data + }, + + createStoragePool: async (data: CreateStoragePoolRequest): Promise => { + const response = await apiClient.post('/backup/storage/pools', data) + return response.data + }, + + deleteStoragePool: async (poolId: number): Promise => { + await apiClient.delete(`/backup/storage/pools/${poolId}`) + }, + + createStorageVolume: async (data: CreateStorageVolumeRequest): Promise => { + const response = await apiClient.post('/backup/storage/volumes', data) + return response.data + }, + + updateStorageVolume: async (volumeId: number, data: UpdateStorageVolumeRequest): Promise => { + const response = await apiClient.put(`/backup/storage/volumes/${volumeId}`, data) + return response.data + }, + + deleteStorageVolume: async (volumeId: number): Promise => { + await apiClient.delete(`/backup/storage/volumes/${volumeId}`) + }, + + listMedia: async (): Promise<{ media: Media[]; total: number }> => { + const response = await apiClient.get<{ media: Media[]; total: number }>('/backup/media') + return response.data + }, +} + +export interface CreateStoragePoolRequest { + name: string + pool_type?: string + label_format?: string + recycle?: boolean + auto_prune?: boolean +} + +export interface CreateStorageVolumeRequest { + volume_name: string + pool_name: string + media_type?: string + max_vol_bytes?: number + vol_retention?: number +} + +export interface UpdateStorageVolumeRequest { + max_vol_bytes?: number + vol_retention?: number +} + +export interface Media { + media_id: number + volume_name: string + pool_name: string + media_type: string + status: string + vol_bytes: number + max_vol_bytes: number + vol_files: number + last_written?: string + recycle_count: number + slot?: number + in_changer?: number + library_name?: string +} + +export interface StoragePool { + pool_id: 
number + name: string + pool_type: string + label_format?: string + recycle?: boolean + auto_prune?: boolean + volume_count: number + used_bytes: number + total_bytes: number + usage_percent: number +} + +export interface StorageVolume { + volume_id: number + media_id: number + volume_name: string + pool_name: string + media_type: string + vol_status: string + vol_bytes: number + max_vol_bytes: number + vol_files: number + vol_retention?: string + last_written?: string + recycle_count: number +} + +export interface StorageDaemon { + storage_id: number + name: string + address: string + port: number + device_name: string + media_type: string + status: string } diff --git a/frontend/src/pages/BackupManagement.tsx b/frontend/src/pages/BackupManagement.tsx index 97f5450..f1229db 100644 --- a/frontend/src/pages/BackupManagement.tsx +++ b/frontend/src/pages/BackupManagement.tsx @@ -1,6 +1,6 @@ import { useState, useRef, useEffect } from 'react' import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query' -import { backupAPI } from '@/api/backup' +import { backupAPI, StoragePool, StorageVolume, CreateStoragePoolRequest } from '@/api/backup' import { Search, X } from 'lucide-react' // Styles for checkbox and tree lines @@ -58,6 +58,15 @@ const clientManagementStyles = ` export default function BackupManagement() { const [activeTab, setActiveTab] = useState<'dashboard' | 'jobs' | 'clients' | 'storage' | 'restore' | 'console'>('dashboard') + + // Fetch dashboard stats + const { data: dashboardStats } = useQuery({ + queryKey: ['dashboard-stats'], + queryFn: () => backupAPI.getDashboardStats(), + enabled: activeTab === 'dashboard', + refetchInterval: 30000, // Refresh every 30 seconds + }) + // Fetch recent jobs for dashboard const { data: dashboardJobsData } = useQuery({ queryKey: ['dashboard-jobs'], @@ -76,6 +85,26 @@ export default function BackupManagement() { return `${(bytes / Math.pow(k, i)).toFixed(2)} ${sizes[i]}` } + const formatDate = (dateStr?: string): string => { + if (!dateStr) return '-' + try { + const date = new Date(dateStr) + const now = new Date() + const diffMs = now.getTime() - date.getTime() + const diffMins = Math.floor(diffMs / 60000) + const diffHours = Math.floor(diffMs / 3600000) + const diffDays = Math.floor(diffMs / 86400000) + + if (diffMins < 1) return 'Just now' + if (diffMins < 60) return `${diffMins}m ago` + if (diffHours < 24) return `${diffHours}h ago` + if (diffDays < 7) return `${diffDays}d ago` + return date.toLocaleDateString() + } catch { + return '-' + } + } + const formatDuration = (seconds?: number): string => { if (!seconds) return '-' const hours = Math.floor(seconds / 3600) @@ -223,10 +252,14 @@ export default function BackupManagement() {

Director Status

- check_circle -

Active

+ + {dashboardStats?.director_status === 'Active' ? 'check_circle' : 'error'} + +

{dashboardStats?.director_status || 'Unknown'}

-

Uptime: 14d 2h 12m

+

+ Uptime: {dashboardStats?.director_uptime || 'Unknown'} +

@@ -238,9 +271,19 @@ export default function BackupManagement() {

Last Job

-

Success

+

+ {dashboardStats?.last_job ? (dashboardStats.last_job.status === 'Completed' ? 'Success' : dashboardStats.last_job.status) : 'N/A'} +

-

DailyBackup • 2h 15m ago

+

+ {dashboardStats?.last_job ? ( + <> + {dashboardStats.last_job.job_name} • {formatDate(dashboardStats.last_job.ended_at || dashboardStats.last_job.started_at)} + + ) : ( + 'No jobs yet' + )} +

@@ -252,10 +295,15 @@ export default function BackupManagement() {

Active Jobs

-

3 Running

+

+ {dashboardStats?.active_jobs_count || 0} Running +

-
+
@@ -267,15 +315,26 @@ export default function BackupManagement() {
-

Default Pool

- 78% +

+ {dashboardStats?.default_pool?.name || 'Default Pool'} +

+ + {dashboardStats?.default_pool ? `${Math.round(dashboardStats.default_pool.usage_percent)}%` : 'N/A'} +
-

9.4 TB

-

/ 12 TB

+

+ {dashboardStats?.default_pool ? formatBytes(dashboardStats.default_pool.used_bytes) : 'N/A'} +

+

+ {dashboardStats?.default_pool ? `/ ${formatBytes(dashboardStats.default_pool.total_bytes)}` : ''} +

-
+
@@ -377,9 +436,7 @@ export default function BackupManagement() { )} {activeTab === 'storage' && ( -
- Storage tab coming soon -
+ )} {activeTab === 'restore' && ( @@ -1445,9 +1502,1329 @@ function ClientsManagementTab({ onSwitchToConsole }: { onSwitchToConsole?: () =>

[14:23:10] warning: /var/www/html/cache/tmp locked by another process, skipping

[14:23:45] bareos-dir: JobId 10423: Sending Accurate information.

- + ) } +function StorageManagementTab() { + const [activeView, setActiveView] = useState<'pools' | 'volumes' | 'daemons'>('pools') + const [poolAction, setPoolAction] = useState<'list' | 'add' | 'delete'>('list') + const [showDeleteModal, setShowDeleteModal] = useState(false) + const [poolToDelete, setPoolToDelete] = useState(null) + const queryClient = useQueryClient() + + // Fetch storage pools + const { data: poolsData, isLoading: poolsLoading } = useQuery({ + queryKey: ['storage-pools'], + queryFn: () => backupAPI.listStoragePools(), + }) + + // Create pool mutation + const createPoolMutation = useMutation({ + mutationFn: backupAPI.createStoragePool, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['storage-pools'] }) + setPoolAction('list') + }, + }) + + // Delete pool mutation + const deletePoolMutation = useMutation({ + mutationFn: backupAPI.deleteStoragePool, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['storage-pools'] }) + setShowDeleteModal(false) + setPoolToDelete(null) + }, + }) + + // Fetch storage daemons + const { data: daemonsData, isLoading: daemonsLoading } = useQuery({ + queryKey: ['storage-daemons'], + queryFn: () => backupAPI.listStorageDaemons(), + enabled: activeView === 'daemons', + }) + + const pools = poolsData?.pools || [] + const daemons = daemonsData?.daemons || [] + + const formatBytes = (bytes: number): string => { + if (bytes === 0) return '0 B' + const k = 1024 + const sizes = ['B', 'KB', 'MB', 'GB', 'TB'] + const i = Math.floor(Math.log(bytes) / Math.log(k)) + return `${(bytes / Math.pow(k, i)).toFixed(2)} ${sizes[i]}` + } + + const getStatusBadge = (status: string) => { + const statusMap: Record = { + Full: { bg: 'bg-green-500/10', text: 'text-green-400', border: 'border-green-500/20' }, + Append: { bg: 'bg-blue-500/10', text: 'text-blue-400', border: 'border-blue-500/20' }, + Used: { bg: 'bg-yellow-500/10', text: 'text-yellow-400', border: 'border-yellow-500/20' }, + Error: { bg: 'bg-red-500/10', text: 'text-red-400', border: 'border-red-500/20' }, + Online: { bg: 'bg-green-500/10', text: 'text-green-400', border: 'border-green-500/20' }, + Offline: { bg: 'bg-red-500/10', text: 'text-red-400', border: 'border-red-500/20' }, + } + const config = statusMap[status] || { bg: 'bg-gray-500/10', text: 'text-gray-400', border: 'border-gray-500/20' } + return ( + + {status} + + ) + } + + return ( +
+ {/* Header */} +
+
+
+

Storage Management

+ + {pools.length} Pools + +
+

+ Manage storage pools, volumes, and storage daemons for backup operations. +

+
+
+ + {/* Tabs */} +
+
+ + + +
+
+ + {/* Content */} + {activeView === 'pools' && ( +
+ {/* Pool Actions Tabs */} +
+ + + +
+ + {/* List Pools View */} + {poolAction === 'list' && ( +
+ {poolsLoading ? ( +
Loading pools...
+ ) : pools.length === 0 ? ( +
+

No storage pools found

+
+ ) : ( + <> +
+ + + + + + + + + + + + + {pools.map((pool) => ( + + + + + + + + + ))} + +
Pool NameTypeVolumesUsageCapacityOptions
+
+
+ hard_drive +
+
+

{pool.name}

+ {pool.label_format && ( +

{pool.label_format}

+ )} +
+
+
{pool.pool_type || '-'}{pool.volume_count} +
+
+ {Math.round(pool.usage_percent)}% +
+
+
+
+
+
+ {formatBytes(pool.used_bytes)} / {formatBytes(pool.total_bytes)} + +
+ {pool.recycle && ( + Recycle + )} + {pool.auto_prune && ( + Auto-Prune + )} +
+
+
+ + )} +
+ )} + + {/* Add Pool View */} + {poolAction === 'add' && ( +
+

Create New Storage Pool

+

+ Pools define the set of storage Volumes to be used by Bacula. Configure different pools to organize your backup data. +

+ { + createPoolMutation.mutate(data) + }} + isLoading={createPoolMutation.isPending} + onCancel={() => setPoolAction('list')} + /> +
+ )} + + {/* Delete Pool View */} + {poolAction === 'delete' && ( +
+ {poolsLoading ? ( +
Loading pools...
+ ) : pools.length === 0 ? ( +
+

No storage pools found

+
+ ) : ( + <> +
+

+ Select a pool to delete. Pools with volumes cannot be deleted. +

+
+
+ + + + + + + + + + + + {pools.map((pool) => ( + + + + + + + + ))} + +
Pool NameTypeVolumesUsageActions
+
+
+ hard_drive +
+

{pool.name}

+
+
{pool.pool_type || '-'}{pool.volume_count} +
+ {Math.round(pool.usage_percent)}% +
+
+
+
+
+ +
+
+ + )} +
+ )} +
+ )} + + {activeView === 'volumes' && ( + + )} + + {activeView === 'daemons' && ( +
+ {daemonsLoading ? ( +
Loading storage daemons...
+ ) : daemons.length === 0 ? ( +
+

No storage daemons found

+
+ ) : ( + <> +
+ + + + + + + + + + + + + {daemons.map((daemon) => ( + + + + + + + + + ))} + +
NameAddressPortDeviceMedia TypeStatus
+
+
+ dns +
+

{daemon.name}

+
+
{daemon.address}{daemon.port}{daemon.device_name}{daemon.media_type}{getStatusBadge(daemon.status)}
+
+ + )} +
+ )} + + {/* Delete Confirmation Modal */} + {showDeleteModal && poolToDelete && ( +
+
+

Delete Storage Pool

+

+ Are you sure you want to delete pool {poolToDelete.name}? +

+ {poolToDelete.volume_count > 0 && ( +
+

+ ⚠️ This pool contains {poolToDelete.volume_count} volume(s). Pools with volumes cannot be deleted. +

+
+ )} +
+ + +
+
+
+ )} +
+ ) +} + +function AddPoolForm({ onSubmit, isLoading, onCancel }: { onSubmit: (data: CreateStoragePoolRequest) => void; isLoading: boolean; onCancel: () => void }) { + const [formData, setFormData] = useState({ + name: '', + pool_type: 'Backup', + label_format: '', + recycle: false, + auto_prune: false, + }) + + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault() + onSubmit({ + name: formData.name, + pool_type: formData.pool_type, + label_format: formData.label_format || undefined, + recycle: formData.recycle || undefined, + auto_prune: formData.auto_prune || undefined, + }) + } + + return ( +
+
+ + setFormData({ ...formData, name: e.target.value })} + placeholder="e.g., Full-Backup, Incremental-Backup" + className="w-full bg-surface-dark border border-border-dark rounded-lg px-4 py-2 text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + /> +

Unique name for the storage pool

+
+ +
+ + +

+ {formData.pool_type === 'Backup' && 'Contains volumes for backup data with retention periods'} + {formData.pool_type === 'Scratch' && 'Contains volumes that can be used by any pool when needed'} + {formData.pool_type === 'Recycle' && 'Contains volumes that have been purged and are ready for reuse'} +

+
+ +
+ + setFormData({ ...formData, label_format: e.target.value })} + placeholder="e.g., Vol-${Year}-${Month:p/2/0/r}-${Day:p/2/0/r}-${Counter:4}" + className="w-full bg-surface-dark border border-border-dark rounded-lg px-4 py-2 text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + /> +

Format string for automatic volume labeling

+
+ +
+ + +
+ +
+ + +
+
+ ) +} + +function VolumeManagementTab({ pools }: { pools: StoragePool[] }) { + const [volumeAction, setVolumeAction] = useState<'list' | 'add' | 'delete' | 'update'>('list') + const [showDeleteModal, setShowDeleteModal] = useState(false) + const [showUpdateModal, setShowUpdateModal] = useState(false) + const [volumeToDelete, setVolumeToDelete] = useState(null) + const [volumeToUpdate, setVolumeToUpdate] = useState(null) + const [selectedVolumes, setSelectedVolumes] = useState>(new Set()) + const [searchQuery, setSearchQuery] = useState('') + const [selectedVolume, setSelectedVolume] = useState(null) + const [currentPage, setCurrentPage] = useState(1) + const itemsPerPage = 5 + const queryClient = useQueryClient() + + // Fetch media from bconsole "list media" command + const { data: mediaData, isLoading: mediaLoading } = useQuery({ + queryKey: ['backup-media'], + queryFn: () => backupAPI.listMedia(), + enabled: volumeAction === 'list', + refetchOnWindowFocus: false, + }) + + // Fetch storage volumes (for add/delete/update operations) + const { data: volumesData, isLoading: volumesLoading } = useQuery({ + queryKey: ['storage-volumes'], + queryFn: () => backupAPI.listStorageVolumes(), + enabled: volumeAction !== 'list', + }) + + // Create volume mutation + const createVolumeMutation = useMutation({ + mutationFn: backupAPI.createStorageVolume, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['storage-volumes'] }) + setVolumeAction('list') + }, + }) + + // Update volume mutation + const updateVolumeMutation = useMutation({ + mutationFn: ({ volumeId, data }: { volumeId: number; data: any }) => backupAPI.updateStorageVolume(volumeId, data), + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['storage-volumes'] }) + setShowUpdateModal(false) + setVolumeToUpdate(null) + }, + }) + + // Delete volume mutation + const deleteVolumeMutation = useMutation({ + mutationFn: backupAPI.deleteStorageVolume, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['storage-volumes'] }) + setShowDeleteModal(false) + setVolumeToDelete(null) + }, + }) + + const media = mediaData?.media || [] + const volumes = volumesData?.volumes || [] + + const formatBytes = (bytes: number): string => { + if (bytes === 0) return '0 B' + const k = 1024 + const sizes = ['B', 'KB', 'MB', 'GB', 'TB'] + const i = Math.floor(Math.log(bytes) / Math.log(k)) + return `${(bytes / Math.pow(k, i)).toFixed(2)} ${sizes[i]}` + } + + const getStatusBadge = (status: string) => { + const statusMap: Record = { + Full: { bg: 'bg-green-500/10', text: 'text-green-400', border: 'border-green-500/20' }, + Append: { bg: 'bg-blue-500/10', text: 'text-blue-400', border: 'border-blue-500/20' }, + Used: { bg: 'bg-yellow-500/10', text: 'text-yellow-400', border: 'border-yellow-500/20' }, + Error: { bg: 'bg-red-500/10', text: 'text-red-400', border: 'border-red-500/20' }, + Recycle: { bg: 'bg-gray-500/10', text: 'text-gray-400', border: 'border-gray-500/20' }, + Purged: { bg: 'bg-gray-500/10', text: 'text-gray-400', border: 'border-gray-500/20' }, + Online: { bg: 'bg-green-500/10', text: 'text-green-400', border: 'border-green-500/20' }, + Offline: { bg: 'bg-red-500/10', text: 'text-red-400', border: 'border-red-500/20' }, + } + const config = statusMap[status] || { bg: 'bg-gray-500/10', text: 'text-gray-400', border: 'border-gray-500/20' } + return ( + + {status} + + ) + } + + // Filter and paginate media + const filteredMedia = media.filter((m) => { + if (!searchQuery) return true + const query = 
searchQuery.toLowerCase() + return ( + m.volume_name.toLowerCase().includes(query) || + m.pool_name.toLowerCase().includes(query) || + m.status.toLowerCase().includes(query) || + m.media_type.toLowerCase().includes(query) + ) + }) + + const totalPages = Math.ceil(filteredMedia.length / itemsPerPage) + const startIndex = (currentPage - 1) * itemsPerPage + const endIndex = startIndex + itemsPerPage + const paginatedMedia = filteredMedia.slice(startIndex, endIndex) + + + const formatDate = (dateStr?: string): string => { + if (!dateStr) return '-' + try { + const date = new Date(dateStr) + return date.toLocaleString('en-US', { year: 'numeric', month: '2-digit', day: '2-digit', hour: '2-digit', minute: '2-digit' }) + } catch { + return dateStr + } + } + + const handleSelectAll = (checked: boolean) => { + if (checked) { + setSelectedVolumes(new Set(paginatedMedia.map((m) => m.media_id))) + } else { + setSelectedVolumes(new Set()) + } + } + + const handleSelectVolume = (mediaId: number, checked: boolean) => { + const newSelected = new Set(selectedVolumes) + if (checked) { + newSelected.add(mediaId) + } else { + newSelected.delete(mediaId) + } + setSelectedVolumes(newSelected) + } + + const selectedMedia = media.find((m) => m.media_id === selectedVolume) + + return ( +
+ {/* Action Buttons and Search */} +
+ +
+ + + +
+
+ + search + + { + setSearchQuery(e.target.value) + setCurrentPage(1) + }} + className="w-full md:w-64 bg-[#1c2936] border border-border-dark text-white text-sm rounded pl-10 pr-3 py-2 focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary placeholder-text-secondary shadow-sm" + placeholder="Filter volumes..." + /> +
+
+ + {/* List Media View (from bconsole "list media") */} + {volumeAction === 'list' && ( +
+ {/* Media Table */} +
+
+

Volume Inventory

+
+ + Showing {startIndex + 1} to {Math.min(endIndex, filteredMedia.length)} of{' '} + {filteredMedia.length} items + +
+
+ {mediaLoading ? ( +
Loading media...
+ ) : filteredMedia.length === 0 ? ( +
+

No media found

+
+ ) : ( + <> +
+ + + + + + + + + + + + + + + + {paginatedMedia.map((m) => { + const isSelected = selectedVolume === m.media_id + return ( + setSelectedVolume(isSelected ? null : m.media_id)} + className={`hover:bg-[#2a3c50] transition-colors cursor-pointer border-l-4 ${ + isSelected ? 'bg-primary/10 border-l-primary' : 'border-l-transparent' + }`} + > + + + + + + + + + + + ) + })} + +
+ 0 && paginatedMedia.every((m) => selectedVolumes.has(m.media_id))} + onChange={(e) => handleSelectAll(e.target.checked)} + className="rounded border-border-dark text-primary focus:ring-primary bg-[#1c2936]" + /> + + Volume Name arrow_downward + PoolStatusLibraryCapacityUsedRetentionLast Written
e.stopPropagation()}> + { + handleSelectVolume(m.media_id, e.target.checked) + if (e.target.checked) { + setSelectedVolume(m.media_id) + } + }} + onClick={(e) => e.stopPropagation()} + className="rounded border-border-dark text-primary focus:ring-primary bg-[#1c2936]" + /> + + hard_drive + {m.volume_name} + {m.pool_name}{getStatusBadge(m.status)} + {m.media_type && (m.media_type.toLowerCase().includes('lto') || m.media_type.toLowerCase().includes('tape')) && m.in_changer && m.in_changer > 0 ? ( + + {m.library_name || 'Unknown'} {m.slot && m.slot > 0 ? `(Slot ${m.slot})` : ''} + + ) : ( + - + )} + {formatBytes(m.max_vol_bytes)}{formatBytes(m.vol_bytes)} + {m.max_vol_bytes > 0 && m.vol_bytes > 0 ? '30 Days' : '-'} + {formatDate(m.last_written)}
+
+
+
+ Showing {startIndex + 1} to {Math.min(endIndex, filteredMedia.length)} of{' '} + {filteredMedia.length} results +
+
+ + +
+
+ + )} +
+ + {/* Volume Details Section */} + {selectedMedia && ( +
+
+

+ info + Volume Details: {selectedMedia.volume_name} +

+
+ + | + +
+
+
+
+ Volume ID + {selectedMedia.media_id} +
+
+ Media Type + {selectedMedia.media_type} +
+
+ Label Date + {formatDate(selectedMedia.last_written) || '-'} +
+
+ Storage Device + {selectedMedia.pool_name} +
+
+ Capacity Usage +
+
0 + ? Math.min(100, (selectedMedia.vol_bytes / selectedMedia.max_vol_bytes) * 100) + : 0 + }%`, + }} + >
+
+
+ {formatBytes(selectedMedia.vol_bytes)} Used + {formatBytes(selectedMedia.max_vol_bytes)} Total +
+
+
+
+ + +
+
+ )} +
+ )} + + {/* Add Volume View */} + {volumeAction === 'add' && ( +
+

Create New Storage Volume

+

+ Volumes are archive units where Bacula stores backed up data. Each volume belongs to a Pool and can be a tape or a disk file. +

+ { + createVolumeMutation.mutate(data) + }} + isLoading={createVolumeMutation.isPending} + onCancel={() => setVolumeAction('list')} + /> +
+ )} + + {/* Delete Volume View */} + {volumeAction === 'delete' && ( +
+ {volumesLoading ? ( +
Loading volumes...
+ ) : volumes.length === 0 ? ( +
+

No volumes found

+
+ ) : ( + <> +
+

+ Select a volume to delete. Volumes with data cannot be deleted. +

+
+
+ + + + + + + + + + + + {volumes.map((vol) => ( + + + + + + + + ))} + +
Volume NamePoolStatusSizeActions
{vol.volume_name}{vol.pool_name}{getStatusBadge(vol.vol_status)} + {formatBytes(vol.vol_bytes)} / {formatBytes(vol.max_vol_bytes)} + + +
+
+ + )} +
+ )} + + {/* Update Volume View */} + {volumeAction === 'update' && ( +
+ {volumesLoading ? ( +
Loading volumes...
+ ) : volumes.length === 0 ? ( +
+

No volumes found

+
+ ) : ( + <> +
+

+ Select a volume to update its meta-data (Max Volume Bytes, Retention Period). +

+
+
+ + + + + + + + + + + + {volumes.map((vol) => ( + + + + + + + + ))} + +
Volume NamePoolStatusMax SizeActions
{vol.volume_name}{vol.pool_name}{getStatusBadge(vol.vol_status)} + {formatBytes(vol.max_vol_bytes)} + + +
+
+ + )} +
+ )} + + {/* Delete Confirmation Modal */} + {showDeleteModal && volumeToDelete && ( +
+
+

Delete Storage Volume

+

+ Are you sure you want to delete volume {volumeToDelete.volume_name}? +

+ {volumeToDelete.vol_bytes > 0 && ( +
+

+ ⚠️ This volume contains {formatBytes(volumeToDelete.vol_bytes)} of data. Volumes with data cannot be deleted. +

+
+ )} +
+ + +
+
+
+ )} + + {/* Update Modal */} + {showUpdateModal && volumeToUpdate && ( +
+
+

Update Storage Volume

+

+ Update meta-data for volume {volumeToUpdate.volume_name} +

+ { + updateVolumeMutation.mutate({ volumeId: volumeToUpdate.volume_id, data }) + }} + isLoading={updateVolumeMutation.isPending} + onCancel={() => { + setShowUpdateModal(false) + setVolumeToUpdate(null) + }} + /> +
+
+ )} +
+ ) +} + +function AddVolumeForm({ pools, onSubmit, isLoading, onCancel }: { pools: StoragePool[]; onSubmit: (data: any) => void; isLoading: boolean; onCancel: () => void }) { + const [formData, setFormData] = useState({ + volume_name: '', + pool_name: '', + media_type: 'File', + max_vol_bytes: '', + vol_retention: '', + }) + + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault() + onSubmit({ + volume_name: formData.volume_name, + pool_name: formData.pool_name, + media_type: formData.media_type, + max_vol_bytes: formData.max_vol_bytes ? parseInt(formData.max_vol_bytes) * 1024 * 1024 * 1024 : undefined, + vol_retention: formData.vol_retention ? parseInt(formData.vol_retention) : undefined, + }) + } + + return ( +
+
+ + setFormData({ ...formData, volume_name: e.target.value })} + placeholder="e.g., Vol-001, MyBackup-001" + className="w-full bg-surface-dark border border-border-dark rounded-lg px-4 py-2 text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + /> +

Unique name for the storage volume

+
+ +
+ + +

Pool where this volume will be stored

+
+ +
+ + +

Type of storage media

+
+ +
+ + setFormData({ ...formData, max_vol_bytes: e.target.value })} + placeholder="e.g., 100" + className="w-full bg-surface-dark border border-border-dark rounded-lg px-4 py-2 text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + /> +

Maximum size in GB (optional, for disk volumes)

+
+ +
+ + setFormData({ ...formData, vol_retention: e.target.value })} + placeholder="e.g., 30" + className="w-full bg-surface-dark border border-border-dark rounded-lg px-4 py-2 text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + /> +

Number of days to retain the volume (optional)

+
+ +
+ + +
+
+ ) +} + +function UpdateVolumeForm({ volume, onSubmit, isLoading, onCancel }: { volume: StorageVolume; onSubmit: (data: any) => void; isLoading: boolean; onCancel: () => void }) { + const [formData, setFormData] = useState({ + max_vol_bytes: volume.max_vol_bytes > 0 ? (volume.max_vol_bytes / (1024 * 1024 * 1024)).toString() : '', + vol_retention: '', + }) + + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault() + onSubmit({ + max_vol_bytes: formData.max_vol_bytes ? parseInt(formData.max_vol_bytes) * 1024 * 1024 * 1024 : undefined, + vol_retention: formData.vol_retention ? parseInt(formData.vol_retention) : undefined, + }) + } + + return ( +
+
+ + setFormData({ ...formData, max_vol_bytes: e.target.value })} + placeholder="e.g., 100" + className="w-full bg-surface-dark border border-border-dark rounded-lg px-4 py-2 text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + /> +

Current: {volume.max_vol_bytes > 0 ? `${(volume.max_vol_bytes / (1024 * 1024 * 1024)).toFixed(2)} GB` : 'Not set'}

+
+ +
+ + setFormData({ ...formData, vol_retention: e.target.value })} + placeholder="e.g., 30" + className="w-full bg-surface-dark border border-border-dark rounded-lg px-4 py-2 text-white text-sm focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent" + /> +

Number of days to retain the volume

+
+ +
+ + +
+
+ ) +} + diff --git a/frontend/src/pages/Login.tsx b/frontend/src/pages/Login.tsx index b58b760..b4e72b7 100644 --- a/frontend/src/pages/Login.tsx +++ b/frontend/src/pages/Login.tsx @@ -29,6 +29,21 @@ export default function LoginPage() { } return ( + <> +
@@ -73,10 +88,11 @@ export default function LoginPage() { name="username" type="text" required - className="appearance-none rounded-none relative block w-full px-3 py-2 border border-border-dark bg-[#111a22] placeholder-text-secondary text-white rounded-t-md focus:outline-none focus:ring-primary focus:border-primary focus:z-10 sm:text-sm" + className="appearance-none rounded-none relative block w-full px-3 py-2 border border-border-dark bg-[#111a22] placeholder-text-secondary text-white rounded-t-md focus:outline-none focus:ring-primary focus:border-primary focus:z-10 sm:text-sm autofill:bg-[#111a22] autofill:text-white" placeholder="Username" value={username} onChange={(e) => setUsername(e.target.value)} + autoComplete="username" />
@@ -88,10 +104,11 @@ export default function LoginPage() { name="password" type="password" required - className="appearance-none rounded-none relative block w-full px-3 py-2 border border-border-dark bg-[#111a22] placeholder-text-secondary text-white rounded-b-md focus:outline-none focus:ring-primary focus:border-primary focus:z-10 sm:text-sm" + className="appearance-none rounded-none relative block w-full px-3 py-2 border border-border-dark bg-[#111a22] placeholder-text-secondary text-white rounded-b-md focus:outline-none focus:ring-primary focus:border-primary focus:z-10 sm:text-sm autofill:bg-[#111a22] autofill:text-white" placeholder="Password" value={password} onChange={(e) => setPassword(e.target.value)} + autoComplete="current-password" />
@@ -121,6 +138,7 @@ export default function LoginPage() {
+ ) }