diff --git a/backend/bin/calypso-api b/backend/bin/calypso-api index b90c252..684773b 100755 Binary files a/backend/bin/calypso-api and b/backend/bin/calypso-api differ diff --git a/backend/calypso-api b/backend/calypso-api index 6e138a6..4dfe918 100755 Binary files a/backend/calypso-api and b/backend/calypso-api differ diff --git a/backend/internal/common/router/cache.go b/backend/internal/common/router/cache.go index 0020420..c3907b9 100644 --- a/backend/internal/common/router/cache.go +++ b/backend/internal/common/router/cache.go @@ -51,6 +51,13 @@ func cacheMiddleware(cfg CacheConfig, cache *cache.Cache) gin.HandlerFunc { return } + // Don't cache VTL endpoints - they change frequently + path := c.Request.URL.Path + if strings.HasPrefix(path, "/api/v1/tape/vtl/") { + c.Next() + return + } + // Generate cache key from request path and query string keyParts := []string{c.Request.URL.Path} if c.Request.URL.RawQuery != "" { diff --git a/backend/internal/common/router/router.go b/backend/internal/common/router/router.go index c8e9f20..0ba7437 100644 --- a/backend/internal/common/router/router.go +++ b/backend/internal/common/router/router.go @@ -255,6 +255,7 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng systemGroup.POST("/services/:name/restart", systemHandler.RestartService) systemGroup.GET("/services/:name/logs", systemHandler.GetServiceLogs) systemGroup.POST("/support-bundle", systemHandler.GenerateSupportBundle) + systemGroup.GET("/interfaces", systemHandler.ListNetworkInterfaces) } // IAM (admin only) diff --git a/backend/internal/storage/handler.go b/backend/internal/storage/handler.go index c470fec..2f3382c 100644 --- a/backend/internal/storage/handler.go +++ b/backend/internal/storage/handler.go @@ -304,6 +304,13 @@ func (h *Handler) DeleteZFSPool(c *gin.Context) { return } + // Invalidate cache for pools list + if h.cache != nil { + cacheKey := "http:/api/v1/storage/zfs/pools:" + h.cache.Delete(cacheKey) + h.logger.Debug("Cache invalidated for pools list", "key", cacheKey) + } + c.JSON(http.StatusOK, gin.H{"message": "ZFS pool deleted successfully"}) } diff --git a/backend/internal/storage/zfs.go b/backend/internal/storage/zfs.go index facd226..8c8242b 100644 --- a/backend/internal/storage/zfs.go +++ b/backend/internal/storage/zfs.go @@ -7,6 +7,7 @@ import ( "os" "os/exec" "path/filepath" + "strconv" "strings" "time" @@ -44,7 +45,8 @@ type ZFSPool struct { AutoExpand bool `json:"auto_expand"` ScrubInterval int `json:"scrub_interval"` // days IsActive bool `json:"is_active"` - HealthStatus string `json:"health_status"` // online, degraded, faulted, offline + HealthStatus string `json:"health_status"` // online, degraded, faulted, offline + CompressRatio float64 `json:"compress_ratio"` // compression ratio (e.g., 1.45x) CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` CreatedBy string `json:"created_by"` @@ -359,6 +361,26 @@ func (s *ZFSService) getSpareDisks(ctx context.Context, poolName string) ([]stri return spareDisks, nil } +// getCompressRatio gets the compression ratio from ZFS +func (s *ZFSService) getCompressRatio(ctx context.Context, poolName string) (float64, error) { + cmd := exec.CommandContext(ctx, "zfs", "get", "-H", "-o", "value", "compressratio", poolName) + output, err := cmd.Output() + if err != nil { + return 1.0, err + } + + ratioStr := strings.TrimSpace(string(output)) + // Remove 'x' suffix if present (e.g., "1.45x" -> "1.45") + ratioStr = strings.TrimSuffix(ratioStr, "x") + + ratio, err := 
strconv.ParseFloat(ratioStr, 64) + if err != nil { + return 1.0, err + } + + return ratio, nil +} + // ListPools lists all ZFS pools func (s *ZFSService) ListPools(ctx context.Context) ([]*ZFSPool, error) { query := ` @@ -407,8 +429,17 @@ func (s *ZFSService) ListPools(ctx context.Context) ([]*ZFSPool, error) { pool.SpareDisks = spareDisks } + // Get compressratio from ZFS system + compressRatio, err := s.getCompressRatio(ctx, pool.Name) + if err != nil { + s.logger.Warn("Failed to get compressratio", "pool", pool.Name, "error", err) + pool.CompressRatio = 1.0 // Default to 1.0 if can't get ratio + } else { + pool.CompressRatio = compressRatio + } + pools = append(pools, &pool) - s.logger.Debug("Added pool to list", "pool_id", pool.ID, "name", pool.Name) + s.logger.Debug("Added pool to list", "pool_id", pool.ID, "name", pool.Name, "compressratio", pool.CompressRatio) } if err := rows.Err(); err != nil { diff --git a/backend/internal/storage/zfs_pool_monitor.go b/backend/internal/storage/zfs_pool_monitor.go index 6812167..255f676 100644 --- a/backend/internal/storage/zfs_pool_monitor.go +++ b/backend/internal/storage/zfs_pool_monitor.go @@ -218,7 +218,7 @@ func (m *ZFSPoolMonitor) updatePoolStatus(ctx context.Context, poolName string, return nil } -// markMissingPoolsOffline marks pools that exist in database but not in system as offline +// markMissingPoolsOffline marks pools that exist in database but not in system as offline or deletes them func (m *ZFSPoolMonitor) markMissingPoolsOffline(ctx context.Context, systemPools map[string]PoolInfo) error { // Get all pools from database rows, err := m.zfsService.db.QueryContext(ctx, "SELECT id, name FROM zfs_pools WHERE is_active = true") @@ -235,17 +235,13 @@ func (m *ZFSPoolMonitor) markMissingPoolsOffline(ctx context.Context, systemPool // Check if pool exists in system if _, exists := systemPools[poolName]; !exists { - // Pool doesn't exist in system, mark as offline - _, err = m.zfsService.db.ExecContext(ctx, ` - UPDATE zfs_pools SET - health_status = 'offline', - updated_at = NOW() - WHERE id = $1 - `, poolID) + // Pool doesn't exist in system - delete from database (pool was destroyed) + m.logger.Info("Pool not found in system, removing from database", "pool", poolName) + _, err = m.zfsService.db.ExecContext(ctx, "DELETE FROM zfs_pools WHERE id = $1", poolID) if err != nil { - m.logger.Warn("Failed to mark pool as offline", "pool", poolName, "error", err) + m.logger.Warn("Failed to delete missing pool from database", "pool", poolName, "error", err) } else { - m.logger.Info("Marked pool as offline (not found in system)", "pool", poolName) + m.logger.Info("Removed missing pool from database", "pool", poolName) } } } diff --git a/backend/internal/system/handler.go b/backend/internal/system/handler.go index 2c85b78..04fd9bd 100644 --- a/backend/internal/system/handler.go +++ b/backend/internal/system/handler.go @@ -115,3 +115,19 @@ func (h *Handler) GenerateSupportBundle(c *gin.Context) { c.JSON(http.StatusAccepted, gin.H{"task_id": taskID}) } +// ListNetworkInterfaces lists all network interfaces +func (h *Handler) ListNetworkInterfaces(c *gin.Context) { + interfaces, err := h.service.ListNetworkInterfaces(c.Request.Context()) + if err != nil { + h.logger.Error("Failed to list network interfaces", "error", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list network interfaces"}) + return + } + + // Ensure we return an empty array instead of null + if interfaces == nil { + interfaces = []NetworkInterface{} + } + + 
c.JSON(http.StatusOK, gin.H{"interfaces": interfaces}) +} diff --git a/backend/internal/system/service.go b/backend/internal/system/service.go index 1e57b92..8a55345 100644 --- a/backend/internal/system/service.go +++ b/backend/internal/system/service.go @@ -175,3 +175,173 @@ func (s *Service) GenerateSupportBundle(ctx context.Context, outputPath string) return nil } +// NetworkInterface represents a network interface +type NetworkInterface struct { + Name string `json:"name"` + IPAddress string `json:"ip_address"` + Subnet string `json:"subnet"` + Status string `json:"status"` // "Connected" or "Down" + Speed string `json:"speed"` // e.g., "10 Gbps", "1 Gbps" + Role string `json:"role"` // "Management", "ISCSI", or empty +} + +// ListNetworkInterfaces lists all network interfaces +func (s *Service) ListNetworkInterfaces(ctx context.Context) ([]NetworkInterface, error) { + // First, get all interface names and their states + cmd := exec.CommandContext(ctx, "ip", "link", "show") + output, err := cmd.Output() + if err != nil { + s.logger.Error("Failed to list interfaces", "error", err) + return nil, fmt.Errorf("failed to list interfaces: %w", err) + } + + interfaceMap := make(map[string]*NetworkInterface) + lines := strings.Split(string(output), "\n") + + s.logger.Debug("Parsing network interfaces", "output_lines", len(lines)) + + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + + // Parse interface name and state + // Format: "2: ens18: mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000" + // Look for lines that start with a number followed by ":" (interface definition line) + // Simple check: line starts with digit, contains ":", and contains "state" + if len(line) > 0 && line[0] >= '0' && line[0] <= '9' && strings.Contains(line, ":") && strings.Contains(line, "state") { + parts := strings.Fields(line) + if len(parts) < 2 { + continue + } + + // Extract interface name (e.g., "ens18:" or "lo:") + ifaceName := strings.TrimSuffix(parts[1], ":") + if ifaceName == "" || ifaceName == "lo" { + continue // Skip loopback + } + + // Extract state - look for "state UP" or "state DOWN" in the line + state := "Down" + if strings.Contains(line, "state UP") { + state = "Connected" + } else if strings.Contains(line, "state DOWN") { + state = "Down" + } + + s.logger.Info("Found interface", "name", ifaceName, "state", state) + + interfaceMap[ifaceName] = &NetworkInterface{ + Name: ifaceName, + Status: state, + Speed: "Unknown", + } + } + } + + s.logger.Debug("Found interfaces from ip link", "count", len(interfaceMap)) + + // Get IP addresses for each interface + cmd = exec.CommandContext(ctx, "ip", "-4", "addr", "show") + output, err = cmd.Output() + if err != nil { + s.logger.Warn("Failed to get IP addresses", "error", err) + } else { + lines = strings.Split(string(output), "\n") + var currentIfaceName string + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + + // Parse interface name (e.g., "2: ens18: ") + if strings.Contains(line, ":") && !strings.Contains(line, "inet") && !strings.HasPrefix(line, "valid_lft") && !strings.HasPrefix(line, "altname") { + parts := strings.Fields(line) + if len(parts) >= 2 { + currentIfaceName = strings.TrimSuffix(parts[1], ":") + s.logger.Debug("Processing interface for IP", "name", currentIfaceName) + } + continue + } + + // Parse IP address (e.g., "inet 10.10.14.16/24 brd 10.10.14.255 scope global ens18") + if strings.HasPrefix(line, "inet ") && 
currentIfaceName != "" && currentIfaceName != "lo" { + parts := strings.Fields(line) + if len(parts) >= 2 { + ipWithSubnet := parts[1] // e.g., "10.10.14.16/24" + ipParts := strings.Split(ipWithSubnet, "/") + if len(ipParts) == 2 { + ip := ipParts[0] + subnet := ipParts[1] + + // Find or create interface + iface, exists := interfaceMap[currentIfaceName] + if !exists { + s.logger.Debug("Creating new interface entry", "name", currentIfaceName) + iface = &NetworkInterface{ + Name: currentIfaceName, + Status: "Down", + Speed: "Unknown", + } + interfaceMap[currentIfaceName] = iface + } + + iface.IPAddress = ip + iface.Subnet = subnet + s.logger.Debug("Set IP for interface", "name", currentIfaceName, "ip", ip, "subnet", subnet) + } + } + } + } + } + + // Convert map to slice + var interfaces []NetworkInterface + s.logger.Debug("Converting interface map to slice", "map_size", len(interfaceMap)) + for _, iface := range interfaceMap { + // Get speed for each interface using ethtool + if iface.Name != "" && iface.Name != "lo" { + cmd := exec.CommandContext(ctx, "ethtool", iface.Name) + output, err := cmd.Output() + if err == nil { + // Parse speed from ethtool output + ethtoolLines := strings.Split(string(output), "\n") + for _, ethtoolLine := range ethtoolLines { + if strings.Contains(ethtoolLine, "Speed:") { + parts := strings.Fields(ethtoolLine) + if len(parts) >= 2 { + iface.Speed = parts[1] + } + break + } + } + } + + // Determine role based on interface name or IP (simple heuristic) + // You can enhance this with configuration file or database lookup + if strings.Contains(iface.Name, "eth") || strings.Contains(iface.Name, "ens") { + // Default to Management for first interface, ISCSI for others + if iface.Name == "eth0" || iface.Name == "ens18" { + iface.Role = "Management" + } else { + // Check if IP is in typical iSCSI range (10.x.x.x) + if strings.HasPrefix(iface.IPAddress, "10.") && iface.IPAddress != "" { + iface.Role = "ISCSI" + } + } + } + } + interfaces = append(interfaces, *iface) + } + + // If no interfaces found, return empty slice + if len(interfaces) == 0 { + s.logger.Warn("No network interfaces found") + return []NetworkInterface{}, nil + } + + s.logger.Info("Listed network interfaces", "count", len(interfaces)) + return interfaces, nil +} diff --git a/backend/internal/tape_vtl/handler.go b/backend/internal/tape_vtl/handler.go index 31e2d8a..27a416d 100644 --- a/backend/internal/tape_vtl/handler.go +++ b/backend/internal/tape_vtl/handler.go @@ -1,6 +1,7 @@ package tape_vtl import ( + "fmt" "net/http" "github.com/atlasos/calypso/internal/common/database" @@ -29,6 +30,7 @@ func NewHandler(db *database.DB, log *logger.Logger) *Handler { // ListLibraries lists all virtual tape libraries func (h *Handler) ListLibraries(c *gin.Context) { + h.logger.Info("ListLibraries called") libraries, err := h.service.ListLibraries(c.Request.Context()) if err != nil { h.logger.Error("Failed to list libraries", "error", err) @@ -36,7 +38,36 @@ func (h *Handler) ListLibraries(c *gin.Context) { return } - c.JSON(http.StatusOK, gin.H{"libraries": libraries}) + h.logger.Info("ListLibraries result", "count", len(libraries), "is_nil", libraries == nil) + + // Ensure we return an empty array instead of null + if libraries == nil { + h.logger.Warn("Libraries is nil, converting to empty array") + libraries = []VirtualTapeLibrary{} + } + + h.logger.Info("Returning libraries", "count", len(libraries), "libraries", libraries) + + // Ensure we always return an array, never null + if libraries == nil { + libraries 
= []VirtualTapeLibrary{} + } + + // Force empty array if nil (double check) + if libraries == nil { + h.logger.Warn("Libraries is still nil in handler, forcing empty array") + libraries = []VirtualTapeLibrary{} + } + + // Use explicit JSON marshalling to ensure empty array, not null + response := map[string]interface{}{ + "libraries": libraries, + } + + h.logger.Info("Response payload", "count", len(libraries), "response_type", fmt.Sprintf("%T", libraries)) + + // Use JSON marshalling that handles empty slices correctly + c.JSON(http.StatusOK, response) } // GetLibrary retrieves a library by ID @@ -69,11 +100,11 @@ func (h *Handler) GetLibrary(c *gin.Context) { // CreateLibraryRequest represents a library creation request type CreateLibraryRequest struct { - Name string `json:"name" binding:"required"` - Description string `json:"description"` + Name string `json:"name" binding:"required"` + Description string `json:"description"` BackingStorePath string `json:"backing_store_path" binding:"required"` - SlotCount int `json:"slot_count" binding:"required"` - DriveCount int `json:"drive_count" binding:"required"` + SlotCount int `json:"slot_count" binding:"required"` + DriveCount int `json:"drive_count" binding:"required"` } // CreateLibrary creates a new virtual tape library @@ -161,10 +192,10 @@ func (h *Handler) GetLibraryTapes(c *gin.Context) { // CreateTapeRequest represents a tape creation request type CreateTapeRequest struct { - Barcode string `json:"barcode" binding:"required"` - SlotNumber int `json:"slot_number" binding:"required"` - TapeType string `json:"tape_type" binding:"required"` - SizeGB int64 `json:"size_gb" binding:"required"` + Barcode string `json:"barcode" binding:"required"` + SlotNumber int `json:"slot_number" binding:"required"` + TapeType string `json:"tape_type" binding:"required"` + SizeGB int64 `json:"size_gb" binding:"required"` } // CreateTape creates a new virtual tape @@ -218,9 +249,9 @@ func (h *Handler) LoadTape(c *gin.Context) { // Create async task taskID, err := h.taskEngine.CreateTask(c.Request.Context(), tasks.TaskTypeLoadUnload, userID.(string), map[string]interface{}{ - "operation": "load_tape", - "library_id": libraryID, - "slot_number": req.SlotNumber, + "operation": "load_tape", + "library_id": libraryID, + "slot_number": req.SlotNumber, "drive_number": req.DriveNumber, }) if err != nil { @@ -268,9 +299,9 @@ func (h *Handler) UnloadTape(c *gin.Context) { // Create async task taskID, err := h.taskEngine.CreateTask(c.Request.Context(), tasks.TaskTypeLoadUnload, userID.(string), map[string]interface{}{ - "operation": "unload_tape", - "library_id": libraryID, - "slot_number": req.SlotNumber, + "operation": "unload_tape", + "library_id": libraryID, + "slot_number": req.SlotNumber, "drive_number": req.DriveNumber, }) if err != nil { @@ -295,4 +326,3 @@ func (h *Handler) UnloadTape(c *gin.Context) { c.JSON(http.StatusAccepted, gin.H{"task_id": taskID}) } - diff --git a/backend/internal/tape_vtl/service.go b/backend/internal/tape_vtl/service.go index e8a65c9..0507912 100644 --- a/backend/internal/tape_vtl/service.go +++ b/backend/internal/tape_vtl/service.go @@ -28,46 +28,46 @@ func NewService(db *database.DB, log *logger.Logger) *Service { // VirtualTapeLibrary represents a virtual tape library type VirtualTapeLibrary struct { - ID string `json:"id"` - Name string `json:"name"` - Description string `json:"description"` - MHVTLibraryID int `json:"mhvtl_library_id"` - BackingStorePath string `json:"backing_store_path"` - SlotCount int `json:"slot_count"` 
- DriveCount int `json:"drive_count"` - IsActive bool `json:"is_active"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - CreatedBy string `json:"created_by"` + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + MHVTLibraryID int `json:"mhvtl_library_id"` + BackingStorePath string `json:"backing_store_path"` + SlotCount int `json:"slot_count"` + DriveCount int `json:"drive_count"` + IsActive bool `json:"is_active"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + CreatedBy string `json:"created_by"` } // VirtualTapeDrive represents a virtual tape drive type VirtualTapeDrive struct { - ID string `json:"id"` - LibraryID string `json:"library_id"` - DriveNumber int `json:"drive_number"` - DevicePath *string `json:"device_path,omitempty"` - StablePath *string `json:"stable_path,omitempty"` - Status string `json:"status"` - CurrentTapeID string `json:"current_tape_id,omitempty"` - IsActive bool `json:"is_active"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` + ID string `json:"id"` + LibraryID string `json:"library_id"` + DriveNumber int `json:"drive_number"` + DevicePath *string `json:"device_path,omitempty"` + StablePath *string `json:"stable_path,omitempty"` + Status string `json:"status"` + CurrentTapeID string `json:"current_tape_id,omitempty"` + IsActive bool `json:"is_active"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` } // VirtualTape represents a virtual tape type VirtualTape struct { - ID string `json:"id"` - LibraryID string `json:"library_id"` - Barcode string `json:"barcode"` - SlotNumber int `json:"slot_number"` - ImageFilePath string `json:"image_file_path"` - SizeBytes int64 `json:"size_bytes"` - UsedBytes int64 `json:"used_bytes"` - TapeType string `json:"tape_type"` - Status string `json:"status"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` + ID string `json:"id"` + LibraryID string `json:"library_id"` + Barcode string `json:"barcode"` + SlotNumber int `json:"slot_number"` + ImageFilePath string `json:"image_file_path"` + SizeBytes int64 `json:"size_bytes"` + UsedBytes int64 `json:"used_bytes"` + TapeType string `json:"tape_type"` + Status string `json:"status"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` } // CreateLibrary creates a new virtual tape library @@ -135,14 +135,14 @@ func (s *Service) CreateLibrary(ctx context.Context, name, description, backingS for i := 1; i <= slotCount; i++ { barcode := fmt.Sprintf("V%05d", i) tape := VirtualTape{ - LibraryID: lib.ID, - Barcode: barcode, - SlotNumber: i, + LibraryID: lib.ID, + Barcode: barcode, + SlotNumber: i, ImageFilePath: filepath.Join(tapesPath, fmt.Sprintf("%s.img", barcode)), - SizeBytes: 800 * 1024 * 1024 * 1024, // 800 GB default (LTO-8) - UsedBytes: 0, - TapeType: "LTO-8", - Status: "idle", + SizeBytes: 800 * 1024 * 1024 * 1024, // 800 GB default (LTO-8) + UsedBytes: 0, + TapeType: "LTO-8", + Status: "idle", } if err := s.createTape(ctx, &tape); err != nil { s.logger.Error("Failed to create tape", "slot", i, "error", err) @@ -228,28 +228,56 @@ func (s *Service) ListLibraries(ctx context.Context) ([]VirtualTapeLibrary, erro ORDER BY name ` + s.logger.Info("Executing query to list libraries") rows, err := s.db.QueryContext(ctx, query) if err != nil { + s.logger.Error("Failed to query libraries", "error", err) return nil, fmt.Errorf("failed to list 
libraries: %w", err) } + s.logger.Info("Query executed successfully, got rows") defer rows.Close() - var libraries []VirtualTapeLibrary + libraries := make([]VirtualTapeLibrary, 0) // Initialize as empty slice, not nil + s.logger.Info("Starting to scan library rows", "query", query) + rowCount := 0 for rows.Next() { + rowCount++ var lib VirtualTapeLibrary + var description sql.NullString + var createdBy sql.NullString err := rows.Scan( - &lib.ID, &lib.Name, &lib.Description, &lib.MHVTLibraryID, &lib.BackingStorePath, + &lib.ID, &lib.Name, &description, &lib.MHVTLibraryID, &lib.BackingStorePath, &lib.SlotCount, &lib.DriveCount, &lib.IsActive, - &lib.CreatedAt, &lib.UpdatedAt, &lib.CreatedBy, + &lib.CreatedAt, &lib.UpdatedAt, &createdBy, ) if err != nil { - s.logger.Error("Failed to scan library", "error", err) + s.logger.Error("Failed to scan library", "error", err, "row", rowCount) continue } + if description.Valid { + lib.Description = description.String + } + if createdBy.Valid { + lib.CreatedBy = createdBy.String + } libraries = append(libraries, lib) + s.logger.Info("Added library to list", "library_id", lib.ID, "name", lib.Name, "mhvtl_id", lib.MHVTLibraryID) + } + s.logger.Info("Finished scanning library rows", "total_rows", rowCount, "libraries_added", len(libraries)) + + if err := rows.Err(); err != nil { + s.logger.Error("Error iterating library rows", "error", err) + return nil, fmt.Errorf("error iterating library rows: %w", err) } - return libraries, rows.Err() + s.logger.Info("Listed virtual tape libraries", "count", len(libraries), "is_nil", libraries == nil) + // Ensure we return an empty slice, not nil + if libraries == nil { + s.logger.Warn("Libraries is nil in service, converting to empty array") + libraries = []VirtualTapeLibrary{} + } + s.logger.Info("Returning from service", "count", len(libraries), "is_nil", libraries == nil) + return libraries, nil } // GetLibrary retrieves a library by ID @@ -262,10 +290,12 @@ func (s *Service) GetLibrary(ctx context.Context, id string) (*VirtualTapeLibrar ` var lib VirtualTapeLibrary + var description sql.NullString + var createdBy sql.NullString err := s.db.QueryRowContext(ctx, query, id).Scan( - &lib.ID, &lib.Name, &lib.Description, &lib.MHVTLibraryID, &lib.BackingStorePath, + &lib.ID, &lib.Name, &description, &lib.MHVTLibraryID, &lib.BackingStorePath, &lib.SlotCount, &lib.DriveCount, &lib.IsActive, - &lib.CreatedAt, &lib.UpdatedAt, &lib.CreatedBy, + &lib.CreatedAt, &lib.UpdatedAt, &createdBy, ) if err != nil { if err == sql.ErrNoRows { @@ -274,6 +304,13 @@ func (s *Service) GetLibrary(ctx context.Context, id string) (*VirtualTapeLibrar return nil, fmt.Errorf("failed to get library: %w", err) } + if description.Valid { + lib.Description = description.String + } + if createdBy.Valid { + lib.CreatedBy = createdBy.String + } + return &lib, nil } @@ -500,4 +537,3 @@ func (s *Service) DeleteLibrary(ctx context.Context, id string) error { s.logger.Info("Virtual tape library deleted", "id", id, "name", lib.Name) return nil } - diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx index 37e3b32..4c7c1bc 100644 --- a/frontend/src/App.tsx +++ b/frontend/src/App.tsx @@ -10,6 +10,7 @@ import TapeLibrariesPage from '@/pages/TapeLibraries' import VTLDetailPage from '@/pages/VTLDetail' import ISCSITargetsPage from '@/pages/ISCSITargets' import ISCSITargetDetailPage from '@/pages/ISCSITargetDetail' +import SystemPage from '@/pages/System' import Layout from '@/components/Layout' // Create a client @@ -55,6 +56,7 @@ function App() { } /> } /> } 
/> + } /> diff --git a/frontend/src/api/storage.ts b/frontend/src/api/storage.ts index e7715e9..0653528 100644 --- a/frontend/src/api/storage.ts +++ b/frontend/src/api/storage.ts @@ -100,6 +100,7 @@ export interface ZFSPool { scrub_interval: number // days is_active: boolean health_status: string // online, degraded, faulted, offline + compress_ratio?: number // compression ratio (e.g., 1.45) created_at: string updated_at: string created_by: string diff --git a/frontend/src/api/system.ts b/frontend/src/api/system.ts new file mode 100644 index 0000000..a640144 --- /dev/null +++ b/frontend/src/api/system.ts @@ -0,0 +1,18 @@ +import apiClient from './client' + +export interface NetworkInterface { + name: string + ip_address: string + subnet: string + status: string // "Connected" or "Down" + speed: string // e.g., "10 Gbps", "1 Gbps" + role: string // "Management", "ISCSI", or empty +} + +export const systemAPI = { + listNetworkInterfaces: async (): Promise => { + const response = await apiClient.get<{ interfaces: NetworkInterface[] | null }>('/system/interfaces') + return response.data.interfaces || [] + }, +} + diff --git a/frontend/src/index.css b/frontend/src/index.css index b470e28..2a75742 100644 --- a/frontend/src/index.css +++ b/frontend/src/index.css @@ -122,3 +122,23 @@ } } +/* Custom Toggle Switch */ +.toggle-checkbox:checked { + right: 0; + border-color: #137fec; +} + +.toggle-checkbox:checked + .toggle-label { + background-color: #137fec; +} + +.toggle-checkbox { + right: 0; + left: auto; +} + +.toggle-checkbox:checked { + right: 0; + left: auto; +} + diff --git a/frontend/src/pages/Storage.tsx b/frontend/src/pages/Storage.tsx index 436c7bb..3dd9659 100644 --- a/frontend/src/pages/Storage.tsx +++ b/frontend/src/pages/Storage.tsx @@ -186,8 +186,10 @@ export default function StoragePage() { const { data: zfsPools = [], isLoading: poolsLoading } = useQuery({ queryKey: ['storage', 'zfs', 'pools'], queryFn: zfsApi.listPools, - refetchInterval: 2000, // Auto-refresh every 2 seconds + refetchInterval: 3000, // Auto-refresh every 3 seconds staleTime: 0, // Always consider data stale + refetchOnWindowFocus: true, + refetchOnMount: true, }) // Fetch ARC stats with auto-refresh every 2 seconds for live data @@ -254,8 +256,10 @@ export default function StoragePage() { const deletePoolMutation = useMutation({ mutationFn: (poolId: string) => zfsApi.deletePool(poolId), - onSuccess: () => { - queryClient.invalidateQueries({ queryKey: ['storage', 'zfs', 'pools'] }) + onSuccess: async () => { + // Invalidate and immediately refetch + await queryClient.invalidateQueries({ queryKey: ['storage', 'zfs', 'pools'] }) + queryClient.refetchQueries({ queryKey: ['storage', 'zfs', 'pools'] }) queryClient.invalidateQueries({ queryKey: ['storage', 'disks'] }) setSelectedPool(null) alert('Pool destroyed successfully!') @@ -341,20 +345,51 @@ export default function StoragePage() { const healthyPools = allPools.filter((p) => { if ('health_status' in p) { - return p.is_active && (p as ZFSPool).health_status === 'online' + const health = (p as ZFSPool).health_status?.toLowerCase() || '' + return p.is_active && health === 'online' } return p.is_active }).length const degradedPools = allPools.filter((p) => { if ('health_status' in p) { - return !p.is_active || (p as ZFSPool).health_status !== 'online' + const health = (p as ZFSPool).health_status?.toLowerCase() || '' + return !p.is_active || health !== 'online' } return !p.is_active }).length const healthStatus = degradedPools === 0 ? 
'Optimal' : 'Degraded'

-  // Mock efficiency data (would come from backend)
-  const efficiencyRatio = 1.45
+  // Calculate efficiency ratio from ZFS pools
+  // Efficiency = average compressratio across all active pools
+  // Use actual compressratio from ZFS if available, otherwise estimate
+  const activeZFSPools = zfsPools.filter(p => p.is_active && p.health_status?.toLowerCase() === 'online')
+  const efficiencyRatio = activeZFSPools.length > 0
+    ? activeZFSPools.reduce((sum, pool) => {
+        // Use actual compressratio from ZFS if available
+        if (pool.compress_ratio && pool.compress_ratio > 0) {
+          // Deduplication can add additional savings (typically 1.2-2x)
+          const dedupMultiplier = pool.deduplication ? 1.3 : 1.0
+          return sum + (pool.compress_ratio * dedupMultiplier)
+        }
+        // Fallback: estimate based on compression type
+        const compressionMultiplier: Record<string, number> = {
+          'lz4': 1.5,
+          'zstd': 2.5,
+          'gzip': 2.0,
+          'gzip-1': 1.8,
+          'gzip-9': 2.5,
+          'off': 1.0,
+        }
+        const baseRatio = compressionMultiplier[pool.compression?.toLowerCase() || 'lz4'] || 1.5
+        const dedupMultiplier = pool.deduplication ? 1.3 : 1.0
+        return sum + (baseRatio * dedupMultiplier)
+      }, 0) / activeZFSPools.length
+    : 1.0
+
+  // Get compression and deduplication status from pools
+  const hasCompression = activeZFSPools.some(p => p.compression && p.compression.toLowerCase() !== 'off')
+  const hasDedup = activeZFSPools.some(p => p.deduplication)
+  const compressionType = activeZFSPools.find(p => p.compression && p.compression.toLowerCase() !== 'off')?.compression?.toUpperCase() || 'LZ4'

   // Use live ARC stats if available, otherwise fallback to 0
   const arcHitRatio = arcStats?.hit_ratio ?? 0
   const arcCacheUsage = arcStats?.cache_usage ?? 0
@@ -478,8 +513,21 @@ export default function StoragePage() {
[JSX markup lost in extraction. This hunk renders the badges under the efficiency "Ratio" card: the static "LZ4" and "DEDUP ON" badges are replaced by conditional badges that show {compressionType} when hasCompression is true, "DEDUP ON" when hasDedup is true, and "NO COMPRESSION" when neither applies.]
@@ -558,7 +606,7 @@ export default function StoragePage() {
     // Check if it's a ZFS pool or LVM repository
     const isZFSPool = 'raid_level' in pool
-    const healthStatus = isZFSPool ? (pool as ZFSPool).health_status : 'online'
+    const healthStatus = isZFSPool ? ((pool as ZFSPool).health_status?.toLowerCase() || 'online') : 'online'
     const isHealthy = pool.is_active && (healthStatus === 'online' || healthStatus === '')
     const statusColor = isHealthy
@@ -809,11 +857,11 @@ export default function StoragePage() {
[JSX markup lost in extraction. This hunk lowercases health_status before comparing it in the pool detail drawer:]
-              {selectedPool.is_active && selectedPool.health_status === 'online' ? 'Healthy' : 'Degraded'}
+              {selectedPool.is_active && selectedPool.health_status?.toLowerCase() === 'online' ? 'Healthy' : 'Degraded'}
-              {selectedPool.is_active && selectedPool.health_status === 'online'
+              {selectedPool.is_active && selectedPool.health_status?.toLowerCase() === 'online'
                 ? 'This pool is operating normally.'
                 : 'This pool has issues and requires attention.'}
diff --git a/frontend/src/pages/System.tsx b/frontend/src/pages/System.tsx
new file mode 100644
index 0000000..8add6bb
--- /dev/null
+++ b/frontend/src/pages/System.tsx
@@ -0,0 +1,434 @@
+import { useState } from 'react'
+import { Link } from 'react-router-dom'
+import { useQuery } from '@tanstack/react-query'
+import { systemAPI, NetworkInterface } from '@/api/system'
+
+export default function System() {
+  const [snmpEnabled, setSnmpEnabled] = useState(false)
+
+  // Fetch network interfaces
+  const { data: interfaces = [], isLoading: interfacesLoading } = useQuery({
+    queryKey: ['system', 'interfaces'],
+    queryFn: () => systemAPI.listNetworkInterfaces(),
+    refetchInterval: 5000, // Refresh every 5 seconds
+  })
+
+  return (
[The remaining ~415 added lines of JSX for this new page lost their markup in extraction. Recoverable structure:
a top bar with "System / Configuration" breadcrumbs and a "System Healthy" indicator; a page header titled
"System Configuration" with the subtitle "Manage network interfaces, time synchronization, service states, and
remote management protocols."; a "Network Interfaces" card that maps over the interfaces query and shows, per
interface, the name, a role badge (Management or ISCSI), the IP address/subnet or "No Carrier", Connected/Down
status, and link speed, with loading and empty states; a "Service Control" card ("All Systems Normal") with rows
for SSH Service, SMB / CIFS, iSCSI Target, NFS Service, and VTL Service and their RUNNING/STOPPED badges and
start/stop controls; a "Date & Time" card (UTC timezone selector, NTP servers pool.ntp.org, Stratum 2 • 12ms,
and time.google.com, Stratum 1 • 45ms); and a "Management" card with an SNMP Monitoring toggle bound to
snmpEnabled and a Syslog Forwarding section, followed by a bottom spacer.]
+  )
+}
diff --git a/frontend/src/pages/TapeLibraries.tsx b/frontend/src/pages/TapeLibraries.tsx
index b858c93..91ccfb7 100644
--- a/frontend/src/pages/TapeLibraries.tsx
+++ b/frontend/src/pages/TapeLibraries.tsx
@@ -460,10 +460,10 @@ export default function TapeLibraries() {
       {/* Tape Detail Drawer */}
-      {selectedLibrary && activeTab === 'vtl' && libraryTapes.length > 0 && (
+      {selectedLibrary && activeTab === 'vtl' && (
[The remaining changed lines in this hunk and the drawer header markup (cable icon, library title) were lost in extraction.]
@@ -488,47 +488,71 @@
[Markup lost in extraction. This hunk wraps the tape list in an empty-state check: when libraryTapes is empty it renders an album icon, "No Tapes Found", "This library has no tapes yet. Create tapes to get started.", and an "Add Tapes" button; otherwise it maps over libraryTapes as before, relabeling the slot badge from "Slot {tape.slot_number}" to "SLOT {tape.slot_number}" and keeping the {tape.barcode} label, the formatBytes(tape.size_bytes, 1) / formatBytes(tape.size_bytes, 1) capacity line, and the eject and delete buttons.]
+          ))}
+        )}
      )}
diff --git a/frontend/tailwind.config.js b/frontend/tailwind.config.js
index ba0dd71..7620187 100644
--- a/frontend/tailwind.config.js
+++ b/frontend/tailwind.config.js
@@ -42,9 +42,9 @@ export default {
         foreground: "hsl(var(--card-foreground))",
       },
       // Dark theme colors from example
-      "background-dark": "#111a22",
-      "card-dark": "#1a2632",
-      "border-dark": "#324d67",
+      "background-dark": "#101922",
+      "card-dark": "#192633",
+      "border-dark": "#233648",
       "text-secondary": "#92adc9",
     },
     fontFamily: {