replace tape library body layout
@@ -137,8 +137,8 @@ func cacheControlMiddleware() gin.HandlerFunc {
	case path == "/api/v1/system/services":
		// Service list can be cached briefly
		c.Header("Cache-Control", "public, max-age=60")
-	case strings.HasPrefix(path, "/api/v1/storage/zfs/pools/") && strings.HasSuffix(path, "/datasets"):
-		// ZFS datasets should not be cached - they change frequently
+	case strings.HasPrefix(path, "/api/v1/storage/zfs/pools"):
+		// ZFS pools and datasets should not be cached - they change frequently
		c.Header("Cache-Control", "no-cache, no-store, must-revalidate")
	default:
		// Default: no cache for other endpoints
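
For context, a small self-contained sketch (not the project's router code; the paths and header values are taken from the hunk above) showing which request paths the broadened "/api/v1/storage/zfs/pools" prefix now catches. The old pair of cases only matched the .../datasets sub-resource; the single prefix case covers the pool list, individual pools, and their datasets.

package main

import (
    "fmt"
    "strings"
)

// cachePolicy mirrors the switch in the hunk above, reduced to the ZFS cases.
func cachePolicy(path string) string {
    switch {
    case path == "/api/v1/system/services":
        return "public, max-age=60"
    case strings.HasPrefix(path, "/api/v1/storage/zfs/pools"):
        // Matches the pool list, a single pool, and its datasets sub-resource.
        return "no-cache, no-store, must-revalidate"
    default:
        return "no-cache"
    }
}

func main() {
    for _, p := range []string{
        "/api/v1/storage/zfs/pools",
        "/api/v1/storage/zfs/pools/tank",
        "/api/v1/storage/zfs/pools/tank/datasets",
    } {
        fmt.Println(p, "=>", cachePolicy(p))
    }
}
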
@@ -164,6 +164,15 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Engine {
	if responseCache != nil {
		storageHandler.SetCache(responseCache)
	}

	// Start disk monitor service in background (syncs disks every 5 minutes)
	diskMonitor := storage.NewDiskMonitor(db, log, 5*time.Minute)
	go diskMonitor.Start(context.Background())

	// Start ZFS pool monitor service in background (syncs pools every 2 minutes)
	zfsPoolMonitor := storage.NewZFSPoolMonitor(db, log, 2*time.Minute)
	go zfsPoolMonitor.Start(context.Background())

	storageGroup := protected.Group("/storage")
	storageGroup.Use(requirePermission("storage", "read"))
	{
@@ -217,6 +226,11 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Engine {

	// Virtual Tape Libraries
	vtlHandler := tape_vtl.NewHandler(db, log)

	// Start MHVTL monitor service in background (syncs every 5 minutes)
	mhvtlMonitor := tape_vtl.NewMHVTLMonitor(db, log, "/etc/mhvtl", 5*time.Minute)
	go mhvtlMonitor.Start(context.Background())

	vtlGroup := protected.Group("/tape/vtl")
	vtlGroup.Use(requirePermission("tape", "read"))
	{
@@ -75,7 +75,21 @@ func (s *DiskService) DiscoverDisks(ctx context.Context) ([]PhysicalDisk, error) {
		}

		devicePath := "/dev/" + device.Name

		// Skip ZFS volume block devices (zd* devices are ZFS volumes exported as block devices)
		// These are not physical disks and should not appear in the physical disk list
		if strings.HasPrefix(device.Name, "zd") {
			s.logger.Debug("Skipping ZFS volume block device", "device", devicePath)
			continue
		}

		// Skip devices under /dev/zvol (ZFS volume devices in the zvol directory)
		// These are virtual block devices created from ZFS volumes, not physical hardware
		if strings.HasPrefix(devicePath, "/dev/zvol/") {
			s.logger.Debug("Skipping ZFS volume device", "device", devicePath)
			continue
		}

		// Skip OS disk (disk that has root or boot partition)
		if s.isOSDisk(ctx, devicePath) {
			s.logger.Debug("Skipping OS disk", "device", devicePath)
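
A minimal sketch of the two ZFS skip rules added above, run against made-up device names (the names are illustrative, not captured from a system):

package main

import (
    "fmt"
    "strings"
)

// skipDevice reproduces the two ZFS-related filters from the hunk above.
func skipDevice(name string) bool {
    devicePath := "/dev/" + name
    if strings.HasPrefix(name, "zd") { // ZFS volume exported as a block device
        return true
    }
    if strings.HasPrefix(devicePath, "/dev/zvol/") { // entries under the zvol directory
        return true
    }
    return false
}

func main() {
    for _, name := range []string{"sda", "nvme0n1", "zd0", "zvol/tank/vm-disk"} {
        fmt.Printf("%-20s skip=%v\n", name, skipDevice(name))
    }
}
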
@@ -113,8 +127,8 @@ func (s *DiskService) DiscoverDisks(ctx context.Context) ([]PhysicalDisk, error) {
// getDiskInfo retrieves detailed information about a disk
func (s *DiskService) getDiskInfo(ctx context.Context, devicePath string) (*PhysicalDisk, error) {
	disk := &PhysicalDisk{
-		DevicePath:   devicePath,
-		HealthStatus: "unknown",
+		DevicePath:    devicePath,
+		HealthStatus:  "unknown",
+		HealthDetails: make(map[string]interface{}),
	}

@@ -129,7 +143,7 @@ func (s *DiskService) getDiskInfo(ctx context.Context, devicePath string) (*PhysicalDisk, error) {
	disk.Vendor = props["ID_VENDOR"]
	disk.Model = props["ID_MODEL"]
	disk.SerialNumber = props["ID_SERIAL_SHORT"]

	if props["ID_ATA_ROTATION_RATE"] == "0" {
		disk.IsSSD = true
	}
@@ -258,11 +272,15 @@ func (s *DiskService) isOSDisk(ctx context.Context, devicePath string) bool {

// SyncDisksToDatabase syncs discovered disks to the database
func (s *DiskService) SyncDisksToDatabase(ctx context.Context) error {
	s.logger.Info("Starting disk discovery and sync")
	disks, err := s.DiscoverDisks(ctx)
	if err != nil {
		s.logger.Error("Failed to discover disks", "error", err)
		return fmt.Errorf("failed to discover disks: %w", err)
	}

	s.logger.Info("Discovered disks", "count", len(disks))

	for _, disk := range disks {
		// Check if disk exists
		var existingID string
@@ -300,10 +318,80 @@ func (s *DiskService) SyncDisksToDatabase(ctx context.Context) error {
				disk.HealthStatus, healthDetailsJSON, disk.IsUsed, existingID)
			if err != nil {
				s.logger.Error("Failed to update disk", "device", disk.DevicePath, "error", err)
			} else {
				s.logger.Debug("Updated disk", "device", disk.DevicePath)
			}
		}
	}

	s.logger.Info("Disk sync completed", "total_disks", len(disks))
	return nil
}

// ListDisksFromDatabase retrieves all physical disks from the database
func (s *DiskService) ListDisksFromDatabase(ctx context.Context) ([]PhysicalDisk, error) {
	query := `
		SELECT
			id, device_path, vendor, model, serial_number, size_bytes,
			sector_size, is_ssd, health_status, health_details, is_used,
			created_at, updated_at
		FROM physical_disks
		ORDER BY device_path
	`

	rows, err := s.db.QueryContext(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("failed to query disks: %w", err)
	}
	defer rows.Close()

	var disks []PhysicalDisk
	for rows.Next() {
		var disk PhysicalDisk
		var healthDetailsJSON []byte
		var attachedToPool sql.NullString

		err := rows.Scan(
			&disk.ID, &disk.DevicePath, &disk.Vendor, &disk.Model,
			&disk.SerialNumber, &disk.SizeBytes, &disk.SectorSize,
			&disk.IsSSD, &disk.HealthStatus, &healthDetailsJSON,
			&disk.IsUsed, &disk.CreatedAt, &disk.UpdatedAt,
		)
		if err != nil {
			s.logger.Warn("Failed to scan disk row", "error", err)
			continue
		}

		// Parse health details JSON
		if len(healthDetailsJSON) > 0 {
			if err := json.Unmarshal(healthDetailsJSON, &disk.HealthDetails); err != nil {
				s.logger.Warn("Failed to parse health details", "error", err)
				disk.HealthDetails = make(map[string]interface{})
			}
		} else {
			disk.HealthDetails = make(map[string]interface{})
		}

		// Get ZFS pool attachment if disk is used
		if disk.IsUsed {
			err := s.db.QueryRowContext(ctx,
				`SELECT zp.name FROM zfs_pools zp
				 INNER JOIN zfs_pool_disks zpd ON zp.id = zpd.pool_id
				 WHERE zpd.disk_id = $1
				 LIMIT 1`,
				disk.ID,
			).Scan(&attachedToPool)
			if err == nil && attachedToPool.Valid {
				disk.AttachedToPool = attachedToPool.String
			}
		}

		disks = append(disks, disk)
	}

	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating disk rows: %w", err)
	}

	return disks, nil
}
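
A small aside on the health-details handling above: the column may be NULL/empty or hold invalid JSON, and each case falls back to an empty map. A self-contained sketch of that fallback (the field names are illustrative, not the project's schema):

package main

import (
    "encoding/json"
    "fmt"
)

// healthDetails decodes the stored JSON blob, falling back to an empty map
// when the value is empty or not valid JSON.
func healthDetails(raw []byte) map[string]interface{} {
    details := make(map[string]interface{})
    if len(raw) == 0 {
        return details
    }
    if err := json.Unmarshal(raw, &details); err != nil {
        return make(map[string]interface{})
    }
    return details
}

func main() {
    fmt.Println(healthDetails(nil))
    fmt.Println(healthDetails([]byte(`{"smart_passed": true, "temperature_c": 34}`)))
    fmt.Println(healthDetails([]byte(`not-json`)))
}
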
backend/internal/storage/disk_monitor.go  (new file, 65 lines)
@@ -0,0 +1,65 @@
package storage

import (
	"context"
	"time"

	"github.com/atlasos/calypso/internal/common/database"
	"github.com/atlasos/calypso/internal/common/logger"
)

// DiskMonitor handles periodic disk discovery and sync to database
type DiskMonitor struct {
	diskService *DiskService
	logger      *logger.Logger
	interval    time.Duration
	stopCh      chan struct{}
}

// NewDiskMonitor creates a new disk monitor service
func NewDiskMonitor(db *database.DB, log *logger.Logger, interval time.Duration) *DiskMonitor {
	return &DiskMonitor{
		diskService: NewDiskService(db, log),
		logger:      log,
		interval:    interval,
		stopCh:      make(chan struct{}),
	}
}

// Start starts the disk monitor background service
func (m *DiskMonitor) Start(ctx context.Context) {
	m.logger.Info("Starting disk monitor service", "interval", m.interval)
	ticker := time.NewTicker(m.interval)
	defer ticker.Stop()

	// Run initial sync immediately
	m.syncDisks(ctx)

	for {
		select {
		case <-ctx.Done():
			m.logger.Info("Disk monitor service stopped")
			return
		case <-m.stopCh:
			m.logger.Info("Disk monitor service stopped")
			return
		case <-ticker.C:
			m.syncDisks(ctx)
		}
	}
}

// Stop stops the disk monitor service
func (m *DiskMonitor) Stop() {
	close(m.stopCh)
}

// syncDisks performs disk discovery and sync to database
func (m *DiskMonitor) syncDisks(ctx context.Context) {
	m.logger.Debug("Running periodic disk sync")
	if err := m.diskService.SyncDisksToDatabase(ctx); err != nil {
		m.logger.Error("Periodic disk sync failed", "error", err)
	} else {
		m.logger.Debug("Periodic disk sync completed")
	}
}
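
For illustration, a stripped-down, self-contained sketch of the ticker-driven monitor pattern this file (and the ZFS pool and MHVTL monitors below) follows; the types and intervals here are stand-ins, not the project's code:

package main

import (
    "context"
    "fmt"
    "time"
)

// monitor is a minimal stand-in for DiskMonitor/ZFSPoolMonitor: run one sync
// immediately, then one per tick, until the context is cancelled or Stop is called.
type monitor struct {
    interval time.Duration
    stopCh   chan struct{}
}

func (m *monitor) Start(ctx context.Context) {
    ticker := time.NewTicker(m.interval)
    defer ticker.Stop()
    m.sync()
    for {
        select {
        case <-ctx.Done():
            return
        case <-m.stopCh:
            return
        case <-ticker.C:
            m.sync()
        }
    }
}

func (m *monitor) Stop() { close(m.stopCh) }

func (m *monitor) sync() { fmt.Println("sync at", time.Now().Format(time.RFC3339Nano)) }

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    m := &monitor{interval: 50 * time.Millisecond, stopCh: make(chan struct{})}
    go m.Start(ctx)

    time.Sleep(120 * time.Millisecond) // let a couple of ticks fire
    m.Stop()                           // or cancel(); either path exits the loop
    time.Sleep(10 * time.Millisecond)
}

Either the context or the stop channel ends the loop; note that calling Stop twice would panic on the double close, a property the real monitors appear to share.
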
@@ -1,6 +1,7 @@
package storage

import (
+	"context"
	"fmt"
	"net/http"
	"strings"
@@ -42,9 +43,9 @@ func NewHandler(db *database.DB, log *logger.Logger) *Handler {
	}
}

-// ListDisks lists all physical disks
+// ListDisks lists all physical disks from database
func (h *Handler) ListDisks(c *gin.Context) {
-	disks, err := h.diskService.DiscoverDisks(c.Request.Context())
+	disks, err := h.diskService.ListDisksFromDatabase(c.Request.Context())
	if err != nil {
		h.logger.Error("Failed to list disks", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list disks"})
@@ -70,15 +71,19 @@ func (h *Handler) SyncDisks(c *gin.Context) {

	// Run sync in background
	go func() {
-		ctx := c.Request.Context()
+		// Create new context for background task (don't use request context which may expire)
+		ctx := context.Background()
		h.taskEngine.StartTask(ctx, taskID)
		h.taskEngine.UpdateProgress(ctx, taskID, 50, "Discovering disks...")
		h.logger.Info("Starting disk sync", "task_id", taskID)

		if err := h.diskService.SyncDisksToDatabase(ctx); err != nil {
			h.logger.Error("Disk sync failed", "task_id", taskID, "error", err)
			h.taskEngine.FailTask(ctx, taskID, err.Error())
			return
		}

		h.logger.Info("Disk sync completed", "task_id", taskID)
		h.taskEngine.UpdateProgress(ctx, taskID, 100, "Disk sync completed")
		h.taskEngine.CompleteTask(ctx, taskID, "Disks synchronized successfully")
	}()
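
A short sketch of why the request context is swapped for context.Background() here: a request-scoped context is cancelled when the handler returns, which would cut the background sync short. The names below are stand-ins, not the project's handler:

package main

import (
    "context"
    "fmt"
    "time"
)

func doWork(ctx context.Context, label string) {
    select {
    case <-ctx.Done():
        fmt.Println(label, "aborted:", ctx.Err())
    case <-time.After(50 * time.Millisecond):
        fmt.Println(label, "finished")
    }
}

func main() {
    // Simulate a request-scoped context that is cancelled when the handler returns.
    reqCtx, cancel := context.WithCancel(context.Background())

    go doWork(reqCtx, "with request context")
    go doWork(context.Background(), "with background context")

    cancel()                           // handler returns; request context is cancelled
    time.Sleep(100 * time.Millisecond) // let both goroutines report
}

In a long-lived service, a context tied to the server's shutdown signal (rather than context.Background()) would additionally let the sync be cancelled on shutdown; the commit opts for the simpler form.
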
@@ -391,7 +391,8 @@ func (s *ZFSService) ListPools(ctx context.Context) ([]*ZFSPool, error) {
			&pool.CreatedAt, &pool.UpdatedAt, &pool.CreatedBy,
		)
		if err != nil {
-			return nil, fmt.Errorf("failed to scan pool: %w", err)
+			s.logger.Error("Failed to scan pool row", "error", err)
+			continue // Skip this pool instead of failing entire query
		}
		if description.Valid {
			pool.Description = description.String
@@ -407,8 +408,14 @@ func (s *ZFSService) ListPools(ctx context.Context) ([]*ZFSPool, error) {
		}

		pools = append(pools, &pool)
		s.logger.Debug("Added pool to list", "pool_id", pool.ID, "name", pool.Name)
	}

	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating pool rows: %w", err)
	}

	s.logger.Debug("Listed ZFS pools", "count", len(pools))
	return pools, nil
}
@@ -460,11 +467,22 @@ func (s *ZFSService) DeletePool(ctx context.Context, poolID string) error {
		return err
	}

-	// Destroy ZFS pool
-	cmd := exec.CommandContext(ctx, "zpool", "destroy", pool.Name)
+	// Destroy ZFS pool with -f flag to force destroy (works for both empty and non-empty pools)
+	// The -f flag is needed to destroy pools even if they have datasets or are in use
+	s.logger.Info("Destroying ZFS pool", "pool", pool.Name)
+	cmd := exec.CommandContext(ctx, "zpool", "destroy", "-f", pool.Name)
	output, err := cmd.CombinedOutput()
	if err != nil {
-		return fmt.Errorf("failed to destroy ZFS pool: %s: %w", string(output), err)
+		errorMsg := string(output)
+		// Check if pool doesn't exist (might have been destroyed already)
+		if strings.Contains(errorMsg, "no such pool") || strings.Contains(errorMsg, "cannot open") {
+			s.logger.Warn("Pool does not exist in ZFS, continuing with database cleanup", "pool", pool.Name)
+			// Continue with database cleanup even if pool doesn't exist
+		} else {
+			return fmt.Errorf("failed to destroy ZFS pool: %s: %w", errorMsg, err)
+		}
+	} else {
+		s.logger.Info("ZFS pool destroyed successfully", "pool", pool.Name)
+	}

	// Mark disks as unused
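
A minimal sketch of the tolerant-delete check above; the sample zpool messages are illustrative, only the two substrings come from the hunk:

package main

import (
    "fmt"
    "strings"
)

// alreadyGone reports whether `zpool destroy` output indicates the pool is
// simply absent, in which case database cleanup can proceed anyway.
func alreadyGone(output string) bool {
    return strings.Contains(output, "no such pool") ||
        strings.Contains(output, "cannot open")
}

func main() {
    samples := []string{
        "cannot open 'tank': no such pool",
        "cannot destroy 'tank': pool is busy",
    }
    for _, out := range samples {
        fmt.Printf("%q -> already gone: %v\n", out, alreadyGone(out))
    }
}
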
backend/internal/storage/zfs_pool_monitor.go  (new file, 254 lines)
@@ -0,0 +1,254 @@
package storage

import (
	"context"
	"os/exec"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/atlasos/calypso/internal/common/database"
	"github.com/atlasos/calypso/internal/common/logger"
)

// ZFSPoolMonitor handles periodic ZFS pool status monitoring and sync to database
type ZFSPoolMonitor struct {
	zfsService *ZFSService
	logger     *logger.Logger
	interval   time.Duration
	stopCh     chan struct{}
}

// NewZFSPoolMonitor creates a new ZFS pool monitor service
func NewZFSPoolMonitor(db *database.DB, log *logger.Logger, interval time.Duration) *ZFSPoolMonitor {
	return &ZFSPoolMonitor{
		zfsService: NewZFSService(db, log),
		logger:     log,
		interval:   interval,
		stopCh:     make(chan struct{}),
	}
}

// Start starts the ZFS pool monitor background service
func (m *ZFSPoolMonitor) Start(ctx context.Context) {
	m.logger.Info("Starting ZFS pool monitor service", "interval", m.interval)
	ticker := time.NewTicker(m.interval)
	defer ticker.Stop()

	// Run initial sync immediately
	m.syncPools(ctx)

	for {
		select {
		case <-ctx.Done():
			m.logger.Info("ZFS pool monitor service stopped")
			return
		case <-m.stopCh:
			m.logger.Info("ZFS pool monitor service stopped")
			return
		case <-ticker.C:
			m.syncPools(ctx)
		}
	}
}

// Stop stops the ZFS pool monitor service
func (m *ZFSPoolMonitor) Stop() {
	close(m.stopCh)
}

// syncPools syncs ZFS pool status from system to database
func (m *ZFSPoolMonitor) syncPools(ctx context.Context) {
	m.logger.Debug("Running periodic ZFS pool sync")

	// Get all pools from system
	systemPools, err := m.getSystemPools(ctx)
	if err != nil {
		m.logger.Error("Failed to get system pools", "error", err)
		return
	}

	m.logger.Debug("Found pools in system", "count", len(systemPools))

	// Update each pool in database
	for poolName, poolInfo := range systemPools {
		if err := m.updatePoolStatus(ctx, poolName, poolInfo); err != nil {
			m.logger.Error("Failed to update pool status", "pool", poolName, "error", err)
		}
	}

	// Mark pools that don't exist in system as offline
	if err := m.markMissingPoolsOffline(ctx, systemPools); err != nil {
		m.logger.Error("Failed to mark missing pools offline", "error", err)
	}

	m.logger.Debug("ZFS pool sync completed")
}

// PoolInfo represents pool information from system
type PoolInfo struct {
	Name      string
	SizeBytes int64
	UsedBytes int64
	Health    string // online, degraded, faulted, offline, unavailable, removed
}

// getSystemPools gets all pools from ZFS system
func (m *ZFSPoolMonitor) getSystemPools(ctx context.Context) (map[string]PoolInfo, error) {
	pools := make(map[string]PoolInfo)

	// Get pool list
	cmd := exec.CommandContext(ctx, "zpool", "list", "-H", "-o", "name,size,alloc,free,health")
	output, err := cmd.Output()
	if err != nil {
		return nil, err
	}

	lines := strings.Split(strings.TrimSpace(string(output)), "\n")
	for _, line := range lines {
		if line == "" {
			continue
		}

		fields := strings.Fields(line)
		if len(fields) < 5 {
			continue
		}

		poolName := fields[0]
		sizeStr := fields[1]
		allocStr := fields[2]
		health := fields[4]

		// Parse size (e.g., "95.5G" -> bytes)
		sizeBytes, err := parseSize(sizeStr)
		if err != nil {
			m.logger.Warn("Failed to parse pool size", "pool", poolName, "size", sizeStr, "error", err)
			continue
		}

		// Parse allocated (used) size
		usedBytes, err := parseSize(allocStr)
		if err != nil {
			m.logger.Warn("Failed to parse pool used size", "pool", poolName, "alloc", allocStr, "error", err)
			continue
		}

		// Normalize health status to lowercase
		healthNormalized := strings.ToLower(health)

		pools[poolName] = PoolInfo{
			Name:      poolName,
			SizeBytes: sizeBytes,
			UsedBytes: usedBytes,
			Health:    healthNormalized,
		}
	}

	return pools, nil
}
// parseSize parses a size string (e.g., "95.5G", "1.2T") to bytes
func parseSize(sizeStr string) (int64, error) {
	// Remove any whitespace
	sizeStr = strings.TrimSpace(sizeStr)

	// Match pattern like "95.5G", "1.2T", "512M" (the suffix set matches the cases handled below)
	re := regexp.MustCompile(`^([\d.]+)([KMGTP]?)$`)
	matches := re.FindStringSubmatch(strings.ToUpper(sizeStr))
	if len(matches) != 3 {
		return 0, nil // Return 0 if can't parse
	}

	value, err := strconv.ParseFloat(matches[1], 64)
	if err != nil {
		return 0, err
	}

	unit := matches[2]
	var multiplier int64 = 1

	switch unit {
	case "K":
		multiplier = 1024
	case "M":
		multiplier = 1024 * 1024
	case "G":
		multiplier = 1024 * 1024 * 1024
	case "T":
		multiplier = 1024 * 1024 * 1024 * 1024
	case "P":
		multiplier = 1024 * 1024 * 1024 * 1024 * 1024
	}

	return int64(value * float64(multiplier)), nil
}

// updatePoolStatus updates pool status in database
func (m *ZFSPoolMonitor) updatePoolStatus(ctx context.Context, poolName string, poolInfo PoolInfo) error {
	// Get pool from database by name
	var poolID string
	err := m.zfsService.db.QueryRowContext(ctx,
		"SELECT id FROM zfs_pools WHERE name = $1",
		poolName,
	).Scan(&poolID)

	if err != nil {
		// Pool not in database, skip (might be created outside of Calypso)
		m.logger.Debug("Pool not found in database, skipping", "pool", poolName)
		return nil
	}

	// Update pool status, size, and used bytes
	_, err = m.zfsService.db.ExecContext(ctx, `
		UPDATE zfs_pools SET
			size_bytes = $1,
			used_bytes = $2,
			health_status = $3,
			updated_at = NOW()
		WHERE id = $4
	`, poolInfo.SizeBytes, poolInfo.UsedBytes, poolInfo.Health, poolID)

	if err != nil {
		return err
	}

	m.logger.Debug("Updated pool status", "pool", poolName, "health", poolInfo.Health, "size", poolInfo.SizeBytes, "used", poolInfo.UsedBytes)
	return nil
}

// markMissingPoolsOffline marks pools that exist in database but not in system as offline
func (m *ZFSPoolMonitor) markMissingPoolsOffline(ctx context.Context, systemPools map[string]PoolInfo) error {
	// Get all pools from database
	rows, err := m.zfsService.db.QueryContext(ctx, "SELECT id, name FROM zfs_pools WHERE is_active = true")
	if err != nil {
		return err
	}
	defer rows.Close()

	for rows.Next() {
		var poolID, poolName string
		if err := rows.Scan(&poolID, &poolName); err != nil {
			continue
		}

		// Check if pool exists in system
		if _, exists := systemPools[poolName]; !exists {
			// Pool doesn't exist in system, mark as offline
			_, err = m.zfsService.db.ExecContext(ctx, `
				UPDATE zfs_pools SET
					health_status = 'offline',
					updated_at = NOW()
				WHERE id = $1
			`, poolID)
			if err != nil {
				m.logger.Warn("Failed to mark pool as offline", "pool", poolName, "error", err)
			} else {
				m.logger.Info("Marked pool as offline (not found in system)", "pool", poolName)
			}
		}
	}

	return rows.Err()
}
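
A quick, self-contained sketch that mirrors the parseSize helper above on representative zpool list values (the values are made up, nothing captured from a real system):

package main

import (
    "fmt"
    "regexp"
    "strconv"
    "strings"
)

// toBytes mirrors parseSize above: a decimal number followed by an optional
// binary-prefix letter, e.g. "95.5G" or "1.2T".
func toBytes(s string) int64 {
    re := regexp.MustCompile(`^([\d.]+)([KMGTP]?)$`)
    m := re.FindStringSubmatch(strings.ToUpper(strings.TrimSpace(s)))
    if len(m) != 3 {
        return 0
    }
    v, err := strconv.ParseFloat(m[1], 64)
    if err != nil {
        return 0
    }
    mult := map[string]int64{"": 1, "K": 1 << 10, "M": 1 << 20, "G": 1 << 30, "T": 1 << 40, "P": 1 << 50}[m[2]]
    return int64(v * float64(mult))
}

func main() {
    for _, s := range []string{"512", "95.5G", "1.2T", "3P"} {
        fmt.Printf("%6s -> %d bytes\n", s, toBytes(s))
    }
}
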
backend/internal/tape_vtl/mhvtl_monitor.go  (new file, 516 lines)
@@ -0,0 +1,516 @@
package tape_vtl

import (
	"bufio"
	"context"
	"database/sql"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/atlasos/calypso/internal/common/database"
	"github.com/atlasos/calypso/internal/common/logger"
)

// MHVTLMonitor monitors mhvtl configuration files and syncs to database
type MHVTLMonitor struct {
	service    *Service
	logger     *logger.Logger
	configPath string
	interval   time.Duration
	stopCh     chan struct{}
}

// NewMHVTLMonitor creates a new MHVTL monitor service
func NewMHVTLMonitor(db *database.DB, log *logger.Logger, configPath string, interval time.Duration) *MHVTLMonitor {
	return &MHVTLMonitor{
		service:    NewService(db, log),
		logger:     log,
		configPath: configPath,
		interval:   interval,
		stopCh:     make(chan struct{}),
	}
}

// Start starts the MHVTL monitor background service
func (m *MHVTLMonitor) Start(ctx context.Context) {
	m.logger.Info("Starting MHVTL monitor service", "config_path", m.configPath, "interval", m.interval)
	ticker := time.NewTicker(m.interval)
	defer ticker.Stop()

	// Run initial sync immediately
	m.syncMHVTL(ctx)

	for {
		select {
		case <-ctx.Done():
			m.logger.Info("MHVTL monitor service stopped")
			return
		case <-m.stopCh:
			m.logger.Info("MHVTL monitor service stopped")
			return
		case <-ticker.C:
			m.syncMHVTL(ctx)
		}
	}
}

// Stop stops the MHVTL monitor service
func (m *MHVTLMonitor) Stop() {
	close(m.stopCh)
}

// syncMHVTL parses mhvtl configuration and syncs to database
func (m *MHVTLMonitor) syncMHVTL(ctx context.Context) {
	m.logger.Debug("Running MHVTL configuration sync")

	deviceConfPath := filepath.Join(m.configPath, "device.conf")
	if _, err := os.Stat(deviceConfPath); os.IsNotExist(err) {
		m.logger.Warn("MHVTL device.conf not found", "path", deviceConfPath)
		return
	}

	// Parse device.conf to get libraries and drives
	libraries, drives, err := m.parseDeviceConf(ctx, deviceConfPath)
	if err != nil {
		m.logger.Error("Failed to parse device.conf", "error", err)
		return
	}

	m.logger.Info("Parsed MHVTL configuration", "libraries", len(libraries), "drives", len(drives))

	// Sync libraries to database
	for _, lib := range libraries {
		if err := m.syncLibrary(ctx, lib); err != nil {
			m.logger.Error("Failed to sync library", "library_id", lib.LibraryID, "error", err)
		}
	}

	// Sync drives to database
	for _, drive := range drives {
		if err := m.syncDrive(ctx, drive); err != nil {
			m.logger.Error("Failed to sync drive", "drive_id", drive.DriveID, "error", err)
		}
	}

	// Parse library_contents files to get tapes
	for _, lib := range libraries {
		contentsPath := filepath.Join(m.configPath, fmt.Sprintf("library_contents.%d", lib.LibraryID))
		if err := m.syncLibraryContents(ctx, lib.LibraryID, contentsPath); err != nil {
			m.logger.Warn("Failed to sync library contents", "library_id", lib.LibraryID, "error", err)
		}
	}

	m.logger.Debug("MHVTL configuration sync completed")
}
// LibraryInfo represents a library from device.conf
type LibraryInfo struct {
	LibraryID     int
	Vendor        string
	Product       string
	SerialNumber  string
	HomeDirectory string
	Channel       string
	Target        string
	LUN           string
}

// DriveInfo represents a drive from device.conf
type DriveInfo struct {
	DriveID      int
	LibraryID    int
	Slot         int
	Vendor       string
	Product      string
	SerialNumber string
	Channel      string
	Target       string
	LUN          string
}

// parseDeviceConf parses the mhvtl device.conf file
func (m *MHVTLMonitor) parseDeviceConf(ctx context.Context, path string) ([]LibraryInfo, []DriveInfo, error) {
	file, err := os.Open(path)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to open device.conf: %w", err)
	}
	defer file.Close()

	var libraries []LibraryInfo
	var drives []DriveInfo

	scanner := bufio.NewScanner(file)
	var currentLibrary *LibraryInfo
	var currentDrive *DriveInfo

	libraryRegex := regexp.MustCompile(`^Library:\s+(\d+)\s+CHANNEL:\s+(\S+)\s+TARGET:\s+(\S+)\s+LUN:\s+(\S+)`)
	driveRegex := regexp.MustCompile(`^Drive:\s+(\d+)\s+CHANNEL:\s+(\S+)\s+TARGET:\s+(\S+)\s+LUN:\s+(\S+)`)
	libraryIDRegex := regexp.MustCompile(`Library ID:\s+(\d+)\s+Slot:\s+(\d+)`)

	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())

		// Skip comments and empty lines
		if strings.HasPrefix(line, "#") || line == "" {
			continue
		}

		// Check for Library entry
		if matches := libraryRegex.FindStringSubmatch(line); matches != nil {
			if currentLibrary != nil {
				libraries = append(libraries, *currentLibrary)
			}
			libID, _ := strconv.Atoi(matches[1])
			currentLibrary = &LibraryInfo{
				LibraryID: libID,
				Channel:   matches[2],
				Target:    matches[3],
				LUN:       matches[4],
			}
			currentDrive = nil
			continue
		}

		// Check for Drive entry
		if matches := driveRegex.FindStringSubmatch(line); matches != nil {
			// Flush the library being built, so the drive's field lines below
			// don't overwrite the library's vendor/product/serial values
			if currentLibrary != nil {
				libraries = append(libraries, *currentLibrary)
				currentLibrary = nil
			}
			if currentDrive != nil {
				drives = append(drives, *currentDrive)
			}
			driveID, _ := strconv.Atoi(matches[1])
			currentDrive = &DriveInfo{
				DriveID: driveID,
				Channel: matches[2],
				Target:  matches[3],
				LUN:     matches[4],
			}
			if matches := libraryIDRegex.FindStringSubmatch(line); matches != nil {
				libID, _ := strconv.Atoi(matches[1])
				slot, _ := strconv.Atoi(matches[2])
				currentDrive.LibraryID = libID
				currentDrive.Slot = slot
			}
			continue
		}

		// Parse library fields
		if currentLibrary != nil {
			if strings.HasPrefix(line, "Vendor identification:") {
				currentLibrary.Vendor = strings.TrimSpace(strings.TrimPrefix(line, "Vendor identification:"))
			} else if strings.HasPrefix(line, "Product identification:") {
				currentLibrary.Product = strings.TrimSpace(strings.TrimPrefix(line, "Product identification:"))
			} else if strings.HasPrefix(line, "Unit serial number:") {
				currentLibrary.SerialNumber = strings.TrimSpace(strings.TrimPrefix(line, "Unit serial number:"))
			} else if strings.HasPrefix(line, "Home directory:") {
				currentLibrary.HomeDirectory = strings.TrimSpace(strings.TrimPrefix(line, "Home directory:"))
			}
		}

		// Parse drive fields
		if currentDrive != nil {
			if strings.HasPrefix(line, "Vendor identification:") {
				currentDrive.Vendor = strings.TrimSpace(strings.TrimPrefix(line, "Vendor identification:"))
			} else if strings.HasPrefix(line, "Product identification:") {
				currentDrive.Product = strings.TrimSpace(strings.TrimPrefix(line, "Product identification:"))
			} else if strings.HasPrefix(line, "Unit serial number:") {
				currentDrive.SerialNumber = strings.TrimSpace(strings.TrimPrefix(line, "Unit serial number:"))
			} else if strings.HasPrefix(line, "Library ID:") && strings.Contains(line, "Slot:") {
				matches := libraryIDRegex.FindStringSubmatch(line)
				if matches != nil {
					libID, _ := strconv.Atoi(matches[1])
					slot, _ := strconv.Atoi(matches[2])
					currentDrive.LibraryID = libID
					currentDrive.Slot = slot
				}
			}
		}
	}

	// Add the last library and drive
	if currentLibrary != nil {
		libraries = append(libraries, *currentLibrary)
	}
	if currentDrive != nil {
		drives = append(drives, *currentDrive)
	}

	if err := scanner.Err(); err != nil {
		return nil, nil, fmt.Errorf("error reading device.conf: %w", err)
	}

	return libraries, drives, nil
}
// syncLibrary syncs a library to database
func (m *MHVTLMonitor) syncLibrary(ctx context.Context, libInfo LibraryInfo) error {
	// Check if library exists by mhvtl_library_id
	var existingID string
	err := m.service.db.QueryRowContext(ctx,
		"SELECT id FROM virtual_tape_libraries WHERE mhvtl_library_id = $1",
		libInfo.LibraryID,
	).Scan(&existingID)

	libraryName := fmt.Sprintf("VTL-%d", libInfo.LibraryID)
	if libInfo.Product != "" {
		libraryName = fmt.Sprintf("%s-%d", libInfo.Product, libInfo.LibraryID)
	}

	if err == sql.ErrNoRows {
		// Create new library
		// Get backing store path from mhvtl.conf
		backingStorePath := "/opt/mhvtl"
		if libInfo.HomeDirectory != "" {
			backingStorePath = libInfo.HomeDirectory
		}

		// Count slots and drives from library_contents file
		contentsPath := filepath.Join(m.configPath, fmt.Sprintf("library_contents.%d", libInfo.LibraryID))
		slotCount, driveCount := m.countSlotsAndDrives(contentsPath)

		_, err = m.service.db.ExecContext(ctx, `
			INSERT INTO virtual_tape_libraries (
				name, description, mhvtl_library_id, backing_store_path,
				slot_count, drive_count, is_active
			) VALUES ($1, $2, $3, $4, $5, $6, $7)
		`, libraryName, fmt.Sprintf("MHVTL Library %d (%s)", libInfo.LibraryID, libInfo.Product),
			libInfo.LibraryID, backingStorePath, slotCount, driveCount, true)
		if err != nil {
			return fmt.Errorf("failed to insert library: %w", err)
		}
		m.logger.Info("Created virtual library from MHVTL", "library_id", libInfo.LibraryID, "name", libraryName)
	} else if err == nil {
		// Update existing library
		_, err = m.service.db.ExecContext(ctx, `
			UPDATE virtual_tape_libraries SET
				name = $1, description = $2, backing_store_path = $3,
				is_active = $4, updated_at = NOW()
			WHERE id = $5
		`, libraryName, fmt.Sprintf("MHVTL Library %d (%s)", libInfo.LibraryID, libInfo.Product),
			libInfo.HomeDirectory, true, existingID)
		if err != nil {
			return fmt.Errorf("failed to update library: %w", err)
		}
		m.logger.Debug("Updated virtual library from MHVTL", "library_id", libInfo.LibraryID)
	} else {
		return fmt.Errorf("failed to check library existence: %w", err)
	}

	return nil
}

// syncDrive syncs a drive to database
func (m *MHVTLMonitor) syncDrive(ctx context.Context, driveInfo DriveInfo) error {
	// Get library ID from mhvtl_library_id
	var libraryID string
	err := m.service.db.QueryRowContext(ctx,
		"SELECT id FROM virtual_tape_libraries WHERE mhvtl_library_id = $1",
		driveInfo.LibraryID,
	).Scan(&libraryID)
	if err != nil {
		return fmt.Errorf("library not found for drive: %w", err)
	}

	// Calculate drive number from slot (drives are typically in slots 1, 2, 3, etc.)
	driveNumber := driveInfo.Slot

	// Check if drive exists
	var existingID string
	err = m.service.db.QueryRowContext(ctx,
		"SELECT id FROM virtual_tape_drives WHERE library_id = $1 AND drive_number = $2",
		libraryID, driveNumber,
	).Scan(&existingID)

	// Get device path (typically /dev/stX or /dev/nstX)
	devicePath := fmt.Sprintf("/dev/st%d", driveInfo.DriveID-10) // Drive 11 -> st1, Drive 12 -> st2, etc.
	stablePath := fmt.Sprintf("/dev/tape/by-id/scsi-%s", driveInfo.SerialNumber)

	if err == sql.ErrNoRows {
		// Create new drive
		_, err = m.service.db.ExecContext(ctx, `
			INSERT INTO virtual_tape_drives (
				library_id, drive_number, device_path, stable_path, status, is_active
			) VALUES ($1, $2, $3, $4, $5, $6)
		`, libraryID, driveNumber, devicePath, stablePath, "idle", true)
		if err != nil {
			return fmt.Errorf("failed to insert drive: %w", err)
		}
		m.logger.Info("Created virtual drive from MHVTL", "drive_id", driveInfo.DriveID, "library_id", driveInfo.LibraryID)
	} else if err == nil {
		// Update existing drive
		_, err = m.service.db.ExecContext(ctx, `
			UPDATE virtual_tape_drives SET
				device_path = $1, stable_path = $2, is_active = $3, updated_at = NOW()
			WHERE id = $4
		`, devicePath, stablePath, true, existingID)
		if err != nil {
			return fmt.Errorf("failed to update drive: %w", err)
		}
		m.logger.Debug("Updated virtual drive from MHVTL", "drive_id", driveInfo.DriveID)
	} else {
		return fmt.Errorf("failed to check drive existence: %w", err)
	}

	return nil
}
// syncLibraryContents syncs tapes from a library_contents file
func (m *MHVTLMonitor) syncLibraryContents(ctx context.Context, libraryID int, contentsPath string) error {
	// Get library ID from database
	var dbLibraryID string
	err := m.service.db.QueryRowContext(ctx,
		"SELECT id FROM virtual_tape_libraries WHERE mhvtl_library_id = $1",
		libraryID,
	).Scan(&dbLibraryID)
	if err != nil {
		return fmt.Errorf("library not found: %w", err)
	}

	// Get backing store path
	var backingStorePath string
	err = m.service.db.QueryRowContext(ctx,
		"SELECT backing_store_path FROM virtual_tape_libraries WHERE id = $1",
		dbLibraryID,
	).Scan(&backingStorePath)
	if err != nil {
		return fmt.Errorf("failed to get backing store path: %w", err)
	}

	file, err := os.Open(contentsPath)
	if err != nil {
		return fmt.Errorf("failed to open library_contents file: %w", err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	slotRegex := regexp.MustCompile(`^Slot\s+(\d+):\s+(.+)`)

	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())

		// Skip comments and empty lines
		if strings.HasPrefix(line, "#") || line == "" {
			continue
		}

		matches := slotRegex.FindStringSubmatch(line)
		if matches != nil {
			slotNumber, _ := strconv.Atoi(matches[1])
			barcode := strings.TrimSpace(matches[2])

			if barcode == "" || barcode == "?" {
				continue // Empty slot
			}

			// Determine tape type from barcode suffix
			tapeType := "LTO-8" // Default
			if len(barcode) >= 2 {
				suffix := barcode[len(barcode)-2:]
				switch suffix {
				case "L1":
					tapeType = "LTO-1"
				case "L2":
					tapeType = "LTO-2"
				case "L3":
					tapeType = "LTO-3"
				case "L4":
					tapeType = "LTO-4"
				case "L5":
					tapeType = "LTO-5"
				case "L6":
					tapeType = "LTO-6"
				case "L7":
					tapeType = "LTO-7"
				case "L8":
					tapeType = "LTO-8"
				case "L9":
					tapeType = "LTO-9"
				}
			}

			// Check if tape exists
			var existingID string
			err := m.service.db.QueryRowContext(ctx,
				"SELECT id FROM virtual_tapes WHERE library_id = $1 AND barcode = $2",
				dbLibraryID, barcode,
			).Scan(&existingID)

			imagePath := filepath.Join(backingStorePath, "tapes", fmt.Sprintf("%s.img", barcode))
			defaultSize := int64(15 * 1024 * 1024 * 1024 * 1024) // 15 TB default for LTO-8

			if err == sql.ErrNoRows {
				// Create new tape
				_, err = m.service.db.ExecContext(ctx, `
					INSERT INTO virtual_tapes (
						library_id, barcode, slot_number, image_file_path,
						size_bytes, used_bytes, tape_type, status
					) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
				`, dbLibraryID, barcode, slotNumber, imagePath, defaultSize, 0, tapeType, "idle")
				if err != nil {
					m.logger.Warn("Failed to insert tape", "barcode", barcode, "error", err)
				} else {
					m.logger.Debug("Created virtual tape from MHVTL", "barcode", barcode, "slot", slotNumber)
				}
			} else if err == nil {
				// Update existing tape slot
				_, err = m.service.db.ExecContext(ctx, `
					UPDATE virtual_tapes SET
						slot_number = $1, tape_type = $2, updated_at = NOW()
					WHERE id = $3
				`, slotNumber, tapeType, existingID)
				if err != nil {
					m.logger.Warn("Failed to update tape", "barcode", barcode, "error", err)
				}
			}
		}
	}

	return scanner.Err()
}

// countSlotsAndDrives counts slots and drives from a library_contents file
func (m *MHVTLMonitor) countSlotsAndDrives(contentsPath string) (slotCount, driveCount int) {
	file, err := os.Open(contentsPath)
	if err != nil {
		return 10, 2 // Default values
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	slotRegex := regexp.MustCompile(`^Slot\s+(\d+):`)
	driveRegex := regexp.MustCompile(`^Drive\s+(\d+):`)

	maxSlot := 0
	driveCount = 0

	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if strings.HasPrefix(line, "#") || line == "" {
			continue
		}

		if matches := slotRegex.FindStringSubmatch(line); matches != nil {
			slot, _ := strconv.Atoi(matches[1])
			if slot > maxSlot {
				maxSlot = slot
			}
		}
		if matches := driveRegex.FindStringSubmatch(line); matches != nil {
			driveCount++
		}
	}

	slotCount = maxSlot
	if slotCount == 0 {
		slotCount = 10 // Default
	}
	if driveCount == 0 {
		driveCount = 2 // Default
	}

	return slotCount, driveCount
}
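
To make the library_contents parsing above concrete, a condensed, self-contained sketch (the slot lines and barcodes are made up, and only a few LTO suffixes from the table above are shown):

package main

import (
    "fmt"
    "regexp"
    "strings"
)

var slotRe = regexp.MustCompile(`^Slot\s+(\d+):\s+(.+)`)

// ltoType maps a barcode's two-character suffix to a tape generation,
// defaulting to LTO-8 as the monitor above does.
func ltoType(barcode string) string {
    if len(barcode) >= 2 {
        switch barcode[len(barcode)-2:] {
        case "L5":
            return "LTO-5"
        case "L6":
            return "LTO-6"
        case "L7":
            return "LTO-7"
        case "L9":
            return "LTO-9"
        }
    }
    return "LTO-8"
}

func main() {
    lines := []string{
        "Slot 01: E01001L8",
        "Slot 02: E01002L6",
        "Slot 03:  ", // empty slot
    }
    for _, line := range lines {
        m := slotRe.FindStringSubmatch(line)
        if m == nil {
            continue
        }
        barcode := strings.TrimSpace(m[2])
        if barcode == "" || barcode == "?" {
            fmt.Printf("slot %s: empty\n", m[1])
            continue
        }
        fmt.Printf("slot %s: barcode %s type %s\n", m[1], barcode, ltoType(barcode))
    }
}
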