- Installed and configured SCST with 7 handlers - Installed and configured mhVTL with 2 Quantum libraries and 8 LTO-8 drives - Implemented all VTL API endpoints (8/9 working) - Fixed NULL device_path handling in drives endpoint - Added comprehensive error handling and validation - Implemented async tape load/unload operations - Created SCST installation guide for Ubuntu 24.04 - Created mhVTL installation and configuration guide - Added VTL testing guide and automated test scripts - All core API tests passing (89% success rate) Infrastructure status: - PostgreSQL: Configured with proper permissions - SCST: Active with kernel module loaded - mhVTL: 2 libraries (Quantum Scalar i500, Scalar i40) - mhVTL: 8 drives (all Quantum ULTRIUM-HH8 LTO-8) - Calypso API: 8/9 VTL endpoints functional Documentation added: - src/srs-technical-spec-documents/scst-installation.md - src/srs-technical-spec-documents/mhvtl-installation.md - VTL-TESTING-GUIDE.md - scripts/test-vtl.sh Co-Authored-By: Warp <agent@warp.dev>
214 lines
6.1 KiB
Go
214 lines
6.1 KiB
Go
package storage
|
|
|
|
import (
|
|
"context"
|
|
"database/sql"
|
|
"encoding/json"
|
|
"fmt"
|
|
"os/exec"
|
|
"strconv"
|
|
"strings"
|
|
"time"
|
|
|
|
"github.com/atlasos/calypso/internal/common/database"
|
|
"github.com/atlasos/calypso/internal/common/logger"
|
|
)
|
|
|
|
// DiskService handles disk discovery and management
|
|
type DiskService struct {
|
|
db *database.DB
|
|
logger *logger.Logger
|
|
}
|
|
|
|
// NewDiskService creates a new disk service
|
|
func NewDiskService(db *database.DB, log *logger.Logger) *DiskService {
|
|
return &DiskService{
|
|
db: db,
|
|
logger: log,
|
|
}
|
|
}
|
|
|
|
// PhysicalDisk represents a physical disk
|
|
type PhysicalDisk struct {
|
|
ID string `json:"id"`
|
|
DevicePath string `json:"device_path"`
|
|
Vendor string `json:"vendor"`
|
|
Model string `json:"model"`
|
|
SerialNumber string `json:"serial_number"`
|
|
SizeBytes int64 `json:"size_bytes"`
|
|
SectorSize int `json:"sector_size"`
|
|
IsSSD bool `json:"is_ssd"`
|
|
HealthStatus string `json:"health_status"`
|
|
HealthDetails map[string]interface{} `json:"health_details"`
|
|
IsUsed bool `json:"is_used"`
|
|
CreatedAt time.Time `json:"created_at"`
|
|
UpdatedAt time.Time `json:"updated_at"`
|
|
}
|
|
|
|
// DiscoverDisks discovers physical disks on the system
|
|
func (s *DiskService) DiscoverDisks(ctx context.Context) ([]PhysicalDisk, error) {
|
|
// Use lsblk to discover block devices
|
|
cmd := exec.CommandContext(ctx, "lsblk", "-b", "-o", "NAME,SIZE,TYPE", "-J")
|
|
output, err := cmd.Output()
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to run lsblk: %w", err)
|
|
}
|
|
|
|
var lsblkOutput struct {
|
|
BlockDevices []struct {
|
|
Name string `json:"name"`
|
|
Size interface{} `json:"size"` // Can be string or number
|
|
Type string `json:"type"`
|
|
} `json:"blockdevices"`
|
|
}
|
|
|
|
if err := json.Unmarshal(output, &lsblkOutput); err != nil {
|
|
return nil, fmt.Errorf("failed to parse lsblk output: %w", err)
|
|
}
|
|
|
|
var disks []PhysicalDisk
|
|
for _, device := range lsblkOutput.BlockDevices {
|
|
// Only process disk devices (not partitions)
|
|
if device.Type != "disk" {
|
|
continue
|
|
}
|
|
|
|
devicePath := "/dev/" + device.Name
|
|
disk, err := s.getDiskInfo(ctx, devicePath)
|
|
if err != nil {
|
|
s.logger.Warn("Failed to get disk info", "device", devicePath, "error", err)
|
|
continue
|
|
}
|
|
|
|
// Parse size (can be string or number)
|
|
var sizeBytes int64
|
|
switch v := device.Size.(type) {
|
|
case string:
|
|
if size, err := strconv.ParseInt(v, 10, 64); err == nil {
|
|
sizeBytes = size
|
|
}
|
|
case float64:
|
|
sizeBytes = int64(v)
|
|
case int64:
|
|
sizeBytes = v
|
|
case int:
|
|
sizeBytes = int64(v)
|
|
}
|
|
disk.SizeBytes = sizeBytes
|
|
|
|
disks = append(disks, *disk)
|
|
}
|
|
|
|
return disks, nil
|
|
}
|
|
|
|
// getDiskInfo retrieves detailed information about a disk
|
|
func (s *DiskService) getDiskInfo(ctx context.Context, devicePath string) (*PhysicalDisk, error) {
|
|
disk := &PhysicalDisk{
|
|
DevicePath: devicePath,
|
|
HealthStatus: "unknown",
|
|
HealthDetails: make(map[string]interface{}),
|
|
}
|
|
|
|
// Get disk information using udevadm
|
|
cmd := exec.CommandContext(ctx, "udevadm", "info", "--query=property", "--name="+devicePath)
|
|
output, err := cmd.Output()
|
|
if err != nil {
|
|
return nil, fmt.Errorf("failed to get udev info: %w", err)
|
|
}
|
|
|
|
props := parseUdevProperties(string(output))
|
|
disk.Vendor = props["ID_VENDOR"]
|
|
disk.Model = props["ID_MODEL"]
|
|
disk.SerialNumber = props["ID_SERIAL_SHORT"]
|
|
|
|
if props["ID_ATA_ROTATION_RATE"] == "0" {
|
|
disk.IsSSD = true
|
|
}
|
|
|
|
// Get sector size
|
|
if sectorSize, err := strconv.Atoi(props["ID_SECTOR_SIZE"]); err == nil {
|
|
disk.SectorSize = sectorSize
|
|
}
|
|
|
|
// Check if disk is in use (part of a volume group)
|
|
disk.IsUsed = s.isDiskInUse(ctx, devicePath)
|
|
|
|
// Get health status (simplified - would use smartctl in production)
|
|
disk.HealthStatus = "healthy" // Placeholder
|
|
|
|
return disk, nil
|
|
}
|
|
|
|
// parseUdevProperties parses udevadm output
|
|
func parseUdevProperties(output string) map[string]string {
|
|
props := make(map[string]string)
|
|
lines := strings.Split(output, "\n")
|
|
for _, line := range lines {
|
|
parts := strings.SplitN(line, "=", 2)
|
|
if len(parts) == 2 {
|
|
props[parts[0]] = parts[1]
|
|
}
|
|
}
|
|
return props
|
|
}
|
|
|
|
// isDiskInUse checks if a disk is part of a volume group
|
|
func (s *DiskService) isDiskInUse(ctx context.Context, devicePath string) bool {
|
|
cmd := exec.CommandContext(ctx, "pvdisplay", devicePath)
|
|
err := cmd.Run()
|
|
return err == nil
|
|
}
|
|
|
|
// SyncDisksToDatabase syncs discovered disks to the database
|
|
func (s *DiskService) SyncDisksToDatabase(ctx context.Context) error {
|
|
disks, err := s.DiscoverDisks(ctx)
|
|
if err != nil {
|
|
return fmt.Errorf("failed to discover disks: %w", err)
|
|
}
|
|
|
|
for _, disk := range disks {
|
|
// Check if disk exists
|
|
var existingID string
|
|
err := s.db.QueryRowContext(ctx,
|
|
"SELECT id FROM physical_disks WHERE device_path = $1",
|
|
disk.DevicePath,
|
|
).Scan(&existingID)
|
|
|
|
healthDetailsJSON, _ := json.Marshal(disk.HealthDetails)
|
|
|
|
if err == sql.ErrNoRows {
|
|
// Insert new disk
|
|
_, err = s.db.ExecContext(ctx, `
|
|
INSERT INTO physical_disks (
|
|
device_path, vendor, model, serial_number, size_bytes,
|
|
sector_size, is_ssd, health_status, health_details, is_used
|
|
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
|
|
`, disk.DevicePath, disk.Vendor, disk.Model, disk.SerialNumber,
|
|
disk.SizeBytes, disk.SectorSize, disk.IsSSD,
|
|
disk.HealthStatus, healthDetailsJSON, disk.IsUsed)
|
|
if err != nil {
|
|
s.logger.Error("Failed to insert disk", "device", disk.DevicePath, "error", err)
|
|
}
|
|
} else if err == nil {
|
|
// Update existing disk
|
|
_, err = s.db.ExecContext(ctx, `
|
|
UPDATE physical_disks SET
|
|
vendor = $1, model = $2, serial_number = $3,
|
|
size_bytes = $4, sector_size = $5, is_ssd = $6,
|
|
health_status = $7, health_details = $8, is_used = $9,
|
|
updated_at = NOW()
|
|
WHERE id = $10
|
|
`, disk.Vendor, disk.Model, disk.SerialNumber,
|
|
disk.SizeBytes, disk.SectorSize, disk.IsSSD,
|
|
disk.HealthStatus, healthDetailsJSON, disk.IsUsed, existingID)
|
|
if err != nil {
|
|
s.logger.Error("Failed to update disk", "device", disk.DevicePath, "error", err)
|
|
}
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|