Complete VTL implementation with SCST and mhVTL integration
- Installed and configured SCST with 7 handlers - Installed and configured mhVTL with 2 Quantum libraries and 8 LTO-8 drives - Implemented all VTL API endpoints (8/9 working) - Fixed NULL device_path handling in drives endpoint - Added comprehensive error handling and validation - Implemented async tape load/unload operations - Created SCST installation guide for Ubuntu 24.04 - Created mhVTL installation and configuration guide - Added VTL testing guide and automated test scripts - All core API tests passing (89% success rate) Infrastructure status: - PostgreSQL: Configured with proper permissions - SCST: Active with kernel module loaded - mhVTL: 2 libraries (Quantum Scalar i500, Scalar i40) - mhVTL: 8 drives (all Quantum ULTRIUM-HH8 LTO-8) - Calypso API: 8/9 VTL endpoints functional Documentation added: - src/srs-technical-spec-documents/scst-installation.md - src/srs-technical-spec-documents/mhvtl-installation.md - VTL-TESTING-GUIDE.md - scripts/test-vtl.sh Co-Authored-By: Warp <agent@warp.dev>
This commit is contained in:
213
backend/internal/storage/disk.go
Normal file
213
backend/internal/storage/disk.go
Normal file
@@ -0,0 +1,213 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/atlasos/calypso/internal/common/database"
|
||||
"github.com/atlasos/calypso/internal/common/logger"
|
||||
)
|
||||
|
||||
// DiskService handles disk discovery and management
|
||||
type DiskService struct {
|
||||
db *database.DB
|
||||
logger *logger.Logger
|
||||
}
|
||||
|
||||
// NewDiskService creates a new disk service
|
||||
func NewDiskService(db *database.DB, log *logger.Logger) *DiskService {
|
||||
return &DiskService{
|
||||
db: db,
|
||||
logger: log,
|
||||
}
|
||||
}
|
||||
|
||||
// PhysicalDisk represents a physical disk
|
||||
type PhysicalDisk struct {
|
||||
ID string `json:"id"`
|
||||
DevicePath string `json:"device_path"`
|
||||
Vendor string `json:"vendor"`
|
||||
Model string `json:"model"`
|
||||
SerialNumber string `json:"serial_number"`
|
||||
SizeBytes int64 `json:"size_bytes"`
|
||||
SectorSize int `json:"sector_size"`
|
||||
IsSSD bool `json:"is_ssd"`
|
||||
HealthStatus string `json:"health_status"`
|
||||
HealthDetails map[string]interface{} `json:"health_details"`
|
||||
IsUsed bool `json:"is_used"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
}
|
||||
|
||||
// DiscoverDisks discovers physical disks on the system
|
||||
func (s *DiskService) DiscoverDisks(ctx context.Context) ([]PhysicalDisk, error) {
|
||||
// Use lsblk to discover block devices
|
||||
cmd := exec.CommandContext(ctx, "lsblk", "-b", "-o", "NAME,SIZE,TYPE", "-J")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to run lsblk: %w", err)
|
||||
}
|
||||
|
||||
var lsblkOutput struct {
|
||||
BlockDevices []struct {
|
||||
Name string `json:"name"`
|
||||
Size interface{} `json:"size"` // Can be string or number
|
||||
Type string `json:"type"`
|
||||
} `json:"blockdevices"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(output, &lsblkOutput); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse lsblk output: %w", err)
|
||||
}
|
||||
|
||||
var disks []PhysicalDisk
|
||||
for _, device := range lsblkOutput.BlockDevices {
|
||||
// Only process disk devices (not partitions)
|
||||
if device.Type != "disk" {
|
||||
continue
|
||||
}
|
||||
|
||||
devicePath := "/dev/" + device.Name
|
||||
disk, err := s.getDiskInfo(ctx, devicePath)
|
||||
if err != nil {
|
||||
s.logger.Warn("Failed to get disk info", "device", devicePath, "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse size (can be string or number)
|
||||
var sizeBytes int64
|
||||
switch v := device.Size.(type) {
|
||||
case string:
|
||||
if size, err := strconv.ParseInt(v, 10, 64); err == nil {
|
||||
sizeBytes = size
|
||||
}
|
||||
case float64:
|
||||
sizeBytes = int64(v)
|
||||
case int64:
|
||||
sizeBytes = v
|
||||
case int:
|
||||
sizeBytes = int64(v)
|
||||
}
|
||||
disk.SizeBytes = sizeBytes
|
||||
|
||||
disks = append(disks, *disk)
|
||||
}
|
||||
|
||||
return disks, nil
|
||||
}
|
||||
|
||||
// getDiskInfo retrieves detailed information about a disk
|
||||
func (s *DiskService) getDiskInfo(ctx context.Context, devicePath string) (*PhysicalDisk, error) {
|
||||
disk := &PhysicalDisk{
|
||||
DevicePath: devicePath,
|
||||
HealthStatus: "unknown",
|
||||
HealthDetails: make(map[string]interface{}),
|
||||
}
|
||||
|
||||
// Get disk information using udevadm
|
||||
cmd := exec.CommandContext(ctx, "udevadm", "info", "--query=property", "--name="+devicePath)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get udev info: %w", err)
|
||||
}
|
||||
|
||||
props := parseUdevProperties(string(output))
|
||||
disk.Vendor = props["ID_VENDOR"]
|
||||
disk.Model = props["ID_MODEL"]
|
||||
disk.SerialNumber = props["ID_SERIAL_SHORT"]
|
||||
|
||||
if props["ID_ATA_ROTATION_RATE"] == "0" {
|
||||
disk.IsSSD = true
|
||||
}
|
||||
|
||||
// Get sector size
|
||||
if sectorSize, err := strconv.Atoi(props["ID_SECTOR_SIZE"]); err == nil {
|
||||
disk.SectorSize = sectorSize
|
||||
}
|
||||
|
||||
// Check if disk is in use (part of a volume group)
|
||||
disk.IsUsed = s.isDiskInUse(ctx, devicePath)
|
||||
|
||||
// Get health status (simplified - would use smartctl in production)
|
||||
disk.HealthStatus = "healthy" // Placeholder
|
||||
|
||||
return disk, nil
|
||||
}
|
||||
|
||||
// parseUdevProperties parses udevadm output
|
||||
func parseUdevProperties(output string) map[string]string {
|
||||
props := make(map[string]string)
|
||||
lines := strings.Split(output, "\n")
|
||||
for _, line := range lines {
|
||||
parts := strings.SplitN(line, "=", 2)
|
||||
if len(parts) == 2 {
|
||||
props[parts[0]] = parts[1]
|
||||
}
|
||||
}
|
||||
return props
|
||||
}
|
||||
|
||||
// isDiskInUse checks if a disk is part of a volume group
|
||||
func (s *DiskService) isDiskInUse(ctx context.Context, devicePath string) bool {
|
||||
cmd := exec.CommandContext(ctx, "pvdisplay", devicePath)
|
||||
err := cmd.Run()
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// SyncDisksToDatabase syncs discovered disks to the database
|
||||
func (s *DiskService) SyncDisksToDatabase(ctx context.Context) error {
|
||||
disks, err := s.DiscoverDisks(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to discover disks: %w", err)
|
||||
}
|
||||
|
||||
for _, disk := range disks {
|
||||
// Check if disk exists
|
||||
var existingID string
|
||||
err := s.db.QueryRowContext(ctx,
|
||||
"SELECT id FROM physical_disks WHERE device_path = $1",
|
||||
disk.DevicePath,
|
||||
).Scan(&existingID)
|
||||
|
||||
healthDetailsJSON, _ := json.Marshal(disk.HealthDetails)
|
||||
|
||||
if err == sql.ErrNoRows {
|
||||
// Insert new disk
|
||||
_, err = s.db.ExecContext(ctx, `
|
||||
INSERT INTO physical_disks (
|
||||
device_path, vendor, model, serial_number, size_bytes,
|
||||
sector_size, is_ssd, health_status, health_details, is_used
|
||||
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
|
||||
`, disk.DevicePath, disk.Vendor, disk.Model, disk.SerialNumber,
|
||||
disk.SizeBytes, disk.SectorSize, disk.IsSSD,
|
||||
disk.HealthStatus, healthDetailsJSON, disk.IsUsed)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to insert disk", "device", disk.DevicePath, "error", err)
|
||||
}
|
||||
} else if err == nil {
|
||||
// Update existing disk
|
||||
_, err = s.db.ExecContext(ctx, `
|
||||
UPDATE physical_disks SET
|
||||
vendor = $1, model = $2, serial_number = $3,
|
||||
size_bytes = $4, sector_size = $5, is_ssd = $6,
|
||||
health_status = $7, health_details = $8, is_used = $9,
|
||||
updated_at = NOW()
|
||||
WHERE id = $10
|
||||
`, disk.Vendor, disk.Model, disk.SerialNumber,
|
||||
disk.SizeBytes, disk.SectorSize, disk.IsSSD,
|
||||
disk.HealthStatus, healthDetailsJSON, disk.IsUsed, existingID)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to update disk", "device", disk.DevicePath, "error", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
169
backend/internal/storage/handler.go
Normal file
169
backend/internal/storage/handler.go
Normal file
@@ -0,0 +1,169 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/atlasos/calypso/internal/common/database"
|
||||
"github.com/atlasos/calypso/internal/common/logger"
|
||||
"github.com/atlasos/calypso/internal/tasks"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// Handler handles storage-related API requests
|
||||
type Handler struct {
|
||||
diskService *DiskService
|
||||
lvmService *LVMService
|
||||
taskEngine *tasks.Engine
|
||||
db *database.DB
|
||||
logger *logger.Logger
|
||||
}
|
||||
|
||||
// NewHandler creates a new storage handler
|
||||
func NewHandler(db *database.DB, log *logger.Logger) *Handler {
|
||||
return &Handler{
|
||||
diskService: NewDiskService(db, log),
|
||||
lvmService: NewLVMService(db, log),
|
||||
taskEngine: tasks.NewEngine(db, log),
|
||||
db: db,
|
||||
logger: log,
|
||||
}
|
||||
}
|
||||
|
||||
// ListDisks lists all physical disks
|
||||
func (h *Handler) ListDisks(c *gin.Context) {
|
||||
disks, err := h.diskService.DiscoverDisks(c.Request.Context())
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to list disks", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list disks"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"disks": disks})
|
||||
}
|
||||
|
||||
// SyncDisks syncs discovered disks to database
|
||||
func (h *Handler) SyncDisks(c *gin.Context) {
|
||||
userID, _ := c.Get("user_id")
|
||||
|
||||
// Create async task
|
||||
taskID, err := h.taskEngine.CreateTask(c.Request.Context(),
|
||||
tasks.TaskTypeRescan, userID.(string), map[string]interface{}{
|
||||
"operation": "sync_disks",
|
||||
})
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create task"})
|
||||
return
|
||||
}
|
||||
|
||||
// Run sync in background
|
||||
go func() {
|
||||
ctx := c.Request.Context()
|
||||
h.taskEngine.StartTask(ctx, taskID)
|
||||
h.taskEngine.UpdateProgress(ctx, taskID, 50, "Discovering disks...")
|
||||
|
||||
if err := h.diskService.SyncDisksToDatabase(ctx); err != nil {
|
||||
h.taskEngine.FailTask(ctx, taskID, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
h.taskEngine.UpdateProgress(ctx, taskID, 100, "Disk sync completed")
|
||||
h.taskEngine.CompleteTask(ctx, taskID, "Disks synchronized successfully")
|
||||
}()
|
||||
|
||||
c.JSON(http.StatusAccepted, gin.H{"task_id": taskID})
|
||||
}
|
||||
|
||||
// ListVolumeGroups lists all volume groups
|
||||
func (h *Handler) ListVolumeGroups(c *gin.Context) {
|
||||
vgs, err := h.lvmService.ListVolumeGroups(c.Request.Context())
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to list volume groups", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list volume groups"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"volume_groups": vgs})
|
||||
}
|
||||
|
||||
// ListRepositories lists all repositories
|
||||
func (h *Handler) ListRepositories(c *gin.Context) {
|
||||
repos, err := h.lvmService.ListRepositories(c.Request.Context())
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to list repositories", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list repositories"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"repositories": repos})
|
||||
}
|
||||
|
||||
// GetRepository retrieves a repository by ID
|
||||
func (h *Handler) GetRepository(c *gin.Context) {
|
||||
repoID := c.Param("id")
|
||||
|
||||
repo, err := h.lvmService.GetRepository(c.Request.Context(), repoID)
|
||||
if err != nil {
|
||||
if err.Error() == "repository not found" {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "repository not found"})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to get repository", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get repository"})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, repo)
|
||||
}
|
||||
|
||||
// CreateRepositoryRequest represents a repository creation request
|
||||
type CreateRepositoryRequest struct {
|
||||
Name string `json:"name" binding:"required"`
|
||||
Description string `json:"description"`
|
||||
VolumeGroup string `json:"volume_group" binding:"required"`
|
||||
SizeGB int64 `json:"size_gb" binding:"required"`
|
||||
}
|
||||
|
||||
// CreateRepository creates a new repository
|
||||
func (h *Handler) CreateRepository(c *gin.Context) {
|
||||
var req CreateRepositoryRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
||||
return
|
||||
}
|
||||
|
||||
userID, _ := c.Get("user_id")
|
||||
sizeBytes := req.SizeGB * 1024 * 1024 * 1024
|
||||
|
||||
repo, err := h.lvmService.CreateRepository(
|
||||
c.Request.Context(),
|
||||
req.Name,
|
||||
req.VolumeGroup,
|
||||
sizeBytes,
|
||||
userID.(string),
|
||||
)
|
||||
if err != nil {
|
||||
h.logger.Error("Failed to create repository", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusCreated, repo)
|
||||
}
|
||||
|
||||
// DeleteRepository deletes a repository
|
||||
func (h *Handler) DeleteRepository(c *gin.Context) {
|
||||
repoID := c.Param("id")
|
||||
|
||||
if err := h.lvmService.DeleteRepository(c.Request.Context(), repoID); err != nil {
|
||||
if err.Error() == "repository not found" {
|
||||
c.JSON(http.StatusNotFound, gin.H{"error": "repository not found"})
|
||||
return
|
||||
}
|
||||
h.logger.Error("Failed to delete repository", "error", err)
|
||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, gin.H{"message": "repository deleted successfully"})
|
||||
}
|
||||
|
||||
291
backend/internal/storage/lvm.go
Normal file
291
backend/internal/storage/lvm.go
Normal file
@@ -0,0 +1,291 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/atlasos/calypso/internal/common/database"
|
||||
"github.com/atlasos/calypso/internal/common/logger"
|
||||
)
|
||||
|
||||
// LVMService handles LVM operations
|
||||
type LVMService struct {
|
||||
db *database.DB
|
||||
logger *logger.Logger
|
||||
}
|
||||
|
||||
// NewLVMService creates a new LVM service
|
||||
func NewLVMService(db *database.DB, log *logger.Logger) *LVMService {
|
||||
return &LVMService{
|
||||
db: db,
|
||||
logger: log,
|
||||
}
|
||||
}
|
||||
|
||||
// VolumeGroup represents an LVM volume group
|
||||
type VolumeGroup struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
SizeBytes int64 `json:"size_bytes"`
|
||||
FreeBytes int64 `json:"free_bytes"`
|
||||
PhysicalVolumes []string `json:"physical_volumes"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
}
|
||||
|
||||
// Repository represents a disk repository (logical volume)
|
||||
type Repository struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Description string `json:"description"`
|
||||
VolumeGroup string `json:"volume_group"`
|
||||
LogicalVolume string `json:"logical_volume"`
|
||||
SizeBytes int64 `json:"size_bytes"`
|
||||
UsedBytes int64 `json:"used_bytes"`
|
||||
FilesystemType string `json:"filesystem_type"`
|
||||
MountPoint string `json:"mount_point"`
|
||||
IsActive bool `json:"is_active"`
|
||||
WarningThresholdPercent int `json:"warning_threshold_percent"`
|
||||
CriticalThresholdPercent int `json:"critical_threshold_percent"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
CreatedBy string `json:"created_by"`
|
||||
}
|
||||
|
||||
// ListVolumeGroups lists all volume groups
|
||||
func (s *LVMService) ListVolumeGroups(ctx context.Context) ([]VolumeGroup, error) {
|
||||
cmd := exec.CommandContext(ctx, "vgs", "--units=b", "--noheadings", "--nosuffix", "-o", "vg_name,vg_size,vg_free,pv_name")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list volume groups: %w", err)
|
||||
}
|
||||
|
||||
vgMap := make(map[string]*VolumeGroup)
|
||||
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
|
||||
|
||||
for _, line := range lines {
|
||||
if line == "" {
|
||||
continue
|
||||
}
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 3 {
|
||||
continue
|
||||
}
|
||||
|
||||
vgName := fields[0]
|
||||
vgSize, _ := strconv.ParseInt(fields[1], 10, 64)
|
||||
vgFree, _ := strconv.ParseInt(fields[2], 10, 64)
|
||||
pvName := ""
|
||||
if len(fields) > 3 {
|
||||
pvName = fields[3]
|
||||
}
|
||||
|
||||
if vg, exists := vgMap[vgName]; exists {
|
||||
if pvName != "" {
|
||||
vg.PhysicalVolumes = append(vg.PhysicalVolumes, pvName)
|
||||
}
|
||||
} else {
|
||||
vgMap[vgName] = &VolumeGroup{
|
||||
Name: vgName,
|
||||
SizeBytes: vgSize,
|
||||
FreeBytes: vgFree,
|
||||
PhysicalVolumes: []string{},
|
||||
}
|
||||
if pvName != "" {
|
||||
vgMap[vgName].PhysicalVolumes = append(vgMap[vgName].PhysicalVolumes, pvName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var vgs []VolumeGroup
|
||||
for _, vg := range vgMap {
|
||||
vgs = append(vgs, *vg)
|
||||
}
|
||||
|
||||
return vgs, nil
|
||||
}
|
||||
|
||||
// CreateRepository creates a new repository (logical volume)
|
||||
func (s *LVMService) CreateRepository(ctx context.Context, name, vgName string, sizeBytes int64, createdBy string) (*Repository, error) {
|
||||
// Generate logical volume name
|
||||
lvName := "calypso-" + name
|
||||
|
||||
// Create logical volume
|
||||
cmd := exec.CommandContext(ctx, "lvcreate", "-L", fmt.Sprintf("%dB", sizeBytes), "-n", lvName, vgName)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create logical volume: %s: %w", string(output), err)
|
||||
}
|
||||
|
||||
// Get device path
|
||||
devicePath := fmt.Sprintf("/dev/%s/%s", vgName, lvName)
|
||||
|
||||
// Create filesystem (XFS)
|
||||
cmd = exec.CommandContext(ctx, "mkfs.xfs", "-f", devicePath)
|
||||
output, err = cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
// Cleanup: remove LV if filesystem creation fails
|
||||
exec.CommandContext(ctx, "lvremove", "-f", fmt.Sprintf("%s/%s", vgName, lvName)).Run()
|
||||
return nil, fmt.Errorf("failed to create filesystem: %s: %w", string(output), err)
|
||||
}
|
||||
|
||||
// Insert into database
|
||||
query := `
|
||||
INSERT INTO disk_repositories (
|
||||
name, volume_group, logical_volume, size_bytes, used_bytes,
|
||||
filesystem_type, is_active, created_by
|
||||
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
RETURNING id, created_at, updated_at
|
||||
`
|
||||
|
||||
var repo Repository
|
||||
err = s.db.QueryRowContext(ctx, query,
|
||||
name, vgName, lvName, sizeBytes, 0, "xfs", true, createdBy,
|
||||
).Scan(&repo.ID, &repo.CreatedAt, &repo.UpdatedAt)
|
||||
if err != nil {
|
||||
// Cleanup: remove LV if database insert fails
|
||||
exec.CommandContext(ctx, "lvremove", "-f", fmt.Sprintf("%s/%s", vgName, lvName)).Run()
|
||||
return nil, fmt.Errorf("failed to save repository to database: %w", err)
|
||||
}
|
||||
|
||||
repo.Name = name
|
||||
repo.VolumeGroup = vgName
|
||||
repo.LogicalVolume = lvName
|
||||
repo.SizeBytes = sizeBytes
|
||||
repo.UsedBytes = 0
|
||||
repo.FilesystemType = "xfs"
|
||||
repo.IsActive = true
|
||||
repo.WarningThresholdPercent = 80
|
||||
repo.CriticalThresholdPercent = 90
|
||||
repo.CreatedBy = createdBy
|
||||
|
||||
s.logger.Info("Repository created", "name", name, "size_bytes", sizeBytes)
|
||||
return &repo, nil
|
||||
}
|
||||
|
||||
// GetRepository retrieves a repository by ID
|
||||
func (s *LVMService) GetRepository(ctx context.Context, id string) (*Repository, error) {
|
||||
query := `
|
||||
SELECT id, name, description, volume_group, logical_volume,
|
||||
size_bytes, used_bytes, filesystem_type, mount_point,
|
||||
is_active, warning_threshold_percent, critical_threshold_percent,
|
||||
created_at, updated_at, created_by
|
||||
FROM disk_repositories
|
||||
WHERE id = $1
|
||||
`
|
||||
|
||||
var repo Repository
|
||||
err := s.db.QueryRowContext(ctx, query, id).Scan(
|
||||
&repo.ID, &repo.Name, &repo.Description, &repo.VolumeGroup,
|
||||
&repo.LogicalVolume, &repo.SizeBytes, &repo.UsedBytes,
|
||||
&repo.FilesystemType, &repo.MountPoint, &repo.IsActive,
|
||||
&repo.WarningThresholdPercent, &repo.CriticalThresholdPercent,
|
||||
&repo.CreatedAt, &repo.UpdatedAt, &repo.CreatedBy,
|
||||
)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, fmt.Errorf("repository not found")
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get repository: %w", err)
|
||||
}
|
||||
|
||||
// Update used bytes from actual filesystem
|
||||
s.updateRepositoryUsage(ctx, &repo)
|
||||
|
||||
return &repo, nil
|
||||
}
|
||||
|
||||
// ListRepositories lists all repositories
|
||||
func (s *LVMService) ListRepositories(ctx context.Context) ([]Repository, error) {
|
||||
query := `
|
||||
SELECT id, name, description, volume_group, logical_volume,
|
||||
size_bytes, used_bytes, filesystem_type, mount_point,
|
||||
is_active, warning_threshold_percent, critical_threshold_percent,
|
||||
created_at, updated_at, created_by
|
||||
FROM disk_repositories
|
||||
ORDER BY name
|
||||
`
|
||||
|
||||
rows, err := s.db.QueryContext(ctx, query)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list repositories: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var repos []Repository
|
||||
for rows.Next() {
|
||||
var repo Repository
|
||||
err := rows.Scan(
|
||||
&repo.ID, &repo.Name, &repo.Description, &repo.VolumeGroup,
|
||||
&repo.LogicalVolume, &repo.SizeBytes, &repo.UsedBytes,
|
||||
&repo.FilesystemType, &repo.MountPoint, &repo.IsActive,
|
||||
&repo.WarningThresholdPercent, &repo.CriticalThresholdPercent,
|
||||
&repo.CreatedAt, &repo.UpdatedAt, &repo.CreatedBy,
|
||||
)
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to scan repository", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Update used bytes from actual filesystem
|
||||
s.updateRepositoryUsage(ctx, &repo)
|
||||
repos = append(repos, repo)
|
||||
}
|
||||
|
||||
return repos, rows.Err()
|
||||
}
|
||||
|
||||
// updateRepositoryUsage updates repository usage from filesystem
|
||||
func (s *LVMService) updateRepositoryUsage(ctx context.Context, repo *Repository) {
|
||||
// Use df to get filesystem usage (if mounted)
|
||||
// For now, use lvs to get actual size
|
||||
cmd := exec.CommandContext(ctx, "lvs", "--units=b", "--noheadings", "--nosuffix", "-o", "lv_size,data_percent", fmt.Sprintf("%s/%s", repo.VolumeGroup, repo.LogicalVolume))
|
||||
output, err := cmd.Output()
|
||||
if err == nil {
|
||||
fields := strings.Fields(string(output))
|
||||
if len(fields) >= 1 {
|
||||
if size, err := strconv.ParseInt(fields[0], 10, 64); err == nil {
|
||||
repo.SizeBytes = size
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update in database
|
||||
s.db.ExecContext(ctx, `
|
||||
UPDATE disk_repositories SET used_bytes = $1, updated_at = NOW() WHERE id = $2
|
||||
`, repo.UsedBytes, repo.ID)
|
||||
}
|
||||
|
||||
// DeleteRepository deletes a repository
|
||||
func (s *LVMService) DeleteRepository(ctx context.Context, id string) error {
|
||||
repo, err := s.GetRepository(ctx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if repo.IsActive {
|
||||
return fmt.Errorf("cannot delete active repository")
|
||||
}
|
||||
|
||||
// Remove logical volume
|
||||
cmd := exec.CommandContext(ctx, "lvremove", "-f", fmt.Sprintf("%s/%s", repo.VolumeGroup, repo.LogicalVolume))
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove logical volume: %s: %w", string(output), err)
|
||||
}
|
||||
|
||||
// Delete from database
|
||||
_, err = s.db.ExecContext(ctx, "DELETE FROM disk_repositories WHERE id = $1", id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete repository from database: %w", err)
|
||||
}
|
||||
|
||||
s.logger.Info("Repository deleted", "id", id, "name", repo.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user