working on storage dashboard
This commit is contained in:
@@ -30,19 +30,20 @@ func NewDiskService(db *database.DB, log *logger.Logger) *DiskService {
|
||||
|
||||
// PhysicalDisk represents a physical disk discovered on the system.
// The diff residue in the original duplicated every field (pre- and
// post-change versions were merged); this is the post-change struct.
type PhysicalDisk struct {
	ID           string `json:"id"`
	DevicePath   string `json:"device_path"`
	Vendor       string `json:"vendor"`
	Model        string `json:"model"`
	SerialNumber string `json:"serial_number"`
	SizeBytes    int64  `json:"size_bytes"`
	SectorSize   int    `json:"sector_size"`
	IsSSD        bool   `json:"is_ssd"`
	// HealthStatus is a coarse status string; HealthDetails carries the
	// raw attributes backing it (e.g. SMART data).
	HealthStatus  string                 `json:"health_status"`
	HealthDetails map[string]interface{} `json:"health_details"`
	// IsUsed is true when the disk belongs to a volume group or ZFS pool.
	IsUsed         bool      `json:"is_used"`
	AttachedToPool string    `json:"attached_to_pool"` // Pool name if disk is used in a ZFS pool
	CreatedAt      time.Time `json:"created_at"`
	UpdatedAt      time.Time `json:"updated_at"`
}
|
||||
|
||||
// DiscoverDisks discovers physical disks on the system
|
||||
@@ -74,6 +75,13 @@ func (s *DiskService) DiscoverDisks(ctx context.Context) ([]PhysicalDisk, error)
|
||||
}
|
||||
|
||||
devicePath := "/dev/" + device.Name
|
||||
|
||||
// Skip OS disk (disk that has root or boot partition)
|
||||
if s.isOSDisk(ctx, devicePath) {
|
||||
s.logger.Debug("Skipping OS disk", "device", devicePath)
|
||||
continue
|
||||
}
|
||||
|
||||
disk, err := s.getDiskInfo(ctx, devicePath)
|
||||
if err != nil {
|
||||
s.logger.Warn("Failed to get disk info", "device", devicePath, "error", err)
|
||||
@@ -131,9 +139,16 @@ func (s *DiskService) getDiskInfo(ctx context.Context, devicePath string) (*Phys
|
||||
disk.SectorSize = sectorSize
|
||||
}
|
||||
|
||||
// Check if disk is in use (part of a volume group)
|
||||
// Check if disk is in use (part of a volume group or ZFS pool)
|
||||
disk.IsUsed = s.isDiskInUse(ctx, devicePath)
|
||||
|
||||
// Check if disk is used in a ZFS pool
|
||||
poolName := s.getZFSPoolForDisk(ctx, devicePath)
|
||||
if poolName != "" {
|
||||
disk.IsUsed = true
|
||||
disk.AttachedToPool = poolName
|
||||
}
|
||||
|
||||
// Get health status (simplified - would use smartctl in production)
|
||||
disk.HealthStatus = "healthy" // Placeholder
|
||||
|
||||
@@ -160,6 +175,87 @@ func (s *DiskService) isDiskInUse(ctx context.Context, devicePath string) bool {
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// getZFSPoolForDisk checks if a disk is used in a ZFS pool and returns the pool name
|
||||
func (s *DiskService) getZFSPoolForDisk(ctx context.Context, devicePath string) string {
|
||||
// Extract device name (e.g., /dev/sde -> sde)
|
||||
deviceName := strings.TrimPrefix(devicePath, "/dev/")
|
||||
|
||||
// Get all ZFS pools
|
||||
cmd := exec.CommandContext(ctx, "zpool", "list", "-H", "-o", "name")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
pools := strings.Split(strings.TrimSpace(string(output)), "\n")
|
||||
for _, poolName := range pools {
|
||||
if poolName == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check pool status for this device
|
||||
statusCmd := exec.CommandContext(ctx, "zpool", "status", poolName)
|
||||
statusOutput, err := statusCmd.Output()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
statusStr := string(statusOutput)
|
||||
// Check if device is in the pool (as data disk or spare)
|
||||
if strings.Contains(statusStr, deviceName) {
|
||||
return poolName
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// isOSDisk checks if a disk is used as OS disk (has root or boot partition)
|
||||
func (s *DiskService) isOSDisk(ctx context.Context, devicePath string) bool {
|
||||
// Extract device name (e.g., /dev/sda -> sda)
|
||||
deviceName := strings.TrimPrefix(devicePath, "/dev/")
|
||||
|
||||
// Check if any partition of this disk is mounted as root or boot
|
||||
// Use lsblk to get mount points for this device and its children
|
||||
cmd := exec.CommandContext(ctx, "lsblk", "-n", "-o", "NAME,MOUNTPOINT", devicePath)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
lines := strings.Split(string(output), "\n")
|
||||
for _, line := range lines {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) >= 2 {
|
||||
mountPoint := fields[1]
|
||||
// Check if mounted as root or boot
|
||||
if mountPoint == "/" || mountPoint == "/boot" || mountPoint == "/boot/efi" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Also check all partitions of this disk using lsblk with recursive listing
|
||||
partCmd := exec.CommandContext(ctx, "lsblk", "-n", "-o", "NAME,MOUNTPOINT", "-l")
|
||||
partOutput, err := partCmd.Output()
|
||||
if err == nil {
|
||||
partLines := strings.Split(string(partOutput), "\n")
|
||||
for _, line := range partLines {
|
||||
if strings.HasPrefix(line, deviceName) {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) >= 2 {
|
||||
mountPoint := fields[1]
|
||||
if mountPoint == "/" || mountPoint == "/boot" || mountPoint == "/boot/efi" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// SyncDisksToDatabase syncs discovered disks to the database
|
||||
func (s *DiskService) SyncDisksToDatabase(ctx context.Context) error {
|
||||
disks, err := s.DiscoverDisks(ctx)
|
||||
|
||||
Reference in New Issue
Block a user