package storage

import (
	"context"
	"database/sql"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/atlasos/calypso/internal/common/database"
	"github.com/atlasos/calypso/internal/common/logger"
	"github.com/lib/pq"
)

// ZFSService handles ZFS pool management
type ZFSService struct {
	db     *database.DB
	logger *logger.Logger
}

// NewZFSService creates a new ZFS service
func NewZFSService(db *database.DB, log *logger.Logger) *ZFSService {
	return &ZFSService{
		db:     db,
		logger: log,
	}
}

// ZFSPool represents a ZFS pool
type ZFSPool struct {
	ID            string    `json:"id"`
	Name          string    `json:"name"`
	Description   string    `json:"description"`
	RaidLevel     string    `json:"raid_level"` // stripe, mirror, raidz, raidz2, raidz3
	Disks         []string  `json:"disks"`       // device paths
	SpareDisks    []string  `json:"spare_disks"` // spare disk paths
	SizeBytes     int64     `json:"size_bytes"`
	UsedBytes     int64     `json:"used_bytes"`
	Compression   string    `json:"compression"` // off, lz4, zstd, gzip
	Deduplication bool      `json:"deduplication"`
	AutoExpand    bool      `json:"auto_expand"`
	ScrubInterval int       `json:"scrub_interval"` // days
	IsActive      bool      `json:"is_active"`
	HealthStatus  string    `json:"health_status"`  // online, degraded, faulted, offline
	CompressRatio float64   `json:"compress_ratio"` // compression ratio (e.g., 1.45x)
	CreatedAt     time.Time `json:"created_at"`
	UpdatedAt     time.Time `json:"updated_at"`
	CreatedBy     string    `json:"created_by"`
}
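
// Note: SpareDisks and CompressRatio are not persisted in the zfs_pools table;
// ListPools and GetPool fill SpareDisks from `zpool status`, and ListPools fills
// CompressRatio from `zfs get compressratio`.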

// CreatePool creates a new ZFS pool
func (s *ZFSService) CreatePool(ctx context.Context, name string, raidLevel string, disks []string, compression string, deduplication bool, autoExpand bool, createdBy string) (*ZFSPool, error) {
	// Validate inputs
	if name == "" {
		return nil, fmt.Errorf("pool name is required")
	}
	if len(disks) == 0 {
		return nil, fmt.Errorf("at least one disk is required")
	}

	// Validate RAID level
	validRaidLevels := map[string]int{
		"stripe": 1,
		"mirror": 2,
		"raidz":  3,
		"raidz2": 4,
		"raidz3": 5,
	}
	minDisks, ok := validRaidLevels[raidLevel]
	if !ok {
		return nil, fmt.Errorf("invalid RAID level: %s", raidLevel)
	}
	if len(disks) < minDisks {
		return nil, fmt.Errorf("RAID level %s requires at least %d disks, got %d", raidLevel, minDisks, len(disks))
	}

	// Check if pool already exists
	var existingID string
	err := s.db.QueryRowContext(ctx,
		"SELECT id FROM zfs_pools WHERE name = $1",
		name,
	).Scan(&existingID)
	if err == nil {
		return nil, fmt.Errorf("pool with name %s already exists", name)
	} else if err != sql.ErrNoRows {
		// Check if table exists - if not, this is a migration issue
		if strings.Contains(err.Error(), "does not exist") || strings.Contains(err.Error(), "relation") {
			return nil, fmt.Errorf("zfs_pools table does not exist - please run database migrations")
		}
		return nil, fmt.Errorf("failed to check existing pool: %w", err)
	}

	// Check if disks are available (not used)
	for _, diskPath := range disks {
		var isUsed bool
		err := s.db.QueryRowContext(ctx,
			"SELECT is_used FROM physical_disks WHERE device_path = $1",
			diskPath,
		).Scan(&isUsed)
		if err == sql.ErrNoRows {
			// Disk not in database, that's okay - we'll still try to use it
			s.logger.Warn("Disk not found in database, will attempt to use anyway", "disk", diskPath)
		} else if err != nil {
			return nil, fmt.Errorf("failed to check disk %s: %w", diskPath, err)
		} else if isUsed {
			return nil, fmt.Errorf("disk %s is already in use", diskPath)
		}
	}

	// Build zpool create command
	var args []string
	args = append(args, "create", "-f") // -f to force creation

	// Note: compression is a filesystem property, not a pool property
	// We'll set it after pool creation using zfs set

	// Add deduplication property (dedup is also a filesystem property, so pass it
	// with -O to apply it to the pool's root dataset at creation time)
	if deduplication {
		args = append(args, "-O", "dedup=on")
	}

	// Add autoexpand property (this IS a pool property)
	if autoExpand {
		args = append(args, "-o", "autoexpand=on")
	}

	// Add pool name
	args = append(args, name)

	// Add RAID level and disks
	switch raidLevel {
	case "stripe":
		// Simple stripe: just list all disks
		args = append(args, disks...)
	case "mirror":
		// Mirror: group disks in pairs
		if len(disks)%2 != 0 {
			return nil, fmt.Errorf("mirror requires an even number of disks")
		}
		for i := 0; i < len(disks); i += 2 {
			args = append(args, "mirror", disks[i], disks[i+1])
		}
	case "raidz":
		args = append(args, "raidz")
		args = append(args, disks...)
	case "raidz2":
		args = append(args, "raidz2")
		args = append(args, disks...)
	case "raidz3":
		args = append(args, "raidz3")
		args = append(args, disks...)
	}
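
	// Illustrative example of the assembled command (pool name and devices are made up):
	//   zpool create -f -O dedup=on tank mirror /dev/sdb /dev/sdc mirror /dev/sdd /dev/sde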

	// Execute zpool create
	s.logger.Info("Creating ZFS pool", "name", name, "raid_level", raidLevel, "disks", disks, "args", args)
	cmd := exec.CommandContext(ctx, "zpool", args...)
	output, err := cmd.CombinedOutput()
	if err != nil {
		errorMsg := string(output)
		s.logger.Error("Failed to create ZFS pool", "name", name, "error", err, "output", errorMsg)
		return nil, fmt.Errorf("failed to create ZFS pool: %s", errorMsg)
	}

	s.logger.Info("ZFS pool created successfully", "name", name, "output", string(output))

	// Set filesystem properties (compression, etc.) after pool creation
	// ZFS creates a root filesystem with the same name as the pool
	if compression != "" && compression != "off" {
		cmd = exec.CommandContext(ctx, "zfs", "set", fmt.Sprintf("compression=%s", compression), name)
		output, err = cmd.CombinedOutput()
		if err != nil {
			s.logger.Warn("Failed to set compression property", "pool", name, "compression", compression, "error", string(output))
			// Don't fail pool creation if compression setting fails, just log warning
		} else {
			s.logger.Info("Compression property set", "pool", name, "compression", compression)
		}
	}

	// Get pool information
	poolInfo, err := s.getPoolInfo(ctx, name)
	if err != nil {
		// Try to destroy the pool if we can't get info
		s.logger.Warn("Failed to get pool info, attempting to destroy pool", "name", name, "error", err)
		exec.CommandContext(ctx, "zpool", "destroy", "-f", name).Run()
		return nil, fmt.Errorf("failed to get pool info after creation: %w", err)
	}

	// Mark disks as used
	for _, diskPath := range disks {
		_, err = s.db.ExecContext(ctx,
			"UPDATE physical_disks SET is_used = true, updated_at = NOW() WHERE device_path = $1",
			diskPath,
		)
		if err != nil {
			s.logger.Warn("Failed to mark disk as used", "disk", diskPath, "error", err)
		}
	}

	// Insert into database
	query := `
		INSERT INTO zfs_pools (
			name, raid_level, disks, size_bytes, used_bytes,
			compression, deduplication, auto_expand, scrub_interval,
			is_active, health_status, created_by
		) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
		RETURNING id, created_at, updated_at
	`

	var pool ZFSPool
	err = s.db.QueryRowContext(ctx, query,
		name, raidLevel, pq.Array(disks), poolInfo.SizeBytes, poolInfo.UsedBytes,
		compression, deduplication, autoExpand, 30, // default scrub interval 30 days
		true, "online", createdBy,
	).Scan(&pool.ID, &pool.CreatedAt, &pool.UpdatedAt)
	if err != nil {
		// Cleanup: destroy pool if database insert fails
		s.logger.Error("Failed to save pool to database, destroying pool", "name", name, "error", err)
		exec.CommandContext(ctx, "zpool", "destroy", "-f", name).Run()
		return nil, fmt.Errorf("failed to save pool to database: %w", err)
	}

	pool.Name = name
	pool.RaidLevel = raidLevel
	pool.Disks = disks
	pool.SizeBytes = poolInfo.SizeBytes
	pool.UsedBytes = poolInfo.UsedBytes
	pool.Compression = compression
	pool.Deduplication = deduplication
	pool.AutoExpand = autoExpand
	pool.ScrubInterval = 30
	pool.IsActive = true
	pool.HealthStatus = "online"
	pool.CreatedBy = createdBy

	s.logger.Info("ZFS pool created", "name", name, "raid_level", raidLevel, "disks", len(disks))
	return &pool, nil
}

// getPoolInfo retrieves information about a ZFS pool
func (s *ZFSService) getPoolInfo(ctx context.Context, poolName string) (*ZFSPool, error) {
	// Get pool size and used space
	cmd := exec.CommandContext(ctx, "zpool", "list", "-H", "-o", "name,size,allocated", poolName)
	output, err := cmd.CombinedOutput()
	if err != nil {
		errorMsg := string(output)
		s.logger.Error("Failed to get pool info", "pool", poolName, "error", err, "output", errorMsg)
		return nil, fmt.Errorf("failed to get pool info: %s", errorMsg)
	}

	outputStr := strings.TrimSpace(string(output))
	if outputStr == "" {
		return nil, fmt.Errorf("pool %s not found or empty output", poolName)
	}
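
	// The -H output is a single tab-separated line; illustrative example (values made up):
	//   tank	9.50G	1.20G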

	fields := strings.Fields(outputStr)
	if len(fields) < 3 {
		s.logger.Error("Unexpected zpool list output", "pool", poolName, "output", outputStr, "fields", len(fields))
		return nil, fmt.Errorf("unexpected zpool list output: %s (expected 3+ fields, got %d)", outputStr, len(fields))
	}

	// Parse size (format: 100G, 1T, etc.)
	sizeBytes, err := parseZFSSize(fields[1])
	if err != nil {
		return nil, fmt.Errorf("failed to parse pool size: %w", err)
	}

	usedBytes, err := parseZFSSize(fields[2])
	if err != nil {
		return nil, fmt.Errorf("failed to parse used size: %w", err)
	}

	return &ZFSPool{
		Name:      poolName,
		SizeBytes: sizeBytes,
		UsedBytes: usedBytes,
	}, nil
}

// parseZFSSize parses ZFS size strings like "100G", "1T", "500M"
func parseZFSSize(sizeStr string) (int64, error) {
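	// Examples (illustrative): "500M" -> 524288000 bytes, "1T" -> 1099511627776 bytes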
	sizeStr = strings.TrimSpace(sizeStr)
	if sizeStr == "" {
		return 0, nil
	}

	var multiplier int64 = 1
	lastChar := sizeStr[len(sizeStr)-1]
	if lastChar >= '0' && lastChar <= '9' {
		// No suffix, assume bytes
		var size int64
		_, err := fmt.Sscanf(sizeStr, "%d", &size)
		return size, err
	}

	switch strings.ToUpper(string(lastChar)) {
	case "K":
		multiplier = 1024
	case "M":
		multiplier = 1024 * 1024
	case "G":
		multiplier = 1024 * 1024 * 1024
	case "T":
		multiplier = 1024 * 1024 * 1024 * 1024
	case "P":
		multiplier = 1024 * 1024 * 1024 * 1024 * 1024
	default:
		return 0, fmt.Errorf("unknown size suffix: %c", lastChar)
	}

	// zpool/zfs report fractional values such as "9.50G", so parse the numeric
	// part as a float before applying the multiplier
	size, err := strconv.ParseFloat(sizeStr[:len(sizeStr)-1], 64)
	if err != nil {
		return 0, err
	}

	return int64(size * float64(multiplier)), nil
}

// getSpareDisks retrieves spare disks from zpool status
func (s *ZFSService) getSpareDisks(ctx context.Context, poolName string) ([]string, error) {
	cmd := exec.CommandContext(ctx, "zpool", "status", poolName)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return nil, fmt.Errorf("failed to get pool status: %w", err)
	}

	outputStr := string(output)
	var spareDisks []string

	// Parse spare disks from zpool status output
	// Format: spares\n sde AVAIL
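	// e.g. (illustrative layout; actual indentation comes from zpool status):
	//   spares
	//     sde    AVAIL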
	lines := strings.Split(outputStr, "\n")
	inSparesSection := false
	for _, line := range lines {
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, "spares") {
			inSparesSection = true
			continue
		}
		if inSparesSection {
			if line == "" || strings.HasPrefix(line, "errors:") || strings.HasPrefix(line, "config:") {
				break
			}
			// Extract disk name (e.g., "sde AVAIL" -> "sde")
			fields := strings.Fields(line)
			if len(fields) > 0 {
				diskName := fields[0]
				// Convert to full device path
				if !strings.HasPrefix(diskName, "/dev/") {
					diskName = "/dev/" + diskName
				}
				spareDisks = append(spareDisks, diskName)
			}
		}
	}

	return spareDisks, nil
}

// getCompressRatio gets the compression ratio from ZFS
func (s *ZFSService) getCompressRatio(ctx context.Context, poolName string) (float64, error) {
	cmd := exec.CommandContext(ctx, "zfs", "get", "-H", "-o", "value", "compressratio", poolName)
	output, err := cmd.Output()
	if err != nil {
		return 1.0, err
	}

	ratioStr := strings.TrimSpace(string(output))
	// Remove 'x' suffix if present (e.g., "1.45x" -> "1.45")
	ratioStr = strings.TrimSuffix(ratioStr, "x")

	ratio, err := strconv.ParseFloat(ratioStr, 64)
	if err != nil {
		return 1.0, err
	}

	return ratio, nil
}

// ListPools lists all ZFS pools
func (s *ZFSService) ListPools(ctx context.Context) ([]*ZFSPool, error) {
	query := `
		SELECT id, name, description, raid_level, disks, size_bytes, used_bytes,
		       compression, deduplication, auto_expand, scrub_interval,
		       is_active, health_status, created_at, updated_at, created_by
		FROM zfs_pools
		ORDER BY created_at DESC
	`

	rows, err := s.db.QueryContext(ctx, query)
	if err != nil {
		// Check if table exists
		errStr := err.Error()
		if strings.Contains(errStr, "does not exist") || strings.Contains(errStr, "relation") {
			return nil, fmt.Errorf("zfs_pools table does not exist - please run database migrations")
		}
		return nil, fmt.Errorf("failed to query pools: %w", err)
	}
	defer rows.Close()

	var pools []*ZFSPool
	for rows.Next() {
		var pool ZFSPool
		var description sql.NullString
		err := rows.Scan(
			&pool.ID, &pool.Name, &description, &pool.RaidLevel, pq.Array(&pool.Disks),
			&pool.SizeBytes, &pool.UsedBytes, &pool.Compression, &pool.Deduplication,
			&pool.AutoExpand, &pool.ScrubInterval, &pool.IsActive, &pool.HealthStatus,
			&pool.CreatedAt, &pool.UpdatedAt, &pool.CreatedBy,
		)
		if err != nil {
			s.logger.Error("Failed to scan pool row", "error", err)
			continue // Skip this pool instead of failing entire query
		}
		if description.Valid {
			pool.Description = description.String
		}

		// Get spare disks from zpool status
		spareDisks, err := s.getSpareDisks(ctx, pool.Name)
		if err != nil {
			s.logger.Warn("Failed to get spare disks", "pool", pool.Name, "error", err)
			pool.SpareDisks = []string{}
		} else {
			pool.SpareDisks = spareDisks
		}

		// Get compressratio from ZFS system
		compressRatio, err := s.getCompressRatio(ctx, pool.Name)
		if err != nil {
			s.logger.Warn("Failed to get compressratio", "pool", pool.Name, "error", err)
			pool.CompressRatio = 1.0 // Default to 1.0 if can't get ratio
		} else {
			pool.CompressRatio = compressRatio
		}

		pools = append(pools, &pool)
		s.logger.Debug("Added pool to list", "pool_id", pool.ID, "name", pool.Name, "compressratio", pool.CompressRatio)
	}

	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating pool rows: %w", err)
	}

	s.logger.Debug("Listed ZFS pools", "count", len(pools))
	return pools, nil
}

// GetPool retrieves a ZFS pool by ID
func (s *ZFSService) GetPool(ctx context.Context, poolID string) (*ZFSPool, error) {
	query := `
		SELECT id, name, description, raid_level, disks, size_bytes, used_bytes,
		       compression, deduplication, auto_expand, scrub_interval,
		       is_active, health_status, created_at, updated_at, created_by
		FROM zfs_pools
		WHERE id = $1
	`

	var pool ZFSPool
	var description sql.NullString
	err := s.db.QueryRowContext(ctx, query, poolID).Scan(
		&pool.ID, &pool.Name, &description, &pool.RaidLevel, pq.Array(&pool.Disks),
		&pool.SizeBytes, &pool.UsedBytes, &pool.Compression, &pool.Deduplication,
		&pool.AutoExpand, &pool.ScrubInterval, &pool.IsActive, &pool.HealthStatus,
		&pool.CreatedAt, &pool.UpdatedAt, &pool.CreatedBy,
	)
	if err == sql.ErrNoRows {
		return nil, fmt.Errorf("pool not found")
	}
	if err != nil {
		return nil, fmt.Errorf("failed to get pool: %w", err)
	}

	if description.Valid {
		pool.Description = description.String
	}

	// Get spare disks from zpool status
	spareDisks, err := s.getSpareDisks(ctx, pool.Name)
	if err != nil {
		s.logger.Warn("Failed to get spare disks", "pool", pool.Name, "error", err)
		pool.SpareDisks = []string{}
	} else {
		pool.SpareDisks = spareDisks
	}

	return &pool, nil
}

// DeletePool destroys a ZFS pool
func (s *ZFSService) DeletePool(ctx context.Context, poolID string) error {
	pool, err := s.GetPool(ctx, poolID)
	if err != nil {
		return err
	}

	// Destroy ZFS pool with -f flag to force destroy (works for both empty and non-empty pools)
	// The -f flag is needed to destroy pools even if they have datasets or are in use
	s.logger.Info("Destroying ZFS pool", "pool", pool.Name)
	cmd := exec.CommandContext(ctx, "zpool", "destroy", "-f", pool.Name)
	output, err := cmd.CombinedOutput()
	if err != nil {
		errorMsg := string(output)
		// Check if pool doesn't exist (might have been destroyed already)
		if strings.Contains(errorMsg, "no such pool") || strings.Contains(errorMsg, "cannot open") {
			s.logger.Warn("Pool does not exist in ZFS, continuing with database cleanup", "pool", pool.Name)
			// Continue with database cleanup even if pool doesn't exist
		} else {
			return fmt.Errorf("failed to destroy ZFS pool: %s: %w", errorMsg, err)
		}
	} else {
		s.logger.Info("ZFS pool destroyed successfully", "pool", pool.Name)
	}

	// Mark disks as unused
	for _, diskPath := range pool.Disks {
		_, err = s.db.ExecContext(ctx,
			"UPDATE physical_disks SET is_used = false, updated_at = NOW() WHERE device_path = $1",
			diskPath,
		)
		if err != nil {
			s.logger.Warn("Failed to mark disk as unused", "disk", diskPath, "error", err)
		}
	}

	// Delete from database
	_, err = s.db.ExecContext(ctx, "DELETE FROM zfs_pools WHERE id = $1", poolID)
	if err != nil {
		return fmt.Errorf("failed to delete pool from database: %w", err)
	}

	s.logger.Info("ZFS pool deleted", "name", pool.Name)
	return nil
}

// AddSpareDisk adds one or more spare disks to a ZFS pool
func (s *ZFSService) AddSpareDisk(ctx context.Context, poolID string, diskPaths []string) error {
	if len(diskPaths) == 0 {
		return fmt.Errorf("at least one disk must be specified")
	}

	// Get pool information
	pool, err := s.GetPool(ctx, poolID)
	if err != nil {
		return err
	}

	// Verify pool exists in ZFS and check if disks are already spare
	cmd := exec.CommandContext(ctx, "zpool", "status", pool.Name)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("pool %s does not exist in ZFS: %w", pool.Name, err)
	}
	outputStr := string(output)

	// Check if any disk is already a spare in this pool
	for _, diskPath := range diskPaths {
		// Extract just the device name (e.g., /dev/sde -> sde)
		diskName := strings.TrimPrefix(diskPath, "/dev/")
		if strings.Contains(outputStr, "spares") && strings.Contains(outputStr, diskName) {
			s.logger.Warn("Disk is already a spare in this pool", "disk", diskPath, "pool", pool.Name)
			// Don't return an error here - zpool add will report a failure if the disk is genuinely in use
		}
	}

	// Build zpool add command with spare option
	args := []string{"add", pool.Name, "spare"}
	args = append(args, diskPaths...)
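
	// Illustrative example of the assembled command (pool and devices are made up):
	//   zpool add tank spare /dev/sde /dev/sdf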

	// Execute zpool add
	s.logger.Info("Adding spare disks to ZFS pool", "pool", pool.Name, "disks", diskPaths)
	cmd = exec.CommandContext(ctx, "zpool", args...)
	output, err = cmd.CombinedOutput()
	if err != nil {
		errorMsg := string(output)
		s.logger.Error("Failed to add spare disks to ZFS pool", "pool", pool.Name, "disks", diskPaths, "error", err, "output", errorMsg)
		return fmt.Errorf("failed to add spare disks: %s", errorMsg)
	}

	s.logger.Info("Spare disks added successfully", "pool", pool.Name, "disks", diskPaths)

	// Mark disks as used
	for _, diskPath := range diskPaths {
		_, err = s.db.ExecContext(ctx,
			"UPDATE physical_disks SET is_used = true, updated_at = NOW() WHERE device_path = $1",
			diskPath,
		)
		if err != nil {
			s.logger.Warn("Failed to mark disk as used", "disk", diskPath, "error", err)
		}
	}

	// Update pool's updated_at timestamp
	_, err = s.db.ExecContext(ctx,
		"UPDATE zfs_pools SET updated_at = NOW() WHERE id = $1",
		poolID,
	)
	if err != nil {
		s.logger.Warn("Failed to update pool timestamp", "pool_id", poolID, "error", err)
	}

	return nil
}

// ZFSDataset represents a ZFS dataset
type ZFSDataset struct {
	Name            string    `json:"name"`
	Pool            string    `json:"pool"`
	Type            string    `json:"type"` // filesystem, volume, snapshot
	MountPoint      string    `json:"mount_point"`
	UsedBytes       int64     `json:"used_bytes"`
	AvailableBytes  int64     `json:"available_bytes"`
	ReferencedBytes int64     `json:"referenced_bytes"`
	Compression     string    `json:"compression"`
	Deduplication   string    `json:"deduplication"`
	Quota           int64     `json:"quota"` // -1 for unlimited
	Reservation     int64     `json:"reservation"`
	CreatedAt       time.Time `json:"created_at"`
}
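
// Note: for volume datasets, Quota carries the volume size (volsize); CreateDataset
// passes it as the -V size when the volume is created.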

// ListDatasets lists all datasets in a ZFS pool from database
func (s *ZFSService) ListDatasets(ctx context.Context, poolName string) ([]*ZFSDataset, error) {
	// Get datasets from database
	query := `
		SELECT name, pool_name, type, mount_point,
		       used_bytes, available_bytes, referenced_bytes,
		       compression, deduplication, quota, reservation,
		       created_at
		FROM zfs_datasets
		WHERE pool_name = $1
		ORDER BY name
	`

	rows, err := s.db.QueryContext(ctx, query, poolName)
	if err != nil {
		// If table doesn't exist, return empty list (migration not run yet)
		if strings.Contains(err.Error(), "does not exist") {
			s.logger.Warn("zfs_datasets table does not exist, returning empty list", "pool", poolName)
			return []*ZFSDataset{}, nil
		}
		return nil, fmt.Errorf("failed to list datasets from database: %w", err)
	}
	defer rows.Close()

	var datasets []*ZFSDataset
	for rows.Next() {
		var ds ZFSDataset
		var mountPoint sql.NullString

		err := rows.Scan(
			&ds.Name, &ds.Pool, &ds.Type, &mountPoint,
			&ds.UsedBytes, &ds.AvailableBytes, &ds.ReferencedBytes,
			&ds.Compression, &ds.Deduplication, &ds.Quota, &ds.Reservation,
			&ds.CreatedAt,
		)
		if err != nil {
			s.logger.Error("Failed to scan dataset row", "error", err)
			continue
		}

		// Handle nullable mount_point
		if mountPoint.Valid {
			ds.MountPoint = mountPoint.String
		} else {
			ds.MountPoint = "none"
		}

		datasets = append(datasets, &ds)
	}

	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("error iterating dataset rows: %w", err)
	}

	return datasets, nil
}

// CreateDatasetRequest represents a request to create a ZFS dataset
type CreateDatasetRequest struct {
	Name        string `json:"name"`        // Dataset name relative to the pool (e.g., "dataset"); CreateDataset prepends the pool name
	Type        string `json:"type"`        // "filesystem" or "volume"
	Compression string `json:"compression"` // off, lz4, zstd, gzip, etc.
	Quota       int64  `json:"quota"`       // -1 for unlimited
	Reservation int64  `json:"reservation"` // 0 for none
	MountPoint  string `json:"mount_point"` // Optional mount point
}

// CreateDataset creates a new ZFS dataset
func (s *ZFSService) CreateDataset(ctx context.Context, poolName string, req CreateDatasetRequest) (*ZFSDataset, error) {
	// Construct full dataset name
	fullName := poolName + "/" + req.Name

	// For filesystem datasets, create mount directory if mount point is provided
	if req.Type == "filesystem" && req.MountPoint != "" {
		// Clean and validate mount point path
		mountPath := filepath.Clean(req.MountPoint)

		// Check if directory already exists
		if info, err := os.Stat(mountPath); err == nil {
			if !info.IsDir() {
				return nil, fmt.Errorf("mount point path exists but is not a directory: %s", mountPath)
			}
			// Directory exists, check if it's empty
			dir, err := os.Open(mountPath)
			if err == nil {
				entries, err := dir.Readdirnames(1)
				dir.Close()
				if err == nil && len(entries) > 0 {
					s.logger.Warn("Mount directory is not empty", "path", mountPath)
					// Continue anyway, ZFS will mount over it
				}
			}
		} else if os.IsNotExist(err) {
			// Create directory with proper permissions (0755)
			s.logger.Info("Creating mount directory", "path", mountPath)
			if err := os.MkdirAll(mountPath, 0755); err != nil {
				return nil, fmt.Errorf("failed to create mount directory %s: %w", mountPath, err)
			}
			s.logger.Info("Mount directory created successfully", "path", mountPath)
		} else {
			return nil, fmt.Errorf("failed to check mount directory %s: %w", mountPath, err)
		}
	}

	// Build zfs create command
	// zfs create expects all options (-o, -V) before the dataset name, so collect
	// the options first and append the dataset name last
	args := []string{"create"}

	// Set compression
	if req.Compression != "" && req.Compression != "off" {
		args = append(args, "-o", fmt.Sprintf("compression=%s", req.Compression))
	}

	// Set mount point if provided (only for filesystems, not volumes)
	if req.Type == "filesystem" && req.MountPoint != "" {
		args = append(args, "-o", fmt.Sprintf("mountpoint=%s", req.MountPoint))
	}

	// Add type if volume
	if req.Type == "volume" {
		// For volumes, we need size (use quota as size)
		if req.Quota <= 0 {
			return nil, fmt.Errorf("volume size (quota) must be specified and greater than 0")
		}
		args = append(args, "-V", fmt.Sprintf("%d", req.Quota), fullName)
	} else {
		// For filesystems
		args = append(args, fullName)
	}
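
	// Illustrative examples of the assembled command (names and sizes are made up):
	//   zfs create -o compression=lz4 -o mountpoint=/mnt/data tank/data
	//   zfs create -V 10737418240 tank/vol01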

	// Execute zfs create
	s.logger.Info("Creating ZFS dataset", "name", fullName, "type", req.Type)
	cmd := exec.CommandContext(ctx, "zfs", args...)
	output, err := cmd.CombinedOutput()
	if err != nil {
		errorMsg := string(output)
		s.logger.Error("Failed to create dataset", "name", fullName, "error", err, "output", errorMsg)
		return nil, fmt.Errorf("failed to create dataset: %s", errorMsg)
	}

	// Set quota if specified (for filesystems)
	if req.Type == "filesystem" && req.Quota > 0 {
		quotaCmd := exec.CommandContext(ctx, "zfs", "set", fmt.Sprintf("quota=%d", req.Quota), fullName)
		if quotaOutput, err := quotaCmd.CombinedOutput(); err != nil {
			s.logger.Warn("Failed to set quota", "dataset", fullName, "error", err, "output", string(quotaOutput))
		}
	}

	// Set reservation if specified
	if req.Reservation > 0 {
		resvCmd := exec.CommandContext(ctx, "zfs", "set", fmt.Sprintf("reservation=%d", req.Reservation), fullName)
		if resvOutput, err := resvCmd.CombinedOutput(); err != nil {
			s.logger.Warn("Failed to set reservation", "dataset", fullName, "error", err, "output", string(resvOutput))
		}
	}

	// Get pool ID from pool name
	var poolID string
	err = s.db.QueryRowContext(ctx, "SELECT id FROM zfs_pools WHERE name = $1", poolName).Scan(&poolID)
	if err != nil {
		s.logger.Error("Failed to get pool ID", "pool", poolName, "error", err)
		// Try to destroy the dataset if we can't save to database
		exec.CommandContext(ctx, "zfs", "destroy", "-r", fullName).Run()
		return nil, fmt.Errorf("failed to get pool ID: %w", err)
	}

	// Get dataset info from ZFS to save to database
	cmd = exec.CommandContext(ctx, "zfs", "list", "-H", "-o", "name,used,avail,refer,compress,dedup,quota,reservation,mountpoint", fullName)
	output, err = cmd.CombinedOutput()
	if err != nil {
		s.logger.Error("Failed to get dataset info", "name", fullName, "error", err)
		// Try to destroy the dataset if we can't get info
		exec.CommandContext(ctx, "zfs", "destroy", "-r", fullName).Run()
		return nil, fmt.Errorf("failed to get dataset info: %w", err)
	}

	// Parse dataset info
	lines := strings.TrimSpace(string(output))
	if lines == "" {
		exec.CommandContext(ctx, "zfs", "destroy", "-r", fullName).Run()
		return nil, fmt.Errorf("dataset not found after creation")
	}
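
	// Expected column order (from the -o list above): name used avail refer compress
	// dedup quota reservation mountpoint; e.g. (illustrative):
	//   tank/data  96K  9.20G  96K  lz4  off  none  none  /mnt/data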

	fields := strings.Fields(lines)
	if len(fields) < 9 {
		exec.CommandContext(ctx, "zfs", "destroy", "-r", fullName).Run()
		return nil, fmt.Errorf("invalid dataset info format")
	}

	usedBytes, _ := parseZFSSize(fields[1])
	availableBytes, _ := parseZFSSize(fields[2])
	referencedBytes, _ := parseZFSSize(fields[3])
	compression := fields[4]
	deduplication := fields[5]
	quotaStr := fields[6]
	reservationStr := fields[7]
	mountPoint := fields[8]

	// Determine dataset type
	datasetType := req.Type
	typeCmd := exec.CommandContext(ctx, "zfs", "get", "-H", "-o", "value", "type", fullName)
	if typeOutput, err := typeCmd.Output(); err == nil {
		volType := strings.TrimSpace(string(typeOutput))
		if volType == "volume" {
			datasetType = "volume"
		} else if strings.Contains(volType, "snapshot") {
			datasetType = "snapshot"
		}
	}

	// Parse quota
	quota := int64(-1)
	if datasetType == "volume" {
		// For volumes, get volsize
		volsizeCmd := exec.CommandContext(ctx, "zfs", "get", "-H", "-o", "value", "volsize", fullName)
		if volsizeOutput, err := volsizeCmd.Output(); err == nil {
			volsizeStr := strings.TrimSpace(string(volsizeOutput))
			if volsizeStr != "-" && volsizeStr != "none" {
				if vs, err := parseZFSSize(volsizeStr); err == nil {
					quota = vs
				}
			}
		}
	} else if quotaStr != "-" && quotaStr != "none" {
		if q, err := parseZFSSize(quotaStr); err == nil {
			quota = q
		}
	}

	// Parse reservation
	reservation := int64(0)
	if reservationStr != "-" && reservationStr != "none" {
		if r, err := parseZFSSize(reservationStr); err == nil {
			reservation = r
		}
	}

	// Normalize mount point for volumes
	if datasetType == "volume" && mountPoint == "-" {
		mountPoint = "none"
	}

	// Get creation time
	createdAt := time.Now()
	creationCmd := exec.CommandContext(ctx, "zfs", "get", "-H", "-o", "value", "creation", fullName)
	if creationOutput, err := creationCmd.Output(); err == nil {
		creationStr := strings.TrimSpace(string(creationOutput))
		if t, err := time.Parse("Mon Jan 2 15:04:05 2006", creationStr); err == nil {
			createdAt = t
		} else if t, err := time.Parse("Mon Jan _2 15:04 2006", creationStr); err == nil {
			// zfs commonly prints creation without seconds; this layout is an assumption
			// and the fallback to time.Now() below still applies if nothing matches
			createdAt = t
		} else if t, err := time.Parse(time.RFC3339, creationStr); err == nil {
			createdAt = t
		}
	}

	// Save to database (works for both filesystem and volume datasets)
	// Volume datasets are stored in the same zfs_datasets table with type='volume'
	insertQuery := `
		INSERT INTO zfs_datasets (
			name, pool_id, pool_name, type, mount_point,
			used_bytes, available_bytes, referenced_bytes,
			compression, deduplication, quota, reservation,
			created_at, updated_at
		) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, NOW())
		RETURNING id
	`

	var datasetID string
	err = s.db.QueryRowContext(ctx, insertQuery,
		fullName, poolID, poolName, datasetType, mountPoint,
		usedBytes, availableBytes, referencedBytes,
		compression, deduplication, quota, reservation,
		createdAt,
	).Scan(&datasetID)
	if err != nil {
		s.logger.Error("Failed to save dataset to database", "name", fullName, "error", err)
		// Try to destroy the dataset if we can't save to database
		exec.CommandContext(ctx, "zfs", "destroy", "-r", fullName).Run()
		return nil, fmt.Errorf("failed to save dataset to database: %w", err)
	}

	// Return dataset info
	dataset := &ZFSDataset{
		Name:            fullName,
		Pool:            poolName,
		Type:            datasetType,
		MountPoint:      mountPoint,
		UsedBytes:       usedBytes,
		AvailableBytes:  availableBytes,
		ReferencedBytes: referencedBytes,
		Compression:     compression,
		Deduplication:   deduplication,
		Quota:           quota,
		Reservation:     reservation,
		CreatedAt:       createdAt,
	}

	s.logger.Info("ZFS dataset created and saved to database", "name", fullName, "id", datasetID)
	return dataset, nil
}

// DeleteDataset deletes a ZFS dataset
func (s *ZFSService) DeleteDataset(ctx context.Context, datasetName string) error {
	// Check if dataset exists and get its mount point before deletion
	var mountPoint string
	cmd := exec.CommandContext(ctx, "zfs", "list", "-H", "-o", "name,mountpoint", datasetName)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("dataset %s does not exist: %w", datasetName, err)
	}

	lines := strings.TrimSpace(string(output))
	if lines == "" {
		return fmt.Errorf("dataset %s not found", datasetName)
	}

	// Parse output to get mount point
	fields := strings.Fields(lines)
	if len(fields) >= 2 {
		mountPoint = fields[1]
	}

	// Get dataset type to determine if we should clean up mount directory
	var datasetType string
	typeCmd := exec.CommandContext(ctx, "zfs", "get", "-H", "-o", "value", "type", datasetName)
	typeOutput, err := typeCmd.Output()
	if err == nil {
		datasetType = strings.TrimSpace(string(typeOutput))
	}

	// Delete from database first (before ZFS deletion, so we have the record)
	// This ensures we can clean up even if ZFS deletion partially fails
	// Works for both filesystem and volume datasets
	deleteQuery := "DELETE FROM zfs_datasets WHERE name = $1"
	result, err := s.db.ExecContext(ctx, deleteQuery, datasetName)
	if err != nil {
		s.logger.Warn("Failed to delete dataset from database (may not exist)", "name", datasetName, "error", err)
		// Continue with ZFS deletion anyway
	} else {
		rowsAffected, _ := result.RowsAffected()
		if rowsAffected > 0 {
			s.logger.Info("Dataset removed from database", "name", datasetName)
		}
	}

	// Delete the dataset from ZFS (use -r for recursive to delete children)
	s.logger.Info("Deleting ZFS dataset", "name", datasetName, "mountpoint", mountPoint)
	cmd = exec.CommandContext(ctx, "zfs", "destroy", "-r", datasetName)
	output, err = cmd.CombinedOutput()
	if err != nil {
		errorMsg := string(output)
		s.logger.Error("Failed to delete dataset", "name", datasetName, "error", err, "output", errorMsg)
		return fmt.Errorf("failed to delete dataset: %s", errorMsg)
	}

	// Clean up mount directory if it exists and is a filesystem dataset
	// Only remove if mount point is not "-" (volumes) and not "none" or "legacy"
	if datasetType == "filesystem" && mountPoint != "" && mountPoint != "-" && mountPoint != "none" && mountPoint != "legacy" {
		mountPath := filepath.Clean(mountPoint)

		// Check if directory exists
		if info, err := os.Stat(mountPath); err == nil && info.IsDir() {
			// Check if directory is empty
			dir, err := os.Open(mountPath)
			if err == nil {
				// Read all names; an empty slice with a nil error means the directory is empty
				entries, err := dir.Readdirnames(-1)
				dir.Close()

				// Only remove if directory is empty
				if err == nil && len(entries) == 0 {
					s.logger.Info("Removing empty mount directory", "path", mountPath)
					if err := os.Remove(mountPath); err != nil {
						s.logger.Warn("Failed to remove mount directory", "path", mountPath, "error", err)
						// Don't fail the deletion if we can't remove the directory
					} else {
						s.logger.Info("Mount directory removed successfully", "path", mountPath)
					}
				} else {
					s.logger.Info("Mount directory is not empty, keeping it", "path", mountPath)
				}
			}
		}
	}

	s.logger.Info("ZFS dataset deleted successfully", "name", datasetName)
	return nil
}