fix storage pool, datasets, volume and disk
Some checks failed
CI / test-build (push) Has been cancelled

This commit is contained in:
2025-12-20 02:18:51 +00:00
parent 8029bcfa15
commit 98bedf6487
333 changed files with 352 additions and 64 deletions

View File

@@ -50,6 +50,70 @@ func findCommandPath(cmd string) string {
return cmd
}
// translateZFSError converts technical ZFS error messages to user-friendly ones
func translateZFSError(err error, operation, name string) error {
if err == nil {
return nil
}
errStr := err.Error()
// Extract the actual ZFS error message (usually after the last colon or in quotes)
// Common patterns:
// - "cannot open 'name': no such pool"
// - "cannot open 'name': dataset does not exist"
// - "cannot destroy 'name': dataset is busy"
// Check for common ZFS error patterns
if strings.Contains(errStr, "no such pool") {
return fmt.Errorf("pool '%s' tidak ditemukan. Pastikan nama pool benar dan pool sudah dibuat", name)
}
if strings.Contains(errStr, "dataset does not exist") || strings.Contains(errStr, "no such dataset") {
return fmt.Errorf("dataset atau volume '%s' tidak ditemukan. Pastikan nama benar dan sudah dibuat", name)
}
if strings.Contains(errStr, "dataset is busy") {
return fmt.Errorf("dataset atau volume '%s' sedang digunakan. Tutup semua koneksi atau unmount terlebih dahulu", name)
}
if strings.Contains(errStr, "operation does not apply to pools") {
return fmt.Errorf("operasi ini tidak dapat diterapkan pada pool. Gunakan 'Delete Pool' untuk menghapus pool, atau 'Delete Dataset' untuk menghapus dataset di dalam pool")
}
if strings.Contains(errStr, "cannot destroy") {
// Try to extract the reason
if strings.Contains(errStr, "has children") {
return fmt.Errorf("tidak dapat menghapus '%s' karena masih memiliki dataset atau snapshot di dalamnya. Hapus semua dataset/snapshot terlebih dahulu, atau gunakan opsi recursive", name)
}
if strings.Contains(errStr, "is busy") {
return fmt.Errorf("tidak dapat menghapus '%s' karena sedang digunakan. Tutup semua koneksi atau unmount terlebih dahulu", name)
}
return fmt.Errorf("tidak dapat menghapus '%s'. Pastikan tidak sedang digunakan dan tidak memiliki dataset/snapshot di dalamnya", name)
}
if strings.Contains(errStr, "cannot open") {
return fmt.Errorf("tidak dapat mengakses '%s'. Pastikan nama benar dan resource sudah ada", name)
}
// If no pattern matches, return a cleaner version of the error
// Remove technical details like "sudo failed", "direct execution also failed", etc.
if strings.Contains(errStr, "sudo failed") || strings.Contains(errStr, "direct execution") {
// Extract just the ZFS error part
parts := strings.Split(errStr, ":")
if len(parts) > 1 {
// Get the last meaningful part (usually the actual ZFS error)
lastPart := strings.TrimSpace(parts[len(parts)-1])
if lastPart != "" {
return fmt.Errorf("gagal %s '%s': %s", operation, name, lastPart)
}
}
}
// Fallback: return a user-friendly message with the operation
return fmt.Errorf("gagal %s '%s': %s", operation, name, errStr)
}
// execCommand executes a shell command and returns output
// For ZFS operations that require elevated privileges, it uses sudo
func (s *Service) execCommand(name string, args ...string) (string, error) {
@@ -96,15 +160,16 @@ func (s *Service) execCommand(name string, args ...string) (string, error) {
log.Printf("direct command execution succeeded (without sudo)")
return strings.TrimSpace(directStdout.String()), nil
}
// Both sudo and direct failed, return detailed error
// Both sudo and direct failed, return the direct error (usually cleaner)
log.Printf("both sudo and direct execution failed - sudo error: %v, direct error: %v", err, directErr)
log.Printf("sudo stderr: %s, direct stderr: %s", stderr.String(), directStderr.String())
return "", fmt.Errorf("%s: sudo failed (%v: %s), direct execution also failed (%v: %s)", name, err, stderr.String(), directErr, directStderr.String())
// Return the direct error (usually has the actual ZFS error message)
return "", fmt.Errorf("%s: %s", name, strings.TrimSpace(directStderr.String()))
}
if err != nil {
log.Printf("command execution failed: %s %v (error: %v, stderr: %s)", name, args, err, stderr.String())
return "", fmt.Errorf("%s: %v: %s", name, err, stderr.String())
return "", fmt.Errorf("%s: %s", name, strings.TrimSpace(stderr.String()))
}
return strings.TrimSpace(stdout.String()), nil
@@ -213,14 +278,14 @@ func (s *Service) CreatePool(name string, vdevs []string, options map[string]str
log.Printf("warning: failed to pre-create mountpoint %s: %v (continuing anyway)", mountpoint, err)
}
}
// handle canmount as DATASET property not the vdev property
canmount := "noauto"
// handle canmount as DATASET property not the vdev property
canmount := "noauto"
if v, ok := options["canmount"]; ok && v != "" {
canmount = v
}
delete(options, "canmount")
delete(options, "canmount")
// Set canmount=noauto to prevent automatic mounting during creation
// This allows pool creation to succeed even if mountpoint can't be created
@@ -239,8 +304,8 @@ func (s *Service) CreatePool(name string, vdevs []string, options map[string]str
args = append(args, "-o", fmt.Sprintf("%s=%s", k, v))
}
args = append(args, "-O", fmt.Sprintf("canmount=%s", canmount))
args = append(args, "-O", fmt.Sprintf("canmount=%s", canmount))
args = append(args, name)
// Normalize vdev paths - ensure they start with /dev/ if they don't already
@@ -661,7 +726,10 @@ func (s *Service) DestroyDataset(name string, recursive bool) error {
}
args = append(args, name)
_, err := s.execCommand(s.zfsPath, args...)
return err
if err != nil {
return translateZFSError(err, "menghapus dataset", name)
}
return nil
}
// ListZVOLs returns all ZVOLs
@@ -735,7 +803,55 @@ func (s *Service) CreateZVOL(name string, size uint64, options map[string]string
// DestroyZVOL destroys a ZVOL.
//
// The rendered diff left both the old `return err` and its replacement in
// the body, making the translation path unreachable dead code; this emits
// only the intended post-change logic.
func (s *Service) DestroyZVOL(name string) error {
	_, err := s.execCommand(s.zfsPath, "destroy", name)
	if err != nil {
		// Surface ZFS failures as user-friendly messages instead of raw output.
		return translateZFSError(err, "menghapus volume", name)
	}
	return nil
}
// getUsedDisks returns a set of disk names that are currently used in ZFS pools.
// On any listing failure it logs a warning and returns what it has so far
// (possibly empty) rather than failing the caller.
func (s *Service) getUsedDisks() map[string]bool {
	used := make(map[string]bool)

	pools, err := s.ListPools()
	if err != nil {
		log.Printf("warning: failed to list pools to check disk usage: %v", err)
		return used
	}

	// Device-name prefixes recognized as disks in `zpool status` output.
	diskPrefixes := []string{"sd", "hd", "nvme"}

	for _, pool := range pools {
		// `zpool status <pool>` lists vdev member devices one per line,
		// e.g. "    sdb  ONLINE  0  0  0".
		statusOut, err := s.execCommand(s.zpoolPath, "status", pool.Name)
		if err != nil {
			log.Printf("warning: failed to get status for pool %s: %v", pool.Name, err)
			continue
		}
		for _, raw := range strings.Split(statusOut, "\n") {
			line := strings.TrimSpace(raw)
			// Skip blanks and header/metadata rows of the status listing.
			if line == "" || strings.HasPrefix(line, "NAME") || strings.HasPrefix(line, "state:") || strings.HasPrefix(line, "pool:") {
				continue
			}
			fields := strings.Fields(line)
			if len(fields) == 0 {
				continue
			}
			device := fields[0]
			for _, prefix := range diskPrefixes {
				if strings.HasPrefix(device, prefix) {
					used[device] = true
					break
				}
			}
		}
	}
	return used
}
// ListDisks returns available disks (read-only)
@@ -761,14 +877,36 @@ func (s *Service) ListDisks() ([]map[string]string, error) {
return nil, err
}
// Get list of disks currently used in pools
usedDisks := s.getUsedDisks()
var disks []map[string]string
for _, dev := range result.BlockDevices {
if dev.Type == "disk" && dev.FSType == "" && dev.Mountpoint == "" {
disks = append(disks, map[string]string{
"name": dev.Name,
"size": dev.Size,
"path": "/dev/" + dev.Name,
})
// Skip OS disk (sda) - typically the first disk used for OS installation
if dev.Name == "sda" {
continue
}
// Check if disk is used in a pool
isUsed := usedDisks[dev.Name]
// Include all disks (both available and used) so we can show status
if dev.Type == "disk" {
disk := map[string]string{
"name": dev.Name,
"size": dev.Size,
"path": "/dev/" + dev.Name,
"status": "available",
}
if isUsed {
disk["status"] = "unavailable"
} else if dev.FSType != "" || dev.Mountpoint != "" {
// Disk has filesystem or mountpoint (not suitable for ZFS pool)
disk["status"] = "unavailable"
}
disks = append(disks, disk)
}
}