// atlas/internal/zfs/service.go

package zfs
import (
"bytes"
"encoding/json"
"fmt"
"log"
"os"
"os/exec"
"strconv"
"strings"
"time"
"gitea.avt.data-center.id/othman.suseno/atlas/internal/models"
)
// Service provides ZFS operations
type Service struct {
zfsPath string
zpoolPath string
}
// New creates a new ZFS service
func New() *Service {
// Find full paths to zfs and zpool commands
zfsPath := findCommandPath("zfs")
zpoolPath := findCommandPath("zpool")
return &Service{
zfsPath: zfsPath,
zpoolPath: zpoolPath,
}
}
// findCommandPath finds the full path to a command
func findCommandPath(cmd string) string {
// Try which first
if output, err := exec.Command("which", cmd).Output(); err == nil {
path := strings.TrimSpace(string(output))
if path != "" {
return path
}
}
// Try LookPath
if path, err := exec.LookPath(cmd); err == nil {
return path
}
// Fallback to command name (will use PATH)
return cmd
}
// translateZFSError converts technical ZFS error messages to user-friendly ones
func translateZFSError(err error, operation, name string) error {
if err == nil {
return nil
}
errStr := err.Error()
// Extract the actual ZFS error message (usually after the last colon or in quotes)
// Common patterns:
// - "cannot open 'name': no such pool"
// - "cannot open 'name': dataset does not exist"
// - "cannot destroy 'name': dataset is busy"
// Check for common ZFS error patterns
if strings.Contains(errStr, "no such pool") {
return fmt.Errorf("pool '%s' tidak ditemukan. Pastikan nama pool benar dan pool sudah dibuat", name)
}
if strings.Contains(errStr, "dataset already exists") {
return fmt.Errorf("dataset '%s' sudah ada. Gunakan nama yang berbeda atau hapus dataset yang sudah ada terlebih dahulu", name)
}
if strings.Contains(errStr, "dataset does not exist") || strings.Contains(errStr, "no such dataset") {
return fmt.Errorf("dataset atau volume '%s' tidak ditemukan. Pastikan nama benar dan sudah dibuat", name)
}
if strings.Contains(errStr, "dataset is busy") {
return fmt.Errorf("dataset atau volume '%s' sedang digunakan. Tutup semua koneksi atau unmount terlebih dahulu", name)
}
if strings.Contains(errStr, "operation does not apply to pools") {
return fmt.Errorf("operasi ini tidak dapat diterapkan pada pool. Gunakan 'Delete Pool' untuk menghapus pool, atau 'Delete Dataset' untuk menghapus dataset di dalam pool")
}
if strings.Contains(errStr, "cannot destroy") {
// Try to extract the reason
if strings.Contains(errStr, "has children") {
return fmt.Errorf("tidak dapat menghapus '%s' karena masih memiliki dataset atau snapshot di dalamnya. Hapus semua dataset/snapshot terlebih dahulu, atau gunakan opsi recursive", name)
}
if strings.Contains(errStr, "is busy") {
return fmt.Errorf("tidak dapat menghapus '%s' karena sedang digunakan. Tutup semua koneksi atau unmount terlebih dahulu", name)
}
return fmt.Errorf("tidak dapat menghapus '%s'. Pastikan tidak sedang digunakan dan tidak memiliki dataset/snapshot di dalamnya", name)
}
if strings.Contains(errStr, "cannot open") {
return fmt.Errorf("tidak dapat mengakses '%s'. Pastikan nama benar dan resource sudah ada", name)
}
// If no pattern matches, return a cleaner version of the error
// Remove technical details like "sudo failed", "direct execution also failed", etc.
if strings.Contains(errStr, "sudo failed") || strings.Contains(errStr, "direct execution") {
// Extract just the ZFS error part
parts := strings.Split(errStr, ":")
if len(parts) > 1 {
// Get the last meaningful part (usually the actual ZFS error)
lastPart := strings.TrimSpace(parts[len(parts)-1])
if lastPart != "" {
return fmt.Errorf("gagal %s '%s': %s", operation, name, lastPart)
}
}
}
// Fallback: return a user-friendly message with the operation
return fmt.Errorf("gagal %s '%s': %s", operation, name, errStr)
}
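// Illustrative mapping: a raw ZFS error such as
//
//   cannot destroy 'tank/data': dataset is busy
//
// is surfaced to the user as
//
//   dataset or volume 'tank/data' is in use. Close all connections or unmount it first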
// execCommand executes a shell command and returns output
// For ZFS operations that require elevated privileges, it uses sudo
func (s *Service) execCommand(name string, args ...string) (string, error) {
// Commands that require root privileges
privilegedCommands := []string{"zpool", "zfs"}
useSudo := false
for _, cmd := range privilegedCommands {
if strings.Contains(name, cmd) {
useSudo = true
break
}
}
var cmd *exec.Cmd
if useSudo {
// Use sudo -n (non-interactive) for privileged commands
// This prevents password prompts and will fail if sudoers is not configured
sudoArgs := append([]string{"-n", name}, args...)
cmd = exec.Command("sudo", sudoArgs...)
} else {
cmd = exec.Command(name, args...)
}
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil && useSudo {
// Log that sudo failed
log.Printf("sudo command failed, trying direct execution: %s %v (error: %v, stderr: %s)", name, args, err, stderr.String())
// If sudo failed, try running the command directly
// (user might already have permissions or be root)
directCmd := exec.Command(name, args...)
var directStdout, directStderr bytes.Buffer
directCmd.Stdout = &directStdout
directCmd.Stderr = &directStderr
directErr := directCmd.Run()
if directErr == nil {
// Direct execution succeeded, return that result
log.Printf("direct command execution succeeded (without sudo)")
return strings.TrimSpace(directStdout.String()), nil
}
// Both sudo and direct failed, return the direct error (usually cleaner)
log.Printf("both sudo and direct execution failed - sudo error: %v, direct error: %v", err, directErr)
log.Printf("sudo stderr: %s, direct stderr: %s", stderr.String(), directStderr.String())
// Return the direct error (usually has the actual ZFS error message)
return "", fmt.Errorf("%s: %s", name, strings.TrimSpace(directStderr.String()))
}
if err != nil {
log.Printf("command execution failed: %s %v (error: %v, stderr: %s)", name, args, err, stderr.String())
return "", fmt.Errorf("%s: %s", name, strings.TrimSpace(stderr.String()))
}
return strings.TrimSpace(stdout.String()), nil
}
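// For the "sudo -n" path above to work, the service account needs passwordless
// sudo for the ZFS binaries. A minimal sudoers sketch, assuming a hypothetical
// service user "atlas" and binaries in /sbin (adjust both for the deployment):
//
//   atlas ALL=(root) NOPASSWD: /sbin/zfs, /sbin/zpool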
// ListPools returns all ZFS pools
func (s *Service) ListPools() ([]models.Pool, error) {
output, err := s.execCommand(s.zpoolPath, "list", "-H", "-o", "name,size,allocated,free,health")
if err != nil {
// Return empty slice instead of nil to ensure JSON encodes as [] not null
return []models.Pool{}, err
}
pools := []models.Pool{}
lines := strings.Split(output, "\n")
for _, line := range lines {
if line == "" {
continue
}
fields := strings.Fields(line)
if len(fields) < 5 {
continue
}
pool := models.Pool{
Name: fields[0],
Status: "ONLINE", // Default, will be updated from health
Health: fields[4],
}
// Parse sizes (handles K, M, G, T suffixes)
if size, err := parseSize(fields[1]); err == nil {
pool.Size = size
}
if allocated, err := parseSize(fields[2]); err == nil {
pool.Allocated = allocated
}
if free, err := parseSize(fields[3]); err == nil {
pool.Free = free
}
// Get pool status
status, _ := s.execCommand(s.zpoolPath, "status", "-x", pool.Name)
// "zpool status -x <pool>" prints "pool '<name>' is healthy" for a healthy pool;
// the "all pools are healthy" form only appears when no pool is named
if strings.Contains(status, "is healthy") || strings.Contains(status, "all pools are healthy") {
pool.Status = "ONLINE"
} else if strings.Contains(status, "DEGRADED") {
pool.Status = "DEGRADED"
} else if strings.Contains(status, "FAULTED") {
pool.Status = "FAULTED"
}
// Get creation time; OpenZFS typically prints it ctime-style without seconds,
// so collapse padding spaces and try both layouts
created, _ := s.execCommand(s.zfsPath, "get", "-H", "-o", "value", "creation", pool.Name)
created = strings.Join(strings.Fields(created), " ")
if t, err := time.Parse("Mon Jan 2 15:04 2006", created); err == nil {
pool.CreatedAt = t
} else if t, err := time.Parse("Mon Jan 2 15:04:05 2006", created); err == nil {
pool.CreatedAt = t
}
pools = append(pools, pool)
}
return pools, nil
}
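// The loop above parses tab-separated lines from
// "zpool list -H -o name,size,allocated,free,health", e.g. (illustrative):
//
//   tank    928G    412G    516G    ONLINE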
// GetPool returns a specific pool
func (s *Service) GetPool(name string) (*models.Pool, error) {
pools, err := s.ListPools()
if err != nil {
return nil, err
}
for _, pool := range pools {
if pool.Name == name {
return &pool, nil
}
}
return nil, fmt.Errorf("pool %s not found", name)
}
// GetPoolDetail returns detailed pool information from zpool status
func (s *Service) GetPoolDetail(name string) (*models.PoolDetail, error) {
output, err := s.execCommand(s.zpoolPath, "status", name)
if err != nil {
return nil, fmt.Errorf("failed to get pool status: %w", err)
}
detail := &models.PoolDetail{
Name: name,
VDEVs: []models.VDEV{},
Spares: []string{},
}
lines := strings.Split(output, "\n")
var currentVDEV *models.VDEV
inConfig := false
inSpares := false
for _, line := range lines {
line = strings.TrimSpace(line)
// Parse pool name and state
if strings.HasPrefix(line, "pool:") {
parts := strings.Fields(line)
if len(parts) > 1 {
detail.Name = parts[1]
}
continue
}
if strings.HasPrefix(line, "state:") {
parts := strings.Fields(line)
if len(parts) > 1 {
detail.State = parts[1]
detail.Status = strings.Join(parts[1:], " ")
}
continue
}
// Parse errors line
if strings.HasPrefix(line, "errors:") {
detail.Errors = strings.TrimPrefix(line, "errors:")
detail.Errors = strings.TrimSpace(detail.Errors)
continue
}
// Parse scrub information
if strings.Contains(line, "scrub") {
detail.ScrubInfo = line
continue
}
// Check if we're entering config section
if strings.HasPrefix(line, "config:") {
inConfig = true
continue
}
// Check if we're in spares section
if strings.Contains(line, "spares") {
inSpares = true
inConfig = false
continue
}
// Parse VDEV and disk information
if inConfig {
// Check if this is a VDEV header (indented but not a disk)
fields := strings.Fields(line)
if len(fields) >= 5 {
// Check if it's a VDEV type line; zpool status names these "mirror-0",
// "raidz1-0", etc., so match by prefix
if strings.HasPrefix(fields[0], "mirror") || strings.HasPrefix(fields[0], "raidz") || fields[0] == "log" || fields[0] == "cache" {
// Save previous VDEV if exists
if currentVDEV != nil {
detail.VDEVs = append(detail.VDEVs, *currentVDEV)
}
// Start new VDEV
currentVDEV = &models.VDEV{
Name: fields[0],
Type: fields[0],
State: "ONLINE",
Disks: []models.Disk{},
}
// Try to parse state if available
if len(fields) > 1 {
for _, field := range fields[1:] {
if field == "ONLINE" || field == "DEGRADED" || field == "FAULTED" || field == "OFFLINE" {
currentVDEV.State = field
break
}
}
}
continue
}
// Check if it's a disk line (starts with sd, hd, nvme, etc.)
diskName := fields[0]
if strings.HasPrefix(diskName, "sd") || strings.HasPrefix(diskName, "hd") || strings.HasPrefix(diskName, "nvme") {
// This is a disk
state := "ONLINE"
read := 0
write := 0
checksum := 0
if len(fields) > 1 {
state = fields[1]
}
if len(fields) > 2 {
if val, err := strconv.Atoi(fields[2]); err == nil {
read = val
}
}
if len(fields) > 3 {
if val, err := strconv.Atoi(fields[3]); err == nil {
write = val
}
}
if len(fields) > 4 {
if val, err := strconv.Atoi(fields[4]); err == nil {
checksum = val
}
}
disk := models.Disk{
Name: diskName,
State: state,
Read: read,
Write: write,
Checksum: checksum,
}
// If we have a current VDEV, add disk to it
if currentVDEV != nil {
currentVDEV.Disks = append(currentVDEV.Disks, disk)
// Update VDEV errors
currentVDEV.Read += read
currentVDEV.Write += write
currentVDEV.Checksum += checksum
} else {
// Standalone disk, create a VDEV for it
currentVDEV = &models.VDEV{
Name: diskName,
Type: "disk",
State: state,
Disks: []models.Disk{disk},
Read: read,
Write: write,
Checksum: checksum,
}
}
continue
}
}
}
// Parse spares section
if inSpares {
fields := strings.Fields(line)
if len(fields) > 0 {
diskName := fields[0]
if strings.HasPrefix(diskName, "sd") || strings.HasPrefix(diskName, "hd") || strings.HasPrefix(diskName, "nvme") {
detail.Spares = append(detail.Spares, diskName)
}
}
}
// Empty line might indicate end of section
if line == "" && currentVDEV != nil {
detail.VDEVs = append(detail.VDEVs, *currentVDEV)
currentVDEV = nil
}
}
// Save last VDEV if exists
if currentVDEV != nil {
detail.VDEVs = append(detail.VDEVs, *currentVDEV)
}
return detail, nil
}
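// GetPoolDetail expects a config section shaped roughly like this
// illustrative "zpool status" excerpt:
//
//   config:
//           NAME        STATE     READ WRITE CKSUM
//           tank        ONLINE       0     0     0
//             mirror-0  ONLINE       0     0     0
//               sdb     ONLINE       0     0     0
//               sdc     ONLINE       0     0     0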
// AddSpareDisk adds a spare disk to a pool
func (s *Service) AddSpareDisk(poolName, diskPath string) error {
args := []string{"add", poolName, "spare", diskPath}
_, err := s.execCommand(s.zpoolPath, args...)
if err != nil {
return translateZFSError(err, "menambahkan spare disk", poolName)
}
return nil
}
// CreatePool creates a new ZFS pool
func (s *Service) CreatePool(name string, vdevs []string, options map[string]string) error {
args := []string{"create"}
if options == nil {
options = make(map[string]string)
}
// Add -f flag to force creation even if devices have existing filesystems
// This handles cases where devices are "in use" or contain "unknown filesystem"
args = append(args, "-f")
// If mountpoint is not explicitly set, use dedicated storage directory
mountpoint := options["mountpoint"]
if mountpoint == "" {
// Default mountpoint: /storage/pools/{poolname}
mountpoint = "/storage/pools/" + name
options["mountpoint"] = mountpoint
}
// Pre-create the mountpoint directory with sudo (non-blocking - log errors but continue)
if mountpoint != "none" {
if err := s.createMountpointWithSudo(mountpoint); err != nil {
// Log the error but don't fail - ZFS might still create the pool
// The mountpoint can be fixed later if needed
log.Printf("warning: failed to pre-create mountpoint %s: %v (continuing anyway)", mountpoint, err)
}
}
// Handle canmount as a filesystem property (set with -O), not a pool-level -o option
canmount := "noauto"
if v, ok := options["canmount"]; ok && v != "" {
canmount = v
}
delete(options, "canmount")
// Defaulting canmount=noauto prevents automatic mounting during creation,
// so pool creation can succeed even if the mountpoint can't be created yet
// IMPORTANT: Don't set mountpoint during pool creation
// ZFS tries to mount immediately during creation, which can fail
// We'll set mountpoint after pool is created
mountpointOption := options["mountpoint"]
delete(options, "mountpoint") // Remove from options temporarily
// Add remaining options
for k, v := range options {
args = append(args, "-o", fmt.Sprintf("%s=%s", k, v))
}
args = append(args, "-O", fmt.Sprintf("canmount=%s", canmount))
args = append(args, name)
// Normalize vdev paths - ensure they start with /dev/ if they don't already
normalizedVdevs := make([]string, 0, len(vdevs))
for _, vdev := range vdevs {
vdev = strings.TrimSpace(vdev)
if vdev == "" {
continue
}
// If vdev doesn't start with /dev/ or /, assume it's a device name and add /dev/
if !strings.HasPrefix(vdev, "/dev/") && !strings.HasPrefix(vdev, "/") {
vdev = "/dev/" + vdev
}
normalizedVdevs = append(normalizedVdevs, vdev)
}
if len(normalizedVdevs) == 0 {
return fmt.Errorf("no valid vdevs provided after normalization")
}
args = append(args, normalizedVdevs...)
// Log the command we're about to run for debugging
// Note: execCommand will use sudo automatically for zpool commands
log.Printf("executing zpool create: %s %v", s.zpoolPath, args)
log.Printf("pool name: %s, original vdevs: %v, normalized vdevs: %v", name, vdevs, normalizedVdevs)
// Create the pool (without mountpoint to avoid mount errors)
createOutput, err := s.execCommand(s.zpoolPath, args...)
// Log the command output for debugging
if err != nil {
log.Printf("zpool create command failed - output: %s, error: %v", createOutput, err)
log.Printf("this error might be a false positive - checking if pool was actually created...")
} else {
log.Printf("zpool create command succeeded - output: %s", createOutput)
}
// CRITICAL: Always check if pool exists, even if creation reported an error
// ZFS often reports mountpoint errors but pool is still created successfully
// Retry checking pool existence up to 3 times with delays
poolExists := false
for i := 0; i < 3; i++ {
if i > 0 {
// Wait before retry (100ms, then 200ms)
time.Sleep(time.Duration(i*100) * time.Millisecond)
}
if existingPools, listErr := s.ListPools(); listErr == nil {
log.Printf("checking pool existence (attempt %d/%d): found %d pools", i+1, 3, len(existingPools))
for _, pool := range existingPools {
if pool.Name == name {
poolExists = true
log.Printf("pool %s found after %d check(s)", name, i+1)
break
}
}
} else {
log.Printf("warning: failed to list pools during existence check (attempt %d): %v", i+1, listErr)
}
if poolExists {
break
}
}
if poolExists {
// Pool exists! This is success, regardless of any reported errors
if err != nil {
log.Printf("info: pool %s created successfully despite reported error: %v", name, err)
} else {
log.Printf("info: pool %s created successfully", name)
}
// Clear error since pool was created
err = nil
} else if err != nil {
// Pool doesn't exist and we have an error - return it with full context
log.Printf("error: pool %s creation failed and pool does not exist", name)
log.Printf("error details: %v", err)
log.Printf("command that failed: %s %v", s.zpoolPath, args)
log.Printf("command output: %s", createOutput)
return fmt.Errorf("failed to create pool %s: %v (command: %s %v)", name, err, s.zpoolPath, args)
} else {
// No error reported but pool doesn't exist - this shouldn't happen
log.Printf("warning: pool %s creation reported no error but pool does not exist", name)
log.Printf("command output: %s", createOutput)
return fmt.Errorf("pool %s creation reported success but pool was not found (command: %s %v)", name, s.zpoolPath, args)
}
// Pool created successfully - now set mountpoint and mount if needed
if mountpoint != "none" && mountpointOption != "" && poolExists {
// Ensure mountpoint directory exists
if err := s.createMountpointWithSudo(mountpoint); err != nil {
log.Printf("warning: failed to create mountpoint %s: %v", mountpoint, err)
}
// Set mountpoint property on the root filesystem of the pool
setMountpointArgs := []string{"set", fmt.Sprintf("mountpoint=%s", mountpoint), name}
if _, setErr := s.execCommand(s.zfsPath, setMountpointArgs...); setErr != nil {
log.Printf("warning: failed to set mountpoint property: %v (pool created but not mounted)", setErr)
// Don't return error - pool is created successfully
} else {
// Try to mount the pool
mountArgs := []string{"mount", name}
if _, mountErr := s.execCommand(s.zfsPath, mountArgs...); mountErr != nil {
log.Printf("warning: failed to mount pool: %v (pool created but not mounted)", mountErr)
// Don't return error - pool is created successfully, just not mounted
}
}
}
return nil
}
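// Usage sketch (pool and device names are illustrative; bare names are
// normalized to /dev/<name> above):
//
//   svc := New()
//   err := svc.CreatePool("tank", []string{"sdb", "sdc"}, map[string]string{
//       "ashift": "12", // pool property, passed as -o ashift=12
//   })
//   // mountpoint defaults to /storage/pools/tank when not provided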
// createMountpointWithSudo creates a mountpoint directory using sudo
// This allows ZFS to mount pools even if root filesystem appears read-only
func (s *Service) createMountpointWithSudo(path string) error {
// Check if directory already exists
if _, err := os.Stat(path); err == nil {
// Directory already exists
return nil
}
// Create parent directories first
parentDir := ""
parts := strings.Split(path, "/")
if len(parts) > 1 {
// Build parent directory path (skip empty first part from leading /)
parentParts := []string{}
for i, part := range parts {
if i == 0 && part == "" {
continue // Skip leading empty part
}
if i < len(parts)-1 {
parentParts = append(parentParts, part)
}
}
if len(parentParts) > 0 {
parentDir = "/" + strings.Join(parentParts, "/")
if parentDir != "/" {
// Recursively create parent directories
if err := s.createMountpointWithSudo(parentDir); err != nil {
log.Printf("warning: failed to create parent directory %s: %v", parentDir, err)
}
}
}
}
// Use sudo to create the directory with proper permissions
// Try multiple methods to ensure directory is created
methods := []struct {
name string
cmd *exec.Cmd
}{
{"sudo mkdir", exec.Command("sudo", "-n", "mkdir", "-p", path)},
{"direct mkdir", exec.Command("mkdir", "-p", path)},
}
var lastErr error
for _, method := range methods {
var stderr bytes.Buffer
method.cmd.Stderr = &stderr
if err := method.cmd.Run(); err == nil {
// Success - verify directory was created and set permissions
if _, err := os.Stat(path); err == nil {
// Set proper permissions (755) and ownership if needed
chmodCmd := exec.Command("sudo", "-n", "chmod", "755", path)
_ = chmodCmd.Run() // Ignore errors, permissions might already be correct
return nil
}
} else {
lastErr = fmt.Errorf("%s failed: %v: %s", method.name, err, stderr.String())
log.Printf("warning: %s failed: %v", method.name, lastErr)
}
}
// All methods failed, but check if directory exists anyway (might have been created by ZFS or another process)
if _, err := os.Stat(path); err == nil {
return nil
}
return fmt.Errorf("all methods failed to create mountpoint %s: %v", path, lastErr)
}
// DestroyPool destroys a ZFS pool
func (s *Service) DestroyPool(name string) error {
_, err := s.execCommand(s.zpoolPath, "destroy", name)
return err
}
// ImportPool imports a ZFS pool
func (s *Service) ImportPool(name string, options map[string]string) error {
args := []string{"import"}
// Add options
for k, v := range options {
args = append(args, "-o", fmt.Sprintf("%s=%s", k, v))
}
args = append(args, name)
_, err := s.execCommand(s.zpoolPath, args...)
return err
}
// ExportPool exports a ZFS pool
func (s *Service) ExportPool(name string, force bool) error {
args := []string{"export"}
if force {
args = append(args, "-f")
}
args = append(args, name)
_, err := s.execCommand(s.zpoolPath, args...)
return err
}
// ListAvailablePools returns pools that can be imported
func (s *Service) ListAvailablePools() ([]string, error) {
output, err := s.execCommand(s.zpoolPath, "import")
if err != nil {
return nil, err
}
// Return empty slice instead of nil so JSON encodes as [] (matches the file's convention)
pools := []string{}
lines := strings.Split(output, "\n")
for _, line := range lines {
line = strings.TrimSpace(line)
if line == "" {
continue
}
// Parse pool name from output like "pool: tank"
if strings.HasPrefix(line, "pool:") {
parts := strings.Fields(line)
if len(parts) >= 2 {
pools = append(pools, parts[1])
}
}
}
return pools, nil
}
// ScrubPool starts a scrub operation on a pool
func (s *Service) ScrubPool(name string) error {
_, err := s.execCommand(s.zpoolPath, "scrub", name)
return err
}
// ScrubStatus represents detailed scrub operation status
type ScrubStatus struct {
Status string `json:"status"` // idle, in_progress, completed, error
Progress float64 `json:"progress"` // 0-100
TimeElapsed string `json:"time_elapsed"` // e.g., "2h 15m"
TimeRemain string `json:"time_remain"` // e.g., "30m"
Speed string `json:"speed"` // e.g., "100M/s"
Errors int `json:"errors"` // number of errors found
Repaired int `json:"repaired"` // number of errors repaired
LastScrub string `json:"last_scrub"` // timestamp of last completed scrub
}
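// Serialized JSON shape (field values are illustrative):
//
//   {
//     "status": "in_progress",
//     "progress": 45.2,
//     "time_elapsed": "2h15m",
//     "time_remain": "30m",
//     "speed": "100M/s",
//     "errors": 0,
//     "repaired": 0,
//     "last_scrub": ""
//   }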
// GetScrubStatus returns detailed scrub status with progress
func (s *Service) GetScrubStatus(name string) (*ScrubStatus, error) {
status := &ScrubStatus{
Status: "idle",
}
// Get pool status
output, err := s.execCommand(s.zpoolPath, "status", name)
if err != nil {
return nil, err
}
// Parse scrub information
lines := strings.Split(output, "\n")
inScrubSection := false
for _, line := range lines {
line = strings.TrimSpace(line)
// Check if scrub is in progress
if strings.Contains(line, "scrub in progress") {
status.Status = "in_progress"
inScrubSection = true
continue
}
// Check if scrub completed
if strings.Contains(line, "scrub repaired") || strings.Contains(line, "scrub completed") {
status.Status = "completed"
status.Progress = 100.0
// Extract repair information
if strings.Contains(line, "repaired") {
// Try to extract number of repairs
parts := strings.Fields(line)
for i, part := range parts {
if part == "repaired" && i > 0 {
// Previous part might be the number
if repaired, err := strconv.Atoi(parts[i-1]); err == nil {
status.Repaired = repaired
}
}
}
}
continue
}
// Parse progress percentage
if strings.Contains(line, "%") && inScrubSection {
// Extract percentage from line like "scan: 45.2% done"
parts := strings.Fields(line)
for _, part := range parts {
if strings.HasSuffix(part, "%") {
if pct, err := strconv.ParseFloat(strings.TrimSuffix(part, "%"), 64); err == nil {
status.Progress = pct
}
}
}
}
// Parse time elapsed
if strings.Contains(line, "elapsed") && inScrubSection {
// Extract time like "elapsed: 2h15m"
parts := strings.Fields(line)
for i, part := range parts {
if part == "elapsed:" && i+1 < len(parts) {
status.TimeElapsed = parts[i+1]
}
}
}
// Parse time remaining
if strings.Contains(line, "remaining") && inScrubSection {
parts := strings.Fields(line)
for i, part := range parts {
if part == "remaining:" && i+1 < len(parts) {
status.TimeRemain = parts[i+1]
}
}
}
// Parse speed
if strings.Contains(line, "scan rate") && inScrubSection {
parts := strings.Fields(line)
for i, part := range parts {
if part == "rate" && i+1 < len(parts) {
status.Speed = parts[i+1]
}
}
}
// Parse errors
if strings.Contains(line, "errors:") && inScrubSection {
parts := strings.Fields(line)
for i, part := range parts {
if part == "errors:" && i+1 < len(parts) {
if errs, err := strconv.Atoi(parts[i+1]); err == nil {
status.Errors = errs
}
}
}
}
}
// Get last scrub time from pool properties
lastScrub, err := s.execCommand(s.zfsPath, "get", "-H", "-o", "value", "lastscrub", name)
if err == nil && lastScrub != "-" && lastScrub != "" {
status.LastScrub = strings.TrimSpace(lastScrub)
}
return status, nil
}
// ListDatasets returns all datasets in a pool (or all if pool is empty)
func (s *Service) ListDatasets(pool string) ([]models.Dataset, error) {
args := []string{"list", "-H", "-o", "name,type,used,avail,mountpoint"}
if pool != "" {
args = append(args, "-r", pool)
} else {
args = append(args, "-r")
}
output, err := s.execCommand(s.zfsPath, args...)
if err != nil {
// Return empty slice instead of nil to ensure JSON encodes as [] not null
return []models.Dataset{}, err
}
datasets := []models.Dataset{}
lines := strings.Split(output, "\n")
for _, line := range lines {
if line == "" {
continue
}
fields := strings.Fields(line)
if len(fields) < 5 {
continue
}
fullName := fields[0]
parts := strings.Split(fullName, "/")
poolName := parts[0]
dataset := models.Dataset{
Name: fullName,
Pool: poolName,
Type: fields[1],
Mountpoint: fields[4],
}
if used, err := parseSize(fields[2]); err == nil {
dataset.Used = used
}
if avail, err := parseSize(fields[3]); err == nil {
dataset.Available = avail
}
dataset.Size = dataset.Used + dataset.Available
// Get creation time (see ListPools: try both with- and without-seconds layouts)
created, _ := s.execCommand(s.zfsPath, "get", "-H", "-o", "value", "creation", fullName)
created = strings.Join(strings.Fields(created), " ")
if t, err := time.Parse("Mon Jan 2 15:04 2006", created); err == nil {
dataset.CreatedAt = t
} else if t, err := time.Parse("Mon Jan 2 15:04:05 2006", created); err == nil {
dataset.CreatedAt = t
}
datasets = append(datasets, dataset)
}
return datasets, nil
}
// CreateDataset creates a new ZFS dataset
func (s *Service) CreateDataset(name string, options map[string]string) error {
args := []string{"create"}
if options == nil {
options = make(map[string]string)
}
// If mountpoint is not explicitly set, use dedicated storage directory
mountpoint := options["mountpoint"]
if mountpoint == "" {
// Extract dataset name (last part after /)
parts := strings.Split(name, "/")
datasetName := parts[len(parts)-1]
// Default mountpoint: /storage/datasets/{datasetname}
mountpoint = "/storage/datasets/" + datasetName
options["mountpoint"] = mountpoint
// Pre-create the mountpoint directory with sudo
_ = s.createMountpointWithSudo(mountpoint)
} else if mountpoint != "none" {
// Ensure mountpoint directory exists with sudo
_ = s.createMountpointWithSudo(mountpoint)
}
// Handle canmount property - set to "on" by default to allow mounting
canmount := "on"
if v, ok := options["canmount"]; ok && v != "" {
canmount = v
}
delete(options, "canmount")
// Add options
for k, v := range options {
args = append(args, "-o", fmt.Sprintf("%s=%s", k, v))
}
// Add canmount property
args = append(args, "-o", fmt.Sprintf("canmount=%s", canmount))
args = append(args, name)
_, err := s.execCommand(s.zfsPath, args...)
// CRITICAL: Always check if dataset exists, even if creation reported an error
// ZFS often reports mountpoint errors but dataset is still created successfully
// Retry checking dataset existence up to 3 times with delays
datasetExists := false
for i := 0; i < 3; i++ {
if i > 0 {
// Wait before retry (100ms, then 200ms)
time.Sleep(time.Duration(i*100) * time.Millisecond)
}
if existingDatasets, listErr := s.ListDatasets(""); listErr == nil {
log.Printf("checking dataset existence (attempt %d/%d): found %d datasets", i+1, 3, len(existingDatasets))
for _, ds := range existingDatasets {
if ds.Name == name {
datasetExists = true
log.Printf("dataset %s found after %d check(s)", name, i+1)
break
}
}
} else {
log.Printf("warning: failed to list datasets during existence check (attempt %d): %v", i+1, listErr)
}
if datasetExists {
break
}
}
if datasetExists {
// Dataset exists! This is success, regardless of any reported errors
if err != nil {
log.Printf("info: dataset %s created successfully despite reported error: %v", name, err)
} else {
log.Printf("info: dataset %s created successfully", name)
}
// Dataset created successfully - now set mountpoint and mount if needed
if mountpoint != "" && mountpoint != "none" {
// CRITICAL: Create parent directory first if it doesn't exist
// This is needed because ZFS can't create directories in read-only filesystems
parentDir := ""
parts := strings.Split(mountpoint, "/")
if len(parts) > 1 {
// Build parent directory path
parentDir = strings.Join(parts[:len(parts)-1], "/")
if parentDir == "" {
parentDir = "/"
}
log.Printf("ensuring parent directory exists: %s", parentDir)
_ = s.createMountpointWithSudo(parentDir)
}
// CRITICAL: Create mountpoint directory BEFORE setting mountpoint property
// ZFS will try to create it during mount, but may fail on read-only filesystem
// So we create it explicitly with sudo first
log.Printf("creating mountpoint directory: %s", mountpoint)
if err := s.createMountpointWithSudo(mountpoint); err != nil {
log.Printf("warning: failed to create mountpoint %s: %v (will try to continue)", mountpoint, err)
} else {
log.Printf("mountpoint directory created successfully: %s", mountpoint)
}
// Set mountpoint property on the dataset
log.Printf("setting mountpoint property: %s = %s", name, mountpoint)
setMountpointArgs := []string{"set", fmt.Sprintf("mountpoint=%s", mountpoint), name}
if _, setErr := s.execCommand(s.zfsPath, setMountpointArgs...); setErr != nil {
log.Printf("warning: failed to set mountpoint property: %v (dataset created but mountpoint not set)", setErr)
} else {
log.Printf("mountpoint property set successfully")
// Wait a moment for mountpoint to be registered
time.Sleep(200 * time.Millisecond)
// Ensure directory exists again (ZFS might have cleared it or parent might be read-only)
// Try creating parent and child directories
if parentDir != "" && parentDir != "/" {
_ = s.createMountpointWithSudo(parentDir)
}
_ = s.createMountpointWithSudo(mountpoint)
// Try to mount the dataset - ZFS will create the directory if it doesn't exist
// But we need to ensure parent is writable
log.Printf("attempting to mount dataset: %s", name)
mountArgs := []string{"mount", name}
if _, mountErr := s.execCommand(s.zfsPath, mountArgs...); mountErr != nil {
log.Printf("warning: failed to mount dataset: %v (dataset created but not mounted)", mountErr)
// If mount failed due to read-only filesystem, try using legacy mount
if strings.Contains(mountErr.Error(), "Read-only file system") || strings.Contains(mountErr.Error(), "read-only") {
log.Printf("detected read-only filesystem issue, trying alternative approach")
// Try to remount parent as rw if possible, or use a different mountpoint
// For now, just log the issue - user may need to manually mount
log.Printf("error: cannot mount dataset due to read-only filesystem at %s. You may need to manually mount it or fix filesystem permissions", mountpoint)
} else {
// Try one more time after a short delay for other errors
time.Sleep(300 * time.Millisecond)
if _, mountErr2 := s.execCommand(s.zfsPath, mountArgs...); mountErr2 != nil {
log.Printf("warning: second mount attempt also failed: %v", mountErr2)
} else {
log.Printf("info: dataset %s mounted successfully at %s (on second attempt)", name, mountpoint)
}
}
} else {
log.Printf("info: dataset %s mounted successfully at %s", name, mountpoint)
}
}
}
// Clear error since dataset was created
return nil
} else if err != nil {
// Dataset doesn't exist and we have an error - return translated error
log.Printf("error: dataset %s creation failed and dataset does not exist", name)
return translateZFSError(err, "membuat dataset", name)
} else {
// No error reported but dataset doesn't exist - this shouldn't happen
log.Printf("warning: dataset %s creation reported no error but dataset does not exist", name)
return translateZFSError(fmt.Errorf("dataset creation reported success but dataset was not found"), "create dataset", name)
}
}
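// Usage sketch (given svc := New(); dataset name and properties are illustrative):
//
//   err := svc.CreateDataset("tank/media", map[string]string{
//       "compression": "lz4",
//   })
//   // mountpoint defaults to /storage/datasets/media when not provided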
// DestroyDataset destroys a ZFS dataset
func (s *Service) DestroyDataset(name string, recursive bool) error {
// Always try to unmount first, regardless of mounted status
// This prevents "dataset is busy" errors
log.Printf("attempting to unmount dataset %s before destroy", name)
unmountArgs := []string{"umount", name}
unmountOutput, unmountErr := s.execCommand(s.zfsPath, unmountArgs...)
if unmountErr != nil {
log.Printf("regular unmount failed for %s: %v (output: %s), trying force unmount", name, unmountErr, unmountOutput)
// Try force unmount if regular unmount fails
forceUnmountArgs := []string{"umount", "-f", name}
forceOutput, forceErr := s.execCommand(s.zfsPath, forceUnmountArgs...)
if forceErr != nil {
log.Printf("warning: force unmount also failed for %s: %v (output: %s)", name, forceErr, forceOutput)
} else {
log.Printf("dataset %s force unmounted successfully", name)
}
} else {
log.Printf("dataset %s unmounted successfully", name)
}
// Wait a moment for unmount to complete
time.Sleep(300 * time.Millisecond)
// Now destroy the dataset
args := []string{"destroy"}
if recursive {
args = append(args, "-r")
}
args = append(args, name)
destroyOutput, err := s.execCommand(s.zfsPath, args...)
if err != nil {
log.Printf("first destroy attempt failed for %s: %v (output: %s)", name, err, destroyOutput)
// If destroy fails with "dataset is busy", try unmounting again and retry
if strings.Contains(err.Error(), "dataset is busy") || strings.Contains(err.Error(), "is busy") {
log.Printf("dataset %s still busy, trying force unmount again and retry destroy", name)
// Try force unmount again
forceUnmountArgs := []string{"umount", "-f", name}
_, _ = s.execCommand(s.zfsPath, forceUnmountArgs...)
time.Sleep(500 * time.Millisecond)
// Retry destroy
destroyOutput2, err2 := s.execCommand(s.zfsPath, args...)
if err2 != nil {
log.Printf("second destroy attempt also failed for %s: %v (output: %s)", name, err2, destroyOutput2)
return translateZFSError(err2, "menghapus dataset", name)
} else {
log.Printf("dataset %s destroyed successfully on second attempt", name)
return nil
}
}
return translateZFSError(err, "menghapus dataset", name)
}
log.Printf("dataset %s destroyed successfully", name)
return nil
}
// UpdateDataset updates ZFS dataset properties
func (s *Service) UpdateDataset(name string, quota string, compression string, options map[string]string) error {
// Update quota if provided
if quota != "" {
quotaValue := quota
if quota == "none" || quota == "0" {
quotaValue = "none"
}
args := []string{"set", fmt.Sprintf("quota=%s", quotaValue), name}
if _, err := s.execCommand(s.zfsPath, args...); err != nil {
return translateZFSError(err, "mengupdate quota dataset", name)
}
}
// Update compression if provided
if compression != "" {
args := []string{"set", fmt.Sprintf("compression=%s", compression), name}
if _, err := s.execCommand(s.zfsPath, args...); err != nil {
return translateZFSError(err, "mengupdate compression dataset", name)
}
}
// Update other options if provided
if options != nil {
for key, value := range options {
args := []string{"set", fmt.Sprintf("%s=%s", key, value), name}
if _, err := s.execCommand(s.zfsPath, args...); err != nil {
return translateZFSError(err, fmt.Sprintf("mengupdate property %s dataset", key), name)
}
}
}
return nil
}
// ListZVOLs returns all ZVOLs
func (s *Service) ListZVOLs(pool string) ([]models.ZVOL, error) {
args := []string{"list", "-H", "-o", "name,volsize,used", "-t", "volume"}
if pool != "" {
args = append(args, "-r", pool)
} else {
args = append(args, "-r")
}
output, err := s.execCommand(s.zfsPath, args...)
if err != nil {
// Return empty slice instead of nil to ensure JSON encodes as [] not null
return []models.ZVOL{}, err
}
zvols := []models.ZVOL{}
lines := strings.Split(output, "\n")
for _, line := range lines {
if line == "" {
continue
}
fields := strings.Fields(line)
if len(fields) < 3 {
continue
}
fullName := fields[0]
parts := strings.Split(fullName, "/")
poolName := parts[0]
zvol := models.ZVOL{
Name: fullName,
Pool: poolName,
}
if size, err := parseSize(fields[1]); err == nil {
zvol.Size = size
}
if used, err := parseSize(fields[2]); err == nil {
zvol.Used = used
}
// Get creation time (try both with- and without-seconds layouts)
created, _ := s.execCommand(s.zfsPath, "get", "-H", "-o", "value", "creation", fullName)
created = strings.Join(strings.Fields(created), " ")
if t, err := time.Parse("Mon Jan 2 15:04 2006", created); err == nil {
zvol.CreatedAt = t
} else if t, err := time.Parse("Mon Jan 2 15:04:05 2006", created); err == nil {
zvol.CreatedAt = t
}
zvols = append(zvols, zvol)
}
return zvols, nil
}
// CreateZVOL creates a new ZVOL
func (s *Service) CreateZVOL(name string, size uint64, options map[string]string) error {
args := []string{"create", "-V", fmt.Sprintf("%d", size)}
for k, v := range options {
args = append(args, "-o", fmt.Sprintf("%s=%s", k, v))
}
args = append(args, name)
_, err := s.execCommand(s.zfsPath, args...)
return err
}
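// The size argument is in bytes (passed as "-V <bytes>"). Usage sketch
// (volume name and properties are illustrative):
//
//   // 10 GiB volume with a 64K block size
//   err := svc.CreateZVOL("tank/vol1", 10*1024*1024*1024, map[string]string{
//       "volblocksize": "64K",
//   })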
// DestroyZVOL destroys a ZVOL
func (s *Service) DestroyZVOL(name string) error {
_, err := s.execCommand(s.zfsPath, "destroy", name)
if err != nil {
return translateZFSError(err, "menghapus volume", name)
}
return nil
}
// getUsedDisks returns a set of disk names that are currently used in ZFS pools
func (s *Service) getUsedDisks() map[string]bool {
usedDisks := make(map[string]bool)
// Get all pools
pools, err := s.ListPools()
if err != nil {
log.Printf("warning: failed to list pools to check disk usage: %v", err)
return usedDisks
}
// For each pool, get the status to see which disks are used
for _, pool := range pools {
// Get pool status which shows vdevs (disks)
statusOutput, err := s.execCommand(s.zpoolPath, "status", pool.Name)
if err != nil {
log.Printf("warning: failed to get status for pool %s: %v", pool.Name, err)
continue
}
// Parse status output to find disk names
// Format: lines like " sdb ONLINE 0 0 0"
lines := strings.Split(statusOutput, "\n")
for _, line := range lines {
line = strings.TrimSpace(line)
// Skip empty lines and headers
if line == "" || strings.HasPrefix(line, "NAME") || strings.HasPrefix(line, "state:") || strings.HasPrefix(line, "pool:") {
continue
}
// Extract disk name (first field after indentation)
fields := strings.Fields(line)
if len(fields) > 0 {
diskName := fields[0]
// Check if it's a disk device (starts with sd, hd, nvme, etc.)
if strings.HasPrefix(diskName, "sd") || strings.HasPrefix(diskName, "hd") || strings.HasPrefix(diskName, "nvme") {
usedDisks[diskName] = true
}
}
}
}
return usedDisks
}
// ListDisks returns available disks (read-only)
func (s *Service) ListDisks() ([]map[string]string, error) {
// Use lsblk to list block devices
output, err := s.execCommand("lsblk", "-J", "-o", "name,size,type,fstype,mountpoint")
if err != nil {
return nil, err
}
var result struct {
BlockDevices []struct {
Name string `json:"name"`
Size string `json:"size"`
Type string `json:"type"`
FSType string `json:"fstype"`
Mountpoint string `json:"mountpoint"`
Children []interface{} `json:"children"`
} `json:"blockdevices"`
}
if err := json.Unmarshal([]byte(output), &result); err != nil {
return nil, err
}
// Get list of disks currently used in pools
usedDisks := s.getUsedDisks()
// Return empty slice instead of nil so JSON encodes as [] not null
disks := []map[string]string{}
for _, dev := range result.BlockDevices {
// Skip OS disk (sda) - typically the first disk used for OS installation
if dev.Name == "sda" {
continue
}
// Skip virtual disks (ZVOLs) - these are zd* devices
// Must check BEFORE checking dev.Type == "disk" because zd* devices
// are reported as type "disk" by lsblk
if strings.HasPrefix(dev.Name, "zd") {
log.Printf("debug: skipping virtual disk %s (zd* device)", dev.Name)
continue
}
// Include all physical disks (both available and used) so we can show status
// Only include actual disk devices (not partitions, loops, etc.)
if dev.Type == "disk" {
// Check if disk is used in a pool
isUsed := usedDisks[dev.Name]
disk := map[string]string{
"name": dev.Name,
"size": dev.Size,
"path": "/dev/" + dev.Name,
"status": "available",
}
if isUsed {
disk["status"] = "unavailable"
} else if dev.FSType != "" || dev.Mountpoint != "" {
// Disk has filesystem or mountpoint (not suitable for ZFS pool)
disk["status"] = "unavailable"
}
// Get SMART health info
healthInfo := s.getDiskHealth(dev.Name)
if healthInfo != nil {
disk["health_status"] = healthInfo["status"]
disk["health_temperature"] = healthInfo["temperature"]
disk["health_power_on_hours"] = healthInfo["power_on_hours"]
disk["health_reallocated_sectors"] = healthInfo["reallocated_sectors"]
disk["health_pending_sectors"] = healthInfo["pending_sectors"]
}
disks = append(disks, disk)
}
}
return disks, nil
}
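// ListDisks parses "lsblk -J" output shaped like this (illustrative, trimmed):
//
//   {"blockdevices": [
//     {"name": "sdb", "size": "931.5G", "type": "disk",
//      "fstype": null, "mountpoint": null}
//   ]}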
// getDiskHealth retrieves SMART health information for a disk
func (s *Service) getDiskHealth(diskName string) map[string]string {
health := make(map[string]string)
diskPath := "/dev/" + diskName
// Check if smartctl is available
smartctlPath, err := exec.LookPath("smartctl")
if err != nil {
// smartctl not available, skip health check
return nil
}
// Get overall health status
cmd := exec.Command("sudo", "-n", smartctlPath, "-H", diskPath)
output, err := cmd.Output()
if err != nil {
// Check if it's because SMART is not supported (common for virtual disks)
// If exit code indicates unsupported, return nil (don't show health info)
if exitError, ok := err.(*exec.ExitError); ok {
exitCode := exitError.ExitCode()
// Exit code 2 usually means "SMART not supported" or "device doesn't support SMART"
if exitCode == 2 {
return nil // Don't show health for unsupported devices
}
}
// For other errors, also return nil (don't show unknown)
return nil
}
outputStr := string(output)
// Check if SMART is unsupported (common messages)
if strings.Contains(outputStr, "SMART support is: Unavailable") ||
strings.Contains(outputStr, "Device does not support SMART") ||
strings.Contains(outputStr, "SMART not supported") ||
strings.Contains(outputStr, "Unable to detect device type") ||
strings.Contains(outputStr, "SMART support is: Disabled") {
return nil // Don't show health for unsupported devices
}
// Parse health status - multiple possible formats:
// - "SMART overall-health self-assessment test result: PASSED" or "FAILED"
// - "SMART Health Status: OK"
// - "SMART Status: OK" or "SMART Status: FAILED"
if strings.Contains(outputStr, "PASSED") ||
strings.Contains(outputStr, "SMART Health Status: OK") ||
strings.Contains(outputStr, "SMART Status: OK") {
health["status"] = "healthy"
} else if strings.Contains(outputStr, "FAILED") ||
strings.Contains(outputStr, "SMART Status: FAILED") {
health["status"] = "failed"
} else {
// If we can't determine status but SMART is supported, return nil instead of unknown
// This avoids showing "Unknown" for virtual disks or devices with unclear status
return nil
}
// Get detailed SMART attributes
cmd = exec.Command("sudo", "-n", smartctlPath, "-A", diskPath)
attrOutput, err := cmd.Output()
if err != nil {
// Return what we have
return health
}
attrStr := string(attrOutput)
lines := strings.Split(attrStr, "\n")
// Parse key attributes by SMART attribute ID (the first column); substring
// matching on the whole line misfires (e.g. "9" also matches IDs 194/197)
for _, line := range lines {
fields := strings.Fields(line)
if len(fields) < 10 {
continue
}
// fields[9] is the RAW_VALUE column of "smartctl -A" output
switch fields[0] {
case "194": // Temperature_Celsius
health["temperature"] = fields[9] + "°C"
case "9": // Power_On_Hours
health["power_on_hours"] = fields[9]
case "5": // Reallocated_Sector_Ct
health["reallocated_sectors"] = fields[9]
case "197": // Current_Pending_Sector
health["pending_sectors"] = fields[9]
}
}
return health
}
// parseSize converts human-readable size to bytes
func parseSize(s string) (uint64, error) {
s = strings.TrimSpace(s)
if s == "-" || s == "" {
return 0, nil
}
multiplier := uint64(1)
suffix := strings.ToUpper(s[len(s)-1:])
switch suffix {
case "K":
multiplier = 1024
s = s[:len(s)-1]
case "M":
multiplier = 1024 * 1024
s = s[:len(s)-1]
case "G":
multiplier = 1024 * 1024 * 1024
s = s[:len(s)-1]
case "T":
multiplier = 1024 * 1024 * 1024 * 1024
s = s[:len(s)-1]
case "P":
multiplier = 1024 * 1024 * 1024 * 1024 * 1024
s = s[:len(s)-1]
default:
// Check if last char is a digit
if suffix[0] < '0' || suffix[0] > '9' {
return 0, fmt.Errorf("unknown suffix: %s", suffix)
}
}
// Handle decimal values (e.g., "1.5G")
if strings.Contains(s, ".") {
val, err := strconv.ParseFloat(s, 64)
if err != nil {
return 0, err
}
return uint64(val * float64(multiplier)), nil
}
val, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, err
}
return val * multiplier, nil
}
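// Worked examples (multipliers are binary, base 1024):
//
//   parseSize("512")  == 512
//   parseSize("4K")   == 4096
//   parseSize("1.5G") == 1610612736 // 1.5 * 1024^3
//   parseSize("-")    == 0 (treated as "no value", not an error)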
// ListSnapshots returns all snapshots for a dataset (or all if dataset is empty)
func (s *Service) ListSnapshots(dataset string) ([]models.Snapshot, error) {
args := []string{"list", "-H", "-o", "name,used,creation", "-t", "snapshot", "-s", "creation"}
if dataset != "" {
args = append(args, "-r", dataset)
} else {
args = append(args, "-r")
}
output, err := s.execCommand(s.zfsPath, args...)
if err != nil {
// Return empty slice instead of nil to ensure JSON encodes as [] not null
return []models.Snapshot{}, err
}
snapshots := []models.Snapshot{}
lines := strings.Split(output, "\n")
for _, line := range lines {
if line == "" {
continue
}
fields := strings.Fields(line)
if len(fields) < 3 {
continue
}
fullName := fields[0]
// Snapshot name format: dataset@snapshot
parts := strings.Split(fullName, "@")
if len(parts) != 2 {
continue
}
datasetName := parts[0]
snapshot := models.Snapshot{
Name: fullName,
Dataset: datasetName,
}
// Parse size
if used, err := parseSize(fields[1]); err == nil {
snapshot.Size = used
}
// Parse creation time; OpenZFS typically prints ctime-style without seconds
createdStr := strings.Join(fields[2:], " ")
if t, err := time.Parse("Mon Jan 2 15:04 2006", createdStr); err == nil {
snapshot.CreatedAt = t
} else if t, err := time.Parse("Mon Jan 2 15:04:05 2006", createdStr); err == nil {
snapshot.CreatedAt = t
} else if t, err := time.Parse(time.RFC3339, createdStr); err == nil {
// Some tooling emits RFC3339 timestamps
snapshot.CreatedAt = t
}
snapshots = append(snapshots, snapshot)
}
return snapshots, nil
}
// CreateSnapshot creates a new snapshot
func (s *Service) CreateSnapshot(dataset, name string, recursive bool) error {
args := []string{"snapshot"}
if recursive {
args = append(args, "-r")
}
snapshotName := fmt.Sprintf("%s@%s", dataset, name)
args = append(args, snapshotName)
_, err := s.execCommand(s.zfsPath, args...)
return err
}
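// Usage sketch (names illustrative): CreateSnapshot("tank/media", "daily-2025-01-01", false)
// runs "zfs snapshot tank/media@daily-2025-01-01".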
// DestroySnapshot destroys a snapshot
func (s *Service) DestroySnapshot(name string, recursive bool) error {
args := []string{"destroy"}
if recursive {
args = append(args, "-r")
}
args = append(args, name)
_, err := s.execCommand(s.zfsPath, args...)
return err
}
// GetSnapshot returns snapshot details
func (s *Service) GetSnapshot(name string) (*models.Snapshot, error) {
snapshots, err := s.ListSnapshots("")
if err != nil {
return nil, err
}
for _, snap := range snapshots {
if snap.Name == name {
return &snap, nil
}
}
return nil, fmt.Errorf("snapshot %s not found", name)
}
// RestoreSnapshot rolls back a dataset to a snapshot
func (s *Service) RestoreSnapshot(snapshotName string, force bool) error {
args := []string{"rollback"}
if force {
args = append(args, "-r") // Recursive rollback for child datasets
}
args = append(args, snapshotName)
_, err := s.execCommand(s.zfsPath, args...)
if err != nil {
return translateZFSError(err, "merestore snapshot", snapshotName)
}
return nil
}