@@ -2,6 +2,7 @@ package httpapp

import (
    "encoding/json"
    "log"
    "net/http"

    "gitea.avt.data-center.id/othman.suseno/atlas/internal/models"
@@ -9,88 +10,279 @@ import (

// pathParam is now in router_helpers.go

// Disk Handlers
func (a *App) handleListDisks(w http.ResponseWriter, r *http.Request) {
    disks, err := a.zfs.ListDisks()
    if err != nil {
        log.Printf("list disks error: %v", err)
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }
    writeJSON(w, http.StatusOK, disks)
}

// ZFS Pool Handlers
func (a *App) handleListPools(w http.ResponseWriter, r *http.Request) {
    pools, err := a.zfs.ListPools()
    if err != nil {
        log.Printf("list pools error: %v", err)
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }
    writeJSON(w, http.StatusOK, pools)
}

func (a *App) handleCreatePool(w http.ResponseWriter, r *http.Request) {
    var req struct {
        Name    string            `json:"name"`
        VDEVs   []string          `json:"vdevs"`
        Options map[string]string `json:"options,omitempty"`
    }

    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
        return
    }

    if req.Name == "" || len(req.VDEVs) == 0 {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name and vdevs are required"})
        return
    }

    if req.Options == nil {
        req.Options = make(map[string]string)
    }

    if err := a.zfs.CreatePool(req.Name, req.VDEVs, req.Options); err != nil {
        log.Printf("create pool error: %v", err)
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }

    pool, err := a.zfs.GetPool(req.Name)
    if err != nil {
        writeJSON(w, http.StatusCreated, map[string]string{"message": "pool created", "name": req.Name})
        return
    }

    writeJSON(w, http.StatusCreated, pool)
}

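For illustration only (not part of the commit): a request body matching the struct above. The pool name, device paths, and ashift value are hypothetical.

// POST /api/v1/pools
// {"name": "tank", "vdevs": ["/dev/sdb", "/dev/sdc"], "options": {"ashift": "12"}}
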
func (a *App) handleGetPool(w http.ResponseWriter, r *http.Request) {
    name := pathParam(r, "/api/v1/pools/")
    if name == "" {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "pool name required"})
        return
    }

    pool, err := a.zfs.GetPool(name)
    if err != nil {
        writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
        return
    }

    writeJSON(w, http.StatusOK, pool)
}

func (a *App) handleDeletePool(w http.ResponseWriter, r *http.Request) {
    name := pathParam(r, "/api/v1/pools/")
    if name == "" {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "pool name required"})
        return
    }

    if err := a.zfs.DestroyPool(name); err != nil {
        log.Printf("destroy pool error: %v", err)
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }

    writeJSON(w, http.StatusOK, map[string]string{"message": "pool destroyed", "name": name})
}

func (a *App) handleScrubPool(w http.ResponseWriter, r *http.Request) {
    name := pathParam(r, "/api/v1/pools/")
    if name == "" {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "pool name required"})
        return
    }

    if err := a.zfs.ScrubPool(name); err != nil {
        log.Printf("scrub pool error: %v", err)
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }

    writeJSON(w, http.StatusOK, map[string]string{"message": "scrub started", "pool": name})
}

// Dataset Handlers
func (a *App) handleListDatasets(w http.ResponseWriter, r *http.Request) {
    pool := r.URL.Query().Get("pool")
    datasets, err := a.zfs.ListDatasets(pool)
    if err != nil {
        log.Printf("list datasets error: %v", err)
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }
    writeJSON(w, http.StatusOK, datasets)
}

func (a *App) handleCreateDataset(w http.ResponseWriter, r *http.Request) {
    var req struct {
        Name    string            `json:"name"`
        Options map[string]string `json:"options,omitempty"`
    }

    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
        return
    }

    if req.Name == "" {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset name is required"})
        return
    }

    if req.Options == nil {
        req.Options = make(map[string]string)
    }

    if err := a.zfs.CreateDataset(req.Name, req.Options); err != nil {
        log.Printf("create dataset error: %v", err)
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }

    writeJSON(w, http.StatusCreated, map[string]string{"message": "dataset created", "name": req.Name})
}

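Illustrative only (not in the diff): a request body for dataset creation; the dataset path and compression option are hypothetical. The name is the full ZFS path, pool included, since it is passed straight to zfs create.

// POST /api/v1/datasets
// {"name": "tank/data", "options": {"compression": "lz4"}}
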
func (a *App) handleGetDataset(w http.ResponseWriter, r *http.Request) {
    name := pathParam(r, "/api/v1/datasets/")
    if name == "" {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset name required"})
        return
    }

    datasets, err := a.zfs.ListDatasets("")
    if err != nil {
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }

    for _, ds := range datasets {
        if ds.Name == name {
            writeJSON(w, http.StatusOK, ds)
            return
        }
    }

    writeJSON(w, http.StatusNotFound, map[string]string{"error": "dataset not found"})
}

func (a *App) handleUpdateDataset(w http.ResponseWriter, r *http.Request) {
    name := pathParam(r, "/api/v1/datasets/")
    // TODO: Implement dataset property updates
    writeJSON(w, http.StatusNotImplemented, map[string]string{"error": "not implemented", "name": name})
}

func (a *App) handleDeleteDataset(w http.ResponseWriter, r *http.Request) {
    name := pathParam(r, "/api/v1/datasets/")
    if name == "" {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset name required"})
        return
    }

    recursive := r.URL.Query().Get("recursive") == "true"

    if err := a.zfs.DestroyDataset(name, recursive); err != nil {
        log.Printf("destroy dataset error: %v", err)
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }

    writeJSON(w, http.StatusOK, map[string]string{"message": "dataset destroyed", "name": name})
}

// ZVOL Handlers
func (a *App) handleListZVOLs(w http.ResponseWriter, r *http.Request) {
    pool := r.URL.Query().Get("pool")
    zvols, err := a.zfs.ListZVOLs(pool)
    if err != nil {
        log.Printf("list zvols error: %v", err)
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }
    writeJSON(w, http.StatusOK, zvols)
}

func (a *App) handleCreateZVOL(w http.ResponseWriter, r *http.Request) {
    var req struct {
        Name    string            `json:"name"`
        Size    uint64            `json:"size"` // in bytes
        Options map[string]string `json:"options,omitempty"`
    }

    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
        return
    }

    if req.Name == "" || req.Size == 0 {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "name and size are required"})
        return
    }

    if req.Options == nil {
        req.Options = make(map[string]string)
    }

    if err := a.zfs.CreateZVOL(req.Name, req.Size, req.Options); err != nil {
        log.Printf("create zvol error: %v", err)
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }

    writeJSON(w, http.StatusCreated, map[string]string{"message": "zvol created", "name": req.Name})
}

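Illustrative only (not in the diff): a request body for ZVOL creation. The volume path is hypothetical; size is a plain byte count, per the struct comment.

// POST /api/v1/zvols
// {"name": "tank/vol1", "size": 10737418240}   (10 GiB expressed in bytes)
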
func (a *App) handleGetZVOL(w http.ResponseWriter, r *http.Request) {
    name := pathParam(r, "/api/v1/zvols/")
    if name == "" {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "zvol name required"})
        return
    }

    zvols, err := a.zfs.ListZVOLs("")
    if err != nil {
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }

    for _, zvol := range zvols {
        if zvol.Name == name {
            writeJSON(w, http.StatusOK, zvol)
            return
        }
    }

    writeJSON(w, http.StatusNotFound, map[string]string{"error": "zvol not found"})
}

func (a *App) handleDeleteZVOL(w http.ResponseWriter, r *http.Request) {
    name := pathParam(r, "/api/v1/zvols/")
    if name == "" {
        writeJSON(w, http.StatusBadRequest, map[string]string{"error": "zvol name required"})
        return
    }

    if err := a.zfs.DestroyZVOL(name); err != nil {
        log.Printf("destroy zvol error: %v", err)
        writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
        return
    }

    writeJSON(w, http.StatusOK, map[string]string{"message": "zvol destroyed", "name": name})
}

// Snapshot Handlers

@@ -6,6 +6,8 @@ import (
    "net/http"
    "path/filepath"
    "time"

    "gitea.avt.data-center.id/othman.suseno/atlas/internal/zfs"
)

type Config struct {
@@ -18,6 +20,7 @@ type App struct {
    cfg  Config
    tmpl *template.Template
    mux  *http.ServeMux
    zfs  *zfs.Service
}

func New(cfg Config) (*App, error) {
@@ -37,6 +40,7 @@ func New(cfg Config) (*App, error) {
        cfg:  cfg,
        tmpl: tmpl,
        mux:  http.NewServeMux(),
        zfs:  zfs.New(),
    }

    a.routes()

@@ -15,6 +15,10 @@ func (a *App) routes() {
    a.mux.HandleFunc("/metrics", a.handleMetrics)

    // API v1 routes - ZFS Management
    a.mux.HandleFunc("/api/v1/disks", methodHandler(
        func(w http.ResponseWriter, r *http.Request) { a.handleListDisks(w, r) },
        nil, nil, nil, nil,
    ))
    a.mux.HandleFunc("/api/v1/pools", methodHandler(
        func(w http.ResponseWriter, r *http.Request) { a.handleListPools(w, r) },
        func(w http.ResponseWriter, r *http.Request) { a.handleCreatePool(w, r) },

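The methodHandler helper referenced above lives in router_helpers.go and is not part of this diff. A minimal sketch consistent with the call sites, assuming the five slots map to GET, POST, PUT, PATCH, and DELETE:

// Hypothetical sketch only — the real implementation is in router_helpers.go.
func methodHandler(get, post, put, patch, del http.HandlerFunc) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        var h http.HandlerFunc
        switch r.Method {
        case http.MethodGet:
            h = get
        case http.MethodPost:
            h = post
        case http.MethodPut:
            h = put
        case http.MethodPatch:
            h = patch
        case http.MethodDelete:
            h = del
        }
        if h == nil {
            w.WriteHeader(http.StatusMethodNotAllowed)
            return
        }
        h(w, r)
    }
}
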
internal/zfs/service.go (new file, 400 lines added)
@@ -0,0 +1,400 @@
package zfs

import (
    "bytes"
    "encoding/json"
    "fmt"
    "os/exec"
    "strconv"
    "strings"
    "time"

    "gitea.avt.data-center.id/othman.suseno/atlas/internal/models"
)

// Service provides ZFS operations
type Service struct {
    zfsPath   string
    zpoolPath string
}

// New creates a new ZFS service
func New() *Service {
    return &Service{
        zfsPath:   "zfs",
        zpoolPath: "zpool",
    }
}

// execCommand runs a command and returns its trimmed stdout
func (s *Service) execCommand(name string, args ...string) (string, error) {
    cmd := exec.Command(name, args...)
    var stdout, stderr bytes.Buffer
    cmd.Stdout = &stdout
    cmd.Stderr = &stderr

    if err := cmd.Run(); err != nil {
        return "", fmt.Errorf("%s: %v: %s", name, err, stderr.String())
    }

    return strings.TrimSpace(stdout.String()), nil
}

// ListPools returns all ZFS pools
func (s *Service) ListPools() ([]models.Pool, error) {
    output, err := s.execCommand(s.zpoolPath, "list", "-H", "-o", "name,size,allocated,free,health")
    if err != nil {
        return nil, err
    }

    var pools []models.Pool
    lines := strings.Split(output, "\n")
    for _, line := range lines {
        if line == "" {
            continue
        }

        fields := strings.Fields(line)
        if len(fields) < 5 {
            continue
        }

        pool := models.Pool{
            Name:   fields[0],
            Status: "ONLINE", // Default, will be updated from health
            Health: fields[4],
        }

        // Parse sizes (handles K, M, G, T suffixes)
        if size, err := parseSize(fields[1]); err == nil {
            pool.Size = size
        }
        if allocated, err := parseSize(fields[2]); err == nil {
            pool.Allocated = allocated
        }
        if free, err := parseSize(fields[3]); err == nil {
            pool.Free = free
        }

        // Get pool status
        status, _ := s.execCommand(s.zpoolPath, "status", "-x", pool.Name)
        if strings.Contains(status, "all pools are healthy") {
            pool.Status = "ONLINE"
        } else if strings.Contains(status, "DEGRADED") {
            pool.Status = "DEGRADED"
        } else if strings.Contains(status, "FAULTED") {
            pool.Status = "FAULTED"
        }

        // Get creation time
        created, _ := s.execCommand(s.zfsPath, "get", "-H", "-o", "value", "creation", pool.Name)
        if t, err := time.Parse("Mon Jan 2 15:04:05 2006", created); err == nil {
            pool.CreatedAt = t
        }

        pools = append(pools, pool)
    }

    return pools, nil
}

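For reference (illustrative values, not from the commit): the parser above consumes one tab-separated line per pool, as produced by zpool list -H with the requested columns.

// zpool list -H -o name,size,allocated,free,health
// tank    928G    412G    516G    ONLINE
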
// GetPool returns a specific pool
func (s *Service) GetPool(name string) (*models.Pool, error) {
    pools, err := s.ListPools()
    if err != nil {
        return nil, err
    }

    for _, pool := range pools {
        if pool.Name == name {
            return &pool, nil
        }
    }

    return nil, fmt.Errorf("pool %s not found", name)
}

// CreatePool creates a new ZFS pool
func (s *Service) CreatePool(name string, vdevs []string, options map[string]string) error {
    args := []string{"create"}

    // Add options
    for k, v := range options {
        args = append(args, "-o", fmt.Sprintf("%s=%s", k, v))
    }

    args = append(args, name)
    args = append(args, vdevs...)

    _, err := s.execCommand(s.zpoolPath, args...)
    return err
}

// DestroyPool destroys a ZFS pool
func (s *Service) DestroyPool(name string) error {
    _, err := s.execCommand(s.zpoolPath, "destroy", name)
    return err
}

// ScrubPool starts a scrub operation on a pool
func (s *Service) ScrubPool(name string) error {
    _, err := s.execCommand(s.zpoolPath, "scrub", name)
    return err
}

// GetScrubStatus returns the current scrub status
func (s *Service) GetScrubStatus(name string) (string, error) {
    output, err := s.execCommand(s.zpoolPath, "status", name)
    if err != nil {
        return "", err
    }

    if strings.Contains(output, "scrub in progress") {
        return "in_progress", nil
    }
    if strings.Contains(output, "scrub repaired") {
        return "completed", nil
    }
    return "idle", nil
}

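GetScrubStatus keys off substrings of zpool status output. On a typical OpenZFS system the relevant scan lines look roughly like the following (illustrative, not from the commit):

// scan: scrub in progress since Sun Oct  6 10:12:42 2024
// scan: scrub repaired 0B in 00:12:33 with 0 errors on Sun Oct  6 10:25:15 2024
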
// ListDatasets returns all datasets in a pool (or all if pool is empty)
func (s *Service) ListDatasets(pool string) ([]models.Dataset, error) {
    args := []string{"list", "-H", "-o", "name,type,used,avail,mountpoint"}
    if pool != "" {
        args = append(args, "-r", pool)
    } else {
        args = append(args, "-r")
    }

    output, err := s.execCommand(s.zfsPath, args...)
    if err != nil {
        return nil, err
    }

    var datasets []models.Dataset
    lines := strings.Split(output, "\n")
    for _, line := range lines {
        if line == "" {
            continue
        }

        fields := strings.Fields(line)
        if len(fields) < 5 {
            continue
        }

        fullName := fields[0]
        parts := strings.Split(fullName, "/")
        poolName := parts[0]

        dataset := models.Dataset{
            Name:       fullName,
            Pool:       poolName,
            Type:       fields[1],
            Mountpoint: fields[4],
        }

        if used, err := parseSize(fields[2]); err == nil {
            dataset.Used = used
        }
        if avail, err := parseSize(fields[3]); err == nil {
            dataset.Available = avail
        }
        dataset.Size = dataset.Used + dataset.Available

        // Get creation time
        created, _ := s.execCommand(s.zfsPath, "get", "-H", "-o", "value", "creation", fullName)
        if t, err := time.Parse("Mon Jan 2 15:04:05 2006", created); err == nil {
            dataset.CreatedAt = t
        }

        datasets = append(datasets, dataset)
    }

    return datasets, nil
}

// CreateDataset creates a new ZFS dataset
func (s *Service) CreateDataset(name string, options map[string]string) error {
    args := []string{"create"}

    for k, v := range options {
        args = append(args, "-o", fmt.Sprintf("%s=%s", k, v))
    }

    args = append(args, name)
    _, err := s.execCommand(s.zfsPath, args...)
    return err
}

// DestroyDataset destroys a ZFS dataset
func (s *Service) DestroyDataset(name string, recursive bool) error {
    args := []string{"destroy"}
    if recursive {
        args = append(args, "-r")
    }
    args = append(args, name)
    _, err := s.execCommand(s.zfsPath, args...)
    return err
}

// ListZVOLs returns all ZVOLs
func (s *Service) ListZVOLs(pool string) ([]models.ZVOL, error) {
    args := []string{"list", "-H", "-o", "name,volsize,used", "-t", "volume"}
    if pool != "" {
        args = append(args, "-r", pool)
    } else {
        args = append(args, "-r")
    }

    output, err := s.execCommand(s.zfsPath, args...)
    if err != nil {
        return nil, err
    }

    var zvols []models.ZVOL
    lines := strings.Split(output, "\n")
    for _, line := range lines {
        if line == "" {
            continue
        }

        fields := strings.Fields(line)
        if len(fields) < 3 {
            continue
        }

        fullName := fields[0]
        parts := strings.Split(fullName, "/")
        poolName := parts[0]

        zvol := models.ZVOL{
            Name: fullName,
            Pool: poolName,
        }

        if size, err := parseSize(fields[1]); err == nil {
            zvol.Size = size
        }
        if used, err := parseSize(fields[2]); err == nil {
            zvol.Used = used
        }

        // Get creation time
        created, _ := s.execCommand(s.zfsPath, "get", "-H", "-o", "value", "creation", fullName)
        if t, err := time.Parse("Mon Jan 2 15:04:05 2006", created); err == nil {
            zvol.CreatedAt = t
        }

        zvols = append(zvols, zvol)
    }

    return zvols, nil
}

// CreateZVOL creates a new ZVOL
func (s *Service) CreateZVOL(name string, size uint64, options map[string]string) error {
    args := []string{"create", "-V", fmt.Sprintf("%d", size)}

    for k, v := range options {
        args = append(args, "-o", fmt.Sprintf("%s=%s", k, v))
    }

    args = append(args, name)
    _, err := s.execCommand(s.zfsPath, args...)
    return err
}

// DestroyZVOL destroys a ZVOL
func (s *Service) DestroyZVOL(name string) error {
    _, err := s.execCommand(s.zfsPath, "destroy", name)
    return err
}

// ListDisks returns available disks (read-only)
func (s *Service) ListDisks() ([]map[string]string, error) {
    // Use lsblk to list block devices
    output, err := s.execCommand("lsblk", "-J", "-o", "name,size,type,fstype,mountpoint")
    if err != nil {
        return nil, err
    }

    var result struct {
        BlockDevices []struct {
            Name       string        `json:"name"`
            Size       string        `json:"size"`
            Type       string        `json:"type"`
            FSType     string        `json:"fstype"`
            Mountpoint string        `json:"mountpoint"`
            Children   []interface{} `json:"children"`
        } `json:"blockdevices"`
    }

    if err := json.Unmarshal([]byte(output), &result); err != nil {
        return nil, err
    }

    var disks []map[string]string
    for _, dev := range result.BlockDevices {
        if dev.Type == "disk" && dev.FSType == "" && dev.Mountpoint == "" {
            disks = append(disks, map[string]string{
                "name": dev.Name,
                "size": dev.Size,
                "path": "/dev/" + dev.Name,
            })
        }
    }

    return disks, nil
}

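The anonymous struct above mirrors the JSON that lsblk -J emits; only top-level devices are inspected, and the decoded children are left unused. An abbreviated example of that output (illustrative values):

// {"blockdevices": [
//    {"name": "sda", "size": "931.5G", "type": "disk", "fstype": null, "mountpoint": null},
//    {"name": "sdb", "size": "931.5G", "type": "disk", "fstype": null, "mountpoint": null}
// ]}
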
// parseSize converts human-readable size to bytes
func parseSize(s string) (uint64, error) {
    s = strings.TrimSpace(s)
    if s == "-" || s == "" {
        return 0, nil
    }

    multiplier := uint64(1)
    suffix := strings.ToUpper(s[len(s)-1:])

    switch suffix {
    case "K":
        multiplier = 1024
        s = s[:len(s)-1]
    case "M":
        multiplier = 1024 * 1024
        s = s[:len(s)-1]
    case "G":
        multiplier = 1024 * 1024 * 1024
        s = s[:len(s)-1]
    case "T":
        multiplier = 1024 * 1024 * 1024 * 1024
        s = s[:len(s)-1]
    case "P":
        multiplier = 1024 * 1024 * 1024 * 1024 * 1024
        s = s[:len(s)-1]
    default:
        // Check if last char is a digit
        if suffix[0] < '0' || suffix[0] > '9' {
            return 0, fmt.Errorf("unknown suffix: %s", suffix)
        }
    }

    // Handle decimal values (e.g., "1.5G")
    if strings.Contains(s, ".") {
        val, err := strconv.ParseFloat(s, 64)
        if err != nil {
            return 0, err
        }
        return uint64(val * float64(multiplier)), nil
    }

    val, err := strconv.ParseUint(s, 10, 64)
    if err != nil {
        return 0, err
    }

    return val * multiplier, nil
}
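A quick worked example of the conversion above (values follow directly from the code):

// parseSize("1.5G") -> uint64(1.5 * 1024 * 1024 * 1024) = 1610612736
// parseSize("928G") -> 928 * 1024 * 1024 * 1024 = 996432412672
// parseSize("-")    -> 0, nil (ZFS prints "-" for missing values)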