package httpapp

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"time"

	"gitea.avt.data-center.id/othman.suseno/atlas/internal/auth"
	"gitea.avt.data-center.id/othman.suseno/atlas/internal/errors"
	"gitea.avt.data-center.id/othman.suseno/atlas/internal/models"
	"gitea.avt.data-center.id/othman.suseno/atlas/internal/storage"
	"gitea.avt.data-center.id/othman.suseno/atlas/internal/validation"
)

// pathParam is now in router_helpers.go

// Disk Handlers
func (a *App) handleListDisks(w http.ResponseWriter, r *http.Request) {
	disks, err := a.zfs.ListDisks()
	if err != nil {
		log.Printf("list disks error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}
	writeJSON(w, http.StatusOK, disks)
}

// ZFS Pool Handlers
func (a *App) handleListPools(w http.ResponseWriter, r *http.Request) {
	pools, err := a.zfs.ListPools()
	if err != nil {
		log.Printf("list pools error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}
	// Ensure we always return an array, not null
	if pools == nil {
		pools = []models.Pool{}
	}

	// Set cache-control headers to prevent caching - pools list changes frequently
	w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
	w.Header().Set("Pragma", "no-cache")
	w.Header().Set("Expires", "0")

	writeJSON(w, http.StatusOK, pools)
}
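
// Illustrative request body for the create-pool endpoint (a sketch; the exact
// vdev strings accepted in "vdevs", e.g. whole disks vs. "mirror"/"raidz"
// groupings, depend on the zfs manager implementation, which is not shown here):
//
//	{
//	  "name": "tank",
//	  "vdevs": ["/dev/sdb", "/dev/sdc"],
//	  "options": {"ashift": "12"}
//	}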
func (a *App) handleCreatePool(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Name    string            `json:"name"`
		VDEVs   []string          `json:"vdevs"`
		Options map[string]string `json:"options,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, errors.ErrBadRequest("invalid request body"))
		return
	}

	// Validate pool name
	if err := validation.ValidateZFSName(req.Name); err != nil {
		writeError(w, errors.ErrValidation(err.Error()))
		return
	}

	if len(req.VDEVs) == 0 {
		writeError(w, errors.ErrValidation("at least one vdev is required"))
		return
	}

	if req.Options == nil {
		req.Options = make(map[string]string)
	}

	log.Printf("creating pool: name=%s, vdevs=%v, options=%v", req.Name, req.VDEVs, req.Options)

	err := a.zfs.CreatePool(req.Name, req.VDEVs, req.Options)

	// CRITICAL: Always check whether the pool exists, regardless of the reported error.
	// ZFS often reports mountpoint errors even though the pool was still created.
	// CreatePool already retries internally, but we double-check here.
	// Wait a brief moment for the pool to be fully registered.
	time.Sleep(300 * time.Millisecond)

	pool, getErr := a.zfs.GetPool(req.Name)
	if getErr == nil {
		// Pool exists - this is success!
		if err != nil {
			log.Printf("info: pool %s created successfully despite CreatePool reporting error: %v", req.Name, err)
		} else {
			log.Printf("info: pool %s created successfully", req.Name)
		}
		// Set cache-control headers
		w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
		w.Header().Set("Pragma", "no-cache")
		w.Header().Set("Expires", "0")
		writeJSON(w, http.StatusCreated, pool)
		return
	}

	// Pool doesn't exist - return the error with detailed context
	if err != nil {
		log.Printf("error: pool %s creation failed - CreatePool error: %v, GetPool error: %v", req.Name, err, getErr)
		writeError(w, errors.ErrInternal("failed to create pool").WithDetails(fmt.Sprintf("Pool '%s' was not created: %v. Check logs for zpool command output.", req.Name, err)))
		return
	}

	// No error but pool doesn't exist (shouldn't happen, but handle it)
	log.Printf("warning: pool %s creation reported no error but pool was not found", req.Name)
	writeError(w, errors.ErrInternal(fmt.Sprintf("Pool '%s' creation reported success but pool was not found", req.Name)))
}

func (a *App) handleGetPool(w http.ResponseWriter, r *http.Request) {
	name := pathParam(r, "/api/v1/pools/")
	if name == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "pool name required"})
		return
	}

	// Check if detail is requested
	if r.URL.Query().Get("detail") == "true" {
		detail, err := a.zfs.GetPoolDetail(name)
		if err != nil {
			writeError(w, errors.ErrNotFound("pool not found").WithDetails(err.Error()))
			return
		}
		writeJSON(w, http.StatusOK, detail)
		return
	}

	pool, err := a.zfs.GetPool(name)
	if err != nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
		return
	}

	writeJSON(w, http.StatusOK, pool)
}
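
// Illustrative request body for adding a spare disk to a pool. The handler
// prepends "/dev/" when it is missing, so both forms below are equivalent:
//
//	{"disk": "sdb"}
//	{"disk": "/dev/sdb"}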
func (a *App) handleAddSpareDisk(w http.ResponseWriter, r *http.Request) {
	name := pathParam(r, "/api/v1/pools/")
	name = strings.TrimSuffix(name, "/spare")
	if name == "" {
		writeError(w, errors.ErrBadRequest("pool name required"))
		return
	}

	var req struct {
		Disk string `json:"disk"` // Disk path like /dev/sdb or sdb
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, errors.ErrBadRequest("invalid request body"))
		return
	}

	if req.Disk == "" {
		writeError(w, errors.ErrValidation("disk path required"))
		return
	}

	// Ensure disk path starts with /dev/ if not already
	diskPath := req.Disk
	if !strings.HasPrefix(diskPath, "/dev/") {
		diskPath = "/dev/" + diskPath
	}

	if err := a.zfs.AddSpareDisk(name, diskPath); err != nil {
		log.Printf("add spare disk error: %v", err)
		writeError(w, errors.ErrInternal("failed to add spare disk").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "spare disk added", "pool": name, "disk": diskPath})
}

func (a *App) handleDeletePool(w http.ResponseWriter, r *http.Request) {
	name := pathParam(r, "/api/v1/pools/")
	if name == "" {
		writeError(w, errors.ErrBadRequest("pool name required"))
		return
	}

	if err := a.zfs.DestroyPool(name); err != nil {
		log.Printf("destroy pool error: %v", err)
		writeError(w, errors.ErrInternal("failed to destroy pool").WithDetails(err.Error()))
		return
	}

	// Set cache-control headers to prevent caching of this response
	w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
	w.Header().Set("Pragma", "no-cache")
	w.Header().Set("Expires", "0")

	writeJSON(w, http.StatusOK, map[string]string{"message": "pool destroyed", "name": name})
}

func (a *App) handleImportPool(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Name    string            `json:"name"`
		Options map[string]string `json:"options,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, errors.ErrBadRequest("invalid request body").WithDetails(err.Error()))
		return
	}

	if req.Name == "" {
		writeError(w, errors.ErrBadRequest("pool name required"))
		return
	}

	if err := a.zfs.ImportPool(req.Name, req.Options); err != nil {
		log.Printf("import pool error: %v", err)
		writeError(w, errors.ErrInternal("failed to import pool").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "pool imported", "name": req.Name})
}

func (a *App) handleExportPool(w http.ResponseWriter, r *http.Request) {
	name := pathParam(r, "/api/v1/pools/")
	if name == "" {
		writeError(w, errors.ErrBadRequest("pool name required"))
		return
	}

	var req struct {
		Force bool `json:"force,omitempty"`
	}
	// Force is optional, decode if body exists
	_ = json.NewDecoder(r.Body).Decode(&req)

	if err := a.zfs.ExportPool(name, req.Force); err != nil {
		log.Printf("export pool error: %v", err)
		writeError(w, errors.ErrInternal("failed to export pool").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "pool exported", "name": name})
}

func (a *App) handleListAvailablePools(w http.ResponseWriter, r *http.Request) {
	pools, err := a.zfs.ListAvailablePools()
	if err != nil {
		log.Printf("list available pools error: %v", err)
		writeError(w, errors.ErrInternal("failed to list available pools").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, map[string]interface{}{
		"pools": pools,
	})
}

func (a *App) handleScrubPool(w http.ResponseWriter, r *http.Request) {
	name := pathParam(r, "/api/v1/pools/")
	if name == "" {
		writeError(w, errors.ErrBadRequest("pool name required"))
		return
	}

	if err := a.zfs.ScrubPool(name); err != nil {
		log.Printf("scrub pool error: %v", err)
		writeError(w, errors.ErrInternal("failed to start scrub").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "scrub started", "pool": name})
}

func (a *App) handleGetScrubStatus(w http.ResponseWriter, r *http.Request) {
	name := pathParam(r, "/api/v1/pools/")
	if name == "" {
		writeError(w, errors.ErrBadRequest("pool name required"))
		return
	}

	status, err := a.zfs.GetScrubStatus(name)
	if err != nil {
		log.Printf("get scrub status error: %v", err)
		writeError(w, errors.ErrInternal("failed to get scrub status").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, status)
}

// Dataset Handlers
func (a *App) handleListDatasets(w http.ResponseWriter, r *http.Request) {
	pool := r.URL.Query().Get("pool")
	datasets, err := a.zfs.ListDatasets(pool)
	if err != nil {
		log.Printf("list datasets error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}
	// Ensure we always return an array, not null
	if datasets == nil {
		datasets = []models.Dataset{}
	}
	writeJSON(w, http.StatusOK, datasets)
}
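
// Illustrative request body for creating a dataset (a sketch; any key/value
// pairs in "options" are passed through as ZFS properties by the zfs manager,
// whose implementation is not shown here):
//
//	{
//	  "name": "tank/data",
//	  "options": {"compression": "lz4", "quota": "10G"}
//	}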
func (a *App) handleCreateDataset(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Name    string            `json:"name"`
		Options map[string]string `json:"options,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	if req.Name == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset name is required"})
		return
	}

	if req.Options == nil {
		req.Options = make(map[string]string)
	}

	err := a.zfs.CreateDataset(req.Name, req.Options)

	// CRITICAL: Always check whether the dataset exists, regardless of the reported error.
	// ZFS often reports mountpoint errors even though the dataset was still created.
	// CreateDataset already retries internally, but we double-check here.
	// Wait a brief moment for the dataset to be fully registered.
	time.Sleep(300 * time.Millisecond)

	datasets, getErr := a.zfs.ListDatasets("")
	var datasetExists bool
	if getErr == nil {
		for _, ds := range datasets {
			if ds.Name == req.Name {
				datasetExists = true
				break
			}
		}
	}

	if datasetExists {
		// Dataset exists - this is success!
		if err != nil {
			log.Printf("info: dataset %s created successfully despite CreateDataset reporting error: %v", req.Name, err)
		} else {
			log.Printf("info: dataset %s created successfully", req.Name)
		}
		// Set cache-control headers
		w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
		w.Header().Set("Pragma", "no-cache")
		w.Header().Set("Expires", "0")
		writeJSON(w, http.StatusCreated, map[string]string{"message": "dataset created", "name": req.Name})
		return
	}

	// Dataset doesn't exist - return the error with detailed context
	if err != nil {
		log.Printf("error: dataset %s creation failed - CreateDataset error: %v, ListDatasets error: %v", req.Name, err, getErr)
		writeError(w, errors.ErrInternal("failed to create dataset").WithDetails(err.Error()))
		return
	}

	// No error but dataset doesn't exist (shouldn't happen, but handle it)
	log.Printf("warning: dataset %s creation reported no error but dataset was not found", req.Name)
	writeError(w, errors.ErrInternal(fmt.Sprintf("Dataset '%s' creation reported success but dataset was not found", req.Name)))
}

func (a *App) handleGetDataset(w http.ResponseWriter, r *http.Request) {
	name := pathParamFull(r, "/api/v1/datasets/")
	if name == "" {
		writeError(w, errors.ErrBadRequest("dataset name required"))
		return
	}

	datasets, err := a.zfs.ListDatasets("")
	if err != nil {
		writeError(w, errors.ErrInternal("failed to list datasets").WithDetails(err.Error()))
		return
	}

	for _, ds := range datasets {
		if ds.Name == name {
			writeJSON(w, http.StatusOK, ds)
			return
		}
	}

	writeError(w, errors.ErrNotFound("dataset"))
}
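
// Illustrative request body for updating a dataset. "quota" accepts a size such
// as "10G" or "1T", or "none" to remove it; "compression" accepts values such
// as "lz4", "gzip", or "off"; "options" carries any other ZFS properties:
//
//	{
//	  "quota": "20G",
//	  "compression": "lz4",
//	  "options": {"atime": "off"}
//	}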
func (a *App) handleUpdateDataset(w http.ResponseWriter, r *http.Request) {
	name := pathParamFull(r, "/api/v1/datasets/")
	if name == "" {
		writeError(w, errors.ErrBadRequest("dataset name required"))
		return
	}

	var req struct {
		Quota       string            `json:"quota"`       // e.g., "10G", "1T", or "none" to remove
		Compression string            `json:"compression"` // e.g., "lz4", "gzip", "off"
		Options     map[string]string `json:"options"`     // other ZFS properties
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, errors.ErrBadRequest("invalid request body"))
		return
	}

	// Validate dataset exists
	datasets, err := a.zfs.ListDatasets("")
	if err != nil {
		writeError(w, errors.ErrInternal("failed to validate dataset").WithDetails(err.Error()))
		return
	}

	datasetExists := false
	for _, ds := range datasets {
		if ds.Name == name {
			datasetExists = true
			break
		}
	}

	if !datasetExists {
		writeError(w, errors.ErrNotFound(fmt.Sprintf("dataset '%s' not found", name)))
		return
	}

	// Update dataset properties
	if err := a.zfs.UpdateDataset(name, req.Quota, req.Compression, req.Options); err != nil {
		log.Printf("update dataset error: %v", err)
		writeError(w, errors.ErrInternal("failed to update dataset").WithDetails(err.Error()))
		return
	}

	// Get updated dataset info
	datasets, _ = a.zfs.ListDatasets("")
	for _, ds := range datasets {
		if ds.Name == name {
			writeJSON(w, http.StatusOK, ds)
			return
		}
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "dataset updated", "name": name})
}

func (a *App) handleDeleteDataset(w http.ResponseWriter, r *http.Request) {
	name := pathParamFull(r, "/api/v1/datasets/")
	if name == "" {
		writeError(w, errors.ErrBadRequest("dataset name required"))
		return
	}

	recursive := r.URL.Query().Get("recursive") == "true"

	if err := a.zfs.DestroyDataset(name, recursive); err != nil {
		log.Printf("destroy dataset error: %v", err)
		writeError(w, errors.ErrInternal("failed to destroy dataset").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "dataset destroyed", "name": name})
}

// ZVOL Handlers
func (a *App) handleListZVOLs(w http.ResponseWriter, r *http.Request) {
	pool := r.URL.Query().Get("pool")
	zvols, err := a.zfs.ListZVOLs(pool)
	if err != nil {
		log.Printf("list zvols error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}
	writeJSON(w, http.StatusOK, zvols)
}
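
// Illustrative request body for creating a ZVOL. The size is given in a
// human-readable form and parsed to bytes before the ZVOL is created:
//
//	{
//	  "name": "tank/vol1",
//	  "size": "10G",
//	  "options": {"volblocksize": "16K"}
//	}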
func (a *App) handleCreateZVOL(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Name    string            `json:"name"`
		Size    string            `json:"size"` // human-readable format (e.g., "10G")
		Options map[string]string `json:"options,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, errors.ErrBadRequest("invalid request body"))
		return
	}

	// Validate ZVOL name
	if err := validation.ValidateZFSName(req.Name); err != nil {
		writeError(w, errors.ErrValidation(err.Error()))
		return
	}

	// Validate size format
	if err := validation.ValidateSize(req.Size); err != nil {
		writeError(w, errors.ErrValidation(err.Error()))
		return
	}

	// Parse size to bytes
	sizeBytes, err := a.parseSizeString(req.Size)
	if err != nil {
		writeError(w, errors.ErrValidation(fmt.Sprintf("invalid size: %v", err)))
		return
	}

	if req.Options == nil {
		req.Options = make(map[string]string)
	}

	if err := a.zfs.CreateZVOL(req.Name, sizeBytes, req.Options); err != nil {
		log.Printf("create zvol error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	writeJSON(w, http.StatusCreated, map[string]string{"message": "zvol created", "name": req.Name})
}

func (a *App) handleGetZVOL(w http.ResponseWriter, r *http.Request) {
	name := pathParamFull(r, "/api/v1/zvols/")
	if name == "" {
		writeError(w, errors.ErrBadRequest("zvol name required"))
		return
	}

	zvols, err := a.zfs.ListZVOLs("")
	if err != nil {
		writeError(w, errors.ErrInternal("failed to list zvols").WithDetails(err.Error()))
		return
	}

	for _, zvol := range zvols {
		if zvol.Name == name {
			writeJSON(w, http.StatusOK, zvol)
			return
		}
	}

	writeError(w, errors.ErrNotFound("zvol"))
}

func (a *App) handleDeleteZVOL(w http.ResponseWriter, r *http.Request) {
	name := pathParamFull(r, "/api/v1/zvols/")
	if name == "" {
		writeError(w, errors.ErrBadRequest("zvol name required"))
		return
	}

	if err := a.zfs.DestroyZVOL(name); err != nil {
		log.Printf("destroy zvol error: %v", err)
		writeError(w, errors.ErrInternal("failed to destroy zvol").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "zvol destroyed", "name": name})
}

// Snapshot Handlers
func (a *App) handleListSnapshots(w http.ResponseWriter, r *http.Request) {
	dataset := r.URL.Query().Get("dataset")
	snapshots, err := a.zfs.ListSnapshots(dataset)
	if err != nil {
		log.Printf("list snapshots error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}
	// Ensure we always return an array, not null
	if snapshots == nil {
		snapshots = []models.Snapshot{}
	}
	writeJSON(w, http.StatusOK, snapshots)
}
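
// Illustrative request body for creating a snapshot. The full snapshot name is
// formed as "<dataset>@<name>" after creation:
//
//	{
//	  "dataset": "tank/data",
//	  "name": "before-upgrade",
//	  "recursive": false
//	}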
func (a *App) handleCreateSnapshot(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Dataset   string `json:"dataset"`
		Name      string `json:"name"`
		Recursive bool   `json:"recursive,omitempty"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate dataset name
	if err := validation.ValidateZFSName(req.Dataset); err != nil {
		writeError(w, errors.ErrValidation(err.Error()))
		return
	}

	// Validate snapshot name (can contain @ but we'll validate the base name)
	snapshotBaseName := strings.ReplaceAll(req.Name, "@", "")
	if err := validation.ValidateZFSName(snapshotBaseName); err != nil {
		writeError(w, errors.ErrValidation("invalid snapshot name"))
		return
	}

	if err := a.zfs.CreateSnapshot(req.Dataset, req.Name, req.Recursive); err != nil {
		log.Printf("create snapshot error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	fullSnapshotName := fmt.Sprintf("%s@%s", req.Dataset, req.Name)
	snap, err := a.zfs.GetSnapshot(fullSnapshotName)
	if err != nil {
		writeJSON(w, http.StatusCreated, map[string]string{"message": "snapshot created", "name": fullSnapshotName})
		return
	}

	writeJSON(w, http.StatusCreated, snap)
}

func (a *App) handleGetSnapshot(w http.ResponseWriter, r *http.Request) {
	name := pathParam(r, "/api/v1/snapshots/")
	if name == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "snapshot name required"})
		return
	}

	snap, err := a.zfs.GetSnapshot(name)
	if err != nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
		return
	}

	writeJSON(w, http.StatusOK, snap)
}

func (a *App) handleDeleteSnapshot(w http.ResponseWriter, r *http.Request) {
	name := pathParam(r, "/api/v1/snapshots/")
	if name == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "snapshot name required"})
		return
	}

	recursive := r.URL.Query().Get("recursive") == "true"

	if err := a.zfs.DestroySnapshot(name, recursive); err != nil {
		log.Printf("destroy snapshot error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "snapshot destroyed", "name": name})
}
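
// Illustrative restore call (a sketch): the snapshot name is embedded in the
// URL path and URL-decoded by the handler, e.g.
//
//	POST /api/v1/snapshots/tank%2Fdata@before-upgrade/restore
//	{"force": false}
//
// How "/" inside the dataset portion must be encoded depends on the router,
// which is not shown here.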
func (a *App) handleRestoreSnapshot(w http.ResponseWriter, r *http.Request) {
	// Extract snapshot name from path like /api/v1/snapshots/pool@snapshot/restore
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/snapshots/")
	path = strings.TrimSuffix(path, "/restore")

	// URL decode the snapshot name
	snapshotName, err := url.QueryUnescape(path)
	if err != nil {
		writeError(w, errors.ErrBadRequest("invalid snapshot name"))
		return
	}

	if snapshotName == "" {
		writeError(w, errors.ErrBadRequest("snapshot name required"))
		return
	}

	var req struct {
		Force bool `json:"force,omitempty"` // Force rollback (recursive for child datasets)
	}

	if r.Body != nil {
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			// If body is empty or invalid, use defaults
			req.Force = false
		}
	}

	// Validate snapshot exists
	_, err = a.zfs.GetSnapshot(snapshotName)
	if err != nil {
		writeError(w, errors.ErrNotFound(fmt.Sprintf("snapshot '%s' not found", snapshotName)))
		return
	}

	// Restore snapshot
	if err := a.zfs.RestoreSnapshot(snapshotName, req.Force); err != nil {
		log.Printf("restore snapshot error: %v", err)
		writeError(w, errors.ErrInternal("failed to restore snapshot").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{
		"message": "snapshot restored successfully",
		"name":    snapshotName,
	})
}

// Snapshot Policy Handlers
func (a *App) handleListSnapshotPolicies(w http.ResponseWriter, r *http.Request) {
	dataset := r.URL.Query().Get("dataset")
	var policies []models.SnapshotPolicy
	if dataset != "" {
		policies = a.snapshotPolicy.ListForDataset(dataset)
	} else {
		policies = a.snapshotPolicy.List()
	}
	// Ensure we always return an array, not null
	if policies == nil {
		policies = []models.SnapshotPolicy{}
	}
	writeJSON(w, http.StatusOK, policies)
}

func (a *App) handleCreateSnapshotPolicy(w http.ResponseWriter, r *http.Request) {
	var policy models.SnapshotPolicy

	if err := json.NewDecoder(r.Body).Decode(&policy); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	if policy.Dataset == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset is required"})
		return
	}

	a.snapshotPolicy.Set(&policy)
	writeJSON(w, http.StatusCreated, policy)
}

func (a *App) handleGetSnapshotPolicy(w http.ResponseWriter, r *http.Request) {
	dataset := pathParam(r, "/api/v1/snapshot-policies/")
	if dataset == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset name required"})
		return
	}

	policy, err := a.snapshotPolicy.Get(dataset)
	if err != nil {
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	if policy == nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": "policy not found"})
		return
	}

	writeJSON(w, http.StatusOK, policy)
}

func (a *App) handleUpdateSnapshotPolicy(w http.ResponseWriter, r *http.Request) {
	dataset := pathParam(r, "/api/v1/snapshot-policies/")
	if dataset == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset name required"})
		return
	}

	var policy models.SnapshotPolicy
	if err := json.NewDecoder(r.Body).Decode(&policy); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	// Ensure dataset matches URL parameter
	policy.Dataset = dataset

	a.snapshotPolicy.Set(&policy)
	writeJSON(w, http.StatusOK, policy)
}

func (a *App) handleDeleteSnapshotPolicy(w http.ResponseWriter, r *http.Request) {
	dataset := pathParam(r, "/api/v1/snapshot-policies/")
	if dataset == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset name required"})
		return
	}

	if err := a.snapshotPolicy.Delete(dataset); err != nil {
		log.Printf("delete snapshot policy error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "policy deleted", "dataset": dataset})
}

// SMB Share Handlers
func (a *App) handleListSMBShares(w http.ResponseWriter, r *http.Request) {
	// Sync shares from OS (smb.conf) to store
	// This ensures shares created before service restart are visible
	if err := a.syncSMBSharesFromOS(); err != nil {
		log.Printf("warning: failed to sync SMB shares from OS: %v", err)
		// Continue anyway - return what's in store
	}

	shares := a.smbStore.List()
	writeJSON(w, http.StatusOK, shares)
}
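
// Illustrative request body for creating an SMB share. If "path" is omitted,
// the dataset's mountpoint is used:
//
//	{
//	  "name": "projects",
//	  "dataset": "tank/projects",
//	  "description": "Team project files",
//	  "read_only": false,
//	  "guest_ok": false,
//	  "valid_users": ["alice", "bob"]
//	}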
func (a *App) handleCreateSMBShare(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Name        string   `json:"name"`
		Path        string   `json:"path"`
		Dataset     string   `json:"dataset"`
		Description string   `json:"description"`
		ReadOnly    bool     `json:"read_only"`
		GuestOK     bool     `json:"guest_ok"`
		ValidUsers  []string `json:"valid_users"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate share name
	if err := validation.ValidateShareName(req.Name); err != nil {
		writeError(w, errors.ErrValidation(err.Error()))
		return
	}

	// Validate dataset name
	if err := validation.ValidateZFSName(req.Dataset); err != nil {
		writeError(w, errors.ErrValidation(err.Error()))
		return
	}

	// Sanitize path if provided
	if req.Path != "" {
		req.Path = validation.SanitizePath(req.Path)
		if err := validation.ValidatePath(req.Path); err != nil {
			writeError(w, errors.ErrValidation(err.Error()))
			return
		}
	}

	// Validate dataset exists
	datasets, err := a.zfs.ListDatasets("")
	if err != nil {
		log.Printf("list datasets error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed to validate dataset"})
		return
	}

	datasetExists := false
	for _, ds := range datasets {
		if ds.Name == req.Dataset {
			datasetExists = true
			if req.Path == "" {
				req.Path = ds.Mountpoint
			}
			break
		}
	}

	if !datasetExists {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "dataset not found"})
		return
	}

	share, err := a.smbStore.Create(req.Name, req.Path, req.Dataset, req.Description, req.ReadOnly, req.GuestOK, req.ValidUsers)
	if err != nil {
		if err == storage.ErrSMBShareExists {
			writeError(w, errors.ErrConflict("share name already exists"))
			return
		}
		log.Printf("create SMB share error: %v", err)
		writeError(w, errors.ErrInternal("failed to create SMB share").WithDetails(err.Error()))
		return
	}

	// Apply configuration to Samba service (with graceful degradation)
	shares := a.smbStore.List()
	if err := a.smbService.ApplyConfiguration(shares); err != nil {
		// Log but don't fail the request - desired state is stored
		// Service configuration can be retried later
		if svcErr := a.handleServiceError("SMB", err); svcErr != nil {
			log.Printf("SMB service configuration failed (non-fatal): %v", err)
		}
	}

	writeJSON(w, http.StatusCreated, share)
}

func (a *App) handleGetSMBShare(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/shares/smb/")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "share id required"})
		return
	}

	share, err := a.smbStore.Get(id)
	if err != nil {
		if err == storage.ErrSMBShareNotFound {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
			return
		}
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	writeJSON(w, http.StatusOK, share)
}

func (a *App) handleUpdateSMBShare(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/shares/smb/")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "share id required"})
		return
	}

	var req struct {
		Description string   `json:"description"`
		ReadOnly    bool     `json:"read_only"`
		GuestOK     bool     `json:"guest_ok"`
		ValidUsers  []string `json:"valid_users"`
		Enabled     bool     `json:"enabled"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	if err := a.smbStore.Update(id, req.Description, req.ReadOnly, req.GuestOK, req.Enabled, req.ValidUsers); err != nil {
		if err == storage.ErrSMBShareNotFound {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
			return
		}
		log.Printf("update SMB share error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	share, _ := a.smbStore.Get(id)

	// Apply configuration to Samba service
	shares := a.smbStore.List()
	if err := a.smbService.ApplyConfiguration(shares); err != nil {
		log.Printf("apply SMB configuration error: %v", err)
	}

	writeJSON(w, http.StatusOK, share)
}

func (a *App) handleDeleteSMBShare(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/shares/smb/")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "share id required"})
		return
	}

	if err := a.smbStore.Delete(id); err != nil {
		if err == storage.ErrSMBShareNotFound {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
			return
		}
		log.Printf("delete SMB share error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "share deleted", "id": id})
}

// NFS Export Handlers
func (a *App) handleListNFSExports(w http.ResponseWriter, r *http.Request) {
	// Sync exports from OS (ZFS sharenfs) to store
	// This ensures exports created before service restart are visible
	if err := a.syncNFSExportsFromOS(); err != nil {
		log.Printf("warning: failed to sync NFS exports from OS: %v", err)
		// Continue anyway - return what's in store
	}

	exports := a.nfsStore.List()
	writeJSON(w, http.StatusOK, exports)
}
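
// Illustrative request body for creating an NFS export. "dataset" may be either
// a dataset name or its mountpoint path (a value starting with "/"); "clients"
// accepts CIDR-style entries and defaults to ["*"] when omitted:
//
//	{
//	  "dataset": "tank/shared",
//	  "clients": ["192.168.1.0/24"],
//	  "read_only": false,
//	  "root_squash": true
//	}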
func (a *App) handleCreateNFSExport(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Path       string   `json:"path"`
		Dataset    string   `json:"dataset"`
		Clients    []string `json:"clients"`
		ReadOnly   bool     `json:"read_only"`
		RootSquash bool     `json:"root_squash"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeError(w, errors.ErrValidation("invalid request body"))
		return
	}

	// Validate clients first
	for i, client := range req.Clients {
		if err := validation.ValidateCIDR(client); err != nil {
			writeError(w, errors.ErrValidation(fmt.Sprintf("client[%d]: %s", i, err.Error())))
			return
		}
	}

	// Validate and sanitize path if provided
	if req.Path != "" {
		req.Path = validation.SanitizePath(req.Path)
		if err := validation.ValidatePath(req.Path); err != nil {
			writeError(w, errors.ErrValidation(err.Error()))
			return
		}
	}

	// Get all datasets to validate and find dataset
	datasets, err := a.zfs.ListDatasets("")
	if err != nil {
		log.Printf("list datasets error: %v", err)
		writeError(w, errors.ErrInternal("failed to validate dataset").WithDetails(err.Error()))
		return
	}

	// Check if req.Dataset is a filesystem path (starts with /) or a dataset name
	var datasetName string
	var datasetMountpoint string
	datasetExists := false

	if strings.HasPrefix(req.Dataset, "/") {
		// Input is a filesystem path (mountpoint), find dataset by mountpoint
		for _, ds := range datasets {
			if ds.Mountpoint == req.Dataset {
				datasetExists = true
				datasetName = ds.Name
				datasetMountpoint = ds.Mountpoint
				break
			}
		}
		if !datasetExists {
			writeError(w, errors.ErrNotFound(fmt.Sprintf("dataset with mountpoint '%s' not found", req.Dataset)))
			return
		}
	} else {
		// Input is a dataset name, validate it first
		if err := validation.ValidateZFSName(req.Dataset); err != nil {
			writeError(w, errors.ErrValidation(err.Error()))
			return
		}
		// Find dataset by name
		for _, ds := range datasets {
			if ds.Name == req.Dataset {
				datasetExists = true
				datasetName = ds.Name
				datasetMountpoint = ds.Mountpoint
				break
			}
		}
		if !datasetExists {
			writeError(w, errors.ErrNotFound(fmt.Sprintf("dataset '%s' not found", req.Dataset)))
			return
		}
	}

	// Set the correct dataset name and path
	req.Dataset = datasetName
	if req.Path == "" {
		req.Path = datasetMountpoint
	}

	// Default clients to "*" (all) if not specified
	if len(req.Clients) == 0 {
		req.Clients = []string{"*"}
	}

	export, err := a.nfsStore.Create(req.Path, req.Dataset, req.Clients, req.ReadOnly, req.RootSquash)
	if err != nil {
		if err == storage.ErrNFSExportExists {
			writeError(w, errors.ErrConflict(fmt.Sprintf("export for path '%s' already exists", req.Path)))
			return
		}
		log.Printf("create NFS export error: %v", err)
		writeError(w, errors.ErrInternal("failed to create NFS export").WithDetails(err.Error()))
		return
	}

	// Apply configuration to NFS service
	exports := a.nfsStore.List()
	if err := a.nfsService.ApplyConfiguration(exports); err != nil {
		log.Printf("apply NFS configuration error: %v", err)
		// Export was created in store but failed to apply to system
		// Try to remove from store to maintain consistency
		if delErr := a.nfsStore.Delete(export.ID); delErr != nil {
			log.Printf("warning: failed to rollback export creation after ApplyConfiguration failure: %v", delErr)
		}
		writeError(w, errors.ErrInternal("failed to apply NFS configuration").WithDetails(fmt.Sprintf("Export was created but failed to apply to NFS service: %v", err)))
		return
	}

	// Double-check export exists
	if _, getErr := a.nfsStore.Get(export.ID); getErr != nil {
		log.Printf("warning: export %s was created but not found in store: %v", export.ID, getErr)
		writeError(w, errors.ErrInternal("failed to verify export creation").WithDetails("Export may not have been created properly"))
		return
	}

	writeJSON(w, http.StatusCreated, export)
}

func (a *App) handleGetNFSExport(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/exports/nfs/")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "export id required"})
		return
	}

	export, err := a.nfsStore.Get(id)
	if err != nil {
		if err == storage.ErrNFSExportNotFound {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
			return
		}
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	writeJSON(w, http.StatusOK, export)
}

func (a *App) handleUpdateNFSExport(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/exports/nfs/")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "export id required"})
		return
	}

	var req struct {
		Clients    []string `json:"clients"`
		ReadOnly   bool     `json:"read_only"`
		RootSquash bool     `json:"root_squash"`
		Enabled    bool     `json:"enabled"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	if err := a.nfsStore.Update(id, req.Clients, req.ReadOnly, req.RootSquash, req.Enabled); err != nil {
		if err == storage.ErrNFSExportNotFound {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
			return
		}
		log.Printf("update NFS export error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	export, _ := a.nfsStore.Get(id)

	// Apply configuration to NFS service
	exports := a.nfsStore.List()
	if err := a.nfsService.ApplyConfiguration(exports); err != nil {
		log.Printf("apply NFS configuration error: %v", err)
	}

	writeJSON(w, http.StatusOK, export)
}

func (a *App) handleDeleteNFSExport(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/exports/nfs/")
	if id == "" {
		writeError(w, errors.ErrValidation("export id required"))
		return
	}

	if err := a.nfsStore.Delete(id); err != nil {
		if err == storage.ErrNFSExportNotFound {
			writeError(w, errors.ErrNotFound(fmt.Sprintf("NFS export '%s' not found", id)))
			return
		}
		log.Printf("delete NFS export error: %v", err)
		writeError(w, errors.ErrInternal("failed to delete NFS export").WithDetails(err.Error()))
		return
	}

	// Apply configuration to NFS service
	exports := a.nfsStore.List()
	if err := a.nfsService.ApplyConfiguration(exports); err != nil {
		log.Printf("apply NFS configuration error: %v", err)
		// Export was deleted from store but failed to apply to system
		// Log warning but don't fail the request since deletion succeeded
		log.Printf("warning: NFS export '%s' was deleted from store but failed to apply configuration: %v", id, err)
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "export deleted", "id": id})
}

// iSCSI Handlers
func (a *App) handleListISCSITargets(w http.ResponseWriter, r *http.Request) {
	// Sync targets from OS before listing
	if err := a.syncISCSITargetsFromOS(); err != nil {
		log.Printf("warning: failed to sync iSCSI targets from OS: %v", err)
	}

	targets := a.iscsiStore.List()
	writeJSON(w, http.StatusOK, targets)
}
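
// Illustrative request body for creating an iSCSI target. The IQN below follows
// the usual "iqn.YYYY-MM.reverse-domain:name" convention; the exact rules are
// enforced by validation.ValidateIQN, whose implementation is not shown here:
//
//	{
//	  "iqn": "iqn.2025-01.id.data-center.avt:target01",
//	  "type": "disk",
//	  "initiators": ["iqn.1993-08.org.debian:01:abcdef123456"]
//	}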
func (a *App) handleCreateISCSITarget(w http.ResponseWriter, r *http.Request) {
	var req struct {
		IQN        string   `json:"iqn"`
		Type       string   `json:"type"` // "disk" or "tape" (default: "disk")
		Initiators []string `json:"initiators"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		log.Printf("create iSCSI target: invalid request body: %v", err)
		writeError(w, errors.ErrBadRequest("invalid request body").WithDetails(err.Error()))
		return
	}

	// Validate and set target type
	targetType := models.ISCSITargetTypeDisk // Default to disk mode
	if req.Type != "" {
		if req.Type != "disk" && req.Type != "tape" {
			writeError(w, errors.ErrValidation("invalid target type: must be 'disk' or 'tape'"))
			return
		}
		targetType = models.ISCSITargetType(req.Type)
	}

	log.Printf("create iSCSI target: IQN=%s, Type=%s, Initiators=%v", req.IQN, targetType, req.Initiators)

	// Validate IQN format
	if err := validation.ValidateIQN(req.IQN); err != nil {
		log.Printf("IQN validation error: %v (IQN: %s)", err, req.IQN)
		writeError(w, errors.ErrValidation(err.Error()))
		return
	}

	target, err := a.iscsiStore.CreateWithType(req.IQN, targetType, req.Initiators)
	if err != nil {
		if err == storage.ErrISCSITargetExists {
			log.Printf("create iSCSI target: target already exists (IQN: %s)", req.IQN)
			writeError(w, errors.ErrConflict("target with this IQN already exists"))
			return
		}
		log.Printf("create iSCSI target error: %v", err)
		writeError(w, errors.ErrInternal("failed to create iSCSI target").WithDetails(err.Error()))
		return
	}

	log.Printf("create iSCSI target: target created in store (ID: %s, IQN: %s)", target.ID, target.IQN)

	// Apply configuration to iSCSI service
	targets := a.iscsiStore.List()
	if err := a.iscsiService.ApplyConfiguration(targets); err != nil {
		log.Printf("create iSCSI target: apply configuration error: %v", err)
		// Don't fail the request if configuration fails - target is already in store
		// User can retry configuration later
		writeJSON(w, http.StatusCreated, map[string]interface{}{
			"target":  target,
			"warning": "target created but configuration may have failed. check logs.",
		})
		return
	}

	log.Printf("create iSCSI target: success (ID: %s, IQN: %s)", target.ID, target.IQN)
	writeJSON(w, http.StatusCreated, target)
}

func (a *App) handleGetISCSITarget(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/iscsi/targets/")
	if id == "" {
		writeError(w, errors.ErrBadRequest("target id required"))
		return
	}

	target, err := a.iscsiStore.Get(id)
	if err != nil {
		if err == storage.ErrISCSITargetNotFound {
			writeError(w, errors.ErrNotFound("iSCSI target"))
			return
		}
		writeError(w, errors.ErrInternal("failed to get iSCSI target").WithDetails(err.Error()))
		return
	}

	writeJSON(w, http.StatusOK, target)
}

func (a *App) handleGetISCSIConnectionInstructions(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/iscsi/targets/")
	if id == "" {
		writeError(w, errors.ErrBadRequest("target id required"))
		return
	}

	target, err := a.iscsiStore.Get(id)
	if err != nil {
		if err == storage.ErrISCSITargetNotFound {
			writeError(w, errors.ErrNotFound("iSCSI target"))
			return
		}
		writeError(w, errors.ErrInternal("failed to get iSCSI target").WithDetails(err.Error()))
		return
	}

	// Get portal IP (with fallback)
	portalIP, err := a.iscsiService.GetPortalIP()
	if err != nil {
		log.Printf("get portal IP error: %v", err)
		portalIP = "127.0.0.1" // Fallback
	}

	// Get portal port from query parameter or use default
	portalPort := 3260
	if portStr := r.URL.Query().Get("port"); portStr != "" {
		if port, err := strconv.Atoi(portStr); err == nil && port > 0 && port < 65536 {
			portalPort = port
		}
	}

	// Generate connection instructions
	instructions := a.iscsiService.GetConnectionInstructions(*target, portalIP, portalPort)

	writeJSON(w, http.StatusOK, instructions)
}

func (a *App) handleUpdateISCSITarget(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/iscsi/targets/")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "target id required"})
		return
	}

	var req struct {
		Initiators []string `json:"initiators"`
		Enabled    bool     `json:"enabled"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	if err := a.iscsiStore.Update(id, req.Initiators, req.Enabled); err != nil {
		if err == storage.ErrISCSITargetNotFound {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
			return
		}
		log.Printf("update iSCSI target error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	target, _ := a.iscsiStore.Get(id)

	// Apply configuration to iSCSI service
	targets := a.iscsiStore.List()
	if err := a.iscsiService.ApplyConfiguration(targets); err != nil {
		log.Printf("apply iSCSI configuration error: %v", err)
	}

	writeJSON(w, http.StatusOK, target)
}

func (a *App) handleDeleteISCSITarget(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/iscsi/targets/")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "target id required"})
		return
	}

	if err := a.iscsiStore.Delete(id); err != nil {
		if err == storage.ErrISCSITargetNotFound {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
			return
		}
		log.Printf("delete iSCSI target error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	// Apply configuration to iSCSI service
	targets := a.iscsiStore.List()
	if err := a.iscsiService.ApplyConfiguration(targets); err != nil {
		log.Printf("apply iSCSI configuration error: %v", err)
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "target deleted", "id": id})
}
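
// Illustrative request bodies for mapping a LUN to a target. Either a ZVOL name
// or a device path must be given; the backstore type is optional and
// auto-detected when omitted:
//
//	{"zvol": "tank/vol1"}
//	{"device": "/dev/st0", "backstore": "pscsi"}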
func (a *App) handleAddLUN(w http.ResponseWriter, r *http.Request) {
	// Extract target ID from path like /api/v1/iscsi/targets/{id}/luns
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/iscsi/targets/")
	parts := strings.Split(path, "/")
	if len(parts) == 0 || parts[0] == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "target id required"})
		return
	}
	id := parts[0]

	var req struct {
		ZVOL          string `json:"zvol"`           // ZVOL name (for block backstore)
		Device        string `json:"device"`         // Device path (e.g., /dev/st0 for tape)
		Backstore     string `json:"backstore"`      // Backstore type: "block", "pscsi", "fileio" (optional, auto-detected)
		BackstoreName string `json:"backstore_name"` // Custom backstore name (optional)
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate: must have either ZVOL or Device
	if req.ZVOL == "" && req.Device == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "either zvol or device is required"})
		return
	}

	var zvolSize uint64
	var zvolName string

	if req.ZVOL != "" {
		// Validate ZVOL exists
		zvols, err := a.zfs.ListZVOLs("")
		if err != nil {
			log.Printf("list zvols error: %v", err)
			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed to validate zvol"})
			return
		}

		zvolExists := false
		for _, zvol := range zvols {
			if zvol.Name == req.ZVOL {
				zvolExists = true
				zvolSize = zvol.Size
				zvolName = zvol.Name
				break
			}
		}

		if !zvolExists {
			writeJSON(w, http.StatusBadRequest, map[string]string{"error": "zvol not found"})
			return
		}
	} else if req.Device != "" {
		// Validate device exists
		if _, err := os.Stat(req.Device); err != nil {
			if os.IsNotExist(err) {
				writeJSON(w, http.StatusBadRequest, map[string]string{"error": fmt.Sprintf("device not found: %s", req.Device)})
				return
			}
			log.Printf("stat device error: %v", err)
			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed to validate device"})
			return
		}
		// For tape devices, size is typically 0 or unknown
		zvolSize = 0
	}

	// Use updated AddLUN signature that supports device and backstore
	lun, err := a.iscsiStore.AddLUNWithDevice(id, zvolName, req.Device, zvolSize, req.Backstore, req.BackstoreName)
	if err != nil {
		if err == storage.ErrISCSITargetNotFound {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": "target not found"})
			return
		}
		if err == storage.ErrLUNExists {
			writeJSON(w, http.StatusConflict, map[string]string{"error": "zvol already mapped to this target"})
			return
		}
		log.Printf("add LUN error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	// Apply configuration to iSCSI service
	targets := a.iscsiStore.List()
	if err := a.iscsiService.ApplyConfiguration(targets); err != nil {
		log.Printf("apply iSCSI configuration error: %v", err)
	}

	writeJSON(w, http.StatusCreated, lun)
}

func (a *App) handleRemoveLUN(w http.ResponseWriter, r *http.Request) {
	// Extract target ID from path like /api/v1/iscsi/targets/{id}/luns/remove
	path := strings.TrimPrefix(r.URL.Path, "/api/v1/iscsi/targets/")
	parts := strings.Split(path, "/")
	if len(parts) == 0 || parts[0] == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "target id required"})
		return
	}
	id := parts[0]

	var req struct {
		LUNID int `json:"lun_id"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	if err := a.iscsiStore.RemoveLUN(id, req.LUNID); err != nil {
		if err == storage.ErrISCSITargetNotFound {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": "target not found"})
			return
		}
		if err == storage.ErrLUNNotFound {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": "LUN not found"})
			return
		}
		log.Printf("remove LUN error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	// Apply configuration to iSCSI service
	targets := a.iscsiStore.List()
	if err := a.iscsiService.ApplyConfiguration(targets); err != nil {
		log.Printf("apply iSCSI configuration error: %v", err)
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "LUN removed", "target_id": id, "lun_id": strconv.Itoa(req.LUNID)})
}

// Job Handlers
func (a *App) handleListJobs(w http.ResponseWriter, r *http.Request) {
	status := models.JobStatus(r.URL.Query().Get("status"))
	jobs := a.jobManager.List(status)
	writeJSON(w, http.StatusOK, jobs)
}

func (a *App) handleGetJob(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/jobs/")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "job id required"})
		return
	}

	job, err := a.jobManager.Get(id)
	if err != nil {
		writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
		return
	}

	writeJSON(w, http.StatusOK, job)
}

func (a *App) handleCancelJob(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/jobs/")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "job id required"})
		return
	}

	if err := a.jobManager.Cancel(id); err != nil {
		log.Printf("cancel job error: %v", err)
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": err.Error()})
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "job cancelled", "id": id})
}

// Auth Handlers
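
// Illustrative login exchange: the handler accepts a username/password pair and
// returns a JWT together with the user record and a 24-hour expiry. The route
// shown is an assumption; registration is not part of this file:
//
//	POST /api/v1/auth/login
//	{"username": "admin", "password": "secret"}
//
//	=> {"token": "<jwt>", "user": {...}, "expires_in": 86400}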
func (a *App) handleLogin(w http.ResponseWriter, r *http.Request) {
	var req struct {
		Username string `json:"username"`
		Password string `json:"password"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	// Validate username (login is less strict - just check not empty)
	if req.Username == "" {
		writeError(w, errors.ErrValidation("username is required"))
		return
	}

	if req.Password == "" {
		writeError(w, errors.ErrValidation("password is required"))
		return
	}

	user, err := a.userStore.Authenticate(req.Username, req.Password)
	if err != nil {
		writeJSON(w, http.StatusUnauthorized, map[string]string{"error": "invalid credentials"})
		return
	}

	token, err := a.authService.GenerateToken(user.ID, string(user.Role))
	if err != nil {
		log.Printf("generate token error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed to generate token"})
		return
	}

	writeJSON(w, http.StatusOK, map[string]interface{}{
		"token":      token,
		"user":       user,
		"expires_in": 86400, // 24 hours in seconds
	})
}

func (a *App) handleLogout(w http.ResponseWriter, r *http.Request) {
	// JWT is stateless, so logout is just client-side token removal
	// In a stateful system, you'd invalidate the token here
	writeJSON(w, http.StatusOK, map[string]string{"message": "logged out"})
}

// User Handlers
func (a *App) handleListUsers(w http.ResponseWriter, r *http.Request) {
	// Only administrators can list users
	users := a.userStore.List()
	writeJSON(w, http.StatusOK, users)
}
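
// Illustrative request body for creating a user. The role defaults to the
// viewer role when omitted and is normalized to lowercase; accepted values map
// to models.RoleAdministrator, models.RoleOperator, and models.RoleViewer:
//
//	{
//	  "username": "operator1",
//	  "email": "operator1@example.com",
//	  "password": "a-strong-password",
//	  "role": "operator"
//	}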
|
|
|
|
func (a *App) handleCreateUser(w http.ResponseWriter, r *http.Request) {
|
|
var req struct {
|
|
Username string `json:"username"`
|
|
Email string `json:"email"`
|
|
Password string `json:"password"`
|
|
Role models.Role `json:"role"`
|
|
}
|
|
|
|
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
|
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
|
|
return
|
|
}
|
|
|
|
// Validate username
|
|
if err := validation.ValidateUsername(req.Username); err != nil {
|
|
writeError(w, errors.ErrValidation(err.Error()))
|
|
return
|
|
}
|
|
|
|
// Validate password
|
|
if err := validation.ValidatePassword(req.Password); err != nil {
|
|
writeError(w, errors.ErrValidation(err.Error()))
|
|
return
|
|
}
|
|
|
|
// Validate email if provided
|
|
if req.Email != "" {
|
|
if err := validation.ValidateEmail(req.Email); err != nil {
|
|
writeError(w, errors.ErrValidation(err.Error()))
|
|
return
|
|
}
|
|
}
|
|
|
|
if req.Role == "" {
|
|
req.Role = models.RoleViewer // Default role
|
|
}
|
|
|
|
// Normalize role to lowercase for comparison
|
|
roleStr := strings.ToLower(string(req.Role))
|
|
req.Role = models.Role(roleStr)
|
|
|
|
// Validate role
|
|
if req.Role != models.RoleAdministrator && req.Role != models.RoleOperator && req.Role != models.RoleViewer {
|
|
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid role"})
|
|
return
|
|
}
|
|
|
|
user, err := a.userStore.Create(req.Username, req.Email, req.Password, req.Role)
|
|
if err != nil {
|
|
if err == auth.ErrUserExists {
|
|
writeJSON(w, http.StatusConflict, map[string]string{"error": "username already exists"})
|
|
return
|
|
}
|
|
log.Printf("create user error: %v", err)
|
|
writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
|
return
|
|
}
|
|
|
|
writeJSON(w, http.StatusCreated, user)
|
|
}
|
|
|
|
func (a *App) handleGetUser(w http.ResponseWriter, r *http.Request) {
|
|
id := pathParam(r, "/api/v1/users/")
|
|
if id == "" {
|
|
writeJSON(w, http.StatusBadRequest, map[string]string{"error": "user id required"})
|
|
return
|
|
}
|
|
|
|
user, err := a.userStore.GetByID(id)
|
|
if err != nil {
|
|
writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
|
|
return
|
|
}
|
|
|
|
writeJSON(w, http.StatusOK, user)
|
|
}
|
|
|
|
func (a *App) handleUpdateUser(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/users/")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "user id required"})
		return
	}

	var req struct {
		Email  string      `json:"email"`
		Role   models.Role `json:"role"`
		Active bool        `json:"active"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
		return
	}

	// Normalize role to lowercase if provided
	if req.Role != "" {
		roleStr := strings.ToLower(string(req.Role))
		req.Role = models.Role(roleStr)
	}

	// Validate role if provided
	if req.Role != "" && req.Role != models.RoleAdministrator && req.Role != models.RoleOperator && req.Role != models.RoleViewer {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid role"})
		return
	}

	// Use existing role if not provided
	if req.Role == "" {
		existingUser, err := a.userStore.GetByID(id)
		if err != nil {
			writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
			return
		}
		req.Role = existingUser.Role
	}

	if err := a.userStore.Update(id, req.Email, req.Role, req.Active); err != nil {
		log.Printf("update user error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	user, _ := a.userStore.GetByID(id)
	writeJSON(w, http.StatusOK, user)
}

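// handleDeleteUser removes a user account. Deleting the currently
// authenticated account is rejected.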
func (a *App) handleDeleteUser(w http.ResponseWriter, r *http.Request) {
	id := pathParam(r, "/api/v1/users/")
	if id == "" {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "user id required"})
		return
	}

	// Prevent deleting yourself
	currentUser, ok := getUserFromContext(r)
	if ok && currentUser.ID == id {
		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "cannot delete your own account"})
		return
	}

	if err := a.userStore.Delete(id); err != nil {
		log.Printf("delete user error: %v", err)
		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
		return
	}

	writeJSON(w, http.StatusOK, map[string]string{"message": "user deleted", "id": id})
}

// Audit Log Handlers
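
// handleListAuditLogs returns audit log entries, optionally filtered by the
// actor, action, and resource query parameters. The limit parameter caps the
// number of entries returned and defaults to 100.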
func (a *App) handleListAuditLogs(w http.ResponseWriter, r *http.Request) {
	// Get query parameters
	actor := r.URL.Query().Get("actor")
	action := r.URL.Query().Get("action")
	resource := r.URL.Query().Get("resource")
	limitStr := r.URL.Query().Get("limit")

	limit := 0
	if limitStr != "" {
		if l, err := strconv.Atoi(limitStr); err == nil && l > 0 {
			limit = l
		}
	}

	// Default limit to 100 if not specified
	if limit == 0 {
		limit = 100
	}

	logs := a.auditStore.List(actor, action, resource, limit)
	writeJSON(w, http.StatusOK, logs)
}

// syncNFSExportsFromOS syncs NFS exports from ZFS sharenfs properties to the store
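// Exports that are already tracked in the store (matched by dataset name or
// mount path) are left untouched; only newly discovered shares are added.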
func (a *App) syncNFSExportsFromOS() error {
	// Get all datasets
	datasets, err := a.zfs.ListDatasets("")
	if err != nil {
		return fmt.Errorf("list datasets: %w", err)
	}

	// Find datasets with sharenfs property set
	for _, ds := range datasets {
		// Get sharenfs property
		zfsPath := "zfs"
		if path, err := exec.LookPath("zfs"); err == nil {
			zfsPath = path
		}

		cmd := exec.Command(zfsPath, "get", "-H", "-o", "value", "sharenfs", ds.Name)
		output, err := cmd.Output()
		if err != nil {
			continue // Skip if can't get property
		}

		sharenfsValue := strings.TrimSpace(string(output))
		if sharenfsValue == "off" || sharenfsValue == "-" || sharenfsValue == "" {
			continue // Not shared
		}

		// Check if export already exists in store
		existingExports := a.nfsStore.List()
		exists := false
		for _, exp := range existingExports {
			if exp.Dataset == ds.Name || exp.Path == ds.Mountpoint {
				exists = true
				break
			}
		}

		if exists {
			continue // Already in store
		}

		// Parse sharenfs value to extract configuration
		// Format examples:
		// - "rw=*,no_root_squash"
		// - "ro=*,root_squash"
		// - "rw=10.0.0.0/8,ro=192.168.1.0/24,root_squash"
		readOnly := false
		rootSquash := true       // Default
		clients := []string{"*"} // Default to all

		// Parse sharenfs value
		parts := strings.Split(sharenfsValue, ",")
		clientSpecs := []string{}
		for _, part := range parts {
			part = strings.TrimSpace(part)
			if part == "root_squash" {
				rootSquash = true
			} else if part == "no_root_squash" {
				rootSquash = false
			} else if strings.HasPrefix(part, "ro=") {
				client := strings.TrimPrefix(part, "ro=")
				if client == "*" {
					readOnly = true
					clients = []string{"*"}
				} else {
					clientSpecs = append(clientSpecs, client)
					readOnly = true // At least one client is read-only
				}
			} else if strings.HasPrefix(part, "rw=") {
				client := strings.TrimPrefix(part, "rw=")
				if client == "*" {
					readOnly = false
					clients = []string{"*"}
				} else {
					clientSpecs = append(clientSpecs, client)
				}
			} else if part == "ro" {
				readOnly = true
				clients = []string{"*"}
			} else if part == "rw" {
				readOnly = false
				clients = []string{"*"}
			}
		}

		// If we found specific clients, use them
		if len(clientSpecs) > 0 {
			clients = clientSpecs
		}

		// Use mountpoint as path, or default path
		path := ds.Mountpoint
		if path == "" || path == "none" {
			// Generate default path
			parts := strings.Split(ds.Name, "/")
			datasetName := parts[len(parts)-1]
			path = "/storage/datasets/" + datasetName
		}

		// Create export in store (ignore error if already exists)
		_, err = a.nfsStore.Create(path, ds.Name, clients, readOnly, rootSquash)
		if err != nil && err != storage.ErrNFSExportExists {
			log.Printf("warning: failed to sync export for dataset %s: %v", ds.Name, err)
		}
	}

	return nil
}

// syncSMBSharesFromOS syncs SMB shares from /etc/samba/smb.conf to the store
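//
// An illustrative share section this parser recognizes (note the single spaces
// around "=", which the prefix matching below depends on):
//
//	[backups]
//	path = /storage/datasets/backups
//	comment = Nightly backups
//	read only = no
//	guest ok = no
//	valid users = alice, bob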
func (a *App) syncSMBSharesFromOS() error {
	configPath := "/etc/samba/smb.conf"
	data, err := os.ReadFile(configPath)
	if err != nil {
		// If we can't read smb.conf, that's okay - it might not exist yet
		return nil
	}

	lines := strings.Split(string(data), "\n")
	currentShare := ""
	inShareSection := false
	sharePath := ""
	shareReadOnly := false
	shareGuestOK := false
	shareDescription := ""
	shareValidUsers := []string{}

	for _, line := range lines {
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") {
			continue
		}

		// Check if this is a share section
		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
			// Save previous share if exists
			if inShareSection && currentShare != "" && sharePath != "" {
				// Try to find corresponding dataset
				datasets, err := a.zfs.ListDatasets("")
				var dataset string
				if err == nil {
					for _, ds := range datasets {
						if ds.Mountpoint == sharePath {
							dataset = ds.Name
							break
						}
					}
				}

				// Check if share already exists
				existingShares := a.smbStore.List()
				exists := false
				for _, share := range existingShares {
					if share.Name == currentShare || share.Path == sharePath {
						exists = true
						break
					}
				}

				if !exists {
					_, err = a.smbStore.Create(currentShare, sharePath, dataset, shareDescription, shareReadOnly, shareGuestOK, shareValidUsers)
					if err != nil && err != storage.ErrSMBShareExists {
						log.Printf("warning: failed to sync SMB share %s: %v", currentShare, err)
					}
				}
			}

			// Start new share section
			shareName := strings.Trim(line, "[]")
			if shareName != "global" && shareName != "printers" && shareName != "print$" {
				currentShare = shareName
				inShareSection = true
				sharePath = ""
				shareReadOnly = false
				shareGuestOK = false
				shareDescription = ""
				shareValidUsers = []string{}
			} else {
				inShareSection = false
				currentShare = ""
			}
			continue
		}

		// Parse share properties
		if inShareSection && currentShare != "" {
			if strings.HasPrefix(line, "path = ") {
				sharePath = strings.TrimSpace(strings.TrimPrefix(line, "path = "))
			} else if strings.HasPrefix(line, "read only = ") {
				value := strings.TrimSpace(strings.TrimPrefix(line, "read only = "))
				shareReadOnly = (value == "yes" || value == "true")
			} else if strings.HasPrefix(line, "guest ok = ") {
				value := strings.TrimSpace(strings.TrimPrefix(line, "guest ok = "))
				shareGuestOK = (value == "yes" || value == "true")
			} else if strings.HasPrefix(line, "comment = ") {
				shareDescription = strings.TrimSpace(strings.TrimPrefix(line, "comment = "))
			} else if strings.HasPrefix(line, "valid users = ") {
				usersStr := strings.TrimSpace(strings.TrimPrefix(line, "valid users = "))
				shareValidUsers = strings.Split(usersStr, ",")
				for i := range shareValidUsers {
					shareValidUsers[i] = strings.TrimSpace(shareValidUsers[i])
				}
			}
		}
	}

	// Save last share if exists
	if inShareSection && currentShare != "" && sharePath != "" {
		datasets, err := a.zfs.ListDatasets("")
		var dataset string
		if err == nil {
			for _, ds := range datasets {
				if ds.Mountpoint == sharePath {
					dataset = ds.Name
					break
				}
			}
		}

		existingShares := a.smbStore.List()
		exists := false
		for _, share := range existingShares {
			if share.Name == currentShare || share.Path == sharePath {
				exists = true
				break
			}
		}

		if !exists {
			_, err = a.smbStore.Create(currentShare, sharePath, dataset, shareDescription, shareReadOnly, shareGuestOK, shareValidUsers)
			if err != nil && err != storage.ErrSMBShareExists {
				log.Printf("warning: failed to sync SMB share %s: %v", currentShare, err)
			}
		}
	}

	return nil
}

// syncISCSITargetsFromOS syncs iSCSI targets from targetcli to the store
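// Targets are discovered by parsing the tree printed by "targetcli /iscsi ls";
// a line such as "o- iqn.2025-12.com.atlas:target-1 ..." identifies a target.
// Targets already present in the store are skipped.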
func (a *App) syncISCSITargetsFromOS() error {
	log.Printf("debug: starting syncISCSITargetsFromOS")
	// Get list of targets from targetcli
	// Set TARGETCLI_HOME and TARGETCLI_LOCK_DIR to writable directories
	// Create the directories first if they don't exist
	os.MkdirAll("/tmp/.targetcli", 0755)
	os.MkdirAll("/tmp/targetcli-run", 0755)
	// Service runs as root, no need for sudo
	cmd := exec.Command("sh", "-c", "TARGETCLI_HOME=/tmp/.targetcli TARGETCLI_LOCK_DIR=/tmp/targetcli-run targetcli /iscsi ls")
	output, err := cmd.CombinedOutput()
	if err != nil {
		// Log the error but don't fail - targetcli might not be configured
		log.Printf("warning: failed to list iSCSI targets from targetcli: %v (output: %s)", err, string(output))
		return nil
	}

	log.Printf("debug: targetcli output: %s", string(output))
	lines := strings.Split(string(output), "\n")
	var currentIQN string

	for _, line := range lines {
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}

		// Check if this is a target line (starts with "o- iqn.")
		if strings.HasPrefix(line, "o- iqn.") {
			log.Printf("debug: found target line: %s", line)
			// Extract IQN from line like "o- iqn.2025-12.com.atlas:target-1"
			parts := strings.Fields(line)
			if len(parts) >= 2 {
				currentIQN = parts[1]

				// Check if target already exists in store
				existingTargets := a.iscsiStore.List()
				exists := false
				for _, t := range existingTargets {
					if t.IQN == currentIQN {
						exists = true
						break
					}
				}

				if !exists {
					// Try to determine target type from IQN
					targetType := models.ISCSITargetTypeDisk // Default to disk mode
					if strings.Contains(strings.ToLower(currentIQN), "tape") {
						targetType = models.ISCSITargetTypeTape
					}

					// Create target in store
					target, err := a.iscsiStore.CreateWithType(currentIQN, targetType, []string{})
					if err != nil && err != storage.ErrISCSITargetExists {
						log.Printf("warning: failed to sync iSCSI target %s: %v", currentIQN, err)
					} else if err == nil {
						log.Printf("synced iSCSI target from OS: %s (type: %s)", currentIQN, targetType)

						// Now try to sync LUNs for this target
						if err := a.syncLUNsFromOS(currentIQN, target.ID, targetType); err != nil {
							log.Printf("warning: failed to sync LUNs for target %s: %v", currentIQN, err)
						}
					}
				}
			}
		}
	}

	return nil
}

// syncLUNsFromOS syncs LUNs for a specific target from targetcli
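// Each "o- lunN ... [backstore (device) ...]" line is parsed for the backstore
// path and backing device; when the device lives under /dev/zvol/ the ZVOL name
// is also extracted so its size can be looked up. The parsed LUN number is used
// to skip LUNs the store already tracks.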
func (a *App) syncLUNsFromOS(iqn, targetID string, targetType models.ISCSITargetType) error {
	// Get LUNs for this target
	// Service runs as root, no need for sudo
	cmd := exec.Command("sh", "-c", "TARGETCLI_HOME=/tmp/.targetcli TARGETCLI_LOCK_DIR=/tmp/targetcli-run targetcli /iscsi/"+iqn+"/tpg1/luns ls")
	output, err := cmd.CombinedOutput()
	if err != nil {
		// No LUNs or can't read - that's okay, log for debugging
		log.Printf("debug: failed to list LUNs for target %s: %v (output: %s)", iqn, err, string(output))
		return nil
	}

	lines := strings.Split(string(output), "\n")
	for _, line := range lines {
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, "o- lun") {
			// Parse LUN line like "o- lun0 ....... [block/pool-test-02-vol01 (/dev/zvol/pool-test-02/vol01) (default_tg_pt_gp)]"
			parts := strings.Fields(line)
			if len(parts) >= 2 {
				// Extract LUN ID from "lun0"
				lunIDStr := strings.TrimPrefix(parts[1], "lun")
				lunID, err := strconv.Atoi(lunIDStr)
				if err != nil {
					continue
				}

				// Extract backstore path and device from the line
				var backstorePath string
				var devicePath string
				var zvolName string

				// Find the part with brackets - might span multiple parts
				fullLine := strings.Join(parts, " ")
				start := strings.Index(fullLine, "[")
				end := strings.LastIndex(fullLine, "]")
				if start >= 0 && end > start {
					content := fullLine[start+1 : end]
					// Parse content like "block/pool-test-02-vol01 (/dev/zvol/pool-test-02/vol01)"
					if strings.Contains(content, "(") {
						// Has device path
						parts2 := strings.Split(content, "(")
						if len(parts2) >= 2 {
							backstorePath = strings.TrimSpace(parts2[0])
							devicePath = strings.Trim(strings.TrimSpace(parts2[1]), "()")

							// If device is a zvol, extract ZVOL name
							if strings.HasPrefix(devicePath, "/dev/zvol/") {
								zvolName = strings.TrimPrefix(devicePath, "/dev/zvol/")
							}
						}
					} else {
						backstorePath = content
					}
				}

				// Check if LUN already exists
				target, err := a.iscsiStore.Get(targetID)
				if err != nil {
					continue
				}

				lunExists := false
				for _, lun := range target.LUNs {
					if lun.ID == lunID {
						lunExists = true
						break
					}
				}

				if !lunExists {
					// Determine backstore type
					backstoreType := "block"
					if strings.HasPrefix(backstorePath, "pscsi/") {
						backstoreType = "pscsi"
					} else if strings.HasPrefix(backstorePath, "fileio/") {
						backstoreType = "fileio"
					}

					// Get size if it's a ZVOL
					var size uint64
					if zvolName != "" {
						zvols, err := a.zfs.ListZVOLs("")
						if err == nil {
							for _, zvol := range zvols {
								if zvol.Name == zvolName {
									size = zvol.Size
									break
								}
							}
						}
					}

					// Add LUN to store
					if targetType == models.ISCSITargetTypeTape && devicePath != "" {
						// Tape mode: use device
						_, err := a.iscsiStore.AddLUNWithDevice(targetID, "", devicePath, size, backstoreType, "")
						if err != nil && err != storage.ErrLUNExists {
							log.Printf("warning: failed to sync LUN %d for target %s: %v", lunID, iqn, err)
						}
					} else if zvolName != "" {
						// Disk mode: use ZVOL
						_, err := a.iscsiStore.AddLUNWithDevice(targetID, zvolName, "", size, backstoreType, "")
						if err != nil && err != storage.ErrLUNExists {
							log.Printf("warning: failed to sync LUN %d for target %s: %v", lunID, iqn, err)
						}
					} else if devicePath != "" {
						// Generic device
						_, err := a.iscsiStore.AddLUNWithDevice(targetID, "", devicePath, size, backstoreType, "")
						if err != nil && err != storage.ErrLUNExists {
							log.Printf("warning: failed to sync LUN %d for target %s: %v", lunID, iqn, err)
						}
					}
				}
			}
		}
	}

	return nil
}