@@ -6,6 +6,7 @@ import (
 	"log"
 	"net/http"
 	"net/url"
 	"os"
 	"os/exec"
+	"strconv"
 	"strings"
@@ -124,6 +125,17 @@ func (a *App) handleGetPool(w http.ResponseWriter, r *http.Request) {
 		return
 	}
+
+	// Check if detail is requested
+	if r.URL.Query().Get("detail") == "true" {
+		detail, err := a.zfs.GetPoolDetail(name)
+		if err != nil {
+			writeError(w, errors.ErrNotFound("pool not found").WithDetails(err.Error()))
+			return
+		}
+		writeJSON(w, http.StatusOK, detail)
+		return
+	}
 
 	pool, err := a.zfs.GetPool(name)
 	if err != nil {
 		writeJSON(w, http.StatusNotFound, map[string]string{"error": err.Error()})
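The detail branch is additive: the same route serves the existing summary unless ?detail=true is present. A minimal client-side sketch, assuming the API listens on localhost:8080 (the listen address is not part of this diff) and decoding into a loose map rather than the PoolDetail struct defined further down:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Assumed local address; adjust to wherever the API is actually served.
	resp, err := http.Get("http://localhost:8080/api/v1/pools/tank?detail=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode loosely; the real response shape is models.PoolDetail.
	var detail map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&detail); err != nil {
		panic(err)
	}
	fmt.Println(detail["state"], detail["spares"])
}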
@@ -133,6 +145,43 @@ func (a *App) handleGetPool(w http.ResponseWriter, r *http.Request) {
 	writeJSON(w, http.StatusOK, pool)
 }
 
+func (a *App) handleAddSpareDisk(w http.ResponseWriter, r *http.Request) {
+	name := pathParam(r, "/api/v1/pools/")
+	name = strings.TrimSuffix(name, "/spare")
+	if name == "" {
+		writeError(w, errors.ErrBadRequest("pool name required"))
+		return
+	}
+
+	var req struct {
+		Disk string `json:"disk"` // Disk path like /dev/sdb or sdb
+	}
+
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		writeError(w, errors.ErrBadRequest("invalid request body"))
+		return
+	}
+
+	if req.Disk == "" {
+		writeError(w, errors.ErrValidation("disk path required"))
+		return
+	}
+
+	// Ensure the disk path starts with /dev/
+	diskPath := req.Disk
+	if !strings.HasPrefix(diskPath, "/dev/") {
+		diskPath = "/dev/" + diskPath
+	}
+
+	if err := a.zfs.AddSpareDisk(name, diskPath); err != nil {
+		log.Printf("add spare disk error: %v", err)
+		writeError(w, errors.ErrInternal("failed to add spare disk").WithDetails(err.Error()))
+		return
+	}
+
+	writeJSON(w, http.StatusOK, map[string]string{"message": "spare disk added", "pool": name, "disk": diskPath})
+}
+
 func (a *App) handleDeletePool(w http.ResponseWriter, r *http.Request) {
 	name := pathParam(r, "/api/v1/pools/")
 	if name == "" {
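The handler accepts either a bare device name or an absolute path; a small runnable sketch of that normalization rule in isolation:

package main

import (
	"fmt"
	"strings"
)

// normalizeDiskPath prefixes bare device names with /dev/ and leaves
// already-qualified paths untouched, mirroring handleAddSpareDisk above.
func normalizeDiskPath(disk string) string {
	if !strings.HasPrefix(disk, "/dev/") {
		return "/dev/" + disk
	}
	return disk
}

func main() {
	fmt.Println(normalizeDiskPath("sdb"))      // /dev/sdb
	fmt.Println(normalizeDiskPath("/dev/sdc")) // /dev/sdc
}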
@@ -753,6 +802,13 @@ func (a *App) handleDeleteSnapshotPolicy(w http.ResponseWriter, r *http.Request) {
 
 // SMB Share Handlers
 func (a *App) handleListSMBShares(w http.ResponseWriter, r *http.Request) {
+	// Sync shares from the OS (smb.conf) to the store.
+	// This ensures shares created before a service restart are visible.
+	if err := a.syncSMBSharesFromOS(); err != nil {
+		log.Printf("warning: failed to sync SMB shares from OS: %v", err)
+		// Continue anyway - return what's in the store
+	}
+
 	shares := a.smbStore.List()
 	writeJSON(w, http.StatusOK, shares)
 }
@@ -1151,6 +1207,11 @@ func (a *App) handleDeleteNFSExport(w http.ResponseWriter, r *http.Request) {
 
 // iSCSI Handlers
 func (a *App) handleListISCSITargets(w http.ResponseWriter, r *http.Request) {
+	// Sync targets from the OS before listing
+	if err := a.syncISCSITargetsFromOS(); err != nil {
+		log.Printf("warning: failed to sync iSCSI targets from OS: %v", err)
+	}
+
 	targets := a.iscsiStore.List()
 	writeJSON(w, http.StatusOK, targets)
 }
@@ -1158,37 +1219,63 @@ func (a *App) handleListISCSITargets(w http.ResponseWriter, r *http.Request) {
 func (a *App) handleCreateISCSITarget(w http.ResponseWriter, r *http.Request) {
 	var req struct {
 		IQN        string   `json:"iqn"`
+		Type       string   `json:"type"` // "disk" or "tape" (default: "disk")
 		Initiators []string `json:"initiators"`
 	}
 
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "invalid request body"})
+		log.Printf("create iSCSI target: invalid request body: %v", err)
+		writeError(w, errors.ErrBadRequest("invalid request body").WithDetails(err.Error()))
 		return
 	}
 
+	// Validate and set the target type
+	targetType := models.ISCSITargetTypeDisk // Default to disk mode
+	if req.Type != "" {
+		if req.Type != "disk" && req.Type != "tape" {
+			writeError(w, errors.ErrValidation("invalid target type: must be 'disk' or 'tape'"))
+			return
+		}
+		targetType = models.ISCSITargetType(req.Type)
+	}
+
+	log.Printf("create iSCSI target: IQN=%s, Type=%s, Initiators=%v", req.IQN, targetType, req.Initiators)
+
+	// Validate IQN format
+	if err := validation.ValidateIQN(req.IQN); err != nil {
+		log.Printf("IQN validation error: %v (IQN: %s)", err, req.IQN)
+		writeError(w, errors.ErrValidation(err.Error()))
+		return
+	}
+
-	target, err := a.iscsiStore.Create(req.IQN, req.Initiators)
+	target, err := a.iscsiStore.CreateWithType(req.IQN, targetType, req.Initiators)
 	if err != nil {
 		if err == storage.ErrISCSITargetExists {
-			writeJSON(w, http.StatusConflict, map[string]string{"error": "target with this IQN already exists"})
+			log.Printf("create iSCSI target: target already exists (IQN: %s)", req.IQN)
+			writeError(w, errors.ErrConflict("target with this IQN already exists"))
 			return
 		}
 		log.Printf("create iSCSI target error: %v", err)
-		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": err.Error()})
+		writeError(w, errors.ErrInternal("failed to create iSCSI target").WithDetails(err.Error()))
 		return
 	}
 
+	log.Printf("create iSCSI target: target created in store (ID: %s, IQN: %s)", target.ID, target.IQN)
+
 	// Apply the configuration to the iSCSI service
 	targets := a.iscsiStore.List()
 	if err := a.iscsiService.ApplyConfiguration(targets); err != nil {
-		log.Printf("apply iSCSI configuration error: %v", err)
+		log.Printf("create iSCSI target: apply configuration error: %v", err)
 		// Don't fail the request if configuration fails - the target is already in the store.
 		// The user can retry configuration later.
 		writeJSON(w, http.StatusCreated, map[string]interface{}{
			"target":  target,
			"warning": "target created but configuration may have failed. check logs.",
 		})
 		return
 	}
 
+	log.Printf("create iSCSI target: success (ID: %s, IQN: %s)", target.ID, target.IQN)
 	writeJSON(w, http.StatusCreated, target)
 }
 
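For reference, a hedged sketch of the request this handler now accepts. The route path and port are assumptions (route registration is not shown in this diff), and the example IQN reuses the one quoted in the diff's own comments:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Mirrors the handler's request struct: "type" is optional and
	// defaults to "disk"; "tape" selects passthrough targets.
	body, _ := json.Marshal(map[string]interface{}{
		"iqn":        "iqn.2025-12.com.atlas:target-1",
		"type":       "tape",
		"initiators": []string{"iqn.1994-05.com.redhat:client1"},
	})

	// Assumed route; adjust to the actual registration.
	resp, err := http.Post("http://localhost:8080/api/v1/iscsi/targets",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 201 Created on success, 409 on a duplicate IQN
}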
@@ -1325,7 +1412,10 @@ func (a *App) handleAddLUN(w http.ResponseWriter, r *http.Request) {
 	id := parts[0]
 
 	var req struct {
-		ZVOL string `json:"zvol"`
+		ZVOL          string `json:"zvol"`           // ZVOL name (for block backstore)
+		Device        string `json:"device"`         // Device path (e.g., /dev/st0 for tape)
+		Backstore     string `json:"backstore"`      // Backstore type: "block", "pscsi", "fileio" (optional, auto-detected)
+		BackstoreName string `json:"backstore_name"` // Custom backstore name (optional)
 	}
 
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
@@ -1333,35 +1423,55 @@ func (a *App) handleAddLUN(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	if req.ZVOL == "" {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "zvol is required"})
-		return
-	}
-
-	// Validate ZVOL exists
-	zvols, err := a.zfs.ListZVOLs("")
-	if err != nil {
-		log.Printf("list zvols error: %v", err)
-		writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed to validate zvol"})
+	// Validate: must have either ZVOL or Device
+	if req.ZVOL == "" && req.Device == "" {
+		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "either zvol or device is required"})
 		return
 	}
 
 	var zvolSize uint64
-	zvolExists := false
-	for _, zvol := range zvols {
-		if zvol.Name == req.ZVOL {
-			zvolExists = true
-			zvolSize = zvol.Size
-			break
-		}
-	}
-
-	if !zvolExists {
-		writeJSON(w, http.StatusBadRequest, map[string]string{"error": "zvol not found"})
-		return
-	}
+	var zvolName string
+
+	if req.ZVOL != "" {
+		// Validate the ZVOL exists
+		zvols, err := a.zfs.ListZVOLs("")
+		if err != nil {
+			log.Printf("list zvols error: %v", err)
+			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed to validate zvol"})
+			return
+		}
+
+		zvolExists := false
+		for _, zvol := range zvols {
+			if zvol.Name == req.ZVOL {
+				zvolExists = true
+				zvolSize = zvol.Size
+				zvolName = zvol.Name
+				break
+			}
+		}
+
+		if !zvolExists {
+			writeJSON(w, http.StatusBadRequest, map[string]string{"error": "zvol not found"})
+			return
+		}
+	} else if req.Device != "" {
+		// Validate the device exists
+		if _, err := os.Stat(req.Device); err != nil {
+			if os.IsNotExist(err) {
+				writeJSON(w, http.StatusBadRequest, map[string]string{"error": fmt.Sprintf("device not found: %s", req.Device)})
+				return
+			}
+			log.Printf("stat device error: %v", err)
+			writeJSON(w, http.StatusInternalServerError, map[string]string{"error": "failed to validate device"})
+			return
+		}
+		// For tape devices, the size is typically 0 or unknown
+		zvolSize = 0
+	}
 
-	lun, err := a.iscsiStore.AddLUN(id, req.ZVOL, zvolSize)
+	// Use the updated AddLUN signature that supports device and backstore
+	lun, err := a.iscsiStore.AddLUNWithDevice(id, zvolName, req.Device, zvolSize, req.Backstore, req.BackstoreName)
 	if err != nil {
 		if err == storage.ErrISCSITargetNotFound {
 			writeJSON(w, http.StatusNotFound, map[string]string{"error": "target not found"})
@@ -1804,3 +1914,326 @@ func (a *App) syncNFSExportsFromOS() error {
 
 	return nil
 }
+
+// syncSMBSharesFromOS syncs SMB shares from /etc/samba/smb.conf to the store
+func (a *App) syncSMBSharesFromOS() error {
+	configPath := "/etc/samba/smb.conf"
+	cmd := exec.Command("sudo", "-n", "cat", configPath)
+	output, err := cmd.Output()
+	if err != nil {
+		// If we can't read smb.conf, that's okay - it might not exist yet
+		return nil
+	}
+
+	lines := strings.Split(string(output), "\n")
+	currentShare := ""
+	inShareSection := false
+	sharePath := ""
+	shareReadOnly := false
+	shareGuestOK := false
+	shareDescription := ""
+	shareValidUsers := []string{}
+
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") {
+			continue
+		}
+
+		// Check if this is a share section header
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			// Save the previous share if one exists
+			if inShareSection && currentShare != "" && sharePath != "" {
+				// Try to find the corresponding dataset
+				datasets, err := a.zfs.ListDatasets("")
+				var dataset string
+				if err == nil {
+					for _, ds := range datasets {
+						if ds.Mountpoint == sharePath {
+							dataset = ds.Name
+							break
+						}
+					}
+				}
+
+				// Check if the share already exists
+				existingShares := a.smbStore.List()
+				exists := false
+				for _, share := range existingShares {
+					if share.Name == currentShare || share.Path == sharePath {
+						exists = true
+						break
+					}
+				}
+
+				if !exists {
+					_, err = a.smbStore.Create(currentShare, sharePath, dataset, shareDescription, shareReadOnly, shareGuestOK, shareValidUsers)
+					if err != nil && err != storage.ErrSMBShareExists {
+						log.Printf("warning: failed to sync SMB share %s: %v", currentShare, err)
+					}
+				}
+			}
+
+			// Start a new share section
+			shareName := strings.Trim(line, "[]")
+			if shareName != "global" && shareName != "printers" && shareName != "print$" {
+				currentShare = shareName
+				inShareSection = true
+				sharePath = ""
+				shareReadOnly = false
+				shareGuestOK = false
+				shareDescription = ""
+				shareValidUsers = []string{}
+			} else {
+				inShareSection = false
+				currentShare = ""
+			}
+			continue
+		}
+
+		// Parse share properties
+		if inShareSection && currentShare != "" {
+			if strings.HasPrefix(line, "path = ") {
+				sharePath = strings.TrimSpace(strings.TrimPrefix(line, "path = "))
+			} else if strings.HasPrefix(line, "read only = ") {
+				value := strings.TrimSpace(strings.TrimPrefix(line, "read only = "))
+				shareReadOnly = (value == "yes" || value == "true")
+			} else if strings.HasPrefix(line, "guest ok = ") {
+				value := strings.TrimSpace(strings.TrimPrefix(line, "guest ok = "))
+				shareGuestOK = (value == "yes" || value == "true")
+			} else if strings.HasPrefix(line, "comment = ") {
+				shareDescription = strings.TrimSpace(strings.TrimPrefix(line, "comment = "))
+			} else if strings.HasPrefix(line, "valid users = ") {
+				usersStr := strings.TrimSpace(strings.TrimPrefix(line, "valid users = "))
+				shareValidUsers = strings.Split(usersStr, ",")
+				for i := range shareValidUsers {
+					shareValidUsers[i] = strings.TrimSpace(shareValidUsers[i])
+				}
+			}
+		}
+	}
+
+	// Save the last share if one exists
+	if inShareSection && currentShare != "" && sharePath != "" {
+		datasets, err := a.zfs.ListDatasets("")
+		var dataset string
+		if err == nil {
+			for _, ds := range datasets {
+				if ds.Mountpoint == sharePath {
+					dataset = ds.Name
+					break
+				}
+			}
+		}
+
+		existingShares := a.smbStore.List()
+		exists := false
+		for _, share := range existingShares {
+			if share.Name == currentShare || share.Path == sharePath {
+				exists = true
+				break
+			}
+		}
+
+		if !exists {
+			_, err = a.smbStore.Create(currentShare, sharePath, dataset, shareDescription, shareReadOnly, shareGuestOK, shareValidUsers)
+			if err != nil && err != storage.ErrSMBShareExists {
+				log.Printf("warning: failed to sync SMB share %s: %v", currentShare, err)
+			}
+		}
+	}
+
+	return nil
+}
+
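The sync treats smb.conf as a flat INI file: a section header flushes the previous share and resets the accumulator, property lines fill it, and the global/printers/print$ sections are skipped. A condensed sketch of that scanning loop on canned input (assumed file contents, no sudo involved):

package main

import (
	"fmt"
	"strings"
)

// Minimal reproduction of the section-scanning loop above, run on a
// canned smb.conf fragment instead of `sudo -n cat /etc/samba/smb.conf`.
func main() {
	conf := `[global]
workgroup = WORKGROUP

[media]
path = /tank/media
read only = yes
comment = Media share`

	shares := map[string]map[string]string{}
	current := ""
	for _, line := range strings.Split(conf, "\n") {
		line = strings.TrimSpace(line)
		switch {
		case line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";"):
			continue
		case strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]"):
			name := strings.Trim(line, "[]")
			current = ""
			if name != "global" && name != "printers" && name != "print$" {
				current = name
				shares[current] = map[string]string{}
			}
		case current != "" && strings.Contains(line, " = "):
			kv := strings.SplitN(line, " = ", 2)
			shares[current][kv[0]] = strings.TrimSpace(kv[1])
		}
	}
	fmt.Println(shares)
	// map[media:map[comment:Media share path:/tank/media read only:yes]]
}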
+// syncISCSITargetsFromOS syncs iSCSI targets from targetcli to the store
+func (a *App) syncISCSITargetsFromOS() error {
+	log.Printf("debug: starting syncISCSITargetsFromOS")
+	// Get the list of targets from targetcli.
+	// Set TARGETCLI_HOME and TARGETCLI_LOCK_DIR to writable directories;
+	// create the directories first if they don't exist.
+	os.MkdirAll("/tmp/.targetcli", 0755)
+	os.MkdirAll("/tmp/targetcli-run", 0755)
+	// Use sudo to run as root, then set the environment variables in the command
+	cmd := exec.Command("sh", "-c", "sudo -n sh -c 'TARGETCLI_HOME=/tmp/.targetcli TARGETCLI_LOCK_DIR=/tmp/targetcli-run targetcli /iscsi ls'")
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		// Log the error but don't fail - targetcli might not be configured
+		log.Printf("warning: failed to list iSCSI targets from targetcli: %v (output: %s)", err, string(output))
+		return nil
+	}
+
+	log.Printf("debug: targetcli output: %s", string(output))
+	lines := strings.Split(string(output), "\n")
+	var currentIQN string
+
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+
+		// Check if this is a target line (starts with "o- iqn.")
+		if strings.HasPrefix(line, "o- iqn.") {
+			log.Printf("debug: found target line: %s", line)
+			// Extract the IQN from a line like "o- iqn.2025-12.com.atlas:target-1"
+			parts := strings.Fields(line)
+			if len(parts) >= 2 {
+				currentIQN = parts[1]
+
+				// Check if the target already exists in the store
+				existingTargets := a.iscsiStore.List()
+				exists := false
+				for _, t := range existingTargets {
+					if t.IQN == currentIQN {
+						exists = true
+						break
+					}
+				}
+
+				if !exists {
+					// Try to determine the target type from the IQN
+					targetType := models.ISCSITargetTypeDisk // Default to disk mode
+					if strings.Contains(strings.ToLower(currentIQN), "tape") {
+						targetType = models.ISCSITargetTypeTape
+					}
+
+					// Create the target in the store
+					target, err := a.iscsiStore.CreateWithType(currentIQN, targetType, []string{})
+					if err != nil && err != storage.ErrISCSITargetExists {
+						log.Printf("warning: failed to sync iSCSI target %s: %v", currentIQN, err)
+					} else if err == nil {
+						log.Printf("synced iSCSI target from OS: %s (type: %s)", currentIQN, targetType)
+
+						// Now try to sync the LUNs for this target
+						if err := a.syncLUNsFromOS(currentIQN, target.ID, targetType); err != nil {
+							log.Printf("warning: failed to sync LUNs for target %s: %v", currentIQN, err)
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
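Target discovery keys off lines beginning with "o- iqn." and takes the second whitespace-separated field as the IQN. A standalone sketch against canned targetcli output (the exact tree rendering varies by targetcli version, so treat the sample as an assumption):

package main

import (
	"fmt"
	"strings"
)

func main() {
	output := `o- iscsi ............ [Targets: 2]
  o- iqn.2025-12.com.atlas:target-1 ............ [TPGs: 1]
  o- iqn.2025-12.com.atlas:tape-0 ............. [TPGs: 1]`

	var iqns []string
	for _, line := range strings.Split(output, "\n") {
		line = strings.TrimSpace(line)
		if strings.HasPrefix(line, "o- iqn.") {
			if parts := strings.Fields(line); len(parts) >= 2 {
				iqns = append(iqns, parts[1])
			}
		}
	}
	fmt.Println(iqns) // [iqn.2025-12.com.atlas:target-1 iqn.2025-12.com.atlas:tape-0]
}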
+// syncLUNsFromOS syncs LUNs for a specific target from targetcli
+func (a *App) syncLUNsFromOS(iqn, targetID string, targetType models.ISCSITargetType) error {
+	// Get the LUNs for this target.
+	// Use sudo to run as root, then set the environment variables in the command
+	cmd := exec.Command("sh", "-c", "sudo -n sh -c 'TARGETCLI_HOME=/tmp/.targetcli TARGETCLI_LOCK_DIR=/tmp/targetcli-run targetcli /iscsi/"+iqn+"/tpg1/luns ls'")
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		// No LUNs or can't read - that's okay; log for debugging
+		log.Printf("debug: failed to list LUNs for target %s: %v (output: %s)", iqn, err, string(output))
+		return nil
+	}
+
+	lines := strings.Split(string(output), "\n")
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if strings.HasPrefix(line, "o- lun") {
+			// Parse a LUN line like "o- lun0 ..... [block/pool-test-02-vol01 (/dev/zvol/pool-test-02/vol01) (default_tg_pt_gp)]"
+			parts := strings.Fields(line)
+			if len(parts) >= 2 {
+				// Extract the LUN ID from "lun0"
+				lunIDStr := strings.TrimPrefix(parts[1], "lun")
+				lunID, err := strconv.Atoi(lunIDStr)
+				if err != nil {
+					continue
+				}
+
+				// Extract the backstore path and device from the line
+				var backstorePath string
+				var devicePath string
+				var zvolName string
+
+				// Find the bracketed part - it might span multiple fields
+				fullLine := strings.Join(parts, " ")
+				start := strings.Index(fullLine, "[")
+				end := strings.LastIndex(fullLine, "]")
+				if start >= 0 && end > start {
+					content := fullLine[start+1 : end]
+					// Parse content like "block/pool-test-02-vol01 (/dev/zvol/pool-test-02/vol01)"
+					if strings.Contains(content, "(") {
+						// Has a device path
+						parts2 := strings.Split(content, "(")
+						if len(parts2) >= 2 {
+							backstorePath = strings.TrimSpace(parts2[0])
+							devicePath = strings.Trim(strings.TrimSpace(parts2[1]), "()")
+
+							// If the device is a ZVOL, extract the ZVOL name
+							if strings.HasPrefix(devicePath, "/dev/zvol/") {
+								zvolName = strings.TrimPrefix(devicePath, "/dev/zvol/")
+							}
+						}
+					} else {
+						backstorePath = content
+					}
+				}
+
+				// Check if the LUN already exists
+				target, err := a.iscsiStore.Get(targetID)
+				if err != nil {
+					continue
+				}
+
+				lunExists := false
+				for _, lun := range target.LUNs {
+					if lun.ID == lunID {
+						lunExists = true
+						break
+					}
+				}
+
+				if !lunExists {
+					// Determine the backstore type
+					backstoreType := "block"
+					if strings.HasPrefix(backstorePath, "pscsi/") {
+						backstoreType = "pscsi"
+					} else if strings.HasPrefix(backstorePath, "fileio/") {
+						backstoreType = "fileio"
+					}
+
+					// Get the size if it's a ZVOL
+					var size uint64
+					if zvolName != "" {
+						zvols, err := a.zfs.ListZVOLs("")
+						if err == nil {
+							for _, zvol := range zvols {
+								if zvol.Name == zvolName {
+									size = zvol.Size
+									break
+								}
+							}
+						}
+					}
+
+					// Add the LUN to the store
+					if targetType == models.ISCSITargetTypeTape && devicePath != "" {
+						// Tape mode: use the device
+						_, err := a.iscsiStore.AddLUNWithDevice(targetID, "", devicePath, size, backstoreType, "")
+						if err != nil && err != storage.ErrLUNExists {
+							log.Printf("warning: failed to sync LUN %d for target %s: %v", lunID, iqn, err)
+						}
+					} else if zvolName != "" {
+						// Disk mode: use the ZVOL
+						_, err := a.iscsiStore.AddLUNWithDevice(targetID, zvolName, "", size, backstoreType, "")
+						if err != nil && err != storage.ErrLUNExists {
+							log.Printf("warning: failed to sync LUN %d for target %s: %v", lunID, iqn, err)
+						}
+					} else if devicePath != "" {
+						// Generic device
+						_, err := a.iscsiStore.AddLUNWithDevice(targetID, "", devicePath, size, backstoreType, "")
+						if err != nil && err != storage.ErrLUNExists {
+							log.Printf("warning: failed to sync LUN %d for target %s: %v", lunID, iqn, err)
+						}
+					}
+				}
+			}
+		}
+	}
+
	return nil
+}
+
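LUN lines carry the backstore path and the device inside one bracketed span, so the parser slices between the first "[" and the last "]" and splits on "(". A runnable sketch on the sample line quoted in the code's own comment:

package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "o- lun0 .... [block/pool-test-02-vol01 (/dev/zvol/pool-test-02/vol01) (default_tg_pt_gp)]"

	start := strings.Index(line, "[")
	end := strings.LastIndex(line, "]")
	content := line[start+1 : end]

	parts := strings.Split(content, "(")
	backstorePath := strings.TrimSpace(parts[0])
	devicePath := strings.Trim(strings.TrimSpace(parts[1]), "()")

	zvolName := ""
	if strings.HasPrefix(devicePath, "/dev/zvol/") {
		zvolName = strings.TrimPrefix(devicePath, "/dev/zvol/")
	}
	fmt.Println(backstorePath, devicePath, zvolName)
	// block/pool-test-02-vol01 /dev/zvol/pool-test-02/vol01 pool-test-02/vol01
}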
@@ -2,6 +2,7 @@ package httpapp
 
 import (
 	"fmt"
+	"log"
 	"net/http"
 )
 
@@ -64,6 +65,14 @@ func (a *App) handleDashboardAPI(w http.ResponseWriter, r *http.Request) {
 	}
 
 	// Service statistics
+	// Sync from the OS first to ensure accurate counts
+	if err := a.syncSMBSharesFromOS(); err != nil {
+		log.Printf("warning: failed to sync SMB shares from OS in dashboard: %v", err)
+	}
+	if err := a.syncNFSExportsFromOS(); err != nil {
+		log.Printf("warning: failed to sync NFS exports from OS in dashboard: %v", err)
+	}
+
 	smbShares := a.smbStore.List()
 	data.Services.SMBShares = len(smbShares)
 
@@ -104,6 +104,15 @@ func (a *App) handlePoolOps(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	if strings.HasSuffix(r.URL.Path, "/spare") {
+		if r.Method == http.MethodPost {
+			a.handleAddSpareDisk(w, r)
+		} else {
+			writeError(w, errors.NewAPIError(errors.ErrCodeBadRequest, "method not allowed", http.StatusMethodNotAllowed))
+		}
+		return
+	}
+
 	methodHandler(
 		func(w http.ResponseWriter, r *http.Request) { a.handleGetPool(w, r) },
 		nil,
@@ -24,19 +24,31 @@ type NFSExport struct {
 	Enabled bool `json:"enabled"`
 }
 
+// ISCSITargetType represents the type of iSCSI target
+type ISCSITargetType string
+
+const (
+	ISCSITargetTypeDisk ISCSITargetType = "disk" // For ZVOL/block devices
+	ISCSITargetTypeTape ISCSITargetType = "tape" // For tape library passthrough
+)
+
 // ISCSITarget represents an iSCSI target
 type ISCSITarget struct {
-	ID         string   `json:"id"`
-	IQN        string   `json:"iqn"` // iSCSI Qualified Name
-	LUNs       []LUN    `json:"luns"`
-	Initiators []string `json:"initiators"` // ACL list
-	Enabled    bool     `json:"enabled"`
+	ID         string          `json:"id"`
+	IQN        string          `json:"iqn"`  // iSCSI Qualified Name
+	Type       ISCSITargetType `json:"type"` // "disk" or "tape"
+	LUNs       []LUN           `json:"luns"`
+	Initiators []string        `json:"initiators"` // ACL list
+	Enabled    bool            `json:"enabled"`
 }
 
-// LUN represents a Logical Unit Number backed by a ZVOL
+// LUN represents a Logical Unit Number backed by various storage types
 type LUN struct {
-	ID      int    `json:"id"`      // LUN number
-	ZVOL    string `json:"zvol"`    // ZVOL name
-	Size    uint64 `json:"size"`    // bytes
-	Backend string `json:"backend"` // "zvol"
+	ID            int    `json:"id"`             // LUN number
+	ZVOL          string `json:"zvol"`           // ZVOL name (for block backstore)
+	Device        string `json:"device"`         // Device path (e.g., /dev/st0 for tape, /dev/sdX for disk)
+	Size          uint64 `json:"size"`           // bytes (0 for unknown/tape devices)
+	Backend       string `json:"backend"`        // "zvol", "block", "pscsi", "fileio"
+	Backstore     string `json:"backstore"`      // Backstore type: "block", "pscsi", "fileio" (default: "block")
+	BackstoreName string `json:"backstore_name"` // Name used in targetcli
 }
 
@@ -13,6 +13,37 @@ type Pool struct {
 	CreatedAt time.Time `json:"created_at"`
 }
 
+// PoolDetail represents detailed pool information from zpool status
+type PoolDetail struct {
+	Name      string   `json:"name"`
+	State     string   `json:"state"`      // ONLINE, DEGRADED, FAULTED
+	Status    string   `json:"status"`     // Full status message
+	VDEVs     []VDEV   `json:"vdevs"`      // Virtual devices
+	Spares    []string `json:"spares"`     // Spare disks
+	Errors    string   `json:"errors"`     // Error summary
+	ScrubInfo string   `json:"scrub_info"` // Scrub information
+}
+
+// VDEV represents a virtual device in a pool
+type VDEV struct {
+	Name     string `json:"name"`     // VDEV name or type
+	Type     string `json:"type"`     // mirror, raidz, raidz2, etc.
+	State    string `json:"state"`    // ONLINE, DEGRADED, etc.
+	Disks    []Disk `json:"disks"`    // Disks in this VDEV
+	Read     int    `json:"read"`     // Read errors
+	Write    int    `json:"write"`    // Write errors
+	Checksum int    `json:"checksum"` // Checksum errors
+}
+
+// Disk represents a disk in a VDEV
+type Disk struct {
+	Name     string `json:"name"`     // Disk name (e.g., sdb)
+	State    string `json:"state"`    // ONLINE, DEGRADED, FAULTED, etc.
+	Read     int    `json:"read"`     // Read errors
+	Write    int    `json:"write"`    // Write errors
+	Checksum int    `json:"checksum"` // Checksum errors
+}
+
 // Dataset represents a ZFS filesystem
 type Dataset struct {
 	Name string `json:"name"`
@@ -3,6 +3,7 @@ package services
 
 import (
 	"fmt"
 	"os/exec"
+	"strconv"
 	"strings"
 	"sync"
 
@@ -41,6 +42,7 @@ func (s *ISCSIService) ApplyConfiguration(targets []models.ISCSITarget) error {
 			// Disable the target if it exists
 			if err := s.disableTarget(target.IQN); err != nil {
 				// Log but continue
+				fmt.Printf("warning: failed to disable target %s: %v\n", target.IQN, err)
 			}
 			continue
 		}
@@ -49,16 +51,19 @@ func (s *ISCSIService) ApplyConfiguration(targets []models.ISCSITarget) error {
 		if err := s.createTarget(target); err != nil {
 			return fmt.Errorf("create target %s: %w", target.IQN, err)
 		}
+		fmt.Printf("iSCSI target created/verified: %s\n", target.IQN)
 
 		// Configure ACLs
 		if err := s.configureACLs(target); err != nil {
 			return fmt.Errorf("configure ACLs for %s: %w", target.IQN, err)
 		}
+		fmt.Printf("iSCSI ACLs configured for: %s\n", target.IQN)
 
 		// Configure LUNs
 		if err := s.configureLUNs(target); err != nil {
 			return fmt.Errorf("configure LUNs for %s: %w", target.IQN, err)
 		}
+		fmt.Printf("iSCSI LUNs configured for: %s\n", target.IQN)
 	}
 
 	return nil
@@ -68,27 +73,93 @@ func (s *ISCSIService) ApplyConfiguration(targets []models.ISCSITarget) error {
|
||||
func (s *ISCSIService) createTarget(target models.ISCSITarget) error {
|
||||
// Use targetcli to create target
|
||||
// Format: targetcli /iscsi create <IQN>
|
||||
cmd := exec.Command(s.targetcliPath, "/iscsi", "create", target.IQN)
|
||||
if err := cmd.Run(); err != nil {
|
||||
cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi", "create", target.IQN)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
// Target might already exist, which is OK
|
||||
// Check if it actually exists
|
||||
if !s.targetExists(target.IQN) {
|
||||
return fmt.Errorf("create target failed: %w", err)
|
||||
return fmt.Errorf("create target failed: %w (output: %s)", err, string(output))
|
||||
}
|
||||
fmt.Printf("target %s already exists, continuing\n", target.IQN)
|
||||
} else {
|
||||
fmt.Printf("target %s created successfully\n", target.IQN)
|
||||
}
|
||||
|
||||
// Enable TPG1 (Target Portal Group 1)
|
||||
// Disable authentication
|
||||
cmd = exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1", "set", "attribute", "authentication=0")
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
fmt.Printf("warning: failed to set authentication=0: %v (output: %s)\n", err, string(output))
|
||||
}
|
||||
|
||||
// Enable generate_node_acls (allow all initiators if no ACLs specified)
|
||||
cmd = exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1", "set", "attribute", "generate_node_acls=1")
|
||||
if output, err := cmd.CombinedOutput(); err != nil {
|
||||
fmt.Printf("warning: failed to set generate_node_acls=1: %v (output: %s)\n", err, string(output))
|
||||
} else {
|
||||
fmt.Printf("set generate_node_acls=1 for target %s\n", target.IQN)
|
||||
}
|
||||
|
||||
// Create portal if not exists (listen on all interfaces, port 3260)
|
||||
cmd = exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/portals", "create")
|
||||
if err := cmd.Run(); err != nil {
|
||||
// Portal might already exist, which is OK
|
||||
// Check if portal exists
|
||||
cmd = exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/portals", "ls")
|
||||
output, err2 := cmd.Output()
|
||||
if err2 != nil || len(strings.TrimSpace(string(output))) == 0 {
|
||||
// No portal exists, try to create with specific IP
|
||||
// Get system IP
|
||||
systemIP, _ := s.getSystemIP()
|
||||
cmd = exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/portals", "create", systemIP)
|
||||
if err3 := cmd.Run(); err3 != nil {
|
||||
// Try with 0.0.0.0 (all interfaces)
|
||||
cmd = exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/portals", "create", "0.0.0.0")
|
||||
if err4 := cmd.Run(); err4 != nil {
|
||||
// Log but don't fail - portal might already exist
|
||||
fmt.Printf("warning: failed to create portal: %v", err4)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save configuration
|
||||
cmd = exec.Command("sudo", "-n", s.targetcliPath, "saveconfig")
|
||||
cmd.Run() // Ignore errors
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// configureACLs configures initiator ACLs for a target
|
||||
func (s *ISCSIService) configureACLs(target models.ISCSITarget) error {
|
||||
// If no initiators specified, allow all (generate_node_acls=1)
|
||||
if len(target.Initiators) == 0 {
|
||||
// Set to allow all initiators
|
||||
cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1", "set", "attribute", "generate_node_acls=1")
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("set generate_node_acls: %w", err)
|
||||
}
|
||||
// Disable authentication for open access
|
||||
cmd = exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1", "set", "attribute", "authentication=0")
|
||||
cmd.Run() // Ignore errors
|
||||
return nil
|
||||
}
|
||||
|
||||
// If initiators specified, use ACL-based access
|
||||
// Set generate_node_acls=0 to use explicit ACLs
|
||||
cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1", "set", "attribute", "generate_node_acls=0")
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("set generate_node_acls=0: %w", err)
|
||||
}
|
||||
|
||||
// Get current ACLs
|
||||
currentACLs, _ := s.getACLs(target.IQN)
|
||||
|
||||
// Remove ACLs not in desired list
|
||||
for _, acl := range currentACLs {
|
||||
if !contains(target.Initiators, acl) {
|
||||
cmd := exec.Command(s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/acls", "delete", acl)
|
||||
cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/acls", "delete", acl)
|
||||
cmd.Run() // Ignore errors
|
||||
}
|
||||
}
|
||||
@@ -96,7 +167,7 @@ func (s *ISCSIService) configureACLs(target models.ISCSITarget) error {
 	// Add new ACLs
 	for _, initiator := range target.Initiators {
 		if !contains(currentACLs, initiator) {
-			cmd := exec.Command(s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/acls", "create", initiator)
+			cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/acls", "create", initiator)
 			if err := cmd.Run(); err != nil {
 				return fmt.Errorf("create ACL %s: %w", initiator, err)
 			}
@@ -114,23 +185,100 @@ func (s *ISCSIService) configureLUNs(target models.ISCSITarget) error {
 	// Remove LUNs not in the desired list
 	for _, lun := range currentLUNs {
 		if !s.hasLUN(target.LUNs, lun) {
-			cmd := exec.Command(s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/luns", "delete", fmt.Sprintf("lun/%d", lun))
+			cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/luns", "delete", fmt.Sprintf("lun/%d", lun))
 			cmd.Run() // Ignore errors
 		}
 	}
 
 	// Add/update LUNs
 	for _, lun := range target.LUNs {
 		// Create the LUN mapping
-		// Format: targetcli /iscsi/<IQN>/tpg1/luns create /backstores/zvol/<zvol>
-		zvolPath := "/backstores/zvol/" + lun.ZVOL
+		// Determine the backstore type (default to block for ZVOLs, pscsi for tape devices)
+		backstoreType := lun.Backstore
+		if backstoreType == "" {
+			if lun.Device != "" {
+				// If a device is specified and looks like a tape device, use pscsi
+				if strings.HasPrefix(lun.Device, "/dev/st") || strings.HasPrefix(lun.Device, "/dev/nst") {
+					backstoreType = "pscsi"
+				} else {
+					backstoreType = "block"
+				}
+			} else if lun.ZVOL != "" {
+				// Default to block for ZVOLs
+				backstoreType = "block"
+			} else {
+				return fmt.Errorf("LUN must have either ZVOL or Device specified")
+			}
+		}
 
-		// First ensure the zvol backend exists
-		cmd := exec.Command(s.targetcliPath, "/backstores/zvol", "create", lun.ZVOL, lun.ZVOL)
-		cmd.Run() // Ignore if already exists
+		// Determine the backstore name
+		backstoreName := lun.BackstoreName
+		if backstoreName == "" {
+			if lun.ZVOL != "" {
+				backstoreName = strings.ReplaceAll(lun.ZVOL, "/", "-")
+			} else if lun.Device != "" {
+				// Use the device name (e.g., st0, sdb)
+				backstoreName = strings.TrimPrefix(strings.TrimPrefix(lun.Device, "/dev/"), "/dev/")
+				backstoreName = strings.ReplaceAll(backstoreName, "/", "-")
+			} else {
+				backstoreName = fmt.Sprintf("lun-%d", lun.ID)
+			}
+		}
+
+		// Determine the device path
+		var devicePath string
+		if lun.Device != "" {
+			devicePath = lun.Device
+		} else if lun.ZVOL != "" {
+			devicePath = "/dev/zvol/" + lun.ZVOL
+		} else {
+			return fmt.Errorf("LUN must have either ZVOL or Device specified")
+		}
+
+		backstorePath := "/backstores/" + backstoreType + "/" + backstoreName
+
+		// Create the backstore based on its type
+		switch backstoreType {
+		case "block":
+			// Format: targetcli /backstores/block create name=<name> dev=<dev>
+			cmd := exec.Command("sudo", "-n", s.targetcliPath, "/backstores/block", "create", "name="+backstoreName, "dev="+devicePath)
+			if output, err := cmd.CombinedOutput(); err != nil {
+				if !strings.Contains(string(output), "already exists") {
+					fmt.Printf("warning: failed to create block backstore %s: %v (output: %s)\n", backstoreName, err, string(output))
+				}
+			} else {
+				fmt.Printf("created block backstore %s for %s\n", backstoreName, devicePath)
+			}
+
+		case "pscsi":
+			// Format: targetcli /backstores/pscsi create name=<name> dev=<dev>
+			// pscsi is for SCSI pass-through (tape devices, etc.)
+			cmd := exec.Command("sudo", "-n", s.targetcliPath, "/backstores/pscsi", "create", "name="+backstoreName, "dev="+devicePath)
+			if output, err := cmd.CombinedOutput(); err != nil {
+				if !strings.Contains(string(output), "already exists") {
+					return fmt.Errorf("failed to create pscsi backstore %s: %w (output: %s)", backstoreName, err, string(output))
+				}
+			} else {
+				fmt.Printf("created pscsi backstore %s for %s\n", backstoreName, devicePath)
+			}
+
+		case "fileio":
+			// Format: targetcli /backstores/fileio create name=<name> file_or_dev=<path> [size=<size>]
+			// fileio is for file-based storage
+			cmd := exec.Command("sudo", "-n", s.targetcliPath, "/backstores/fileio", "create", "name="+backstoreName, "file_or_dev="+devicePath)
+			if output, err := cmd.CombinedOutput(); err != nil {
+				if !strings.Contains(string(output), "already exists") {
+					return fmt.Errorf("failed to create fileio backstore %s: %w (output: %s)", backstoreName, err, string(output))
+				}
+			} else {
+				fmt.Printf("created fileio backstore %s for %s\n", backstoreName, devicePath)
+			}
+
+		default:
+			return fmt.Errorf("unsupported backstore type: %s", backstoreType)
+		}
 
 		// Create the LUN
-		cmd = exec.Command(s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/luns", "create", zvolPath)
+		cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+target.IQN+"/tpg1/luns", "create", backstorePath)
 		if err := cmd.Run(); err != nil {
 			// LUN might already exist
 			if !s.hasLUNID(currentLUNs, lun.ID) {
@@ -144,7 +292,7 @@ func (s *ISCSIService) configureLUNs(target models.ISCSITarget) error {
 
 // Helper functions
 func (s *ISCSIService) targetExists(iqn string) bool {
-	cmd := exec.Command(s.targetcliPath, "/iscsi", "ls")
+	cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi", "ls")
 	output, err := cmd.Output()
 	if err != nil {
 		return false
@@ -153,25 +301,56 @@ func (s *ISCSIService) targetExists(iqn string) bool {
 }
 
 func (s *ISCSIService) getACLs(iqn string) ([]string, error) {
-	cmd := exec.Command(s.targetcliPath, "/iscsi/"+iqn+"/tpg1/acls", "ls")
-	_, err := cmd.Output()
+	cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+iqn+"/tpg1/acls", "ls")
+	output, err := cmd.Output()
 	if err != nil {
-		return nil, err
+		return []string{}, nil // Return empty if we can't get the ACLs
 	}
 
-	// Parse output to extract ACL names
-	// This is simplified - real implementation would parse targetcli output
-	return []string{}, nil
+	// Parse the output to extract ACL names
+	// Format: o- acls ..................... [ACLs: 1]
+	//           o- iqn.1994-05.com.redhat:client1 ..................... [Mapped LUNs: 1]
+	lines := strings.Split(string(output), "\n")
+	acls := []string{}
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if strings.HasPrefix(line, "o- iqn.") {
+			// Extract the IQN from a line like "o- iqn.1994-05.com.redhat:client1"
+			parts := strings.Fields(line)
+			if len(parts) >= 2 && strings.HasPrefix(parts[1], "iqn.") {
+				acls = append(acls, parts[1])
+			}
+		}
+	}
+	return acls, nil
 }
 
 func (s *ISCSIService) getLUNs(iqn string) ([]int, error) {
-	cmd := exec.Command(s.targetcliPath, "/iscsi/"+iqn+"/tpg1/luns", "ls")
-	_, err := cmd.Output()
+	cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+iqn+"/tpg1/luns", "ls")
+	output, err := cmd.Output()
 	if err != nil {
-		return nil, err
+		return []int{}, nil // Return empty if we can't get the LUNs
 	}
 
-	// Parse output to extract LUN IDs
-	// This is simplified - real implementation would parse targetcli output
-	return []int{}, nil
+	// Parse the output to extract LUN IDs
+	// Format: o- luns ..................... [LUNs: 1]
+	//           o- lun0 ..................... [zvol/pool-test-02/vol-1 (/dev/zvol/pool-test-02/vol-1)]
+	lines := strings.Split(string(output), "\n")
+	luns := []int{}
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+		if strings.HasPrefix(line, "o- lun") {
+			// Extract the LUN number from a line like "o- lun0"
+			parts := strings.Fields(line)
+			if len(parts) >= 2 && strings.HasPrefix(parts[1], "lun") {
+				lunIDStr := strings.TrimPrefix(parts[1], "lun")
+				if lunID, err := strconv.Atoi(lunIDStr); err == nil {
+					luns = append(luns, lunID)
+				}
+			}
+		}
+	}
+	return luns, nil
 }
 
 func (s *ISCSIService) hasLUN(luns []models.LUN, id int) bool {
@@ -193,7 +372,7 @@ func (s *ISCSIService) hasLUNID(luns []int, id int) bool {
 }
 
 func (s *ISCSIService) disableTarget(iqn string) error {
-	cmd := exec.Command(s.targetcliPath, "/iscsi/"+iqn+"/tpg1", "set", "attribute", "enable=0")
+	cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi/"+iqn+"/tpg1", "set", "attribute", "enable=0")
 	return cmd.Run()
 }
 
@@ -326,7 +505,7 @@ func (s *ISCSIService) GetConnectionInstructions(target models.ISCSITarget, port
 // GetPortalIP attempts to detect the portal IP address
 func (s *ISCSIService) GetPortalIP() (string, error) {
 	// Try to get the IP from targetcli
-	cmd := exec.Command(s.targetcliPath, "/iscsi", "ls")
+	cmd := exec.Command("sudo", "-n", s.targetcliPath, "/iscsi", "ls")
 	output, err := cmd.Output()
 	if err != nil {
 		// Fallback: try to get the system IP
@@ -3,6 +3,7 @@ package storage
 
 import (
 	"errors"
 	"fmt"
+	"strings"
 	"sync"
 
 	"gitea.avt.data-center.id/othman.suseno/atlas/internal/models"
@@ -69,6 +70,12 @@ func (s *ISCSIStore) GetByIQN(iqn string) (*models.ISCSITarget, error) {
 
 // Create creates a new iSCSI target
 func (s *ISCSIStore) Create(iqn string, initiators []string) (*models.ISCSITarget, error) {
+	// Default to disk mode for backward compatibility
+	return s.CreateWithType(iqn, models.ISCSITargetTypeDisk, initiators)
+}
+
+// CreateWithType creates a new iSCSI target with the specified type
+func (s *ISCSIStore) CreateWithType(iqn string, targetType models.ISCSITargetType, initiators []string) (*models.ISCSITarget, error) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 
@@ -85,6 +92,7 @@ func (s *ISCSIStore) Create(iqn string, initiators []string) (*models.ISCSITarge
 	target := &models.ISCSITarget{
 		ID:         id,
 		IQN:        iqn,
+		Type:       targetType,
 		LUNs:       []models.LUN{},
 		Initiators: initiators,
 		Enabled:    true,
@@ -151,10 +159,64 @@ func (s *ISCSIStore) AddLUN(targetID string, zvol string, size uint64) (*models.
 	}
 
 	lun := models.LUN{
-		ID:      lunID,
-		ZVOL:    zvol,
-		Size:    size,
-		Backend: "zvol",
+		ID:        lunID,
+		ZVOL:      zvol,
+		Size:      size,
+		Backend:   "zvol",
+		Backstore: "block", // Default to block for ZVOLs
 	}
 
 	target.LUNs = append(target.LUNs, lun)
 	return &lun, nil
 }
 
+// AddLUNWithDevice adds a LUN to a target with support for a device path and backstore type
+func (s *ISCSIStore) AddLUNWithDevice(targetID string, zvol string, device string, size uint64, backstore string, backstoreName string) (*models.LUN, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	target, ok := s.targets[targetID]
+	if !ok {
+		return nil, ErrISCSITargetNotFound
+	}
+
+	// Check if the device/ZVOL is already mapped
+	for _, lun := range target.LUNs {
+		if (zvol != "" && lun.ZVOL == zvol) || (device != "" && lun.Device == device) {
+			return nil, ErrLUNExists
+		}
+	}
+
+	// Find the next available LUN ID
+	lunID := 0
+	for _, lun := range target.LUNs {
+		if lun.ID >= lunID {
+			lunID = lun.ID + 1
+		}
+	}
+
+	// Auto-detect the backstore type if not specified
+	if backstore == "" {
+		if device != "" {
+			// If a device is specified and looks like a tape device, use pscsi
+			if strings.HasPrefix(device, "/dev/st") || strings.HasPrefix(device, "/dev/nst") {
+				backstore = "pscsi"
+			} else {
+				backstore = "block"
+			}
+		} else if zvol != "" {
+			backstore = "block"
+		}
+	}
+
+	lun := models.LUN{
+		ID:            lunID,
+		ZVOL:          zvol,
+		Device:        device,
+		Size:          size,
+		Backend:       "zvol", // Keep for backward compatibility
+		Backstore:     backstore,
+		BackstoreName: backstoreName,
+	}
+
+	target.LUNs = append(target.LUNs, lun)
+
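The auto-detection is a small decision table: tape device nodes go to pscsi passthrough, everything else backed by a device or ZVOL goes to a block backstore. A standalone sketch of the same logic (the helper name is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// detectBackstore mirrors the auto-detection in AddLUNWithDevice: tape
// device nodes (/dev/st*, /dev/nst*) map to pscsi passthrough, anything
// else with a device or ZVOL maps to a block backstore.
func detectBackstore(zvol, device string) string {
	if device != "" {
		if strings.HasPrefix(device, "/dev/st") || strings.HasPrefix(device, "/dev/nst") {
			return "pscsi"
		}
		return "block"
	}
	if zvol != "" {
		return "block"
	}
	return "" // neither given; the caller rejects this case
}

func main() {
	fmt.Println(detectBackstore("", "/dev/st0"))           // pscsi
	fmt.Println(detectBackstore("", "/dev/nst0"))          // pscsi
	fmt.Println(detectBackstore("", "/dev/sdb"))           // block
	fmt.Println(detectBackstore("pool-test-02/vol01", "")) // block
}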
@@ -19,7 +19,9 @@ var (
 	shareNamePattern = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_\-\.]{0,79}$`)
 
 	// IQN pattern (simplified - iqn.yyyy-mm.reversed.domain:identifier)
-	iqnPattern = regexp.MustCompile(`^iqn\.\d{4}-\d{2}\.[a-zA-Z0-9][a-zA-Z0-9\-\.]*:[a-zA-Z0-9][a-zA-Z0-9\-_\.]*$`)
+	// Domain must have at least 2 levels (e.g., com.atlas, org.example)
+	// Format: iqn.YYYY-MM.domain.subdomain:identifier
+	iqnPattern = regexp.MustCompile(`^iqn\.\d{4}-\d{2}\.[a-zA-Z0-9][a-zA-Z0-9\-]*\.[a-zA-Z0-9][a-zA-Z0-9\-\.]*:[a-zA-Z0-9][a-zA-Z0-9\-_\.]*$`)
 
 	// Email pattern (basic)
 	emailPattern = regexp.MustCompile(`^[a-zA-Z0-9._%+\-]+@[a-zA-Z0-9.\-]+\.[a-zA-Z]{2,}$`)
@@ -166,6 +168,8 @@ func ValidateShareName(name string) error {
 }
 
 // ValidateIQN validates an iSCSI Qualified Name
+// IQN format: iqn.YYYY-MM.domain.subdomain:identifier
+// Domain must have at least 2 levels (e.g., com.atlas, org.example)
 func ValidateIQN(iqn string) error {
 	if iqn == "" {
 		return &ValidationError{Field: "iqn", Message: "IQN cannot be empty"}
@@ -181,7 +185,7 @@ func ValidateIQN(iqn string) error {
 
 	// Basic format validation (can be more strict)
 	if !iqnPattern.MatchString(iqn) {
-		return &ValidationError{Field: "iqn", Message: "invalid IQN format (expected: iqn.yyyy-mm.reversed.domain:identifier)"}
+		return &ValidationError{Field: "iqn", Message: "invalid IQN format (expected: iqn.YYYY-MM.domain.subdomain:identifier, domain must have at least 2 levels, e.g., com.atlas, org.example)"}
 	}
 
 	return nil
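A quick check of what the tightened pattern accepts and rejects; the third case shows the newly mandatory second domain level:

package main

import (
	"fmt"
	"regexp"
)

// The tightened pattern from the diff: the domain part now requires at
// least two dot-separated levels before the colon.
var iqnPattern = regexp.MustCompile(`^iqn\.\d{4}-\d{2}\.[a-zA-Z0-9][a-zA-Z0-9\-]*\.[a-zA-Z0-9][a-zA-Z0-9\-\.]*:[a-zA-Z0-9][a-zA-Z0-9\-_\.]*$`)

func main() {
	for _, iqn := range []string{
		"iqn.2025-12.com.atlas:target-1", // valid: two-level domain
		"iqn.1994-05.com.redhat:client1", // valid
		"iqn.2025-12.atlas:target-1",     // now rejected: single-level domain
		"iqn.2025-12.com.atlas",          // rejected: missing :identifier
	} {
		fmt.Println(iqn, iqnPattern.MatchString(iqn))
	}
}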
@@ -255,6 +255,197 @@ func (s *Service) GetPool(name string) (*models.Pool, error) {
 	return nil, fmt.Errorf("pool %s not found", name)
 }
 
+// GetPoolDetail returns detailed pool information from zpool status
+func (s *Service) GetPoolDetail(name string) (*models.PoolDetail, error) {
+	output, err := s.execCommand(s.zpoolPath, "status", name)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get pool status: %w", err)
+	}
+
+	detail := &models.PoolDetail{
+		Name:   name,
+		VDEVs:  []models.VDEV{},
+		Spares: []string{},
+	}
+
+	lines := strings.Split(output, "\n")
+	var currentVDEV *models.VDEV
+	inConfig := false
+	inSpares := false
+
+	for _, line := range lines {
+		line = strings.TrimSpace(line)
+
+		// Parse the pool name and state
+		if strings.HasPrefix(line, "pool:") {
+			parts := strings.Fields(line)
+			if len(parts) > 1 {
+				detail.Name = parts[1]
+			}
+			continue
+		}
+
+		if strings.HasPrefix(line, "state:") {
+			parts := strings.Fields(line)
+			if len(parts) > 1 {
+				detail.State = parts[1]
+				detail.Status = strings.Join(parts[1:], " ")
+			}
+			continue
+		}
+
+		// Parse the errors line
+		if strings.HasPrefix(line, "errors:") {
+			detail.Errors = strings.TrimPrefix(line, "errors:")
+			detail.Errors = strings.TrimSpace(detail.Errors)
+			continue
+		}
+
+		// Parse scrub information
+		if strings.Contains(line, "scrub") {
+			detail.ScrubInfo = line
+			continue
+		}
+
+		// Check if we're entering the config section
+		if strings.HasPrefix(line, "config:") {
+			inConfig = true
+			continue
+		}
+
+		// Check if we're in the spares section
+		if strings.Contains(line, "spares") {
+			inSpares = true
+			inConfig = false
+			continue
+		}
+
+		// Parse VDEV and disk information
+		if inConfig {
+			// Check if this is a VDEV header (indented but not a disk)
+			fields := strings.Fields(line)
+			if len(fields) >= 5 {
+				// Check if it's a VDEV type line (mirror, raidz, etc.)
+				if fields[0] == "mirror" || strings.HasPrefix(fields[0], "raidz") || fields[0] == "log" || fields[0] == "cache" {
+					// Save the previous VDEV if one exists
+					if currentVDEV != nil {
+						detail.VDEVs = append(detail.VDEVs, *currentVDEV)
+					}
+					// Start a new VDEV
+					currentVDEV = &models.VDEV{
+						Name:  fields[0],
+						Type:  fields[0],
+						State: "ONLINE",
+						Disks: []models.Disk{},
+					}
+					// Try to parse the state if available
+					if len(fields) > 1 {
+						for _, field := range fields[1:] {
+							if field == "ONLINE" || field == "DEGRADED" || field == "FAULTED" || field == "OFFLINE" {
+								currentVDEV.State = field
+								break
+							}
+						}
+					}
+					continue
+				}
+
+				// Check if it's a disk line (starts with sd, hd, nvme, etc.)
+				diskName := fields[0]
+				if strings.HasPrefix(diskName, "sd") || strings.HasPrefix(diskName, "hd") || strings.HasPrefix(diskName, "nvme") {
+					// This is a disk
+					state := "ONLINE"
+					read := 0
+					write := 0
+					checksum := 0
+
+					if len(fields) > 1 {
+						state = fields[1]
+					}
+					if len(fields) > 2 {
+						if val, err := strconv.Atoi(fields[2]); err == nil {
+							read = val
+						}
+					}
+					if len(fields) > 3 {
+						if val, err := strconv.Atoi(fields[3]); err == nil {
+							write = val
+						}
+					}
+					if len(fields) > 4 {
+						if val, err := strconv.Atoi(fields[4]); err == nil {
+							checksum = val
+						}
+					}
+
+					disk := models.Disk{
+						Name:     diskName,
+						State:    state,
+						Read:     read,
+						Write:    write,
+						Checksum: checksum,
+					}
+
+					// If we have a current VDEV, add the disk to it
+					if currentVDEV != nil {
+						currentVDEV.Disks = append(currentVDEV.Disks, disk)
+						// Update the VDEV error counters
+						currentVDEV.Read += read
+						currentVDEV.Write += write
+						currentVDEV.Checksum += checksum
+					} else {
+						// Standalone disk; create a VDEV for it
+						currentVDEV = &models.VDEV{
+							Name:     diskName,
+							Type:     "disk",
+							State:    state,
+							Disks:    []models.Disk{disk},
+							Read:     read,
+							Write:    write,
+							Checksum: checksum,
+						}
+					}
+					continue
+				}
+			}
+		}
+
+		// Parse the spares section
+		if inSpares {
+			fields := strings.Fields(line)
+			if len(fields) > 0 {
+				diskName := fields[0]
+				if strings.HasPrefix(diskName, "sd") || strings.HasPrefix(diskName, "hd") || strings.HasPrefix(diskName, "nvme") {
+					detail.Spares = append(detail.Spares, diskName)
+				}
+			}
+		}
+
+		// An empty line might indicate the end of a section
+		if line == "" && currentVDEV != nil {
+			detail.VDEVs = append(detail.VDEVs, *currentVDEV)
+			currentVDEV = nil
+		}
+	}
+
+	// Save the last VDEV if one exists
+	if currentVDEV != nil {
+		detail.VDEVs = append(detail.VDEVs, *currentVDEV)
+	}
+
+	return detail, nil
+}
+
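Each config-table row of `zpool status` is split on whitespace: field 0 is the device, field 1 its state, fields 2-4 the read/write/checksum error counters. A one-row sketch of that mapping:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Sketch of how GetPoolDetail reads one config-table row from
// `zpool status` (standard NAME STATE READ WRITE CKSUM layout).
func main() {
	row := "    sdb     ONLINE       0     1     0"
	fields := strings.Fields(row)

	name, state := fields[0], fields[1]
	read, _ := strconv.Atoi(fields[2])
	write, _ := strconv.Atoi(fields[3])
	cksum, _ := strconv.Atoi(fields[4])

	fmt.Printf("%s %s read=%d write=%d cksum=%d\n", name, state, read, write, cksum)
	// sdb ONLINE read=0 write=1 cksum=0
}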
+// AddSpareDisk adds a spare disk to a pool
+func (s *Service) AddSpareDisk(poolName, diskPath string) error {
+	args := []string{"add", poolName, "spare", diskPath}
+	_, err := s.execCommand(s.zpoolPath, args...)
+	if err != nil {
+		return translateZFSError(err, "add spare disk", poolName)
+	}
+	return nil
+}
+
 // CreatePool creates a new ZFS pool
 func (s *Service) CreatePool(name string, vdevs []string, options map[string]string) error {
 	args := []string{"create"}
@@ -1148,11 +1339,19 @@ func (s *Service) ListDisks() ([]map[string]string, error) {
 			continue
 		}
 
-		// Check if disk is used in a pool
-		isUsed := usedDisks[dev.Name]
+		// Skip virtual disks (ZVOLs) - these are zd* devices.
+		// This must be checked BEFORE dev.Type == "disk" because zd* devices
+		// are reported as type "disk" by lsblk
+		if strings.HasPrefix(dev.Name, "zd") {
+			log.Printf("debug: skipping virtual disk %s (zd* device)", dev.Name)
+			continue
+		}
+
-		// Include all disks (both available and used) so we can show status
+		// Include all physical disks (both available and used) so we can show status
 		// Only include actual disk devices (not partitions, loops, etc.)
 		if dev.Type == "disk" {
+			// Check if the disk is used in a pool
+			isUsed := usedDisks[dev.Name]
 			disk := map[string]string{
 				"name": dev.Name,
 				"size": dev.Size,