Compare commits
12 Commits
main...snapshot-r

| SHA1 |
|---|
| 20af99b244 |
| 990c114531 |
| 0c8a9efecc |
| 70d25e13b8 |
| 2bb64620d4 |
| 7543b3a850 |
| a558c97088 |
| 2de3c5f6ab |
| 8ece52992b |
| 03965e35fb |
| ebaf718424 |
| cb923704db |
Binary file not shown.
@@ -65,12 +65,13 @@ func main() {
 	r := router.NewRouter(cfg, db, logger)

 	// Create HTTP server
+	// Note: WriteTimeout should be 0 for WebSocket connections (they handle their own timeouts)
 	srv := &http.Server{
 		Addr:         fmt.Sprintf(":%d", cfg.Server.Port),
 		Handler:      r,
 		ReadTimeout:  15 * time.Second,
-		WriteTimeout: 15 * time.Second,
-		IdleTimeout:  60 * time.Second,
+		WriteTimeout: 0, // 0 means no timeout - needed for WebSocket connections
+		IdleTimeout:  120 * time.Second, // Increased for WebSocket keep-alive
 	}

 	// Setup graceful shutdown
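Note on the timeout change above: `http.Server.WriteTimeout` bounds every response write, so a non-zero value silently kills long-lived WebSocket connections; zero disables the global limit and shifts responsibility to each handler. A minimal sketch of the per-handler alternative, assuming Go 1.20+ (`http.NewResponseController`) and a hypothetical `/ws` route:

package main

import (
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) {
		// With srv.WriteTimeout == 0 there is no global deadline, so the
		// handler sets its own per-write deadline instead (Go 1.20+).
		rc := http.NewResponseController(w)
		_ = rc.SetWriteDeadline(time.Now().Add(30 * time.Second))
		w.Write([]byte("upgrade would happen here"))
	})

	srv := &http.Server{
		Addr:         ":8080",
		Handler:      mux,
		ReadTimeout:  15 * time.Second,
		WriteTimeout: 0, // long-lived connections manage their own deadlines
		IdleTimeout:  120 * time.Second,
	}
	srv.ListenAndServe()
}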
@@ -23,6 +23,7 @@ require (
	github.com/bytedance/sonic/loader v0.1.1 // indirect
	github.com/cloudwego/base64x v0.1.4 // indirect
	github.com/cloudwego/iasm v0.2.0 // indirect
	github.com/creack/pty v1.1.24 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/gabriel-vasile/mimetype v1.4.3 // indirect
	github.com/gin-contrib/sse v0.1.0 // indirect
@@ -6,6 +6,8 @@ github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -116,3 +116,268 @@ func (h *Handler) CreateJob(c *gin.Context) {

	c.JSON(http.StatusCreated, job)
}

// ExecuteBconsoleCommand executes a bconsole command
func (h *Handler) ExecuteBconsoleCommand(c *gin.Context) {
	var req struct {
		Command string `json:"command" binding:"required"`
	}
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "command is required"})
		return
	}

	output, err := h.service.ExecuteBconsoleCommand(c.Request.Context(), req.Command)
	if err != nil {
		h.logger.Error("Failed to execute bconsole command", "error", err, "command", req.Command)
		c.JSON(http.StatusInternalServerError, gin.H{
			"error":   "failed to execute command",
			"output":  output,
			"details": err.Error(),
		})
		return
	}

	c.JSON(http.StatusOK, gin.H{
		"output": output,
	})
}

// ListClients lists all backup clients with optional filters
func (h *Handler) ListClients(c *gin.Context) {
	opts := ListClientsOptions{}

	// Parse enabled filter
	if enabledStr := c.Query("enabled"); enabledStr != "" {
		enabled := enabledStr == "true"
		opts.Enabled = &enabled
	}

	// Parse search query
	opts.Search = c.Query("search")

	clients, err := h.service.ListClients(c.Request.Context(), opts)
	if err != nil {
		h.logger.Error("Failed to list clients", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{
			"error":   "failed to list clients",
			"details": err.Error(),
		})
		return
	}

	if clients == nil {
		clients = []Client{}
	}

	c.JSON(http.StatusOK, gin.H{
		"clients": clients,
		"total":   len(clients),
	})
}

// GetDashboardStats returns dashboard statistics
func (h *Handler) GetDashboardStats(c *gin.Context) {
	stats, err := h.service.GetDashboardStats(c.Request.Context())
	if err != nil {
		h.logger.Error("Failed to get dashboard stats", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get dashboard stats"})
		return
	}

	c.JSON(http.StatusOK, stats)
}

// ListStoragePools lists all storage pools
func (h *Handler) ListStoragePools(c *gin.Context) {
	pools, err := h.service.ListStoragePools(c.Request.Context())
	if err != nil {
		h.logger.Error("Failed to list storage pools", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list storage pools"})
		return
	}

	if pools == nil {
		pools = []StoragePool{}
	}

	h.logger.Info("Listed storage pools", "count", len(pools))
	c.JSON(http.StatusOK, gin.H{
		"pools": pools,
		"total": len(pools),
	})
}

// ListStorageVolumes lists all storage volumes
func (h *Handler) ListStorageVolumes(c *gin.Context) {
	poolName := c.Query("pool_name")

	volumes, err := h.service.ListStorageVolumes(c.Request.Context(), poolName)
	if err != nil {
		h.logger.Error("Failed to list storage volumes", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list storage volumes"})
		return
	}

	if volumes == nil {
		volumes = []StorageVolume{}
	}

	c.JSON(http.StatusOK, gin.H{
		"volumes": volumes,
		"total":   len(volumes),
	})
}

// ListStorageDaemons lists all storage daemons
func (h *Handler) ListStorageDaemons(c *gin.Context) {
	daemons, err := h.service.ListStorageDaemons(c.Request.Context())
	if err != nil {
		h.logger.Error("Failed to list storage daemons", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list storage daemons"})
		return
	}

	if daemons == nil {
		daemons = []StorageDaemon{}
	}

	c.JSON(http.StatusOK, gin.H{
		"daemons": daemons,
		"total":   len(daemons),
	})
}

// CreateStoragePool creates a new storage pool
func (h *Handler) CreateStoragePool(c *gin.Context) {
	var req CreatePoolRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	pool, err := h.service.CreateStoragePool(c.Request.Context(), req)
	if err != nil {
		h.logger.Error("Failed to create storage pool", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusCreated, pool)
}

// DeleteStoragePool deletes a storage pool
func (h *Handler) DeleteStoragePool(c *gin.Context) {
	idStr := c.Param("id")
	if idStr == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "pool ID is required"})
		return
	}

	var poolID int
	if _, err := fmt.Sscanf(idStr, "%d", &poolID); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid pool ID"})
		return
	}

	err := h.service.DeleteStoragePool(c.Request.Context(), poolID)
	if err != nil {
		h.logger.Error("Failed to delete storage pool", "error", err, "pool_id", poolID)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{"message": "pool deleted successfully"})
}

// CreateStorageVolume creates a new storage volume
func (h *Handler) CreateStorageVolume(c *gin.Context) {
	var req CreateVolumeRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	volume, err := h.service.CreateStorageVolume(c.Request.Context(), req)
	if err != nil {
		h.logger.Error("Failed to create storage volume", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusCreated, volume)
}

// UpdateStorageVolume updates a storage volume
func (h *Handler) UpdateStorageVolume(c *gin.Context) {
	idStr := c.Param("id")
	if idStr == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "volume ID is required"})
		return
	}

	var volumeID int
	if _, err := fmt.Sscanf(idStr, "%d", &volumeID); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid volume ID"})
		return
	}

	var req UpdateVolumeRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
		return
	}

	volume, err := h.service.UpdateStorageVolume(c.Request.Context(), volumeID, req)
	if err != nil {
		h.logger.Error("Failed to update storage volume", "error", err, "volume_id", volumeID)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, volume)
}

// DeleteStorageVolume deletes a storage volume
func (h *Handler) DeleteStorageVolume(c *gin.Context) {
	idStr := c.Param("id")
	if idStr == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "volume ID is required"})
		return
	}

	var volumeID int
	if _, err := fmt.Sscanf(idStr, "%d", &volumeID); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid volume ID"})
		return
	}

	err := h.service.DeleteStorageVolume(c.Request.Context(), volumeID)
	if err != nil {
		h.logger.Error("Failed to delete storage volume", "error", err, "volume_id", volumeID)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{"message": "volume deleted successfully"})
}

// ListMedia lists all media from bconsole "list media" command
func (h *Handler) ListMedia(c *gin.Context) {
	media, err := h.service.ListMedia(c.Request.Context())
	if err != nil {
		h.logger.Error("Failed to list media", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	if media == nil {
		media = []Media{}
	}

	h.logger.Info("Listed media", "count", len(media))
	c.JSON(http.StatusOK, gin.H{
		"media": media,
		"total": len(media),
	})
}
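The `if xs == nil { xs = []T{} }` guards that recur in the handlers above exist because encoding/json marshals a nil slice as `null` but an empty slice as `[]`, and frontends that iterate the field choke on `null`. A self-contained illustration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilSlice []string
	emptySlice := []string{}

	a, _ := json.Marshal(map[string]interface{}{"clients": nilSlice})
	b, _ := json.Marshal(map[string]interface{}{"clients": emptySlice})

	fmt.Println(string(a)) // {"clients":null}
	fmt.Println(string(b)) // {"clients":[]}
}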
File diff suppressed because it is too large
@@ -13,24 +13,30 @@ import (
 // authMiddleware validates JWT tokens and sets user context
 func authMiddleware(authHandler *auth.Handler) gin.HandlerFunc {
 	return func(c *gin.Context) {
-		// Extract token from Authorization header
+		var token string
+
+		// Try to extract token from Authorization header first
 		authHeader := c.GetHeader("Authorization")
-		if authHeader == "" {
-			c.JSON(http.StatusUnauthorized, gin.H{"error": "missing authorization header"})
+		if authHeader != "" {
+			// Parse Bearer token
+			parts := strings.SplitN(authHeader, " ", 2)
+			if len(parts) == 2 && parts[0] == "Bearer" {
+				token = parts[1]
+			}
+		}
+
+		// If no token from header, try query parameter (for WebSocket)
+		if token == "" {
+			token = c.Query("token")
+		}
+
+		// If still no token, return error
+		if token == "" {
+			c.JSON(http.StatusUnauthorized, gin.H{"error": "missing authorization token"})
 			c.Abort()
 			return
 		}

-		// Parse Bearer token
-		parts := strings.SplitN(authHeader, " ", 2)
-		if len(parts) != 2 || parts[0] != "Bearer" {
-			c.JSON(http.StatusUnauthorized, gin.H{"error": "invalid authorization header format"})
-			c.Abort()
-			return
-		}
-
-		token := parts[1]
-
 		// Validate token and get user
 		user, err := authHandler.ValidateToken(token)
 		if err != nil {
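Background for the middleware change above: browser WebSocket APIs cannot set an Authorization header, hence the fallback to a `token` query parameter. A client-side sketch, assuming a gorilla/websocket dialer and an illustrative endpoint path:

package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/gorilla/websocket"
)

func main() {
	token := "eyJ..." // JWT obtained from the login endpoint
	u := url.URL{
		Scheme:   "ws",
		Host:     "localhost:8080",
		Path:     "/api/v1/ws", // hypothetical WebSocket route
		RawQuery: "token=" + url.QueryEscape(token),
	}

	// The same token would normally travel in the Authorization header;
	// for WebSockets it is carried in the query string instead.
	conn, _, err := websocket.DefaultDialer.Dial(u.String(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fmt.Println("connected")
}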
@@ -14,6 +14,7 @@ import (
	"github.com/atlasos/calypso/internal/iam"
	"github.com/atlasos/calypso/internal/monitoring"
	"github.com/atlasos/calypso/internal/scst"
	"github.com/atlasos/calypso/internal/shares"
	"github.com/atlasos/calypso/internal/storage"
	"github.com/atlasos/calypso/internal/system"
	"github.com/atlasos/calypso/internal/tape_physical"
@@ -198,6 +199,18 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng
		storageGroup.GET("/zfs/arc/stats", storageHandler.GetARCStats)
	}

	// Shares (CIFS/NFS)
	sharesHandler := shares.NewHandler(db, log)
	sharesGroup := protected.Group("/shares")
	sharesGroup.Use(requirePermission("storage", "read"))
	{
		sharesGroup.GET("", sharesHandler.ListShares)
		sharesGroup.GET("/:id", sharesHandler.GetShare)
		sharesGroup.POST("", requirePermission("storage", "write"), sharesHandler.CreateShare)
		sharesGroup.PUT("/:id", requirePermission("storage", "write"), sharesHandler.UpdateShare)
		sharesGroup.DELETE("/:id", requirePermission("storage", "write"), sharesHandler.DeleteShare)
	}

	// SCST
	scstHandler := scst.NewHandler(db, log)
	scstGroup := protected.Group("/scst")
@@ -206,10 +219,12 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng
 		scstGroup.GET("/targets", scstHandler.ListTargets)
 		scstGroup.GET("/targets/:id", scstHandler.GetTarget)
 		scstGroup.POST("/targets", scstHandler.CreateTarget)
-		scstGroup.POST("/targets/:id/luns", scstHandler.AddLUN)
+		scstGroup.POST("/targets/:id/luns", requirePermission("iscsi", "write"), scstHandler.AddLUN)
+		scstGroup.DELETE("/targets/:id/luns/:lunId", requirePermission("iscsi", "write"), scstHandler.RemoveLUN)
 		scstGroup.POST("/targets/:id/initiators", scstHandler.AddInitiator)
 		scstGroup.POST("/targets/:id/enable", scstHandler.EnableTarget)
 		scstGroup.POST("/targets/:id/disable", scstHandler.DisableTarget)
+		scstGroup.DELETE("/targets/:id", requirePermission("iscsi", "write"), scstHandler.DeleteTarget)
 		scstGroup.GET("/initiators", scstHandler.ListAllInitiators)
 		scstGroup.GET("/initiators/:id", scstHandler.GetInitiator)
 		scstGroup.DELETE("/initiators/:id", scstHandler.RemoveInitiator)
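The SCST routes above combine two gin middleware scopes: `scstGroup.Use(...)` guards every route in the group with the read permission, while write routes pass `requirePermission("iscsi", "write")` as an additional handler ahead of the final one. A minimal sketch of that pattern with placeholder checks:

package main

import "github.com/gin-gonic/gin"

func requireRead(c *gin.Context)  { c.Next() } // stand-ins for requirePermission(...)
func requireWrite(c *gin.Context) { c.Next() }

func main() {
	r := gin.New()
	g := r.Group("/scst")
	g.Use(requireRead) // runs for every route in the group
	{
		g.GET("/targets", func(c *gin.Context) { c.JSON(200, gin.H{"targets": []string{}}) })
		// write routes stack an extra check before the handler
		g.DELETE("/targets/:id", requireWrite, func(c *gin.Context) { c.Status(204) })
	}
	r.Run(":8080")
}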
@@ -223,6 +238,16 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng
		scstGroup.POST("/portals", scstHandler.CreatePortal)
		scstGroup.PUT("/portals/:id", scstHandler.UpdatePortal)
		scstGroup.DELETE("/portals/:id", scstHandler.DeletePortal)
		// Initiator Groups routes
		scstGroup.GET("/initiator-groups", scstHandler.ListAllInitiatorGroups)
		scstGroup.GET("/initiator-groups/:id", scstHandler.GetInitiatorGroup)
		scstGroup.POST("/initiator-groups", requirePermission("iscsi", "write"), scstHandler.CreateInitiatorGroup)
		scstGroup.PUT("/initiator-groups/:id", requirePermission("iscsi", "write"), scstHandler.UpdateInitiatorGroup)
		scstGroup.DELETE("/initiator-groups/:id", requirePermission("iscsi", "write"), scstHandler.DeleteInitiatorGroup)
		scstGroup.POST("/initiator-groups/:id/initiators", requirePermission("iscsi", "write"), scstHandler.AddInitiatorToGroup)
		// Config file management
		scstGroup.GET("/config/file", requirePermission("iscsi", "read"), scstHandler.GetConfigFile)
		scstGroup.PUT("/config/file", requirePermission("iscsi", "write"), scstHandler.UpdateConfigFile)
	}

	// Physical Tape Libraries
@@ -260,7 +285,18 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng
	}

	// System Management
	systemService := system.NewService(log)
	systemHandler := system.NewHandler(log, tasks.NewEngine(db, log))
	// Set service in handler (if handler needs direct access)
	// Note: Handler already has service via NewHandler, but we need to ensure it's the same instance

	// Start network monitoring with RRD
	if err := systemService.StartNetworkMonitoring(context.Background()); err != nil {
		log.Warn("Failed to start network monitoring", "error", err)
	} else {
		log.Info("Network monitoring started with RRD")
	}

	systemGroup := protected.Group("/system")
	systemGroup.Use(requirePermission("system", "read"))
	{
@@ -268,8 +304,14 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng
		systemGroup.GET("/services/:name", systemHandler.GetServiceStatus)
		systemGroup.POST("/services/:name/restart", systemHandler.RestartService)
		systemGroup.GET("/services/:name/logs", systemHandler.GetServiceLogs)
		systemGroup.GET("/logs", systemHandler.GetSystemLogs)
		systemGroup.GET("/network/throughput", systemHandler.GetNetworkThroughput)
		systemGroup.POST("/support-bundle", systemHandler.GenerateSupportBundle)
		systemGroup.GET("/interfaces", systemHandler.ListNetworkInterfaces)
		systemGroup.PUT("/interfaces/:name", systemHandler.UpdateNetworkInterface)
		systemGroup.GET("/ntp", systemHandler.GetNTPSettings)
		systemGroup.POST("/ntp", systemHandler.SaveNTPSettings)
		systemGroup.POST("/execute", requirePermission("system", "write"), systemHandler.ExecuteCommand)
	}

	// IAM routes - GetUser can be accessed by user viewing own profile or admin
@@ -330,9 +372,21 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng
	backupGroup := protected.Group("/backup")
	backupGroup.Use(requirePermission("backup", "read"))
	{
		backupGroup.GET("/dashboard/stats", backupHandler.GetDashboardStats)
		backupGroup.GET("/jobs", backupHandler.ListJobs)
		backupGroup.GET("/jobs/:id", backupHandler.GetJob)
		backupGroup.POST("/jobs", requirePermission("backup", "write"), backupHandler.CreateJob)
		backupGroup.GET("/clients", backupHandler.ListClients)
		backupGroup.GET("/storage/pools", backupHandler.ListStoragePools)
		backupGroup.POST("/storage/pools", requirePermission("backup", "write"), backupHandler.CreateStoragePool)
		backupGroup.DELETE("/storage/pools/:id", requirePermission("backup", "write"), backupHandler.DeleteStoragePool)
		backupGroup.GET("/storage/volumes", backupHandler.ListStorageVolumes)
		backupGroup.POST("/storage/volumes", requirePermission("backup", "write"), backupHandler.CreateStorageVolume)
		backupGroup.PUT("/storage/volumes/:id", requirePermission("backup", "write"), backupHandler.UpdateStorageVolume)
		backupGroup.DELETE("/storage/volumes/:id", requirePermission("backup", "write"), backupHandler.DeleteStorageVolume)
		backupGroup.GET("/media", backupHandler.ListMedia)
		backupGroup.GET("/storage/daemons", backupHandler.ListStorageDaemons)
		backupGroup.POST("/console/execute", requirePermission("backup", "write"), backupHandler.ExecuteBconsoleCommand)
	}

	// Monitoring
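For orientation, a sketch of invoking the new bconsole route; host, path prefix, and token are placeholders, and the route additionally requires the backup write permission:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := bytes.NewBufferString(`{"command": "list jobs"}`)
	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:8080/api/v1/backup/console/execute", body) // path prefix is illustrative
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <token>") // JWT with backup:write
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out)) // {"output": "..."} on success
}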
@@ -88,11 +88,14 @@ func GetUserGroups(db *database.DB, userID string) ([]string, error) {
 	for rows.Next() {
 		var groupName string
 		if err := rows.Scan(&groupName); err != nil {
-			return nil, err
+			return []string{}, err
 		}
 		groups = append(groups, groupName)
 	}

+	if groups == nil {
+		groups = []string{}
+	}
 	return groups, rows.Err()
 }

@@ -69,6 +69,17 @@ func (h *Handler) ListUsers(c *gin.Context) {
			permissions, _ := GetUserPermissions(h.db, u.ID)
			groups, _ := GetUserGroups(h.db, u.ID)

			// Ensure arrays are never nil (use empty slice instead)
			if roles == nil {
				roles = []string{}
			}
			if permissions == nil {
				permissions = []string{}
			}
			if groups == nil {
				groups = []string{}
			}

			users = append(users, map[string]interface{}{
				"id":       u.ID,
				"username": u.Username,
@@ -138,6 +149,17 @@ func (h *Handler) GetUser(c *gin.Context) {
	permissions, _ := GetUserPermissions(h.db, userID)
	groups, _ := GetUserGroups(h.db, userID)

	// Ensure arrays are never nil (use empty slice instead)
	if roles == nil {
		roles = []string{}
	}
	if permissions == nil {
		permissions = []string{}
	}
	if groups == nil {
		groups = []string{}
	}

	c.JSON(http.StatusOK, gin.H{
		"id":       user.ID,
		"username": user.Username,
@@ -236,6 +258,8 @@ func (h *Handler) UpdateUser(c *gin.Context) {
	}

	// Allow update if roles or groups are provided, even if no other fields are updated
	// Note: req.Roles and req.Groups can be empty arrays ([]), which is different from nil
	// Empty array means "remove all roles/groups", nil means "don't change roles/groups"
	if len(updates) == 1 && req.Roles == nil && req.Groups == nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "no fields to update"})
		return
@@ -259,13 +283,14 @@ func (h *Handler) UpdateUser(c *gin.Context) {

 	// Update roles if provided
 	if req.Roles != nil {
-		h.logger.Info("Updating user roles", "user_id", userID, "roles", *req.Roles)
+		h.logger.Info("Updating user roles", "user_id", userID, "requested_roles", *req.Roles)
 		currentRoles, err := GetUserRoles(h.db, userID)
 		if err != nil {
 			h.logger.Error("Failed to get current roles for user", "user_id", userID, "error", err)
 			c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process user roles"})
 			return
 		}
+		h.logger.Info("Current user roles", "user_id", userID, "current_roles", currentRoles)

 		rolesToAdd := []string{}
 		rolesToRemove := []string{}
@@ -298,8 +323,15 @@ func (h *Handler) UpdateUser(c *gin.Context) {
 			}
 		}

+		h.logger.Info("Roles to add", "user_id", userID, "roles_to_add", rolesToAdd, "count", len(rolesToAdd))
+		h.logger.Info("Roles to remove", "user_id", userID, "roles_to_remove", rolesToRemove, "count", len(rolesToRemove))
+
 		// Add new roles
+		if len(rolesToAdd) == 0 {
+			h.logger.Info("No roles to add", "user_id", userID)
+		}
 		for _, roleName := range rolesToAdd {
+			h.logger.Info("Processing role to add", "user_id", userID, "role_name", roleName)
 			roleID, err := GetRoleIDByName(h.db, roleName)
 			if err != nil {
 				if err == sql.ErrNoRows {
@@ -311,12 +343,13 @@ func (h *Handler) UpdateUser(c *gin.Context) {
 				c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to process roles"})
 				return
 			}
+			h.logger.Info("Attempting to add role", "user_id", userID, "role_id", roleID, "role_name", roleName, "assigned_by", currentUser.ID)
 			if err := AddUserRole(h.db, userID, roleID, currentUser.ID); err != nil {
-				h.logger.Error("Failed to add role to user", "user_id", userID, "role_id", roleID, "error", err)
-				// Don't return early, continue with other roles
-				continue
+				h.logger.Error("Failed to add role to user", "user_id", userID, "role_id", roleID, "role_name", roleName, "error", err)
+				c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("failed to add role '%s': %v", roleName, err)})
+				return
 			}
-			h.logger.Info("Role added to user", "user_id", userID, "role_name", roleName)
+			h.logger.Info("Role successfully added to user", "user_id", userID, "role_id", roleID, "role_name", roleName)
 		}

 		// Remove old roles
@@ -415,8 +448,48 @@ func (h *Handler) UpdateUser(c *gin.Context) {
 		}
 	}

-	h.logger.Info("User updated", "user_id", userID)
-	c.JSON(http.StatusOK, gin.H{"message": "user updated successfully"})
+	// Fetch updated user data to return
+	updatedUser, err := GetUserByID(h.db, userID)
+	if err != nil {
+		h.logger.Error("Failed to fetch updated user", "user_id", userID, "error", err)
+		c.JSON(http.StatusOK, gin.H{"message": "user updated successfully"})
+		return
+	}
+
+	// Get updated roles, permissions, and groups
+	updatedRoles, _ := GetUserRoles(h.db, userID)
+	updatedPermissions, _ := GetUserPermissions(h.db, userID)
+	updatedGroups, _ := GetUserGroups(h.db, userID)
+
+	// Ensure arrays are never nil
+	if updatedRoles == nil {
+		updatedRoles = []string{}
+	}
+	if updatedPermissions == nil {
+		updatedPermissions = []string{}
+	}
+	if updatedGroups == nil {
+		updatedGroups = []string{}
+	}
+
+	h.logger.Info("User updated", "user_id", userID, "roles", updatedRoles, "groups", updatedGroups)
+	c.JSON(http.StatusOK, gin.H{
+		"message": "user updated successfully",
+		"user": gin.H{
+			"id":            updatedUser.ID,
+			"username":      updatedUser.Username,
+			"email":         updatedUser.Email,
+			"full_name":     updatedUser.FullName,
+			"is_active":     updatedUser.IsActive,
+			"is_system":     updatedUser.IsSystem,
+			"roles":         updatedRoles,
+			"permissions":   updatedPermissions,
+			"groups":        updatedGroups,
+			"created_at":    updatedUser.CreatedAt,
+			"updated_at":    updatedUser.UpdatedAt,
+			"last_login_at": updatedUser.LastLoginAt,
+		},
+	})
 }

 // DeleteUser deletes a user
@@ -2,6 +2,7 @@ package iam

import (
	"database/sql"
	"fmt"
	"time"

	"github.com/atlasos/calypso/internal/common/database"
@@ -90,11 +91,14 @@ func GetUserRoles(db *database.DB, userID string) ([]string, error) {
 	for rows.Next() {
 		var role string
 		if err := rows.Scan(&role); err != nil {
-			return nil, err
+			return []string{}, err
 		}
 		roles = append(roles, role)
 	}

+	if roles == nil {
+		roles = []string{}
+	}
 	return roles, rows.Err()
 }

@@ -118,11 +122,14 @@ func GetUserPermissions(db *database.DB, userID string) ([]string, error) {
 	for rows.Next() {
 		var perm string
 		if err := rows.Scan(&perm); err != nil {
-			return nil, err
+			return []string{}, err
 		}
 		permissions = append(permissions, perm)
 	}

+	if permissions == nil {
+		permissions = []string{}
+	}
 	return permissions, rows.Err()
 }

@@ -133,8 +140,23 @@ func AddUserRole(db *database.DB, userID, roleID, assignedBy string) error {
 		VALUES ($1, $2, $3)
 		ON CONFLICT (user_id, role_id) DO NOTHING
 	`
-	_, err := db.Exec(query, userID, roleID, assignedBy)
-	return err
+	result, err := db.Exec(query, userID, roleID, assignedBy)
+	if err != nil {
+		return fmt.Errorf("failed to insert user role: %w", err)
+	}
+
+	// Check if row was actually inserted (not just skipped due to conflict)
+	rowsAffected, err := result.RowsAffected()
+	if err != nil {
+		return fmt.Errorf("failed to get rows affected: %w", err)
+	}
+
+	if rowsAffected == 0 {
+		// Row already exists, this is not an error but we should know about it
+		return nil // ON CONFLICT DO NOTHING means this is expected
+	}
+
+	return nil
 }

 // RemoveUserRole removes a role from a user
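The reworked AddUserRole above distinguishes a failed insert from one skipped by `ON CONFLICT DO NOTHING` via `RowsAffected`. A variant sketch that surfaces that distinction to the caller, against a hypothetical `user_roles` table with plain database/sql:

package iam

import "database/sql"

// addUserRole returns (inserted, error): false with a nil error means the
// (user, role) pair already existed and the insert was skipped by ON CONFLICT.
func addUserRole(db *sql.DB, userID, roleID, assignedBy string) (bool, error) {
	res, err := db.Exec(`
		INSERT INTO user_roles (user_id, role_id, assigned_by)
		VALUES ($1, $2, $3)
		ON CONFLICT (user_id, role_id) DO NOTHING
	`, userID, roleID, assignedBy)
	if err != nil {
		return false, err
	}
	n, err := res.RowsAffected()
	if err != nil {
		return false, err
	}
	return n > 0, nil
}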
@@ -3,11 +3,13 @@ package scst
import (
	"fmt"
	"net/http"
	"strings"

	"github.com/atlasos/calypso/internal/common/database"
	"github.com/atlasos/calypso/internal/common/logger"
	"github.com/atlasos/calypso/internal/tasks"
	"github.com/gin-gonic/gin"
	"github.com/go-playground/validator/v10"
)

// Handler handles SCST-related API requests
@@ -37,6 +39,11 @@ func (h *Handler) ListTargets(c *gin.Context) {
		return
	}

	// Ensure we return an empty array instead of null
	if targets == nil {
		targets = []Target{}
	}

	c.JSON(http.StatusOK, gin.H{"targets": targets})
}

@@ -112,6 +119,11 @@ func (h *Handler) CreateTarget(c *gin.Context) {
		return
	}

	// Set alias to name for frontend compatibility (same as ListTargets)
	target.Alias = target.Name
	// LUNCount will be 0 for newly created target
	target.LUNCount = 0

	c.JSON(http.StatusCreated, target)
}

@@ -119,7 +131,7 @@ func (h *Handler) CreateTarget(c *gin.Context) {
 type AddLUNRequest struct {
 	DeviceName  string `json:"device_name" binding:"required"`
 	DevicePath  string `json:"device_path" binding:"required"`
-	LUNNumber   int    `json:"lun_number" binding:"required"`
+	LUNNumber   int    `json:"lun_number"` // Note: cannot use binding:"required" for int as 0 is valid
 	HandlerType string `json:"handler_type" binding:"required"`
 }

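The tag change above works around a validator quirk: `binding:"required"` rejects an int's zero value as "missing", so LUN 0 could never be submitted. If a required check is still wanted, a pointer field distinguishes "absent" from "zero"; a hypothetical variant, not the type used in this diff:

package scst

// Using *int lets the validator distinguish "field absent" (nil pointer)
// from "field present with value 0", so LUN 0 remains valid.
type addLUNRequest struct {
	DeviceName  string `json:"device_name" binding:"required"`
	DevicePath  string `json:"device_path" binding:"required"`
	LUNNumber   *int   `json:"lun_number" binding:"required"`
	HandlerType string `json:"handler_type" binding:"required"`
}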
@@ -136,17 +148,45 @@ func (h *Handler) AddLUN(c *gin.Context) {
 	var req AddLUNRequest
 	if err := c.ShouldBindJSON(&req); err != nil {
 		h.logger.Error("Failed to bind AddLUN request", "error", err)
-		c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("invalid request: %v", err)})
+		// Provide more detailed error message
+		if validationErr, ok := err.(validator.ValidationErrors); ok {
+			var errorMessages []string
+			for _, fieldErr := range validationErr {
+				errorMessages = append(errorMessages, fmt.Sprintf("%s is required", fieldErr.Field()))
+			}
+			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("validation failed: %s", strings.Join(errorMessages, ", "))})
+		} else {
+			// Extract error message without full struct name
+			errMsg := err.Error()
+			if idx := strings.Index(errMsg, "Key: '"); idx >= 0 {
+				// Extract field name from error message
+				fieldStart := idx + 6 // Length of "Key: '"
+				if fieldEnd := strings.Index(errMsg[fieldStart:], "'"); fieldEnd >= 0 {
+					fieldName := errMsg[fieldStart : fieldStart+fieldEnd]
+					c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("invalid or missing field: %s", fieldName)})
+				} else {
+					c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request format"})
+				}
+			} else {
+				c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("invalid request: %v", err)})
+			}
+		}
 		return
 	}

-	// Validate required fields
+	// Validate required fields (additional check in case binding doesn't catch it)
 	if req.DeviceName == "" || req.DevicePath == "" || req.HandlerType == "" {
 		h.logger.Error("Missing required fields in AddLUN request", "device_name", req.DeviceName, "device_path", req.DevicePath, "handler_type", req.HandlerType)
 		c.JSON(http.StatusBadRequest, gin.H{"error": "device_name, device_path, and handler_type are required"})
 		return
 	}

+	// Validate LUN number range
+	if req.LUNNumber < 0 || req.LUNNumber > 255 {
+		c.JSON(http.StatusBadRequest, gin.H{"error": "lun_number must be between 0 and 255"})
+		return
+	}
+
 	if err := h.service.AddLUN(c.Request.Context(), target.IQN, req.DeviceName, req.DevicePath, req.LUNNumber, req.HandlerType); err != nil {
 		h.logger.Error("Failed to add LUN", "error", err)
 		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
@@ -156,6 +196,48 @@ func (h *Handler) AddLUN(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{"message": "LUN added successfully"})
}

// RemoveLUN removes a LUN from a target
func (h *Handler) RemoveLUN(c *gin.Context) {
	targetID := c.Param("id")
	lunID := c.Param("lunId")

	// Get target
	target, err := h.service.GetTarget(c.Request.Context(), targetID)
	if err != nil {
		c.JSON(http.StatusNotFound, gin.H{"error": "target not found"})
		return
	}

	// Get LUN to get the LUN number
	var lunNumber int
	err = h.db.QueryRowContext(c.Request.Context(),
		"SELECT lun_number FROM scst_luns WHERE id = $1 AND target_id = $2",
		lunID, targetID,
	).Scan(&lunNumber)
	if err != nil {
		if strings.Contains(err.Error(), "no rows") {
			// LUN already deleted from database - check if it still exists in SCST
			// Try to get LUN number from URL or try common LUN numbers
			// For now, return success since it's already deleted (idempotent)
			h.logger.Info("LUN not found in database, may already be deleted", "lun_id", lunID, "target_id", targetID)
			c.JSON(http.StatusOK, gin.H{"message": "LUN already removed or not found"})
			return
		}
		h.logger.Error("Failed to get LUN", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get LUN"})
		return
	}

	// Remove LUN
	if err := h.service.RemoveLUN(c.Request.Context(), target.IQN, lunNumber); err != nil {
		h.logger.Error("Failed to remove LUN", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{"message": "LUN removed successfully"})
}

// AddInitiatorRequest represents an initiator addition request
type AddInitiatorRequest struct {
	InitiatorIQN string `json:"initiator_iqn" binding:"required"`
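One caveat in the RemoveLUN lookup above: detecting a missing row by substring-matching "no rows" is fragile. database/sql exposes the condition directly; a sketch of the sturdier check under the same idempotent-delete policy:

package scst

import (
	"database/sql"
	"errors"
)

// lunNumberFor returns (lun, found, error); found == false maps to the
// idempotent "already removed" response instead of a string match.
func lunNumberFor(db *sql.DB, lunID, targetID string) (int, bool, error) {
	var lun int
	err := db.QueryRow(
		"SELECT lun_number FROM scst_luns WHERE id = $1 AND target_id = $2",
		lunID, targetID,
	).Scan(&lun)
	if errors.Is(err, sql.ErrNoRows) {
		return 0, false, nil
	}
	if err != nil {
		return 0, false, err
	}
	return lun, true, nil
}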
@@ -186,6 +268,45 @@ func (h *Handler) AddInitiator(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{"message": "Initiator added successfully"})
}

// AddInitiatorToGroupRequest represents a request to add an initiator to a group
type AddInitiatorToGroupRequest struct {
	InitiatorIQN string `json:"initiator_iqn" binding:"required"`
}

// AddInitiatorToGroup adds an initiator to a specific group
func (h *Handler) AddInitiatorToGroup(c *gin.Context) {
	groupID := c.Param("id")

	var req AddInitiatorToGroupRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		validationErrors := make(map[string]string)
		if ve, ok := err.(validator.ValidationErrors); ok {
			for _, fe := range ve {
				field := strings.ToLower(fe.Field())
				validationErrors[field] = fmt.Sprintf("Field '%s' is required", field)
			}
		}
		c.JSON(http.StatusBadRequest, gin.H{
			"error":             "invalid request",
			"validation_errors": validationErrors,
		})
		return
	}

	err := h.service.AddInitiatorToGroup(c.Request.Context(), groupID, req.InitiatorIQN)
	if err != nil {
		if strings.Contains(err.Error(), "not found") || strings.Contains(err.Error(), "already exists") || strings.Contains(err.Error(), "single initiator only") {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		h.logger.Error("Failed to add initiator to group", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to add initiator to group"})
		return
	}

	c.JSON(http.StatusOK, gin.H{"message": "Initiator added to group successfully"})
}

// ListAllInitiators lists all initiators across all targets
func (h *Handler) ListAllInitiators(c *gin.Context) {
	initiators, err := h.service.ListAllInitiators(c.Request.Context())
@@ -440,6 +561,23 @@ func (h *Handler) DisableTarget(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{"message": "Target disabled successfully"})
}

// DeleteTarget deletes a target
func (h *Handler) DeleteTarget(c *gin.Context) {
	targetID := c.Param("id")

	if err := h.service.DeleteTarget(c.Request.Context(), targetID); err != nil {
		if err.Error() == "target not found" {
			c.JSON(http.StatusNotFound, gin.H{"error": "target not found"})
			return
		}
		h.logger.Error("Failed to delete target", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{"message": "Target deleted successfully"})
}

// DeletePortal deletes a portal
func (h *Handler) DeletePortal(c *gin.Context) {
	id := c.Param("id")
@@ -474,3 +612,182 @@ func (h *Handler) GetPortal(c *gin.Context) {

	c.JSON(http.StatusOK, portal)
}

// CreateInitiatorGroupRequest represents a request to create an initiator group
type CreateInitiatorGroupRequest struct {
	TargetID  string `json:"target_id" binding:"required"`
	GroupName string `json:"group_name" binding:"required"`
}

// CreateInitiatorGroup creates a new initiator group
func (h *Handler) CreateInitiatorGroup(c *gin.Context) {
	var req CreateInitiatorGroupRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		validationErrors := make(map[string]string)
		if ve, ok := err.(validator.ValidationErrors); ok {
			for _, fe := range ve {
				field := strings.ToLower(fe.Field())
				validationErrors[field] = fmt.Sprintf("Field '%s' is required", field)
			}
		}
		c.JSON(http.StatusBadRequest, gin.H{
			"error":             "invalid request",
			"validation_errors": validationErrors,
		})
		return
	}

	group, err := h.service.CreateInitiatorGroup(c.Request.Context(), req.TargetID, req.GroupName)
	if err != nil {
		if strings.Contains(err.Error(), "already exists") || strings.Contains(err.Error(), "not found") {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		h.logger.Error("Failed to create initiator group", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create initiator group"})
		return
	}

	c.JSON(http.StatusOK, group)
}

// UpdateInitiatorGroupRequest represents a request to update an initiator group
type UpdateInitiatorGroupRequest struct {
	GroupName string `json:"group_name" binding:"required"`
}

// UpdateInitiatorGroup updates an initiator group
func (h *Handler) UpdateInitiatorGroup(c *gin.Context) {
	groupID := c.Param("id")

	var req UpdateInitiatorGroupRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		validationErrors := make(map[string]string)
		if ve, ok := err.(validator.ValidationErrors); ok {
			for _, fe := range ve {
				field := strings.ToLower(fe.Field())
				validationErrors[field] = fmt.Sprintf("Field '%s' is required", field)
			}
		}
		c.JSON(http.StatusBadRequest, gin.H{
			"error":             "invalid request",
			"validation_errors": validationErrors,
		})
		return
	}

	group, err := h.service.UpdateInitiatorGroup(c.Request.Context(), groupID, req.GroupName)
	if err != nil {
		if strings.Contains(err.Error(), "not found") || strings.Contains(err.Error(), "already exists") {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		h.logger.Error("Failed to update initiator group", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to update initiator group"})
		return
	}

	c.JSON(http.StatusOK, group)
}

// DeleteInitiatorGroup deletes an initiator group
func (h *Handler) DeleteInitiatorGroup(c *gin.Context) {
	groupID := c.Param("id")

	err := h.service.DeleteInitiatorGroup(c.Request.Context(), groupID)
	if err != nil {
		if strings.Contains(err.Error(), "not found") {
			c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
			return
		}
		if strings.Contains(err.Error(), "cannot delete") || strings.Contains(err.Error(), "contains") {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		h.logger.Error("Failed to delete initiator group", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to delete initiator group"})
		return
	}

	c.JSON(http.StatusOK, gin.H{"message": "initiator group deleted successfully"})
}

// GetInitiatorGroup retrieves an initiator group by ID
func (h *Handler) GetInitiatorGroup(c *gin.Context) {
	groupID := c.Param("id")

	group, err := h.service.GetInitiatorGroup(c.Request.Context(), groupID)
	if err != nil {
		if strings.Contains(err.Error(), "not found") {
			c.JSON(http.StatusNotFound, gin.H{"error": "initiator group not found"})
			return
		}
		h.logger.Error("Failed to get initiator group", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get initiator group"})
		return
	}

	c.JSON(http.StatusOK, group)
}

// ListAllInitiatorGroups lists all initiator groups
func (h *Handler) ListAllInitiatorGroups(c *gin.Context) {
	groups, err := h.service.ListAllInitiatorGroups(c.Request.Context())
	if err != nil {
		h.logger.Error("Failed to list initiator groups", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list initiator groups"})
		return
	}

	if groups == nil {
		groups = []InitiatorGroup{}
	}

	c.JSON(http.StatusOK, gin.H{"groups": groups})
}

// GetConfigFile reads the SCST configuration file content
func (h *Handler) GetConfigFile(c *gin.Context) {
	configPath := c.DefaultQuery("path", "/etc/scst.conf")

	content, err := h.service.ReadConfigFile(c.Request.Context(), configPath)
	if err != nil {
		h.logger.Error("Failed to read config file", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{
		"content": content,
		"path":    configPath,
	})
}

// UpdateConfigFile writes content to SCST configuration file
func (h *Handler) UpdateConfigFile(c *gin.Context) {
	var req struct {
		Content string `json:"content" binding:"required"`
		Path    string `json:"path"`
	}

	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
		return
	}

	configPath := req.Path
	if configPath == "" {
		configPath = "/etc/scst.conf"
	}

	if err := h.service.WriteConfigFile(c.Request.Context(), configPath, req.Content); err != nil {
		h.logger.Error("Failed to write config file", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{
		"message": "Configuration file updated successfully",
		"path":    configPath,
	})
}
File diff suppressed because it is too large
backend/internal/shares/handler.go (new file, 147 lines)
@@ -0,0 +1,147 @@
package shares

import (
	"net/http"

	"github.com/atlasos/calypso/internal/common/database"
	"github.com/atlasos/calypso/internal/common/logger"
	"github.com/gin-gonic/gin"
	"github.com/go-playground/validator/v10"
)

// Handler handles Shares-related API requests
type Handler struct {
	service *Service
	logger  *logger.Logger
}

// NewHandler creates a new Shares handler
func NewHandler(db *database.DB, log *logger.Logger) *Handler {
	return &Handler{
		service: NewService(db, log),
		logger:  log,
	}
}

// ListShares lists all shares
func (h *Handler) ListShares(c *gin.Context) {
	shares, err := h.service.ListShares(c.Request.Context())
	if err != nil {
		h.logger.Error("Failed to list shares", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list shares"})
		return
	}

	// Ensure we return an empty array instead of null
	if shares == nil {
		shares = []*Share{}
	}

	c.JSON(http.StatusOK, gin.H{"shares": shares})
}

// GetShare retrieves a share by ID
func (h *Handler) GetShare(c *gin.Context) {
	shareID := c.Param("id")

	share, err := h.service.GetShare(c.Request.Context(), shareID)
	if err != nil {
		if err.Error() == "share not found" {
			c.JSON(http.StatusNotFound, gin.H{"error": "share not found"})
			return
		}
		h.logger.Error("Failed to get share", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get share"})
		return
	}

	c.JSON(http.StatusOK, share)
}

// CreateShare creates a new share
func (h *Handler) CreateShare(c *gin.Context) {
	var req CreateShareRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		h.logger.Error("Invalid create share request", "error", err)
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request: " + err.Error()})
		return
	}

	// Validate request
	validate := validator.New()
	if err := validate.Struct(req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": "validation failed: " + err.Error()})
		return
	}

	// Get user ID from context (set by auth middleware)
	userID, exists := c.Get("user_id")
	if !exists {
		c.JSON(http.StatusUnauthorized, gin.H{"error": "unauthorized"})
		return
	}

	share, err := h.service.CreateShare(c.Request.Context(), &req, userID.(string))
	if err != nil {
		if err.Error() == "dataset not found" {
			c.JSON(http.StatusNotFound, gin.H{"error": "dataset not found"})
			return
		}
		if err.Error() == "only filesystem datasets can be shared (not volumes)" {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		if err.Error() == "at least one protocol (NFS or SMB) must be enabled" {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		h.logger.Error("Failed to create share", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusCreated, share)
}

// UpdateShare updates an existing share
func (h *Handler) UpdateShare(c *gin.Context) {
	shareID := c.Param("id")

	var req UpdateShareRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		h.logger.Error("Invalid update share request", "error", err)
		c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request: " + err.Error()})
		return
	}

	share, err := h.service.UpdateShare(c.Request.Context(), shareID, &req)
	if err != nil {
		if err.Error() == "share not found" {
			c.JSON(http.StatusNotFound, gin.H{"error": "share not found"})
			return
		}
		h.logger.Error("Failed to update share", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, share)
}

// DeleteShare deletes a share
func (h *Handler) DeleteShare(c *gin.Context) {
	shareID := c.Param("id")

	err := h.service.DeleteShare(c.Request.Context(), shareID)
	if err != nil {
		if err.Error() == "share not found" {
			c.JSON(http.StatusNotFound, gin.H{"error": "share not found"})
			return
		}
		h.logger.Error("Failed to delete share", "error", err)
		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
		return
	}

	c.JSON(http.StatusOK, gin.H{"message": "share deleted successfully"})
}
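For reference, a sketch of the JSON body a client might POST to the create-share route, using field names from `CreateShareRequest` in the service below; the values are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	payload := map[string]interface{}{
		"dataset_id":     "tank/projects", // a UUID or a dataset name are both accepted
		"nfs_enabled":    true,
		"nfs_clients":    []string{"10.0.0.0/24"},
		"smb_enabled":    true,
		"smb_share_name": "projects",
		"smb_guest_ok":   false,
	}
	b, _ := json.MarshalIndent(payload, "", "  ")
	fmt.Println(string(b)) // request body for the CreateShare route
}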
backend/internal/shares/service.go (new file, 806 lines)
@@ -0,0 +1,806 @@
|
||||
package shares
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/atlasos/calypso/internal/common/database"
|
||||
"github.com/atlasos/calypso/internal/common/logger"
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
||||
// Service handles Shares (CIFS/NFS) operations
|
||||
type Service struct {
|
||||
db *database.DB
|
||||
logger *logger.Logger
|
||||
}
|
||||
|
||||
// NewService creates a new Shares service
|
||||
func NewService(db *database.DB, log *logger.Logger) *Service {
|
||||
return &Service{
|
||||
db: db,
|
||||
logger: log,
|
||||
}
|
||||
}
|
||||
|
||||
// Share represents a filesystem share (NFS/SMB)
|
||||
type Share struct {
|
||||
ID string `json:"id"`
|
||||
DatasetID string `json:"dataset_id"`
|
||||
DatasetName string `json:"dataset_name"`
|
||||
MountPoint string `json:"mount_point"`
|
||||
ShareType string `json:"share_type"` // 'nfs', 'smb', 'both'
|
||||
NFSEnabled bool `json:"nfs_enabled"`
|
||||
NFSOptions string `json:"nfs_options,omitempty"`
|
||||
NFSClients []string `json:"nfs_clients,omitempty"`
|
||||
SMBEnabled bool `json:"smb_enabled"`
|
||||
SMBShareName string `json:"smb_share_name,omitempty"`
|
||||
SMBPath string `json:"smb_path,omitempty"`
|
||||
SMBComment string `json:"smb_comment,omitempty"`
|
||||
SMBGuestOK bool `json:"smb_guest_ok"`
|
||||
SMBReadOnly bool `json:"smb_read_only"`
|
||||
SMBBrowseable bool `json:"smb_browseable"`
|
||||
IsActive bool `json:"is_active"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
CreatedBy string `json:"created_by"`
|
||||
}
|
||||
|
||||
// ListShares lists all shares
|
||||
func (s *Service) ListShares(ctx context.Context) ([]*Share, error) {
|
||||
query := `
|
||||
SELECT
|
||||
zs.id, zs.dataset_id, zd.name as dataset_name, zd.mount_point,
|
||||
zs.share_type, zs.nfs_enabled, zs.nfs_options, zs.nfs_clients,
|
||||
zs.smb_enabled, zs.smb_share_name, zs.smb_path, zs.smb_comment,
|
||||
zs.smb_guest_ok, zs.smb_read_only, zs.smb_browseable,
|
||||
zs.is_active, zs.created_at, zs.updated_at, zs.created_by
|
||||
FROM zfs_shares zs
|
||||
JOIN zfs_datasets zd ON zs.dataset_id = zd.id
|
||||
ORDER BY zd.name
|
||||
`
|
||||
|
||||
rows, err := s.db.QueryContext(ctx, query)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "does not exist") {
|
||||
s.logger.Warn("zfs_shares table does not exist, returning empty list")
|
||||
return []*Share{}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("failed to list shares: %w", err)
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var shares []*Share
|
||||
for rows.Next() {
|
||||
var share Share
|
||||
var mountPoint sql.NullString
|
||||
var nfsOptions sql.NullString
|
||||
var smbShareName sql.NullString
|
||||
var smbPath sql.NullString
|
||||
var smbComment sql.NullString
|
||||
var nfsClients []string
|
||||
|
||||
err := rows.Scan(
|
||||
&share.ID, &share.DatasetID, &share.DatasetName, &mountPoint,
|
||||
&share.ShareType, &share.NFSEnabled, &nfsOptions, pq.Array(&nfsClients),
|
||||
&share.SMBEnabled, &smbShareName, &smbPath, &smbComment,
|
||||
&share.SMBGuestOK, &share.SMBReadOnly, &share.SMBBrowseable,
|
||||
&share.IsActive, &share.CreatedAt, &share.UpdatedAt, &share.CreatedBy,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to scan share row", "error", err)
|
||||
continue
|
||||
}
|
||||
|
||||
share.NFSClients = nfsClients
|
||||
|
||||
if mountPoint.Valid {
|
||||
share.MountPoint = mountPoint.String
|
||||
}
|
||||
if nfsOptions.Valid {
|
||||
share.NFSOptions = nfsOptions.String
|
||||
}
|
||||
if smbShareName.Valid {
|
||||
share.SMBShareName = smbShareName.String
|
||||
}
|
||||
if smbPath.Valid {
|
||||
share.SMBPath = smbPath.String
|
||||
}
|
||||
if smbComment.Valid {
|
||||
share.SMBComment = smbComment.String
|
||||
}
|
||||
|
||||
shares = append(shares, &share)
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, fmt.Errorf("error iterating share rows: %w", err)
|
||||
}
|
||||
|
||||
return shares, nil
|
||||
}
|
||||
|
||||
// GetShare retrieves a share by ID
|
||||
func (s *Service) GetShare(ctx context.Context, shareID string) (*Share, error) {
|
||||
query := `
|
||||
SELECT
|
||||
zs.id, zs.dataset_id, zd.name as dataset_name, zd.mount_point,
|
||||
zs.share_type, zs.nfs_enabled, zs.nfs_options, zs.nfs_clients,
|
||||
zs.smb_enabled, zs.smb_share_name, zs.smb_path, zs.smb_comment,
|
||||
zs.smb_guest_ok, zs.smb_read_only, zs.smb_browseable,
|
||||
zs.is_active, zs.created_at, zs.updated_at, zs.created_by
|
||||
FROM zfs_shares zs
|
||||
JOIN zfs_datasets zd ON zs.dataset_id = zd.id
|
||||
WHERE zs.id = $1
|
||||
`
|
||||
|
||||
var share Share
|
||||
var mountPoint sql.NullString
|
||||
var nfsOptions sql.NullString
|
||||
var smbShareName sql.NullString
|
||||
var smbPath sql.NullString
|
||||
var smbComment sql.NullString
|
||||
var nfsClients []string
|
||||
|
||||
err := s.db.QueryRowContext(ctx, query, shareID).Scan(
|
||||
&share.ID, &share.DatasetID, &share.DatasetName, &mountPoint,
|
||||
&share.ShareType, &share.NFSEnabled, &nfsOptions, pq.Array(&nfsClients),
|
||||
&share.SMBEnabled, &smbShareName, &smbPath, &smbComment,
|
||||
&share.SMBGuestOK, &share.SMBReadOnly, &share.SMBBrowseable,
|
||||
&share.IsActive, &share.CreatedAt, &share.UpdatedAt, &share.CreatedBy,
|
||||
)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return nil, fmt.Errorf("share not found")
|
||||
}
|
||||
return nil, fmt.Errorf("failed to get share: %w", err)
|
||||
}
|
||||
|
||||
share.NFSClients = nfsClients
|
||||
|
||||
if mountPoint.Valid {
|
||||
share.MountPoint = mountPoint.String
|
||||
}
|
||||
if nfsOptions.Valid {
|
||||
share.NFSOptions = nfsOptions.String
|
||||
}
|
||||
if smbShareName.Valid {
|
||||
share.SMBShareName = smbShareName.String
|
||||
}
|
||||
if smbPath.Valid {
|
||||
share.SMBPath = smbPath.String
|
||||
}
|
||||
if smbComment.Valid {
|
||||
share.SMBComment = smbComment.String
|
||||
}
|
||||
|
||||
return &share, nil
|
||||
}

// CreateShareRequest represents a share creation request
type CreateShareRequest struct {
    DatasetID     string   `json:"dataset_id" binding:"required"`
    NFSEnabled    bool     `json:"nfs_enabled"`
    NFSOptions    string   `json:"nfs_options"`
    NFSClients    []string `json:"nfs_clients"`
    SMBEnabled    bool     `json:"smb_enabled"`
    SMBShareName  string   `json:"smb_share_name"`
    SMBPath       string   `json:"smb_path"`
    SMBComment    string   `json:"smb_comment"`
    SMBGuestOK    bool     `json:"smb_guest_ok"`
    SMBReadOnly   bool     `json:"smb_read_only"`
    SMBBrowseable bool     `json:"smb_browseable"`
}

// CreateShare creates a new share
func (s *Service) CreateShare(ctx context.Context, req *CreateShareRequest, userID string) (*Share, error) {
    // Validate dataset exists and is a filesystem (not volume)
    // req.DatasetID can be either UUID or dataset name
    var datasetID, datasetType, datasetName, mountPoint string
    var mountPointNull sql.NullString

    // Try to find by ID first (UUID)
    err := s.db.QueryRowContext(ctx,
        "SELECT id, type, name, mount_point FROM zfs_datasets WHERE id = $1",
        req.DatasetID,
    ).Scan(&datasetID, &datasetType, &datasetName, &mountPointNull)

    // If not found by ID, try by name
    if err == sql.ErrNoRows {
        err = s.db.QueryRowContext(ctx,
            "SELECT id, type, name, mount_point FROM zfs_datasets WHERE name = $1",
            req.DatasetID,
        ).Scan(&datasetID, &datasetType, &datasetName, &mountPointNull)
    }

    if err != nil {
        if err == sql.ErrNoRows {
            return nil, fmt.Errorf("dataset not found")
        }
        return nil, fmt.Errorf("failed to validate dataset: %w", err)
    }

    if mountPointNull.Valid {
        mountPoint = mountPointNull.String
    } else {
        mountPoint = "none"
    }

    if datasetType != "filesystem" {
        return nil, fmt.Errorf("only filesystem datasets can be shared (not volumes)")
    }

    // Determine share type
    shareType := "none"
    if req.NFSEnabled && req.SMBEnabled {
        shareType = "both"
    } else if req.NFSEnabled {
        shareType = "nfs"
    } else if req.SMBEnabled {
        shareType = "smb"
    } else {
        return nil, fmt.Errorf("at least one protocol (NFS or SMB) must be enabled")
    }

    // Set default NFS options if not provided
    nfsOptions := req.NFSOptions
    if nfsOptions == "" {
        nfsOptions = "rw,sync,no_subtree_check"
    }

    // Set default SMB share name if not provided
    smbShareName := req.SMBShareName
    if smbShareName == "" {
        // Extract dataset name from full path (e.g., "pool/dataset" -> "dataset")
        parts := strings.Split(datasetName, "/")
        smbShareName = parts[len(parts)-1]
    }

    // Set SMB path (use mount_point if available, otherwise use dataset name)
    smbPath := req.SMBPath
    if smbPath == "" {
        if mountPoint != "" && mountPoint != "none" {
            smbPath = mountPoint
        } else {
            smbPath = fmt.Sprintf("/mnt/%s", strings.ReplaceAll(datasetName, "/", "_"))
        }
    }

    // Insert into database
    query := `
        INSERT INTO zfs_shares (
            dataset_id, share_type, nfs_enabled, nfs_options, nfs_clients,
            smb_enabled, smb_share_name, smb_path, smb_comment,
            smb_guest_ok, smb_read_only, smb_browseable, is_active, created_by
        ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)
        RETURNING id, created_at, updated_at
    `

    var shareID string
    var createdAt, updatedAt time.Time

    // Handle nfs_clients array - use empty array if nil
    nfsClients := req.NFSClients
    if nfsClients == nil {
        nfsClients = []string{}
    }

    err = s.db.QueryRowContext(ctx, query,
        datasetID, shareType, req.NFSEnabled, nfsOptions, pq.Array(nfsClients),
        req.SMBEnabled, smbShareName, smbPath, req.SMBComment,
        req.SMBGuestOK, req.SMBReadOnly, req.SMBBrowseable, true, userID,
    ).Scan(&shareID, &createdAt, &updatedAt)
    if err != nil {
        return nil, fmt.Errorf("failed to create share: %w", err)
    }

    // Apply NFS export if enabled
    if req.NFSEnabled {
        if err := s.applyNFSExport(ctx, mountPoint, nfsOptions, req.NFSClients); err != nil {
            s.logger.Error("Failed to apply NFS export", "error", err, "share_id", shareID)
            // Don't fail the creation, but log the error
        }
    }

    // Apply SMB share if enabled
    if req.SMBEnabled {
        if err := s.applySMBShare(ctx, smbShareName, smbPath, req.SMBComment, req.SMBGuestOK, req.SMBReadOnly, req.SMBBrowseable); err != nil {
            s.logger.Error("Failed to apply SMB share", "error", err, "share_id", shareID)
            // Don't fail the creation, but log the error
        }
    }

    // Return the created share
    return s.GetShare(ctx, shareID)
}
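
// Example (hypothetical dataset): for a request whose DatasetID resolves to
// the filesystem dataset "tank/media" with no mount point and no optional
// fields set, the defaults derived above work out to:
//
//    smbShareName = "media"                    // last segment of the dataset name
//    smbPath      = "/mnt/tank_media"          // "/" replaced by "_" under /mnt
//    nfsOptions   = "rw,sync,no_subtree_check"
//    shareType    = "both"                     // when both protocols are enabled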

// UpdateShareRequest represents a share update request
type UpdateShareRequest struct {
    NFSEnabled    *bool     `json:"nfs_enabled"`
    NFSOptions    *string   `json:"nfs_options"`
    NFSClients    *[]string `json:"nfs_clients"`
    SMBEnabled    *bool     `json:"smb_enabled"`
    SMBShareName  *string   `json:"smb_share_name"`
    SMBComment    *string   `json:"smb_comment"`
    SMBGuestOK    *bool     `json:"smb_guest_ok"`
    SMBReadOnly   *bool     `json:"smb_read_only"`
    SMBBrowseable *bool     `json:"smb_browseable"`
    IsActive      *bool     `json:"is_active"`
}

// UpdateShare updates an existing share
func (s *Service) UpdateShare(ctx context.Context, shareID string, req *UpdateShareRequest) (*Share, error) {
    // Get current share
    share, err := s.GetShare(ctx, shareID)
    if err != nil {
        return nil, err
    }

    // Build update query dynamically
    updates := []string{}
    args := []interface{}{}
    argIndex := 1

    if req.NFSEnabled != nil {
        updates = append(updates, fmt.Sprintf("nfs_enabled = $%d", argIndex))
        args = append(args, *req.NFSEnabled)
        argIndex++
    }
    if req.NFSOptions != nil {
        updates = append(updates, fmt.Sprintf("nfs_options = $%d", argIndex))
        args = append(args, *req.NFSOptions)
        argIndex++
    }
    if req.NFSClients != nil {
        updates = append(updates, fmt.Sprintf("nfs_clients = $%d", argIndex))
        args = append(args, pq.Array(*req.NFSClients))
        argIndex++
    }
    if req.SMBEnabled != nil {
        updates = append(updates, fmt.Sprintf("smb_enabled = $%d", argIndex))
        args = append(args, *req.SMBEnabled)
        argIndex++
    }
    if req.SMBShareName != nil {
        updates = append(updates, fmt.Sprintf("smb_share_name = $%d", argIndex))
        args = append(args, *req.SMBShareName)
        argIndex++
    }
    if req.SMBComment != nil {
        updates = append(updates, fmt.Sprintf("smb_comment = $%d", argIndex))
        args = append(args, *req.SMBComment)
        argIndex++
    }
    if req.SMBGuestOK != nil {
        updates = append(updates, fmt.Sprintf("smb_guest_ok = $%d", argIndex))
        args = append(args, *req.SMBGuestOK)
        argIndex++
    }
    if req.SMBReadOnly != nil {
        updates = append(updates, fmt.Sprintf("smb_read_only = $%d", argIndex))
        args = append(args, *req.SMBReadOnly)
        argIndex++
    }
    if req.SMBBrowseable != nil {
        updates = append(updates, fmt.Sprintf("smb_browseable = $%d", argIndex))
        args = append(args, *req.SMBBrowseable)
        argIndex++
    }
    if req.IsActive != nil {
        updates = append(updates, fmt.Sprintf("is_active = $%d", argIndex))
        args = append(args, *req.IsActive)
        argIndex++
    }

    if len(updates) == 0 {
        return share, nil // No changes
    }

    // Update share_type based on enabled protocols
    nfsEnabled := share.NFSEnabled
    smbEnabled := share.SMBEnabled
    if req.NFSEnabled != nil {
        nfsEnabled = *req.NFSEnabled
    }
    if req.SMBEnabled != nil {
        smbEnabled = *req.SMBEnabled
    }

    shareType := "none"
    if nfsEnabled && smbEnabled {
        shareType = "both"
    } else if nfsEnabled {
        shareType = "nfs"
    } else if smbEnabled {
        shareType = "smb"
    }

    updates = append(updates, fmt.Sprintf("share_type = $%d", argIndex))
    args = append(args, shareType)
    argIndex++

    updates = append(updates, "updated_at = NOW()") // plain string; fmt.Sprintf with no verbs was redundant
    args = append(args, shareID)

    query := fmt.Sprintf(`
        UPDATE zfs_shares
        SET %s
        WHERE id = $%d
    `, strings.Join(updates, ", "), argIndex)

    _, err = s.db.ExecContext(ctx, query, args...)
    if err != nil {
        return nil, fmt.Errorf("failed to update share: %w", err)
    }

    // Re-apply NFS export if NFS is enabled
    if nfsEnabled {
        nfsOptions := share.NFSOptions
        if req.NFSOptions != nil {
            nfsOptions = *req.NFSOptions
        }
        nfsClients := share.NFSClients
        if req.NFSClients != nil {
            nfsClients = *req.NFSClients
        }
        if err := s.applyNFSExport(ctx, share.MountPoint, nfsOptions, nfsClients); err != nil {
            s.logger.Error("Failed to apply NFS export", "error", err, "share_id", shareID)
        }
    } else {
        // Remove NFS export if disabled
        if err := s.removeNFSExport(ctx, share.MountPoint); err != nil {
            s.logger.Error("Failed to remove NFS export", "error", err, "share_id", shareID)
        }
    }

    // Re-apply SMB share if SMB is enabled
    if smbEnabled {
        smbShareName := share.SMBShareName
        if req.SMBShareName != nil {
            smbShareName = *req.SMBShareName
        }
        smbPath := share.SMBPath
        smbComment := share.SMBComment
        if req.SMBComment != nil {
            smbComment = *req.SMBComment
        }
        smbGuestOK := share.SMBGuestOK
        if req.SMBGuestOK != nil {
            smbGuestOK = *req.SMBGuestOK
        }
        smbReadOnly := share.SMBReadOnly
        if req.SMBReadOnly != nil {
            smbReadOnly = *req.SMBReadOnly
        }
        smbBrowseable := share.SMBBrowseable
        if req.SMBBrowseable != nil {
            smbBrowseable = *req.SMBBrowseable
        }
        if err := s.applySMBShare(ctx, smbShareName, smbPath, smbComment, smbGuestOK, smbReadOnly, smbBrowseable); err != nil {
            s.logger.Error("Failed to apply SMB share", "error", err, "share_id", shareID)
        }
    } else {
        // Remove SMB share if disabled
        if err := s.removeSMBShare(ctx, share.SMBShareName); err != nil {
            s.logger.Error("Failed to remove SMB share", "error", err, "share_id", shareID)
        }
    }

    return s.GetShare(ctx, shareID)
}
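
// Example (hypothetical request): an update that only sets smb_read_only
// builds the following statement (placeholder numbering is assigned
// dynamically, with the share ID always bound last):
//
//    UPDATE zfs_shares
//    SET smb_read_only = $1, share_type = $2, updated_at = NOW()
//    WHERE id = $3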

// DeleteShare deletes a share
func (s *Service) DeleteShare(ctx context.Context, shareID string) error {
    // Get share to get mount point and share name
    share, err := s.GetShare(ctx, shareID)
    if err != nil {
        return err
    }

    // Remove NFS export
    if share.NFSEnabled {
        if err := s.removeNFSExport(ctx, share.MountPoint); err != nil {
            s.logger.Error("Failed to remove NFS export", "error", err, "share_id", shareID)
        }
    }

    // Remove SMB share
    if share.SMBEnabled {
        if err := s.removeSMBShare(ctx, share.SMBShareName); err != nil {
            s.logger.Error("Failed to remove SMB share", "error", err, "share_id", shareID)
        }
    }

    // Delete from database
    _, err = s.db.ExecContext(ctx, "DELETE FROM zfs_shares WHERE id = $1", shareID)
    if err != nil {
        return fmt.Errorf("failed to delete share: %w", err)
    }

    return nil
}

// applyNFSExport adds or updates an NFS export in /etc/exports
func (s *Service) applyNFSExport(ctx context.Context, mountPoint, options string, clients []string) error {
    if mountPoint == "" || mountPoint == "none" {
        return fmt.Errorf("mount point is required for NFS export")
    }

    // Build client list (default to * if empty)
    clientList := "*"
    if len(clients) > 0 {
        clientList = strings.Join(clients, " ")
    }

    // Build export line
    exportLine := fmt.Sprintf("%s %s(%s)", mountPoint, clientList, options)

    // Read current /etc/exports
    exportsPath := "/etc/exports"
    exportsContent, err := os.ReadFile(exportsPath)
    if err != nil && !os.IsNotExist(err) {
        return fmt.Errorf("failed to read exports file: %w", err)
    }

    lines := strings.Split(string(exportsContent), "\n")
    var newLines []string
    found := false

    // Check if this mount point already exists
    for _, line := range lines {
        line = strings.TrimSpace(line)
        if line == "" || strings.HasPrefix(line, "#") {
            newLines = append(newLines, line)
            continue
        }

        // Check if this line is for our mount point
        if strings.HasPrefix(line, mountPoint+" ") {
            newLines = append(newLines, exportLine)
            found = true
        } else {
            newLines = append(newLines, line)
        }
    }

    // Add if not found
    if !found {
        newLines = append(newLines, exportLine)
    }

    // Write back to file
    newContent := strings.Join(newLines, "\n") + "\n"
    if err := os.WriteFile(exportsPath, []byte(newContent), 0644); err != nil {
        return fmt.Errorf("failed to write exports file: %w", err)
    }

    // Apply exports
    cmd := exec.CommandContext(ctx, "sudo", "exportfs", "-ra")
    if output, err := cmd.CombinedOutput(); err != nil {
        return fmt.Errorf("failed to apply exports: %s: %w", string(output), err)
    }

    s.logger.Info("NFS export applied", "mount_point", mountPoint, "clients", clientList)
    return nil
}
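
// Example (hypothetical share): with mountPoint "/tank/media", clients
// ["10.0.0.0/24", "192.168.1.5"] and the default options, the line written
// to /etc/exports is:
//
//    /tank/media 10.0.0.0/24 192.168.1.5(rw,sync,no_subtree_check)
//
// Note that exports(5) binds parenthesised options to the host immediately
// preceding them, so with multiple clients only the last one receives these
// options; earlier hosts fall back to the export defaults.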

// removeNFSExport removes an NFS export from /etc/exports
func (s *Service) removeNFSExport(ctx context.Context, mountPoint string) error {
    if mountPoint == "" || mountPoint == "none" {
        return nil // Nothing to remove
    }

    exportsPath := "/etc/exports"
    exportsContent, err := os.ReadFile(exportsPath)
    if err != nil {
        if os.IsNotExist(err) {
            return nil // File doesn't exist, nothing to remove
        }
        return fmt.Errorf("failed to read exports file: %w", err)
    }

    lines := strings.Split(string(exportsContent), "\n")
    var newLines []string

    for _, line := range lines {
        line = strings.TrimSpace(line)
        if line == "" || strings.HasPrefix(line, "#") {
            newLines = append(newLines, line)
            continue
        }

        // Skip lines for this mount point
        if strings.HasPrefix(line, mountPoint+" ") {
            continue
        }

        newLines = append(newLines, line)
    }

    // Write back to file
    newContent := strings.Join(newLines, "\n")
    if newContent != "" && !strings.HasSuffix(newContent, "\n") {
        newContent += "\n"
    }
    if err := os.WriteFile(exportsPath, []byte(newContent), 0644); err != nil {
        return fmt.Errorf("failed to write exports file: %w", err)
    }

    // Apply exports
    cmd := exec.CommandContext(ctx, "sudo", "exportfs", "-ra")
    if output, err := cmd.CombinedOutput(); err != nil {
        return fmt.Errorf("failed to apply exports: %s: %w", string(output), err)
    }

    s.logger.Info("NFS export removed", "mount_point", mountPoint)
    return nil
}

// applySMBShare adds or updates an SMB share in /etc/samba/smb.conf
func (s *Service) applySMBShare(ctx context.Context, shareName, path, comment string, guestOK, readOnly, browseable bool) error {
    if shareName == "" {
        return fmt.Errorf("SMB share name is required")
    }
    if path == "" {
        return fmt.Errorf("SMB path is required")
    }

    smbConfPath := "/etc/samba/smb.conf"
    smbContent, err := os.ReadFile(smbConfPath)
    if err != nil {
        return fmt.Errorf("failed to read smb.conf: %w", err)
    }

    // Parse and update smb.conf
    lines := strings.Split(string(smbContent), "\n")
    var newLines []string
    inShare := false
    shareStart := -1

    for i, line := range lines {
        trimmed := strings.TrimSpace(line)

        // Check if we're entering our share section
        if strings.HasPrefix(trimmed, "[") && strings.HasSuffix(trimmed, "]") {
            sectionName := trimmed[1 : len(trimmed)-1]
            if sectionName == shareName {
                inShare = true
                shareStart = i
                continue
            } else if inShare {
                // We've left our share section, insert the share config here
                newLines = append(newLines, s.buildSMBShareConfig(shareName, path, comment, guestOK, readOnly, browseable))
                inShare = false
            }
        }

        if inShare {
            // Skip lines until we find the next section or end of file
            continue
        }

        newLines = append(newLines, line)
    }

    // If we were still in the share at the end, add it
    if inShare {
        newLines = append(newLines, s.buildSMBShareConfig(shareName, path, comment, guestOK, readOnly, browseable))
    } else if shareStart == -1 {
        // Share doesn't exist, add it at the end
        newLines = append(newLines, "")
        newLines = append(newLines, s.buildSMBShareConfig(shareName, path, comment, guestOK, readOnly, browseable))
    }

    // Write back to file
    newContent := strings.Join(newLines, "\n")
    if err := os.WriteFile(smbConfPath, []byte(newContent), 0644); err != nil {
        return fmt.Errorf("failed to write smb.conf: %w", err)
    }

    // Reload Samba
    cmd := exec.CommandContext(ctx, "sudo", "systemctl", "reload", "smbd")
    if output, err := cmd.CombinedOutput(); err != nil {
        // Try restart if reload fails
        cmd = exec.CommandContext(ctx, "sudo", "systemctl", "restart", "smbd")
        if output2, err2 := cmd.CombinedOutput(); err2 != nil {
            return fmt.Errorf("failed to reload/restart smbd: %s / %s: %w", string(output), string(output2), err2)
        }
    }

    s.logger.Info("SMB share applied", "share_name", shareName, "path", path)
    return nil
}

// buildSMBShareConfig builds the SMB share configuration block
func (s *Service) buildSMBShareConfig(shareName, path, comment string, guestOK, readOnly, browseable bool) string {
    var config []string
    config = append(config, fmt.Sprintf("[%s]", shareName))
    if comment != "" {
        config = append(config, fmt.Sprintf(" comment = %s", comment))
    }
    config = append(config, fmt.Sprintf(" path = %s", path))
    if guestOK {
        config = append(config, " guest ok = yes")
    } else {
        config = append(config, " guest ok = no")
    }
    if readOnly {
        config = append(config, " read only = yes")
    } else {
        config = append(config, " read only = no")
    }
    if browseable {
        config = append(config, " browseable = yes")
    } else {
        config = append(config, " browseable = no")
    }
    return strings.Join(config, "\n")
}
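
// Example (hypothetical share): buildSMBShareConfig("media", "/tank/media",
// "Media files", true, false, true) returns the block:
//
//    [media]
//     comment = Media files
//     path = /tank/media
//     guest ok = yes
//     read only = no
//     browseable = yes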

// removeSMBShare removes an SMB share from /etc/samba/smb.conf
func (s *Service) removeSMBShare(ctx context.Context, shareName string) error {
    if shareName == "" {
        return nil // Nothing to remove
    }

    smbConfPath := "/etc/samba/smb.conf"
    smbContent, err := os.ReadFile(smbConfPath)
    if err != nil {
        if os.IsNotExist(err) {
            return nil // File doesn't exist, nothing to remove
        }
        return fmt.Errorf("failed to read smb.conf: %w", err)
    }

    lines := strings.Split(string(smbContent), "\n")
    var newLines []string
    inShare := false

    for _, line := range lines {
        trimmed := strings.TrimSpace(line)

        // Check if we're entering our share section
        if strings.HasPrefix(trimmed, "[") && strings.HasSuffix(trimmed, "]") {
            sectionName := trimmed[1 : len(trimmed)-1]
            if sectionName == shareName {
                inShare = true
                continue
            } else if inShare {
                // We've left our share section
                inShare = false
            }
        }

        if inShare {
            // Skip lines in this share section
            continue
        }

        newLines = append(newLines, line)
    }

    // Write back to file
    newContent := strings.Join(newLines, "\n")
    if err := os.WriteFile(smbConfPath, []byte(newContent), 0644); err != nil {
        return fmt.Errorf("failed to write smb.conf: %w", err)
    }

    // Reload Samba
    cmd := exec.CommandContext(ctx, "sudo", "systemctl", "reload", "smbd")
    if output, err := cmd.CombinedOutput(); err != nil {
        // Try restart if reload fails
        cmd = exec.CommandContext(ctx, "sudo", "systemctl", "restart", "smbd")
        if output2, err2 := cmd.CombinedOutput(); err2 != nil {
            return fmt.Errorf("failed to reload/restart smbd: %s / %s: %w", string(output), string(output2), err2)
        }
    }

    s.logger.Info("SMB share removed", "share_name", shareName)
    return nil
}

@@ -610,6 +610,7 @@ func (s *ZFSService) AddSpareDisk(ctx context.Context, poolID string, diskPaths

// ZFSDataset represents a ZFS dataset
type ZFSDataset struct {
    ID   string `json:"id"`
    Name string `json:"name"`
    Pool string `json:"pool"`
    Type string `json:"type"` // filesystem, volume, snapshot
@@ -628,7 +629,7 @@ type ZFSDataset struct {
func (s *ZFSService) ListDatasets(ctx context.Context, poolName string) ([]*ZFSDataset, error) {
    // Get datasets from database
    query := `
        SELECT name, pool_name, type, mount_point,
        SELECT id, name, pool_name, type, mount_point,
               used_bytes, available_bytes, referenced_bytes,
               compression, deduplication, quota, reservation,
               created_at
@@ -654,7 +655,7 @@ func (s *ZFSService) ListDatasets(ctx context.Context, poolName string) ([]*ZFSD
        var mountPoint sql.NullString

        err := rows.Scan(
            &ds.Name, &ds.Pool, &ds.Type, &mountPoint,
            &ds.ID, &ds.Name, &ds.Pool, &ds.Type, &mountPoint,
            &ds.UsedBytes, &ds.AvailableBytes, &ds.ReferencedBytes,
            &ds.Compression, &ds.Deduplication, &ds.Quota, &ds.Reservation,
            &ds.CreatedAt,
@@ -3,6 +3,7 @@ package system

import (
    "net/http"
    "strconv"
    "time"

    "github.com/atlasos/calypso/internal/common/logger"
    "github.com/atlasos/calypso/internal/tasks"
@@ -131,3 +132,151 @@ func (h *Handler) ListNetworkInterfaces(c *gin.Context) {

    c.JSON(http.StatusOK, gin.H{"interfaces": interfaces})
}

// SaveNTPSettings saves NTP configuration to the OS
func (h *Handler) SaveNTPSettings(c *gin.Context) {
    var settings NTPSettings
    if err := c.ShouldBindJSON(&settings); err != nil {
        h.logger.Error("Invalid request body", "error", err)
        c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
        return
    }

    // Validate timezone
    if settings.Timezone == "" {
        c.JSON(http.StatusBadRequest, gin.H{"error": "timezone is required"})
        return
    }

    // Validate NTP servers
    if len(settings.NTPServers) == 0 {
        c.JSON(http.StatusBadRequest, gin.H{"error": "at least one NTP server is required"})
        return
    }

    if err := h.service.SaveNTPSettings(c.Request.Context(), settings); err != nil {
        h.logger.Error("Failed to save NTP settings", "error", err)
        c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
        return
    }

    c.JSON(http.StatusOK, gin.H{"message": "NTP settings saved successfully"})
}
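
// Example (hypothetical request body) accepted by SaveNTPSettings, matching
// the NTPSettings JSON tags:
//
//    {"timezone": "UTC", "ntp_servers": ["0.pool.ntp.org", "1.pool.ntp.org"]}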

// GetNTPSettings retrieves current NTP configuration
func (h *Handler) GetNTPSettings(c *gin.Context) {
    settings, err := h.service.GetNTPSettings(c.Request.Context())
    if err != nil {
        h.logger.Error("Failed to get NTP settings", "error", err)
        c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get NTP settings"})
        return
    }

    c.JSON(http.StatusOK, gin.H{"settings": settings})
}

// UpdateNetworkInterface updates a network interface configuration
func (h *Handler) UpdateNetworkInterface(c *gin.Context) {
    ifaceName := c.Param("name")
    if ifaceName == "" {
        c.JSON(http.StatusBadRequest, gin.H{"error": "interface name is required"})
        return
    }

    var req struct {
        IPAddress string `json:"ip_address" binding:"required"`
        Subnet    string `json:"subnet" binding:"required"`
        Gateway   string `json:"gateway,omitempty"`
        DNS1      string `json:"dns1,omitempty"`
        DNS2      string `json:"dns2,omitempty"`
        Role      string `json:"role,omitempty"`
    }
    if err := c.ShouldBindJSON(&req); err != nil {
        h.logger.Error("Invalid request body", "error", err)
        c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
        return
    }

    // Convert to service request
    serviceReq := UpdateNetworkInterfaceRequest{
        IPAddress: req.IPAddress,
        Subnet:    req.Subnet,
        Gateway:   req.Gateway,
        DNS1:      req.DNS1,
        DNS2:      req.DNS2,
        Role:      req.Role,
    }

    updatedIface, err := h.service.UpdateNetworkInterface(c.Request.Context(), ifaceName, serviceReq)
    if err != nil {
        h.logger.Error("Failed to update network interface", "interface", ifaceName, "error", err)
        c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
        return
    }

    c.JSON(http.StatusOK, gin.H{"interface": updatedIface})
}
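
// Example (hypothetical request body): the service layer later joins
// ip_address and subnet into CIDR form ("10.10.14.20/24"), so subnet is a
// prefix length, not a netmask:
//
//    {"ip_address": "10.10.14.20", "subnet": "24", "gateway": "10.10.14.1",
//     "dns1": "1.1.1.1", "role": "Management"}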

// GetSystemLogs retrieves recent system logs
func (h *Handler) GetSystemLogs(c *gin.Context) {
    limitStr := c.DefaultQuery("limit", "30")
    limit, err := strconv.Atoi(limitStr)
    if err != nil || limit <= 0 || limit > 100 {
        limit = 30
    }

    logs, err := h.service.GetSystemLogs(c.Request.Context(), limit)
    if err != nil {
        h.logger.Error("Failed to get system logs", "error", err)
        c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get system logs"})
        return
    }

    c.JSON(http.StatusOK, gin.H{"logs": logs})
}

// GetNetworkThroughput retrieves network throughput data from RRD
func (h *Handler) GetNetworkThroughput(c *gin.Context) {
    // Default to last 5 minutes
    durationStr := c.DefaultQuery("duration", "5m")
    duration, err := time.ParseDuration(durationStr)
    if err != nil {
        duration = 5 * time.Minute
    }

    data, err := h.service.GetNetworkThroughput(c.Request.Context(), duration)
    if err != nil {
        h.logger.Error("Failed to get network throughput", "error", err)
        c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get network throughput"})
        return
    }

    c.JSON(http.StatusOK, gin.H{"data": data})
}
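
// Example: a query string of "?duration=15m" is parsed with
// time.ParseDuration; a missing or invalid value falls back to the
// 5-minute default above.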

// ExecuteCommand executes a shell command
func (h *Handler) ExecuteCommand(c *gin.Context) {
    var req struct {
        Command string `json:"command" binding:"required"`
        Service string `json:"service,omitempty"` // Optional: system, scst, storage, backup, tape
    }

    if err := c.ShouldBindJSON(&req); err != nil {
        h.logger.Error("Invalid request body", "error", err)
        c.JSON(http.StatusBadRequest, gin.H{"error": "command is required"})
        return
    }

    // Execute command based on service context
    output, err := h.service.ExecuteCommand(c.Request.Context(), req.Command, req.Service)
    if err != nil {
        h.logger.Error("Failed to execute command", "error", err, "command", req.Command, "service", req.Service)
        c.JSON(http.StatusInternalServerError, gin.H{
            "error":  err.Error(),
            "output": output, // Include output even on error
        })
        return
    }

    c.JSON(http.StatusOK, gin.H{"output": output})
}

backend/internal/system/rrd.go (Normal file, 292 lines)
@@ -0,0 +1,292 @@
package system

import (
    "context"
    "fmt"
    "math"
    "os"
    "os/exec"
    "path/filepath"
    "strconv"
    "strings"
    "time"

    "github.com/atlasos/calypso/internal/common/logger"
)

// RRDService handles RRD database operations for network monitoring
type RRDService struct {
    logger        *logger.Logger
    rrdDir        string
    interfaceName string
}

// NewRRDService creates a new RRD service
func NewRRDService(log *logger.Logger, rrdDir string, interfaceName string) *RRDService {
    return &RRDService{
        logger:        log,
        rrdDir:        rrdDir,
        interfaceName: interfaceName,
    }
}

// NetworkStats represents network interface statistics
type NetworkStats struct {
    Interface string    `json:"interface"`
    RxBytes   uint64    `json:"rx_bytes"`
    TxBytes   uint64    `json:"tx_bytes"`
    RxPackets uint64    `json:"rx_packets"`
    TxPackets uint64    `json:"tx_packets"`
    Timestamp time.Time `json:"timestamp"`
}

// GetNetworkStats reads network statistics from /proc/net/dev
func (r *RRDService) GetNetworkStats(ctx context.Context, interfaceName string) (*NetworkStats, error) {
    data, err := os.ReadFile("/proc/net/dev")
    if err != nil {
        return nil, fmt.Errorf("failed to read /proc/net/dev: %w", err)
    }

    lines := strings.Split(string(data), "\n")
    for _, line := range lines {
        line = strings.TrimSpace(line)
        if !strings.HasPrefix(line, interfaceName+":") {
            continue
        }

        // Parse line: interface: rx_bytes rx_packets ... tx_bytes tx_packets ...
        parts := strings.Fields(line)
        if len(parts) < 17 {
            continue
        }

        // Extract statistics
        // Format: interface: rx_bytes rx_packets rx_errs rx_drop ... tx_bytes tx_packets ...
        rxBytes, err := strconv.ParseUint(parts[1], 10, 64)
        if err != nil {
            continue
        }
        rxPackets, err := strconv.ParseUint(parts[2], 10, 64)
        if err != nil {
            continue
        }
        txBytes, err := strconv.ParseUint(parts[9], 10, 64)
        if err != nil {
            continue
        }
        txPackets, err := strconv.ParseUint(parts[10], 10, 64)
        if err != nil {
            continue
        }

        return &NetworkStats{
            Interface: interfaceName,
            RxBytes:   rxBytes,
            TxBytes:   txBytes,
            RxPackets: rxPackets,
            TxPackets: txPackets,
            Timestamp: time.Now(),
        }, nil
    }

    return nil, fmt.Errorf("interface %s not found in /proc/net/dev", interfaceName)
}
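
// Example (kernel format, hypothetical numbers): a /proc/net/dev line as
// parsed above. After strings.Fields, parts[1] is rx_bytes, parts[2]
// rx_packets, parts[9] tx_bytes and parts[10] tx_packets:
//
//    ens18: 1847136 12345 0 0 0 0 0 0 920568 9876 0 0 0 0 0 0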

// InitializeRRD creates RRD database if it doesn't exist
func (r *RRDService) InitializeRRD(ctx context.Context) error {
    // Ensure RRD directory exists
    if err := os.MkdirAll(r.rrdDir, 0755); err != nil {
        return fmt.Errorf("failed to create RRD directory: %w", err)
    }

    rrdFile := filepath.Join(r.rrdDir, fmt.Sprintf("network-%s.rrd", r.interfaceName))

    // Check if RRD file already exists
    if _, err := os.Stat(rrdFile); err == nil {
        r.logger.Info("RRD file already exists", "file", rrdFile)
        return nil
    }

    // Create RRD database.
    // Use COUNTER type to track cumulative bytes; RRD will calculate the rate automatically.
    //   DS:inbound:COUNTER:20:0:U  - inbound cumulative bytes, 20s heartbeat
    //   DS:outbound:COUNTER:20:0:U - outbound cumulative bytes, 20s heartbeat
    //   RRA:AVERAGE:0.5:1:600      - 1 sample per step, 600 steps (100 minutes at 10s resolution)
    //   RRA:AVERAGE:0.5:6:700      - 6 samples per step, 700 steps (11.6 hours at 1min resolution)
    //   RRA:AVERAGE:0.5:60:730     - 60 samples per step, 730 steps (~5 days at 10min resolution)
    //   RRA:MAX:...                - max values for the same intervals
    cmd := exec.CommandContext(ctx, "rrdtool", "create", rrdFile,
        "--step", "10", // 10 second step
        "DS:inbound:COUNTER:20:0:U",  // Inbound cumulative bytes, 20s heartbeat
        "DS:outbound:COUNTER:20:0:U", // Outbound cumulative bytes, 20s heartbeat
        "RRA:AVERAGE:0.5:1:600",      // 10s resolution, 100 minutes
        "RRA:AVERAGE:0.5:6:700",      // 1min resolution, 11.6 hours
        "RRA:AVERAGE:0.5:60:730",     // 10min resolution, ~5 days
        "RRA:MAX:0.5:1:600",          // Max values
        "RRA:MAX:0.5:6:700",
        "RRA:MAX:0.5:60:730",
    )

    output, err := cmd.CombinedOutput()
    if err != nil {
        return fmt.Errorf("failed to create RRD: %s: %w", string(output), err)
    }

    r.logger.Info("RRD database created", "file", rrdFile)
    return nil
}

// UpdateRRD updates RRD database with new network statistics
func (r *RRDService) UpdateRRD(ctx context.Context, stats *NetworkStats) error {
    rrdFile := filepath.Join(r.rrdDir, fmt.Sprintf("network-%s.rrd", stats.Interface))

    // Update with cumulative byte counts (COUNTER type);
    // RRD will automatically calculate the rate (bytes per second)
    cmd := exec.CommandContext(ctx, "rrdtool", "update", rrdFile,
        fmt.Sprintf("%d:%d:%d", stats.Timestamp.Unix(), stats.RxBytes, stats.TxBytes),
    )

    output, err := cmd.CombinedOutput()
    if err != nil {
        return fmt.Errorf("failed to update RRD: %s: %w", string(output), err)
    }

    return nil
}
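
// Example (hypothetical values): the update above shells out to
//
//    rrdtool update /var/lib/calypso/rrd/network-ens18.rrd 1700000000:1847136:920568
//
// i.e. timestamp:rx_bytes:tx_bytes; the COUNTER data sources turn successive
// cumulative samples into a bytes-per-second rate.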

// FetchRRDData fetches data from RRD database for graphing
func (r *RRDService) FetchRRDData(ctx context.Context, startTime time.Time, endTime time.Time, resolution string) ([]NetworkDataPoint, error) {
    rrdFile := filepath.Join(r.rrdDir, fmt.Sprintf("network-%s.rrd", r.interfaceName))

    // Check if RRD file exists
    if _, err := os.Stat(rrdFile); os.IsNotExist(err) {
        return []NetworkDataPoint{}, nil
    }

    // Fetch data using rrdtool fetch
    // Use AVERAGE consolidation with appropriate resolution
    cmd := exec.CommandContext(ctx, "rrdtool", "fetch", rrdFile,
        "AVERAGE",
        "--start", fmt.Sprintf("%d", startTime.Unix()),
        "--end", fmt.Sprintf("%d", endTime.Unix()),
        "--resolution", resolution,
    )

    output, err := cmd.CombinedOutput()
    if err != nil {
        return nil, fmt.Errorf("failed to fetch RRD data: %s: %w", string(output), err)
    }

    // Parse rrdtool fetch output. Format:
    //                 inbound          outbound
    //    1234567890: 1.2345678901e+06 2.3456789012e+06
    points := []NetworkDataPoint{}
    lines := strings.Split(string(output), "\n")

    // Skip header lines
    dataStart := false
    for _, line := range lines {
        line = strings.TrimSpace(line)
        if line == "" {
            continue
        }

        // Check if this is the data section header
        if strings.Contains(line, "inbound") && strings.Contains(line, "outbound") {
            dataStart = true
            continue
        }

        if !dataStart {
            continue
        }

        // Parse data line: timestamp: inbound_value outbound_value
        parts := strings.Fields(line)
        if len(parts) < 3 {
            continue
        }

        // Parse timestamp
        timestampStr := strings.TrimSuffix(parts[0], ":")
        timestamp, err := strconv.ParseInt(timestampStr, 10, 64)
        if err != nil {
            continue
        }

        // Parse inbound (bytes per second from COUNTER, convert to Mbps)
        inbound, err := strconv.ParseFloat(parts[1], 64)
        if err != nil || math.IsNaN(inbound) || inbound < 0 {
            // Skip NaN or negative values; "nan" parses successfully,
            // so it must be checked explicitly with math.IsNaN
            continue
        }
        // Convert bytes per second to Mbps (bytes/s * 8 / 1000000)
        inboundMbps := inbound * 8 / 1000000

        // Parse outbound
        outbound, err := strconv.ParseFloat(parts[2], 64)
        if err != nil || math.IsNaN(outbound) || outbound < 0 {
            // Skip NaN or negative values
            continue
        }
        outboundMbps := outbound * 8 / 1000000

        // Format time as MM:SS
        t := time.Unix(timestamp, 0)
        timeStr := fmt.Sprintf("%02d:%02d", t.Minute(), t.Second())

        points = append(points, NetworkDataPoint{
            Time:     timeStr,
            Inbound:  inboundMbps,
            Outbound: outboundMbps,
        })
    }

    return points, nil
}
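
// Example (hypothetical output line from `rrdtool fetch ... AVERAGE`):
//
//    1700000000: 1.2500000000e+06 2.5000000000e+05
//
// 1.25e6 bytes/s inbound becomes 1.25e6 * 8 / 1e6 = 10 Mbps after the
// conversion above.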

// NetworkDataPoint represents a single data point for graphing
type NetworkDataPoint struct {
    Time     string  `json:"time"`
    Inbound  float64 `json:"inbound"`  // Mbps
    Outbound float64 `json:"outbound"` // Mbps
}

// StartCollector starts a background goroutine to periodically collect and update RRD
func (r *RRDService) StartCollector(ctx context.Context, interval time.Duration) error {
    // Initialize RRD if needed
    if err := r.InitializeRRD(ctx); err != nil {
        return fmt.Errorf("failed to initialize RRD: %w", err)
    }

    go func() {
        ticker := time.NewTicker(interval)
        defer ticker.Stop()

        for {
            select {
            case <-ctx.Done():
                return
            case <-ticker.C:
                // Get current stats
                stats, err := r.GetNetworkStats(ctx, r.interfaceName)
                if err != nil {
                    r.logger.Warn("Failed to get network stats", "error", err)
                    continue
                }

                // Update RRD with cumulative byte counts;
                // the RRD COUNTER type will automatically calculate the rate
                if err := r.UpdateRRD(ctx, stats); err != nil {
                    r.logger.Warn("Failed to update RRD", "error", err)
                }
            }
        }
    }()

    return nil
}
@@ -4,6 +4,7 @@ import (
    "context"
    "encoding/json"
    "fmt"
    "os"
    "os/exec"
    "strconv"
    "strings"
    "time"
@@ -11,18 +12,98 @@ import (

    "github.com/atlasos/calypso/internal/common/logger"
)

// NTPSettings represents NTP configuration
type NTPSettings struct {
    Timezone   string   `json:"timezone"`
    NTPServers []string `json:"ntp_servers"`
}

// Service handles system management operations
type Service struct {
    logger *logger.Logger
    logger     *logger.Logger
    rrdService *RRDService
}

// detectPrimaryInterface detects the primary network interface (first non-loopback with IP)
func detectPrimaryInterface(ctx context.Context) string {
    // Try to get default route interface
    cmd := exec.CommandContext(ctx, "ip", "route", "show", "default")
    output, err := cmd.Output()
    if err == nil {
        lines := strings.Split(string(output), "\n")
        for _, line := range lines {
            if strings.Contains(line, "dev ") {
                parts := strings.Fields(line)
                for i, part := range parts {
                    if part == "dev" && i+1 < len(parts) {
                        iface := parts[i+1]
                        if iface != "lo" {
                            return iface
                        }
                    }
                }
            }
        }
    }

    // Fallback: get first non-loopback interface with IP
    cmd = exec.CommandContext(ctx, "ip", "-4", "addr", "show")
    output, err = cmd.Output()
    if err == nil {
        lines := strings.Split(string(output), "\n")
        for _, line := range lines {
            line = strings.TrimSpace(line)
            // Look for interface name line (e.g., "2: ens18: <BROADCAST...")
            if len(line) > 0 && line[0] >= '0' && line[0] <= '9' && strings.Contains(line, ":") {
                parts := strings.Fields(line)
                if len(parts) >= 2 {
                    iface := strings.TrimSuffix(parts[1], ":")
                    if iface != "" && iface != "lo" {
                        // Check if this interface has an IP (next lines will have "inet")
                        // For simplicity, return first non-loopback interface
                        return iface
                    }
                }
            }
        }
    }

    // Final fallback
    return "eth0"
}
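
// Example (typical output, hypothetical addresses) of `ip route show default`
// as parsed above; the token after "dev" is taken as the primary interface:
//
//    default via 10.10.14.1 dev ens18 proto static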

// NewService creates a new system service
func NewService(log *logger.Logger) *Service {
    // Initialize RRD service for network monitoring
    rrdDir := "/var/lib/calypso/rrd"

    // Auto-detect primary interface
    ctx := context.Background()
    interfaceName := detectPrimaryInterface(ctx)
    log.Info("Detected primary network interface", "interface", interfaceName)

    rrdService := NewRRDService(log, rrdDir, interfaceName)

    return &Service{
        logger: log,
        logger:     log,
        rrdService: rrdService,
    }
}

// StartNetworkMonitoring starts the RRD collector for network monitoring
func (s *Service) StartNetworkMonitoring(ctx context.Context) error {
    return s.rrdService.StartCollector(ctx, 10*time.Second)
}

// GetNetworkThroughput fetches network throughput data from RRD
func (s *Service) GetNetworkThroughput(ctx context.Context, duration time.Duration) ([]NetworkDataPoint, error) {
    endTime := time.Now()
    startTime := endTime.Add(-duration)

    // Use 10 second resolution for recent data
    return s.rrdService.FetchRRDData(ctx, startTime, endTime, "10")
}

// ServiceStatus represents a systemd service status
type ServiceStatus struct {
    Name string `json:"name"`
@@ -35,31 +116,37 @@ type ServiceStatus struct {

// GetServiceStatus retrieves the status of a systemd service
func (s *Service) GetServiceStatus(ctx context.Context, serviceName string) (*ServiceStatus, error) {
    cmd := exec.CommandContext(ctx, "systemctl", "show", serviceName,
        "--property=ActiveState,SubState,LoadState,Description,ActiveEnterTimestamp",
        "--value", "--no-pager")
    output, err := cmd.Output()
    if err != nil {
        return nil, fmt.Errorf("failed to get service status: %w", err)
    }

    lines := strings.Split(strings.TrimSpace(string(output)), "\n")
    if len(lines) < 4 {
        return nil, fmt.Errorf("invalid service status output")
    }

    status := &ServiceStatus{
        Name:        serviceName,
        ActiveState: strings.TrimSpace(lines[0]),
        SubState:    strings.TrimSpace(lines[1]),
        LoadState:   strings.TrimSpace(lines[2]),
        Description: strings.TrimSpace(lines[3]),
        Name: serviceName,
    }

    // Parse timestamp if available
    if len(lines) > 4 && lines[4] != "" {
        if t, err := time.Parse("Mon 2006-01-02 15:04:05 MST", strings.TrimSpace(lines[4])); err == nil {
            status.Since = t
    // Get each property individually to ensure correct parsing
    properties := map[string]*string{
        "ActiveState": &status.ActiveState,
        "SubState":    &status.SubState,
        "LoadState":   &status.LoadState,
        "Description": &status.Description,
    }

    for prop, target := range properties {
        cmd := exec.CommandContext(ctx, "systemctl", "show", serviceName, "--property", prop, "--value", "--no-pager")
        output, err := cmd.Output()
        if err != nil {
            s.logger.Warn("Failed to get property", "service", serviceName, "property", prop, "error", err)
            continue
        }
        *target = strings.TrimSpace(string(output))
    }

    // Get timestamp if available
    cmd := exec.CommandContext(ctx, "systemctl", "show", serviceName, "--property", "ActiveEnterTimestamp", "--value", "--no-pager")
    output, err := cmd.Output()
    if err == nil {
        timestamp := strings.TrimSpace(string(output))
        if timestamp != "" {
            if t, err := time.Parse("Mon 2006-01-02 15:04:05 MST", timestamp); err == nil {
                status.Since = t
            }
        }
    }
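
// Example (typical invocation): each per-property query above prints only the
// value on a single line, which is why the per-property approach parses
// reliably:
//
//    $ systemctl show smbd --property ActiveState --value --no-pager
//    active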

@@ -69,10 +156,15 @@ func (s *Service) GetServiceStatus(ctx context.Context, serviceName string) (*Se

// ListServices lists all Calypso-related services
func (s *Service) ListServices(ctx context.Context) ([]ServiceStatus, error) {
    services := []string{
        "ssh",
        "sshd",
        "smbd",
        "iscsi-scst",
        "nfs-server",
        "nfs",
        "mhvtl",
        "calypso-api",
        "scst",
        "iscsi-scst",
        "mhvtl",
        "postgresql",
    }
@@ -128,6 +220,108 @@ func (s *Service) GetJournalLogs(ctx context.Context, serviceName string, lines
    return logs, nil
}

// SystemLogEntry represents a parsed system log entry
type SystemLogEntry struct {
    Time    string `json:"time"`
    Level   string `json:"level"`
    Source  string `json:"source"`
    Message string `json:"message"`
}

// GetSystemLogs retrieves recent system logs from journalctl
func (s *Service) GetSystemLogs(ctx context.Context, limit int) ([]SystemLogEntry, error) {
    if limit <= 0 || limit > 100 {
        limit = 30 // Default to 30 logs
    }

    cmd := exec.CommandContext(ctx, "journalctl",
        "-n", fmt.Sprintf("%d", limit),
        "-o", "json",
        "--no-pager",
        "--since", "1 hour ago") // Only get logs from last hour
    output, err := cmd.Output()
    if err != nil {
        return nil, fmt.Errorf("failed to get system logs: %w", err)
    }

    var logs []SystemLogEntry
    linesOutput := strings.Split(strings.TrimSpace(string(output)), "\n")
    for _, line := range linesOutput {
        if line == "" {
            continue
        }
        var logEntry map[string]interface{}
        if err := json.Unmarshal([]byte(line), &logEntry); err != nil {
            continue
        }

        // Parse timestamp (__REALTIME_TIMESTAMP is in microseconds; journald's
        // JSON output encodes field values as strings, so parse them as such)
        var timeStr string
        if tsStr, ok := logEntry["__REALTIME_TIMESTAMP"].(string); ok {
            if ts, err := strconv.ParseInt(tsStr, 10, 64); err == nil {
                // Convert microseconds to nanoseconds for time.Unix (1 microsecond = 1000 nanoseconds)
                timeStr = time.Unix(0, ts*1000).Format("15:04:05")
            }
        }
        if timeStr == "" {
            if tsStr, ok := logEntry["_SOURCE_REALTIME_TIMESTAMP"].(string); ok {
                if ts, err := strconv.ParseInt(tsStr, 10, 64); err == nil {
                    timeStr = time.Unix(0, ts*1000).Format("15:04:05")
                }
            }
        }
        if timeStr == "" {
            timeStr = time.Now().Format("15:04:05")
        }

        // Parse log level (PRIORITY is a numeric string, e.g. "6")
        level := "INFO"
        if prioStr, ok := logEntry["PRIORITY"].(string); ok {
            if priority, err := strconv.Atoi(prioStr); err == nil {
                switch priority {
                case 0: // emerg
                    level = "EMERG"
                case 1, 2, 3: // alert, crit, err
                    level = "ERROR"
                case 4: // warning
                    level = "WARN"
                case 5: // notice
                    level = "NOTICE"
                case 6: // info
                    level = "INFO"
                case 7: // debug
                    level = "DEBUG"
                }
            }
        }

        // Parse source (systemd unit or syslog identifier)
        source := "system"
        if unit, ok := logEntry["_SYSTEMD_UNIT"].(string); ok && unit != "" {
            // Remove .service suffix if present
            source = strings.TrimSuffix(unit, ".service")
        } else if ident, ok := logEntry["SYSLOG_IDENTIFIER"].(string); ok && ident != "" {
            source = ident
        } else if comm, ok := logEntry["_COMM"].(string); ok && comm != "" {
            source = comm
        }

        // Parse message
        message := ""
        if msg, ok := logEntry["MESSAGE"].(string); ok {
            message = msg
        }

        if message != "" {
            logs = append(logs, SystemLogEntry{
                Time:    timeStr,
                Level:   level,
                Source:  source,
                Message: message,
            })
        }
    }

    // Reverse to get newest first
    for i, j := 0, len(logs)-1; i < j; i, j = i+1, j-1 {
        logs[i], logs[j] = logs[j], logs[i]
    }

    return logs, nil
}
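
// Example (abridged, hypothetical values): one line of `journalctl -o json`
// as consumed above. journald encodes field values as JSON strings, which is
// why the parsing asserts string rather than float64:
//
//    {"__REALTIME_TIMESTAMP":"1700000000000000","PRIORITY":"6",
//     "_SYSTEMD_UNIT":"smbd.service","MESSAGE":"daemon started"}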

// GenerateSupportBundle generates a diagnostic support bundle
func (s *Service) GenerateSupportBundle(ctx context.Context, outputPath string) error {
    // Create bundle directory
@@ -183,6 +377,9 @@ type NetworkInterface struct {
    Status  string `json:"status"` // "Connected" or "Down"
    Speed   string `json:"speed"`  // e.g., "10 Gbps", "1 Gbps"
    Role    string `json:"role"`   // "Management", "ISCSI", or empty
    Gateway string `json:"gateway,omitempty"`
    DNS1    string `json:"dns1,omitempty"`
    DNS2    string `json:"dns2,omitempty"`
}

// ListNetworkInterfaces lists all network interfaces
@@ -297,6 +494,103 @@ func (s *Service) ListNetworkInterfaces(ctx context.Context) ([]NetworkInterface
        }
    }

    // Get default gateway for each interface
    cmd = exec.CommandContext(ctx, "ip", "route", "show")
    output, err = cmd.Output()
    if err == nil {
        lines = strings.Split(string(output), "\n")
        for _, line := range lines {
            line = strings.TrimSpace(line)
            if line == "" {
                continue
            }

            // Parse default route: "default via 10.10.14.1 dev ens18"
            if strings.HasPrefix(line, "default via ") {
                parts := strings.Fields(line)
                // Find "via" and "dev" in the parts
                var gateway string
                var ifaceName string
                for i, part := range parts {
                    if part == "via" && i+1 < len(parts) {
                        gateway = parts[i+1]
                    }
                    if part == "dev" && i+1 < len(parts) {
                        ifaceName = parts[i+1]
                    }
                }
                if gateway != "" && ifaceName != "" {
                    if iface, exists := interfaceMap[ifaceName]; exists {
                        iface.Gateway = gateway
                        s.logger.Info("Set default gateway for interface", "name", ifaceName, "gateway", gateway)
                    }
                }
            } else if strings.Contains(line, " via ") && strings.Contains(line, " dev ") {
                // Parse network route: "10.10.14.0/24 via 10.10.14.1 dev ens18"
                // Or: "192.168.1.0/24 via 192.168.1.1 dev eth0"
                parts := strings.Fields(line)
                var gateway string
                var ifaceName string
                for i, part := range parts {
                    if part == "via" && i+1 < len(parts) {
                        gateway = parts[i+1]
                    }
                    if part == "dev" && i+1 < len(parts) {
                        ifaceName = parts[i+1]
                    }
                }
                // Only set gateway if it's not already set (prefer default route)
                if gateway != "" && ifaceName != "" {
                    if iface, exists := interfaceMap[ifaceName]; exists {
                        if iface.Gateway == "" {
                            iface.Gateway = gateway
                            s.logger.Info("Set gateway from network route for interface", "name", ifaceName, "gateway", gateway)
                        }
                    }
                }
            }
        }
    } else {
        s.logger.Warn("Failed to get routes", "error", err)
    }

    // Get DNS servers from systemd-resolved or /etc/resolv.conf
    // Try systemd-resolved first
    cmd = exec.CommandContext(ctx, "systemd-resolve", "--status")
    output, err = cmd.Output()
    dnsServers := []string{}
    if err == nil {
        // Parse DNS from systemd-resolve output
        lines = strings.Split(string(output), "\n")
        for _, line := range lines {
            line = strings.TrimSpace(line)
            if strings.HasPrefix(line, "DNS Servers:") {
                // Format: "DNS Servers: 8.8.8.8 8.8.4.4"
                parts := strings.Fields(line)
                if len(parts) >= 3 {
                    dnsServers = parts[2:]
                }
                break
            }
        }
    } else {
        // Fallback to /etc/resolv.conf
        data, err := os.ReadFile("/etc/resolv.conf")
        if err == nil {
            lines = strings.Split(string(data), "\n")
            for _, line := range lines {
                line = strings.TrimSpace(line)
                if strings.HasPrefix(line, "nameserver ") {
                    dns := strings.TrimPrefix(line, "nameserver ")
                    dns = strings.TrimSpace(dns)
                    if dns != "" {
                        dnsServers = append(dnsServers, dns)
                    }
                }
            }
        }
    }

    // Convert map to slice
    var interfaces []NetworkInterface
    s.logger.Debug("Converting interface map to slice", "map_size", len(interfaceMap))
@@ -319,6 +613,14 @@ func (s *Service) ListNetworkInterfaces(ctx context.Context) ([]NetworkInterface
            }
        }

        // Set DNS servers (use first two if available)
        if len(dnsServers) > 0 {
            iface.DNS1 = dnsServers[0]
        }
        if len(dnsServers) > 1 {
            iface.DNS2 = dnsServers[1]
        }

        // Determine role based on interface name or IP (simple heuristic)
        // You can enhance this with configuration file or database lookup
        if strings.Contains(iface.Name, "eth") || strings.Contains(iface.Name, "ens") {
@@ -345,3 +647,367 @@ func (s *Service) ListNetworkInterfaces(ctx context.Context) ([]NetworkInterface
    s.logger.Info("Listed network interfaces", "count", len(interfaces))
    return interfaces, nil
}

// UpdateNetworkInterfaceRequest represents the request to update a network interface
type UpdateNetworkInterfaceRequest struct {
    IPAddress string `json:"ip_address"`
    Subnet    string `json:"subnet"`
    Gateway   string `json:"gateway,omitempty"`
    DNS1      string `json:"dns1,omitempty"`
    DNS2      string `json:"dns2,omitempty"`
    Role      string `json:"role,omitempty"`
}

// UpdateNetworkInterface updates network interface configuration
func (s *Service) UpdateNetworkInterface(ctx context.Context, ifaceName string, req UpdateNetworkInterfaceRequest) (*NetworkInterface, error) {
    // Validate interface exists
    cmd := exec.CommandContext(ctx, "ip", "link", "show", ifaceName)
    if err := cmd.Run(); err != nil {
        return nil, fmt.Errorf("interface %s not found: %w", ifaceName, err)
    }

    // Remove existing IP address if any
    cmd = exec.CommandContext(ctx, "ip", "addr", "flush", "dev", ifaceName)
    cmd.Run() // Ignore error, interface might not have an IP

    // Set new IP address and subnet
    ipWithSubnet := fmt.Sprintf("%s/%s", req.IPAddress, req.Subnet)
    cmd = exec.CommandContext(ctx, "ip", "addr", "add", ipWithSubnet, "dev", ifaceName)
    output, err := cmd.CombinedOutput()
    if err != nil {
        s.logger.Error("Failed to set IP address", "interface", ifaceName, "error", err, "output", string(output))
        return nil, fmt.Errorf("failed to set IP address: %w", err)
    }

    // Remove existing default route if any
    cmd = exec.CommandContext(ctx, "ip", "route", "del", "default")
    cmd.Run() // Ignore error, might not exist

    // Set gateway if provided
    if req.Gateway != "" {
        cmd = exec.CommandContext(ctx, "ip", "route", "add", "default", "via", req.Gateway, "dev", ifaceName)
        output, err = cmd.CombinedOutput()
        if err != nil {
            s.logger.Error("Failed to set gateway", "interface", ifaceName, "error", err, "output", string(output))
            return nil, fmt.Errorf("failed to set gateway: %w", err)
        }
    }

    // Update DNS in systemd-resolved or /etc/resolv.conf
    if req.DNS1 != "" || req.DNS2 != "" {
        // Try using systemd-resolve first
        cmd = exec.CommandContext(ctx, "systemd-resolve", "--status")
        if cmd.Run() == nil {
            // systemd-resolve is available, use it
            dnsServers := []string{}
            if req.DNS1 != "" {
                dnsServers = append(dnsServers, req.DNS1)
            }
            if req.DNS2 != "" {
                dnsServers = append(dnsServers, req.DNS2)
            }
            if len(dnsServers) > 0 {
                // Use resolvectl to set DNS (newer systemd)
                cmd = exec.CommandContext(ctx, "resolvectl", "dns", ifaceName, strings.Join(dnsServers, " "))
                if cmd.Run() != nil {
                    // Fallback to systemd-resolve
                    cmd = exec.CommandContext(ctx, "systemd-resolve", "--interface", ifaceName, "--set-dns", strings.Join(dnsServers, " "))
                    output, err = cmd.CombinedOutput()
                    if err != nil {
                        s.logger.Warn("Failed to set DNS via systemd-resolve", "error", err, "output", string(output))
                    }
                }
            }
        } else {
            // Fallback: update /etc/resolv.conf
            resolvContent := "# Generated by Calypso\n"
            if req.DNS1 != "" {
                resolvContent += fmt.Sprintf("nameserver %s\n", req.DNS1)
            }
            if req.DNS2 != "" {
                resolvContent += fmt.Sprintf("nameserver %s\n", req.DNS2)
            }

            tmpPath := "/tmp/resolv.conf." + fmt.Sprintf("%d", time.Now().Unix())
            if err := os.WriteFile(tmpPath, []byte(resolvContent), 0644); err != nil {
                s.logger.Warn("Failed to write temporary resolv.conf", "error", err)
            } else {
                cmd = exec.CommandContext(ctx, "sh", "-c", fmt.Sprintf("mv %s /etc/resolv.conf", tmpPath))
                output, err = cmd.CombinedOutput()
                if err != nil {
                    s.logger.Warn("Failed to update /etc/resolv.conf", "error", err, "output", string(output))
                }
            }
        }
    }

    // Bring interface up
    cmd = exec.CommandContext(ctx, "ip", "link", "set", ifaceName, "up")
    output, err = cmd.CombinedOutput()
    if err != nil {
        s.logger.Warn("Failed to bring interface up", "interface", ifaceName, "error", err, "output", string(output))
    }

    // Return updated interface
    updatedIface := &NetworkInterface{
        Name:      ifaceName,
        IPAddress: req.IPAddress,
        Subnet:    req.Subnet,
        Gateway:   req.Gateway,
        DNS1:      req.DNS1,
        DNS2:      req.DNS2,
        Role:      req.Role,
        Status:    "Connected",
        Speed:     "Unknown", // Will be updated on next list
    }

    s.logger.Info("Updated network interface", "interface", ifaceName, "ip", req.IPAddress, "subnet", req.Subnet)
    return updatedIface, nil
}
|
||||
|
||||
// SaveNTPSettings saves NTP configuration to the OS
|
||||
func (s *Service) SaveNTPSettings(ctx context.Context, settings NTPSettings) error {
|
||||
// Set timezone using timedatectl
|
||||
if settings.Timezone != "" {
|
||||
cmd := exec.CommandContext(ctx, "timedatectl", "set-timezone", settings.Timezone)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to set timezone", "timezone", settings.Timezone, "error", err, "output", string(output))
|
||||
return fmt.Errorf("failed to set timezone: %w", err)
|
||||
}
|
||||
s.logger.Info("Timezone set", "timezone", settings.Timezone)
|
||||
}
|
||||
|
||||
// Configure NTP servers in systemd-timesyncd
|
||||
if len(settings.NTPServers) > 0 {
|
||||
configPath := "/etc/systemd/timesyncd.conf"
|
||||
|
||||
// Build config content
|
||||
configContent := "[Time]\n"
|
||||
configContent += "NTP="
|
||||
for i, server := range settings.NTPServers {
|
||||
if i > 0 {
|
||||
configContent += " "
|
||||
}
|
||||
configContent += server
|
||||
}
|
||||
configContent += "\n"
|
||||
|
||||
// Write to temporary file first, then move to final location (requires root)
|
||||
tmpPath := "/tmp/timesyncd.conf." + fmt.Sprintf("%d", time.Now().Unix())
|
||||
if err := os.WriteFile(tmpPath, []byte(configContent), 0644); err != nil {
|
||||
s.logger.Error("Failed to write temporary NTP config", "error", err)
|
||||
return fmt.Errorf("failed to write temporary NTP configuration: %w", err)
|
||||
}
|
||||
|
||||
// Move to final location using sudo (requires root privileges)
|
||||
cmd := exec.CommandContext(ctx, "sh", "-c", fmt.Sprintf("mv %s %s", tmpPath, configPath))
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to move NTP config", "error", err, "output", string(output))
|
||||
os.Remove(tmpPath) // Clean up temp file
|
||||
return fmt.Errorf("failed to move NTP configuration: %w", err)
|
||||
}
|
||||
|
||||
// Restart systemd-timesyncd to apply changes
|
||||
cmd = exec.CommandContext(ctx, "systemctl", "restart", "systemd-timesyncd")
|
||||
output, err = cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
s.logger.Error("Failed to restart systemd-timesyncd", "error", err, "output", string(output))
|
||||
return fmt.Errorf("failed to restart systemd-timesyncd: %w", err)
|
||||
}
|
||||
|
||||
s.logger.Info("NTP servers configured", "servers", settings.NTPServers)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetNTPSettings retrieves the current NTP configuration from the OS
func (s *Service) GetNTPSettings(ctx context.Context) (*NTPSettings, error) {
	settings := &NTPSettings{
		NTPServers: []string{},
	}

	// Get current timezone using timedatectl
	cmd := exec.CommandContext(ctx, "timedatectl", "show", "--property=Timezone", "--value")
	output, err := cmd.Output()
	if err != nil {
		s.logger.Warn("Failed to get timezone", "error", err)
		settings.Timezone = "Etc/UTC" // Default fallback
	} else {
		settings.Timezone = strings.TrimSpace(string(output))
		if settings.Timezone == "" {
			settings.Timezone = "Etc/UTC"
		}
	}

	// Read NTP servers from the systemd-timesyncd config
	configPath := "/etc/systemd/timesyncd.conf"
	data, err := os.ReadFile(configPath)
	if err != nil {
		s.logger.Warn("Failed to read NTP config", "error", err)
		// Default NTP servers if the config file doesn't exist
		settings.NTPServers = []string{"pool.ntp.org", "time.google.com"}
	} else {
		// Parse NTP servers from the config file
		lines := strings.Split(string(data), "\n")
		for _, line := range lines {
			line = strings.TrimSpace(line)
			if strings.HasPrefix(line, "NTP=") {
				ntpLine := strings.TrimPrefix(line, "NTP=")
				if ntpLine != "" {
					servers := strings.Fields(ntpLine)
					settings.NTPServers = servers
					break
				}
			}
		}
		// If no NTP servers were found in the config, use defaults
		if len(settings.NTPServers) == 0 {
			settings.NTPServers = []string{"pool.ntp.org", "time.google.com"}
		}
	}

	return settings, nil
}

// ExecuteCommand executes a shell command and returns the output.
// The service parameter is optional and can be: system, scst, storage, backup, tape.
func (s *Service) ExecuteCommand(ctx context.Context, command string, service string) (string, error) {
	// Sanitize command - basic security check
	command = strings.TrimSpace(command)
	if command == "" {
		return "", fmt.Errorf("command cannot be empty")
	}

	// Block dangerous commands that could harm the system
	dangerousCommands := []string{
		"rm -rf /",
		"dd if=",
		":(){ :|:& };:",
		"mkfs",
		"fdisk",
		"parted",
		"format",
		"> /dev/sd",
		"mkfs.ext",
		"mkfs.xfs",
		"mkfs.btrfs",
		"wipefs",
	}

	commandLower := strings.ToLower(command)
	for _, dangerous := range dangerousCommands {
		if strings.Contains(commandLower, dangerous) {
			return "", fmt.Errorf("command blocked for security reasons")
		}
	}

	// Service-specific command handling
	switch service {
	case "scst":
		// Allow SCST admin commands
		if strings.HasPrefix(command, "scstadmin") {
			// SCST commands are safe
			break
		}
	case "backup":
		// Allow bconsole commands
		if strings.HasPrefix(command, "bconsole") {
			// Backup console commands are safe
			break
		}
	case "storage":
		// Allow ZFS and storage commands
		if strings.HasPrefix(command, "zfs") || strings.HasPrefix(command, "zpool") || strings.HasPrefix(command, "lsblk") {
			// Storage commands are safe
			break
		}
	case "tape":
		// Allow tape library commands
		if strings.HasPrefix(command, "mtx") || strings.HasPrefix(command, "lsscsi") || strings.HasPrefix(command, "sg_") {
			// Tape commands are safe
			break
		}
	}

	// Execute command with a timeout (30 seconds)
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	// Check if the command already has sudo (reuse commandLower from above)
	hasSudo := strings.HasPrefix(commandLower, "sudo ")

	// Determine if the command needs sudo based on service and command type
	needsSudo := false

	if !hasSudo {
		// Commands that typically need sudo
		sudoCommands := []string{
			"scstadmin",
			"systemctl",
			"zfs",
			"zpool",
			"mount",
			"umount",
			"ip link",
			"ip addr",
			"iptables",
			"journalctl",
		}

		for _, sudoCmd := range sudoCommands {
			if strings.HasPrefix(commandLower, sudoCmd) {
				needsSudo = true
				break
			}
		}

		// Service-specific sudo requirements
		switch service {
		case "scst":
			// All SCST admin commands need sudo
			if strings.HasPrefix(commandLower, "scstadmin") {
				needsSudo = true
			}
		case "storage":
			// ZFS commands typically need sudo
			if strings.HasPrefix(commandLower, "zfs") || strings.HasPrefix(commandLower, "zpool") {
				needsSudo = true
			}
		case "system":
			// System commands like systemctl need sudo
			if strings.HasPrefix(commandLower, "systemctl") || strings.HasPrefix(commandLower, "journalctl") {
				needsSudo = true
			}
		}
	}

	// Build the command with or without sudo
	var cmd *exec.Cmd
	if needsSudo && !hasSudo {
		// Use sudo for privileged commands (if not already present)
		cmd = exec.CommandContext(ctx, "sudo", "sh", "-c", command)
	} else {
		// Regular command (or already has sudo)
		cmd = exec.CommandContext(ctx, "sh", "-c", command)
	}

	cmd.Env = append(os.Environ(), "TERM=xterm-256color")

	output, err := cmd.CombinedOutput()

	if err != nil {
		// Return output even if there's an error (some commands return non-zero exit codes)
		outputStr := string(output)
		if len(outputStr) > 0 {
			return outputStr, nil
		}
		return "", fmt.Errorf("command execution failed: %w", err)
	}

	return string(output), nil
}

328
backend/internal/system/terminal.go
Normal file
@@ -0,0 +1,328 @@
package system

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"os/exec"
	"os/user"
	"sync"
	"syscall"
	"time"

	"github.com/atlasos/calypso/internal/common/logger"
	"github.com/creack/pty"
	"github.com/gin-gonic/gin"
	"github.com/gorilla/websocket"
)

const (
	// WebSocket timeouts
	writeWait  = 10 * time.Second
	pongWait   = 60 * time.Second
	pingPeriod = (pongWait * 9) / 10
)

var upgrader = websocket.Upgrader{
	ReadBufferSize:  4096,
	WriteBufferSize: 4096,
	CheckOrigin: func(r *http.Request) bool {
		// Allow all origins - in production, validate against allowed domains
		return true
	},
}

// TerminalSession manages a single terminal session
type TerminalSession struct {
	conn     *websocket.Conn
	pty      *os.File
	cmd      *exec.Cmd
	logger   *logger.Logger
	mu       sync.RWMutex
	closed   bool
	username string
	done     chan struct{}
}

// HandleTerminalWebSocket handles a WebSocket connection for the terminal
func HandleTerminalWebSocket(c *gin.Context, log *logger.Logger) {
	// Verify authentication
	userID, exists := c.Get("user_id")
	if !exists {
		log.Warn("Terminal WebSocket: unauthorized access", "ip", c.ClientIP())
		c.JSON(http.StatusUnauthorized, gin.H{"error": "unauthorized"})
		return
	}

	username, _ := c.Get("username")
	if username == nil {
		username = userID
	}
	// Avoid a panic if the context value is not a string
	usernameStr, ok := username.(string)
	if !ok {
		usernameStr = fmt.Sprintf("%v", username)
	}

	log.Info("Terminal WebSocket: connection attempt", "username", usernameStr, "ip", c.ClientIP())

	// Upgrade connection
	conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
	if err != nil {
		log.Error("Terminal WebSocket: upgrade failed", "error", err)
		return
	}

	log.Info("Terminal WebSocket: connection upgraded", "username", usernameStr)

	// Create session
	session := &TerminalSession{
		conn:     conn,
		logger:   log,
		username: usernameStr,
		done:     make(chan struct{}),
	}

	// Start terminal
	if err := session.startPTY(); err != nil {
		log.Error("Terminal WebSocket: failed to start PTY", "error", err, "username", usernameStr)
		session.sendError(err.Error())
		session.close()
		return
	}

	// Handle messages and PTY output
	go session.handleRead()
	go session.handleWrite()
}

// startPTY starts the PTY session
func (s *TerminalSession) startPTY() error {
	// Get user info
	currentUser, err := user.Lookup(s.username)
	if err != nil {
		// Fall back to the current user
		currentUser, err = user.Current()
		if err != nil {
			return err
		}
	}

	// Determine shell
	shell := os.Getenv("SHELL")
	if shell == "" {
		shell = "/bin/bash"
	}

	// Create command
	s.cmd = exec.Command(shell)
	s.cmd.Env = append(os.Environ(),
		"TERM=xterm-256color",
		"HOME="+currentUser.HomeDir,
		"USER="+currentUser.Username,
		"USERNAME="+currentUser.Username,
	)
	s.cmd.Dir = currentUser.HomeDir

	// Start PTY
	ptyFile, err := pty.Start(s.cmd)
	if err != nil {
		return err
	}

	s.pty = ptyFile

	// Set initial size
	pty.Setsize(ptyFile, &pty.Winsize{
		Rows: 24,
		Cols: 80,
	})

	return nil
}

// handleRead handles incoming WebSocket messages
func (s *TerminalSession) handleRead() {
	defer s.close()

	// Set read deadline and pong handler
	s.conn.SetReadDeadline(time.Now().Add(pongWait))
	s.conn.SetPongHandler(func(string) error {
		s.conn.SetReadDeadline(time.Now().Add(pongWait))
		return nil
	})

	for {
		select {
		case <-s.done:
			return
		default:
			messageType, data, err := s.conn.ReadMessage()
			if err != nil {
				if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
					s.logger.Error("Terminal WebSocket: read error", "error", err)
				}
				return
			}

			// Handle binary messages (raw input)
			if messageType == websocket.BinaryMessage {
				s.writeToPTY(data)
				continue
			}

			// Handle text messages (JSON commands)
			if messageType == websocket.TextMessage {
				var msg map[string]interface{}
				if err := json.Unmarshal(data, &msg); err != nil {
					continue
				}

				switch msg["type"] {
				case "input":
					if data, ok := msg["data"].(string); ok {
						s.writeToPTY([]byte(data))
					}

				case "resize":
					if cols, ok1 := msg["cols"].(float64); ok1 {
						if rows, ok2 := msg["rows"].(float64); ok2 {
							s.resizePTY(uint16(cols), uint16(rows))
						}
					}

				case "ping":
					s.writeWS(websocket.TextMessage, []byte(`{"type":"pong"}`))
				}
			}
		}
	}
}

// handleWrite handles PTY output to the WebSocket
func (s *TerminalSession) handleWrite() {
	defer s.close()

	ticker := time.NewTicker(pingPeriod)
	defer ticker.Stop()

	// Read from the PTY and write to the WebSocket
	buffer := make([]byte, 4096)
	for {
		select {
		case <-s.done:
			return
		case <-ticker.C:
			// Send ping
			if err := s.writeWS(websocket.PingMessage, nil); err != nil {
				return
			}
		default:
			// Read from PTY
			if s.pty != nil {
				n, err := s.pty.Read(buffer)
				if err != nil {
					if err != io.EOF {
						s.logger.Error("Terminal WebSocket: PTY read error", "error", err)
					}
					return
				}
				if n > 0 {
					// Write binary data to the WebSocket
					if err := s.writeWS(websocket.BinaryMessage, buffer[:n]); err != nil {
						return
					}
				}
			}
		}
	}
}

// writeToPTY writes data to the PTY
func (s *TerminalSession) writeToPTY(data []byte) {
	s.mu.RLock()
	closed := s.closed
	pty := s.pty
	s.mu.RUnlock()

	if closed || pty == nil {
		return
	}

	if _, err := pty.Write(data); err != nil {
		s.logger.Error("Terminal WebSocket: PTY write error", "error", err)
	}
}

// resizePTY resizes the PTY
func (s *TerminalSession) resizePTY(cols, rows uint16) {
	s.mu.RLock()
	closed := s.closed
	ptyFile := s.pty
	s.mu.RUnlock()

	if closed || ptyFile == nil {
		return
	}

	// Use pty.Setsize from the package, not a method on the variable
	pty.Setsize(ptyFile, &pty.Winsize{
		Cols: cols,
		Rows: rows,
	})
}

// writeWS writes a message to the WebSocket
func (s *TerminalSession) writeWS(messageType int, data []byte) error {
	s.mu.RLock()
	closed := s.closed
	conn := s.conn
	s.mu.RUnlock()

	if closed || conn == nil {
		return io.ErrClosedPipe
	}

	conn.SetWriteDeadline(time.Now().Add(writeWait))
	return conn.WriteMessage(messageType, data)
}

// sendError sends an error message
func (s *TerminalSession) sendError(errMsg string) {
	msg := map[string]interface{}{
		"type":  "error",
		"error": errMsg,
	}
	data, _ := json.Marshal(msg)
	s.writeWS(websocket.TextMessage, data)
}

// close closes the terminal session
func (s *TerminalSession) close() {
	s.mu.Lock()
	defer s.mu.Unlock()

	if s.closed {
		return
	}

	s.closed = true
	close(s.done)

	// Close PTY
	if s.pty != nil {
		s.pty.Close()
	}

	// Kill process
	if s.cmd != nil && s.cmd.Process != nil {
		s.cmd.Process.Signal(syscall.SIGTERM)
		time.Sleep(100 * time.Millisecond)
		if s.cmd.ProcessState == nil || !s.cmd.ProcessState.Exited() {
			s.cmd.Process.Kill()
		}
	}

	// Close WebSocket
	if s.conn != nil {
		s.conn.Close()
	}

	s.logger.Info("Terminal WebSocket: session closed", "username", s.username)
}
1
bacula-config
Symbolic link
@@ -0,0 +1 @@
/etc/bacula
788
docs/alpha/CODING-STANDARDS.md
Normal file
@@ -0,0 +1,788 @@
# Coding Standards
## AtlasOS - Calypso Backup Appliance

**Version:** 1.0.0-alpha
**Date:** 2025-01-XX
**Status:** Active

---

## 1. Overview

This document defines the coding standards and best practices for the Calypso project. All code must adhere to these standards to ensure consistency, maintainability, and quality.

## 2. General Principles

### 2.1 Code Quality
- **Readability**: Code should be self-documenting and easy to understand
- **Maintainability**: Code should be easy to modify and extend
- **Consistency**: Follow consistent patterns across the codebase
- **Simplicity**: Prefer simple solutions over complex ones
- **DRY**: Don't Repeat Yourself - avoid code duplication

### 2.2 Code Review
- All code must be reviewed before merging
- Reviewers should check for adherence to these standards
- Address review comments before merging

### 2.3 Documentation
- Document complex logic and algorithms
- Keep comments up-to-date with code changes
- Write clear commit messages

---

## 3. Backend (Go) Standards

### 3.1 Code Formatting

#### 3.1.1 Use gofmt
- Always run `gofmt` before committing
- Use `goimports` for import organization
- Configure the IDE to format on save

#### 3.1.2 Line Length
- Maximum line length: 100 characters
- Break long lines for readability

#### 3.1.3 Indentation
- Use tabs for indentation (not spaces)
- Tab width: 4 spaces equivalent

### 3.2 Naming Conventions

#### 3.2.1 Packages
```go
// Good: lowercase, single word, descriptive
package storage
package auth
package monitoring

// Bad: mixed case, abbreviations
package Storage
package Auth
package Mon
```

#### 3.2.2 Functions
```go
// Good: camelCase, descriptive
func createZFSPool(name string) error
func listNetworkInterfaces() ([]Interface, error)
func validateUserInput(input string) error

// Bad: unclear names, abbreviations
func create(name string) error
func list() ([]Interface, error)
func val(input string) error
```

#### 3.2.3 Variables
```go
// Good: camelCase, descriptive
var poolName string
var networkInterfaces []Interface
var isActive bool

// Bad: single letters, unclear
var n string
var ifs []Interface
var a bool
```

#### 3.2.4 Constants
```go
// Good: PascalCase for exported, camelCase for unexported
const DefaultPort = 8080
const maxRetries = 3

// Bad: inconsistent casing
const defaultPort = 8080
const MAX_RETRIES = 3
```

#### 3.2.5 Types and Structs
```go
// Good: PascalCase, descriptive
type ZFSPool struct {
	ID     string
	Name   string
	Status string
}

// Bad: unclear names
type Pool struct {
	I string
	N string
	S string
}
```

### 3.3 File Organization

#### 3.3.1 File Structure
```go
// 1. Package declaration
package storage

// 2. Imports (standard, third-party, local)
import (
	"context"
	"fmt"

	"github.com/gin-gonic/gin"

	"github.com/atlasos/calypso/internal/common/database"
)

// 3. Constants
const (
	defaultTimeout = 30 * time.Second
)

// 4. Types
type Service struct {
	db *database.DB
}

// 5. Functions
func NewService(db *database.DB) *Service {
	return &Service{db: db}
}
```

#### 3.3.2 File Naming
- Use lowercase with underscores: `handler.go`, `service.go`
- Test files: `handler_test.go`
- One main type per file when possible

### 3.4 Error Handling

#### 3.4.1 Error Return
```go
// Good: always return error as last value
func createPool(name string) (*Pool, error) {
	if name == "" {
		return nil, fmt.Errorf("pool name cannot be empty")
	}
	// ...
}

// Bad: panic, no error return
func createPool(name string) *Pool {
	if name == "" {
		panic("pool name cannot be empty")
	}
}
```

#### 3.4.2 Error Wrapping
```go
// Good: wrap errors with context
if err != nil {
	return fmt.Errorf("failed to create pool %s: %w", name, err)
}

// Bad: lose error context
if err != nil {
	return err
}
```

#### 3.4.3 Error Messages
```go
// Good: clear, actionable error messages
return fmt.Errorf("pool '%s' already exists", name)
return fmt.Errorf("insufficient disk space: need %d bytes, have %d bytes", needed, available)

// Bad: unclear error messages
return fmt.Errorf("error")
return fmt.Errorf("failed")
```

### 3.5 Comments

#### 3.5.1 Package Comments
```go
// Package storage provides storage management functionality including
// ZFS pool and dataset operations, disk discovery, and storage repository management.
package storage
```

#### 3.5.2 Function Comments
```go
// CreateZFSPool creates a new ZFS pool with the specified configuration.
// It validates the pool name, checks disk availability, and creates the pool.
// Returns an error if the pool cannot be created.
func CreateZFSPool(ctx context.Context, name string, disks []string) error {
	// ...
}
```

#### 3.5.3 Inline Comments
```go
// Good: explain why, not what
// Retry up to 3 times to handle transient network errors
for i := 0; i < 3; i++ {
	// ...
}

// Bad: obvious comments
// Loop 3 times
for i := 0; i < 3; i++ {
	// ...
}
```

### 3.6 Testing

#### 3.6.1 Test File Naming
- Test files: `*_test.go`
- Test functions: `TestFunctionName`
- Benchmark functions: `BenchmarkFunctionName`

#### 3.6.2 Test Structure
```go
func TestCreateZFSPool(t *testing.T) {
	tests := []struct {
		name    string
		input   string
		wantErr bool
	}{
		{
			name:    "valid pool name",
			input:   "tank",
			wantErr: false,
		},
		{
			name:    "empty pool name",
			input:   "",
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := createPool(tt.input)
			if (err != nil) != tt.wantErr {
				t.Errorf("createPool() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
```

### 3.7 Concurrency

#### 3.7.1 Context Usage
```go
// Good: always accept context as first parameter
func (s *Service) CreatePool(ctx context.Context, name string) error {
	// Use context for cancellation and timeout
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	// ...
}

// Bad: no context
func (s *Service) CreatePool(name string) error {
	// ...
}
```

#### 3.7.2 Goroutines
```go
// Good: use context for cancellation
go func() {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	// ...
}()

// Bad: no cancellation mechanism
go func() {
	// ...
}()
```

### 3.8 Database Operations

#### 3.8.1 Query Context
```go
// Good: use context for queries
rows, err := s.db.QueryContext(ctx, query, args...)

// Bad: no context
rows, err := s.db.Query(query, args...)
```

#### 3.8.2 Transactions
```go
// Good: use transactions for multiple operations
tx, err := s.db.BeginTx(ctx, nil)
if err != nil {
	return err
}
defer tx.Rollback()

// ... operations ...

if err := tx.Commit(); err != nil {
	return err
}
```

---

## 4. Frontend (TypeScript/React) Standards

### 4.1 Code Formatting

#### 4.1.1 Use Prettier
- Configure Prettier for consistent formatting
- Format on save enabled
- Maximum line length: 100 characters

#### 4.1.2 Indentation
- Use 2 spaces for indentation
- Consistent spacing in JSX

### 4.2 Naming Conventions

#### 4.2.1 Components
```typescript
// Good: PascalCase, descriptive
function StoragePage() { }
function CreatePoolModal() { }
function NetworkInterfaceCard() { }

// Bad: unclear names
function Page() { }
function Modal() { }
function Card() { }
```

#### 4.2.2 Functions
```typescript
// Good: camelCase, descriptive
function createZFSPool(name: string): Promise<ZFSPool> { }
function handleSubmit(event: React.FormEvent): void { }
function formatBytes(bytes: number): string { }

// Bad: unclear names
function create(name: string) { }
function handle(e: any) { }
function fmt(b: number) { }
```

#### 4.2.3 Variables
```typescript
// Good: camelCase, descriptive
const poolName = 'tank'
const networkInterfaces: NetworkInterface[] = []
const isActive = true

// Bad: unclear names
const n = 'tank'
const ifs: any[] = []
const a = true
```

#### 4.2.4 Constants
```typescript
// Good: UPPER_SNAKE_CASE for constants
const DEFAULT_PORT = 8080
const MAX_RETRIES = 3
const API_BASE_URL = '/api/v1'

// Bad: inconsistent casing
const defaultPort = 8080
const maxRetries = 3
```

#### 4.2.5 Types and Interfaces
```typescript
// Good: PascalCase, descriptive
interface ZFSPool {
  id: string
  name: string
  status: string
}

type PoolStatus = 'online' | 'offline' | 'degraded'

// Bad: unclear names
interface Pool {
  i: string
  n: string
  s: string
}
```

### 4.3 File Organization

#### 4.3.1 Component Structure
```typescript
// 1. Imports (React, third-party, local)
import { useState } from 'react'
import { useQuery } from '@tanstack/react-query'
import { zfsApi } from '@/api/storage'

// 2. Types/Interfaces
interface Props {
  poolId: string
}

// 3. Component
export default function PoolDetail({ poolId }: Props) {
  // 4. Hooks
  const [isLoading, setIsLoading] = useState(false)

  // 5. Queries
  const { data: pool } = useQuery({
    queryKey: ['pool', poolId],
    queryFn: () => zfsApi.getPool(poolId),
  })

  // 6. Handlers
  const handleDelete = () => {
    // ...
  }

  // 7. Effects
  useEffect(() => {
    // ...
  }, [poolId])

  // 8. Render
  return (
    // JSX
  )
}
```

#### 4.3.2 File Naming
- Components: `PascalCase.tsx` (e.g., `StoragePage.tsx`)
- Utilities: `camelCase.ts` (e.g., `formatBytes.ts`)
- Types: `camelCase.ts` or `types.ts`
- Hooks: `useCamelCase.ts` (e.g., `useStorage.ts`)

### 4.4 TypeScript

#### 4.4.1 Type Safety
```typescript
// Good: explicit types
function createPool(name: string): Promise<ZFSPool> {
  // ...
}

// Bad: any types
function createPool(name: any): any {
  // ...
}
```

#### 4.4.2 Interface Definitions
```typescript
// Good: clear interface definitions
interface ZFSPool {
  id: string
  name: string
  status: 'online' | 'offline' | 'degraded'
  totalCapacityBytes: number
  usedCapacityBytes: number
}

// Bad: unclear or missing types
interface Pool {
  id: any
  name: any
  status: any
}
```

### 4.5 React Patterns

#### 4.5.1 Hooks
```typescript
// Good: custom hooks for reusable logic
function useZFSPool(poolId: string) {
  return useQuery({
    queryKey: ['pool', poolId],
    queryFn: () => zfsApi.getPool(poolId),
  })
}

// Usage
const { data: pool } = useZFSPool(poolId)
```

#### 4.5.2 Component Composition
```typescript
// Good: small, focused components
function PoolCard({ pool }: { pool: ZFSPool }) {
  return (
    <div>
      <PoolHeader pool={pool} />
      <PoolStats pool={pool} />
      <PoolActions pool={pool} />
    </div>
  )
}

// Bad: large, monolithic components
function PoolCard({ pool }: { pool: ZFSPool }) {
  // 500+ lines of JSX
}
```

#### 4.5.3 State Management
```typescript
// Good: use React Query for server state
const { data, isLoading } = useQuery({
  queryKey: ['pools'],
  queryFn: zfsApi.listPools,
})

// Good: use local state for UI state
const [isModalOpen, setIsModalOpen] = useState(false)

// Good: use Zustand for global UI state
const { user, setUser } = useAuthStore()
```

### 4.6 Error Handling

#### 4.6.1 Error Boundaries
```typescript
// Good: use error boundaries
function ErrorBoundary({ children }: { children: React.ReactNode }) {
  // ...
}

// Usage
<ErrorBoundary>
  <App />
</ErrorBoundary>
```

#### 4.6.2 Error Handling in Queries
```typescript
// Good: handle errors in queries
const { data, error, isLoading } = useQuery({
  queryKey: ['pools'],
  queryFn: zfsApi.listPools,
  onError: (error) => {
    console.error('Failed to load pools:', error)
    // Show user-friendly error message
  },
})
```

### 4.7 Styling

#### 4.7.1 TailwindCSS
```typescript
// Good: use Tailwind classes
<div className="flex items-center gap-4 p-6 bg-card-dark rounded-lg border border-border-dark">
  <h2 className="text-lg font-bold text-white">Storage Pools</h2>
</div>

// Bad: inline styles
<div style={{ display: 'flex', padding: '24px', backgroundColor: '#18232e' }}>
  <h2 style={{ fontSize: '18px', fontWeight: 'bold', color: 'white' }}>Storage Pools</h2>
</div>
```

#### 4.7.2 Class Organization
```typescript
// Good: logical grouping
className="flex items-center gap-4 p-6 bg-card-dark rounded-lg border border-border-dark hover:bg-border-dark transition-colors"

// Bad: random order
className="p-6 flex border rounded-lg items-center gap-4 bg-card-dark border-border-dark"
```

### 4.8 Testing

#### 4.8.1 Component Testing
```typescript
// Good: test component behavior
describe('StoragePage', () => {
  it('displays pools when loaded', () => {
    render(<StoragePage />)
    expect(screen.getByText('tank')).toBeInTheDocument()
  })

  it('shows loading state', () => {
    render(<StoragePage />)
    expect(screen.getByText('Loading...')).toBeInTheDocument()
  })
})
```

---

## 5. Git Commit Standards

### 5.1 Commit Message Format
```
<type>(<scope>): <subject>

<body>

<footer>
```

### 5.2 Commit Types
- **feat**: New feature
- **fix**: Bug fix
- **docs**: Documentation changes
- **style**: Code style changes (formatting, etc.)
- **refactor**: Code refactoring
- **test**: Test additions or changes
- **chore**: Build process or auxiliary tool changes

### 5.3 Commit Examples
```
feat(storage): add ZFS pool creation endpoint

Add POST /api/v1/storage/zfs/pools endpoint with validation
and error handling.

Closes #123

fix(shares): correct dataset_id field in create share

The frontend was sending dataset_name instead of dataset_id.
Updated to use UUID from dataset selection.

docs: update API documentation for snapshot endpoints

refactor(auth): simplify JWT token validation logic
```

### 5.4 Branch Naming
- **feature/**: New features (e.g., `feature/object-storage`)
- **fix/**: Bug fixes (e.g., `fix/share-creation-error`)
- **docs/**: Documentation (e.g., `docs/api-documentation`)
- **refactor/**: Refactoring (e.g., `refactor/storage-service`)

---

## 6. Code Review Guidelines

### 6.1 Review Checklist
- [ ] Code follows naming conventions
- [ ] Code is properly formatted
- [ ] Error handling is appropriate
- [ ] Tests are included for new features
- [ ] Documentation is updated
- [ ] No security vulnerabilities
- [ ] Performance considerations addressed
- [ ] No commented-out code
- [ ] No console.log statements (use proper logging)

### 6.2 Review Comments
- Be constructive and respectful
- Explain why, not just what
- Suggest improvements, not just point out issues
- Approve when code meets standards

---

## 7. Documentation Standards

### 7.1 Code Comments
- Document complex logic
- Explain "why" not "what"
- Keep comments up-to-date

### 7.2 API Documentation
- Document all public APIs
- Include parameter descriptions
- Include return value descriptions
- Include error conditions

### 7.3 README Files
- Keep README files updated
- Include setup instructions
- Include usage examples
- Include troubleshooting tips

---

## 8. Performance Standards

### 8.1 Backend
- Database queries should be optimized
- Use indexes appropriately
- Avoid N+1 query problems (see the sketch after this list)
- Use connection pooling
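
To make the N+1 rule concrete, the sketch below batches a per-pool lookup into a single query. It is a minimal illustration, assuming a hypothetical `datasets` table with a `pool_id` column and the `github.com/lib/pq` driver; it is not the actual Calypso schema.

```go
import (
	"context"
	"database/sql"
	"fmt"

	"github.com/lib/pq"
)

// datasetNamesByPool loads dataset names for many pools in one query,
// instead of issuing one query per pool (the N+1 pattern).
func datasetNamesByPool(ctx context.Context, db *sql.DB, poolIDs []string) (map[string][]string, error) {
	rows, err := db.QueryContext(ctx,
		`SELECT pool_id, name FROM datasets WHERE pool_id = ANY($1)`,
		pq.Array(poolIDs))
	if err != nil {
		return nil, fmt.Errorf("failed to query datasets: %w", err)
	}
	defer rows.Close()

	result := make(map[string][]string)
	for rows.Next() {
		var poolID, name string
		if err := rows.Scan(&poolID, &name); err != nil {
			return nil, err
		}
		result[poolID] = append(result[poolID], name)
	}
	return result, rows.Err()
}
```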

### 8.2 Frontend
- Minimize re-renders
- Use React.memo for expensive components
- Lazy load routes
- Optimize bundle size

---

## 9. Security Standards

### 9.1 Input Validation
- Validate all user inputs
- Sanitize inputs before use
- Use parameterized queries (see the sketch after this list)
- Escape output
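
In Go's `database/sql`, the parameterized-query rule looks like the sketch below; the `users` table and the helper name are illustrative, not the real IAM schema.

```go
import (
	"context"
	"database/sql"
	"fmt"
)

// getUserID looks up a user with a parameterized query. The $1
// placeholder passes the input as data, so it can never be parsed as SQL.
func getUserID(ctx context.Context, db *sql.DB, username string) (string, error) {
	var id string
	err := db.QueryRowContext(ctx,
		`SELECT id FROM users WHERE username = $1`, username).Scan(&id)
	if err != nil {
		return "", fmt.Errorf("failed to look up user %q: %w", username, err)
	}
	return id, nil
}
```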

### 9.2 Authentication
- Never store passwords in plaintext
- Use secure token storage
- Implement proper session management
- Handle token expiration

### 9.3 Authorization
- Check permissions on every request
- Use principle of least privilege
- Log security events
- Handle authorization errors properly

---

## 10. Tools and Configuration

### 10.1 Backend Tools
- **gofmt**: Code formatting
- **goimports**: Import organization
- **golint**: Linting
- **go vet**: Static analysis

### 10.2 Frontend Tools
- **Prettier**: Code formatting
- **ESLint**: Linting
- **TypeScript**: Type checking
- **Vite**: Build tool

---

## 11. Exceptions

### 11.1 When to Deviate
- Performance-critical code may require optimization
- Legacy code integration may require different patterns
- Third-party library constraints

### 11.2 Documenting Exceptions
- Document why standards are not followed
- Include comments explaining deviations
- Review exceptions during code review

---

## Document History

| Version | Date | Author | Changes |
|---------|------|--------|---------|
| 1.0.0-alpha | 2025-01-XX | Development Team | Initial coding standards document |

102
docs/alpha/Calypso_System_Architecture.md
Normal file
@@ -0,0 +1,102 @@
# Calypso System Architecture Document
Adastra Storage & Backup Appliance
Version: 1.0 (Dev Release V1)
Status: Baseline Architecture

## 1. Purpose & Scope
This document describes the system architecture of Calypso as an integrated storage and backup appliance. It aligns with the System Requirements Specification (SRS) and System Design Specification (SDS), and serves as a reference for architects, engineers, operators, and auditors.

## 2. Architectural Principles
- Appliance-first design
- Clear separation of binaries, configuration, and data
- ZFS-native storage architecture
- Upgrade and rollback safety
- Minimal external dependencies

## 3. High-Level Architecture
Calypso operates as a single-node appliance where the control plane orchestrates storage, backup, object storage, tape, and iSCSI subsystems through a unified API and UI.

## 4. Deployment Model
- Single-node deployment
- Bare metal or VM (bare metal recommended)
- Linux-based OS (LTS)

## 5. Centralized Filesystem Architecture

### 5.1 Domain Separation
| Domain | Location |
|------|---------|
| Binaries | /opt/adastra/calypso |
| Configuration | /etc/calypso |
| Data (ZFS) | /srv/calypso |
| Logs | /var/log/calypso |
| Runtime | /var/lib/calypso, /run/calypso |

### 5.2 Binary Layout
```
/opt/adastra/calypso/
  releases/
    1.0.0/
      bin/
      web/
      migrations/
      scripts/
  current -> releases/1.0.0
  third_party/
```

### 5.3 Configuration Layout
```
/etc/calypso/
  calypso.yaml
  secrets.env
  tls/
  integrations/
  system/
```

### 5.4 ZFS Data Layout
```
/srv/calypso/
  db/
  backups/
  object/
  shares/
  vtl/
  iscsi/
  uploads/
  cache/
  _system/
```

## 6. Component Architecture
- Calypso Control Plane (Go-based API)
- ZFS (core storage)
- Bacula (backup)
- MinIO (object storage)
- SCST (iSCSI)
- MHVTL (virtual tape library)

## 7. Data Flow
- User actions handled by Calypso API
- Operations executed on ZFS datasets
- Metadata stored centrally in ZFS

## 8. Security Baseline
- Service isolation
- Permission-based filesystem access
- Secrets separation
- Controlled subsystem access

## 9. Upgrade & Rollback
- Versioned releases
- Atomic switch via symlink (sketched below)
- Data preserved independently in ZFS

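The symlink switch can be sketched in Go as below, following the `current -> releases/<version>` layout from section 5.2. The helper name and error handling are illustrative; the actual upgrade tooling may differ.

```go
import (
	"fmt"
	"os"
	"path/filepath"
)

// switchRelease repoints the `current` symlink to a new release.
// The new link is created under a temporary name and then renamed over
// the old one, because rename(2) is atomic while delete-then-create is not.
func switchRelease(baseDir, version string) error {
	target := filepath.Join("releases", version)
	current := filepath.Join(baseDir, "current")
	tmp := current + ".tmp"

	if err := os.Symlink(target, tmp); err != nil {
		return fmt.Errorf("failed to create temporary symlink: %w", err)
	}
	if err := os.Rename(tmp, current); err != nil {
		os.Remove(tmp) // Clean up; the old link is untouched on failure
		return fmt.Errorf("failed to switch current release: %w", err)
	}
	return nil
}
```

Rollback is the same call with the previous version, e.g. `switchRelease("/opt/adastra/calypso", "0.9.0")`.
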
## 10. Non-Goals (V1)
- Multi-node clustering
- Kubernetes orchestration
- Inline malware scanning

## 11. Summary
Calypso provides a clean, upgrade-safe, and enterprise-grade appliance architecture, forming a strong foundation for future HA and immutable designs.
75
docs/alpha/README.md
Normal file
@@ -0,0 +1,75 @@
# AtlasOS - Calypso Documentation
## Alpha Release

This directory contains the Software Requirements Specification (SRS) and Software Design Specification (SDS) documentation for the Calypso backup appliance management system.

## Documentation Structure

### Software Requirements Specification (SRS)
Located in `srs/` directory:

- **SRS-00-Overview.md**: Overview and introduction
- **SRS-01-Storage-Management.md**: ZFS storage management requirements
- **SRS-02-File-Sharing.md**: SMB/NFS share management requirements
- **SRS-03-iSCSI-Management.md**: iSCSI target management requirements
- **SRS-04-Tape-Library-Management.md**: Physical and VTL management requirements
- **SRS-05-Backup-Management.md**: Bacula/Bareos integration requirements
- **SRS-06-Object-Storage.md**: S3-compatible object storage requirements
- **SRS-07-Snapshot-Replication.md**: ZFS snapshot and replication requirements
- **SRS-08-System-Management.md**: System configuration and management requirements
- **SRS-09-Monitoring-Alerting.md**: Monitoring and alerting requirements
- **SRS-10-IAM.md**: Identity and access management requirements
- **SRS-11-User-Interface.md**: User interface and experience requirements

### Software Design Specification (SDS)
Located in `sds/` directory:

- **SDS-00-Overview.md**: Design overview and introduction
- **SDS-01-System-Architecture.md**: System architecture and component design
- **SDS-02-Database-Design.md**: Database schema and data models
- **SDS-03-API-Design.md**: REST API design and endpoints
- **SDS-04-Security-Design.md**: Security architecture and implementation
- **SDS-05-Integration-Design.md**: External system integration patterns

### Coding Standards
- **CODING-STANDARDS.md**: Code style, naming conventions, and best practices for Go and TypeScript/React

## Quick Reference

### Features Implemented
1. ✅ Storage Management (ZFS pools, datasets, disks)
2. ✅ File Sharing (SMB/CIFS, NFS)
3. ✅ iSCSI Management (SCST integration)
4. ✅ Tape Library Management (Physical & VTL)
5. ✅ Backup Management (Bacula/Bareos integration)
6. ✅ Object Storage (S3-compatible)
7. ✅ Snapshot & Replication
8. ✅ System Management (Network, Services, NTP, SNMP, License)
9. ✅ Monitoring & Alerting
10. ✅ Identity & Access Management (IAM)
11. ✅ User Interface (React SPA)

### Technology Stack
- **Backend**: Go 1.21+, Gin, PostgreSQL
- **Frontend**: React 18, TypeScript, Vite, TailwindCSS
- **External**: ZFS, SCST, Bacula/Bareos, MHVTL

## Document Status

**Version**: 1.0.0-alpha
**Last Updated**: 2025-01-XX
**Status**: In Development

## Contributing

When updating documentation:
1. Update the relevant SRS or SDS document
2. Update the version and date in the document
3. Update this README if structure changes
4. Maintain consistency across documents

## Related Documentation

- Implementation guides: `../on-progress/`
- Technical specifications: `../../src/srs-technical-spec-documents/`

182
docs/alpha/sds/SDS-00-Overview.md
Normal file
@@ -0,0 +1,182 @@
# Software Design Specification (SDS)
## AtlasOS - Calypso Backup Appliance
### Alpha Release

**Version:** 1.0.0-alpha
**Date:** 2025-01-XX
**Status:** In Development

---

## 1. Introduction

### 1.1 Purpose
This document provides a comprehensive Software Design Specification (SDS) for AtlasOS - Calypso, describing the system architecture, component design, database schema, API design, and implementation details.

### 1.2 Scope
This SDS covers:
- System architecture and design patterns
- Component structure and organization
- Database schema and data models
- API design and endpoints
- Security architecture
- Deployment architecture
- Integration patterns

### 1.3 Document Organization
- **SDS-01**: System Architecture
- **SDS-02**: Backend Design
- **SDS-03**: Frontend Design
- **SDS-04**: Database Design
- **SDS-05**: API Design
- **SDS-06**: Security Design
- **SDS-07**: Integration Design

---

## 2. System Architecture Overview

### 2.1 High-Level Architecture
Calypso follows a three-tier architecture:
1. **Presentation Layer**: React-based SPA
2. **Application Layer**: Go-based REST API
3. **Data Layer**: PostgreSQL database

### 2.2 Architecture Patterns
- **Clean Architecture**: Separation of concerns, domain-driven design
- **RESTful API**: Resource-based API design
- **Repository Pattern**: Data access abstraction (see the sketch after this list)
- **Service Layer**: Business logic encapsulation
- **Middleware Pattern**: Cross-cutting concerns

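A minimal sketch of the repository pattern in Go, assuming a hypothetical `zfs_pools` table and the `ZFSPool` struct shown in CODING-STANDARDS.md; the interface shape is illustrative, not the project's actual repository code.

```go
// PoolRepository abstracts data access so services never touch SQL directly.
type PoolRepository interface {
	GetByID(ctx context.Context, id string) (*ZFSPool, error)
}

// postgresPoolRepository is the PostgreSQL-backed implementation.
type postgresPoolRepository struct {
	db *sql.DB
}

func (r *postgresPoolRepository) GetByID(ctx context.Context, id string) (*ZFSPool, error) {
	var p ZFSPool
	err := r.db.QueryRowContext(ctx,
		`SELECT id, name, status FROM zfs_pools WHERE id = $1`, id).
		Scan(&p.ID, &p.Name, &p.Status)
	if err != nil {
		return nil, fmt.Errorf("failed to load pool %s: %w", id, err)
	}
	return &p, nil
}
```

Because services depend only on the interface, a fake repository can be substituted in tests without a database.
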
### 2.3 Technology Stack

#### Backend
- **Language**: Go 1.21+
- **Framework**: Gin web framework
- **Database**: PostgreSQL 14+
- **Authentication**: JWT tokens
- **Logging**: Zerolog structured logging

#### Frontend
- **Framework**: React 18 with TypeScript
- **Build Tool**: Vite
- **Styling**: TailwindCSS
- **State Management**: Zustand + TanStack Query
- **Routing**: React Router
- **HTTP Client**: Axios

---

## 3. Design Principles

### 3.1 Separation of Concerns
- Clear boundaries between layers
- Single responsibility principle
- Dependency inversion

### 3.2 Scalability
- Stateless API design
- Horizontal scaling capability
- Efficient database queries

### 3.3 Security
- Defense in depth
- Principle of least privilege
- Input validation and sanitization

### 3.4 Maintainability
- Clean code principles
- Comprehensive logging
- Error handling
- Code documentation

### 3.5 Performance
- Response caching
- Database query optimization
- Efficient data structures
- Background job processing

---

## 4. System Components

### 4.1 Backend Components
- **Auth**: Authentication and authorization
- **Storage**: ZFS and storage management
- **Shares**: SMB/NFS share management
- **SCST**: iSCSI target management
- **Tape**: Physical and VTL management
- **Backup**: Bacula/Bareos integration
- **System**: System service management
- **Monitoring**: Metrics and alerting
- **IAM**: User and access management

### 4.2 Frontend Components
- **Pages**: Route-based page components
- **Components**: Reusable UI components
- **API**: API client and queries
- **Store**: Global state management
- **Hooks**: Custom React hooks
- **Utils**: Utility functions

---

## 5. Data Flow

### 5.1 Request Flow
1. User action in frontend
2. API call via Axios
3. Request middleware (auth, logging, rate limiting)
4. Handler processes request
5. Service layer business logic
6. Database operations
7. Response returned to frontend
8. UI update via React Query

### 5.2 Background Jobs
- Disk monitoring (every 5 minutes; a minimal sketch follows this list)
- ZFS pool monitoring (every 2 minutes)
- Metrics collection (every 30 seconds)
- Alert rule evaluation (continuous)

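The ticker-driven loop behind these jobs can be sketched as below; the function name and logging are illustrative, not the actual monitor code.

```go
import (
	"context"
	"log"
	"time"
)

// startDiskMonitor runs a sync function every five minutes until the
// context is cancelled at shutdown.
func startDiskMonitor(ctx context.Context, syncDisks func(context.Context) error) {
	ticker := time.NewTicker(5 * time.Minute)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return // Graceful shutdown
		case <-ticker.C:
			if err := syncDisks(ctx); err != nil {
				log.Printf("disk sync failed: %v", err)
			}
		}
	}
}
```

Each monitor is started as its own goroutine, e.g. `go startDiskMonitor(ctx, svc.SyncDisks)` (`svc.SyncDisks` is a placeholder).
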
---

## 6. Deployment Architecture

### 6.1 Single-Server Deployment
- Backend API service (systemd)
- Frontend static files (nginx/caddy)
- PostgreSQL database
- External services (ZFS, SCST, Bacula)

### 6.2 Service Management
- Systemd service files
- Auto-restart on failure
- Log rotation
- Health checks

---

## 7. Future Enhancements

### 7.1 Scalability
- Multi-server deployment
- Load balancing
- Database replication
- Distributed caching

### 7.2 Features
- WebSocket real-time updates
- GraphQL API option
- Microservices architecture
- Container orchestration

---

## Document History

| Version | Date | Author | Changes |
|---------|------|--------|---------|
| 1.0.0-alpha | 2025-01-XX | Development Team | Initial SDS document |

302
docs/alpha/sds/SDS-01-System-Architecture.md
Normal file
@@ -0,0 +1,302 @@
# SDS-01: System Architecture

## 1. Architecture Overview

### 1.1 Three-Tier Architecture
```
┌─────────────────────────────────────┐
│         Presentation Layer          │
│            (React SPA)              │
└──────────────┬──────────────────────┘
               │ HTTP/REST
┌──────────────▼──────────────────────┐
│         Application Layer           │
│           (Go REST API)             │
└──────────────┬──────────────────────┘
               │ SQL
┌──────────────▼──────────────────────┐
│            Data Layer               │
│           (PostgreSQL)              │
└─────────────────────────────────────┘
```

### 1.2 Component Layers

#### Backend Layers
1. **Handler Layer**: HTTP request handling, validation
2. **Service Layer**: Business logic, orchestration
3. **Repository Layer**: Data access, database operations
4. **Model Layer**: Data structures, domain models

#### Frontend Layers
1. **Page Layer**: Route-based page components
2. **Component Layer**: Reusable UI components
3. **API Layer**: API client, data fetching
4. **Store Layer**: Global state management

## 2. Backend Architecture

### 2.1 Directory Structure
```
backend/
├── cmd/
│   └── calypso-api/
│       └── main.go
├── internal/
│   ├── auth/
│   ├── storage/
│   ├── shares/
│   ├── scst/
│   ├── tape_physical/
│   ├── tape_vtl/
│   ├── backup/
│   ├── system/
│   ├── monitoring/
│   ├── iam/
│   ├── tasks/
│   └── common/
│       ├── config/
│       ├── database/
│       ├── logger/
│       ├── router/
│       └── cache/
└── db/
    └── migrations/
```

### 2.2 Module Organization
Each module follows this structure:
- **handler.go**: HTTP handlers
- **service.go**: Business logic
- **model.go**: Data models (if needed)
- **repository.go**: Database operations (if needed)

### 2.3 Common Components
- **config**: Configuration management
- **database**: Database connection and migrations
- **logger**: Structured logging
- **router**: HTTP router, middleware
- **cache**: Response caching
- **auth**: Authentication middleware
- **audit**: Audit logging middleware

## 3. Frontend Architecture

### 3.1 Directory Structure
```
frontend/
├── src/
│   ├── pages/
│   ├── components/
│   ├── api/
│   ├── store/
│   ├── hooks/
│   ├── lib/
│   └── App.tsx
└── public/
```

### 3.2 Component Organization
- **pages/**: Route-based page components
- **components/**: Reusable UI components
  - **ui/**: Base UI components (buttons, inputs, etc.)
  - **Layout.tsx**: Main layout component
- **api/**: API client and query definitions
- **store/**: Zustand stores
- **hooks/**: Custom React hooks
- **lib/**: Utility functions

## 4. Request Processing Flow

### 4.1 HTTP Request Flow
The middleware chain runs top to bottom; a minimal Gin wiring sketch follows the diagram.
```
Client Request
      ↓
CORS Middleware
      ↓
Rate Limiting Middleware
      ↓
Security Headers Middleware
      ↓
Cache Middleware (if enabled)
      ↓
Audit Logging Middleware
      ↓
Authentication Middleware
      ↓
Permission Middleware
      ↓
Handler
      ↓
Service
      ↓
Database
      ↓
Response
```

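In Gin, such a chain is built by registering middleware in order. The sketch below is runnable but deliberately simplified: the two inline middleware stand in for the real implementations in `internal/common/router`.

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// securityHeaders is a minimal middleware example: it runs before the
// handler and adds headers to every response.
func securityHeaders() gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Header("X-Content-Type-Options", "nosniff")
		c.Header("X-Frame-Options", "DENY")
		c.Next() // Hand off to the next middleware/handler in the chain
	}
}

// requireAuth sketches the authentication step: reject requests without
// a bearer token (real token validation is omitted here).
func requireAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		if c.GetHeader("Authorization") == "" {
			c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "unauthorized"})
			return
		}
		c.Next()
	}
}

func main() {
	r := gin.New()
	r.Use(gin.Logger(), gin.Recovery()) // Cross-cutting concerns first
	r.Use(securityHeaders())

	api := r.Group("/api/v1")
	api.Use(requireAuth()) // Authentication before handlers
	api.GET("/ping", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{"message": "pong"})
	})

	r.Run(":8080")
}
```
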
### 4.2 Error Handling Flow
```
Error Occurred
    ↓
Service Layer Error
    ↓
Handler Error Handling
    ↓
Error Response Formatting
    ↓
HTTP Error Response
    ↓
Frontend Error Handling
    ↓
User Notification
```

## 5. Background Services

### 5.1 Monitoring Services
All monitors follow the same shape (see the sketch after this list):
- **Disk Monitor**: Syncs disk information every 5 minutes
- **ZFS Pool Monitor**: Syncs ZFS pool status every 2 minutes
- **Metrics Service**: Collects system metrics every 30 seconds
- **Alert Rule Engine**: Continuously evaluates alert rules

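Each monitor is a goroutine driven by a ticker that invokes a sync function until its context is cancelled. A minimal sketch, assuming a generic `runMonitor` helper (the sync body is a placeholder):

```go
package main

import (
	"context"
	"log"
	"time"
)

// runMonitor calls sync on every tick until the context is cancelled.
func runMonitor(ctx context.Context, interval time.Duration, sync func(context.Context) error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := sync(ctx); err != nil {
				log.Printf("sync failed: %v", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// Disk monitor cadence: every 5 minutes, per the schedule above.
	go runMonitor(ctx, 5*time.Minute, func(ctx context.Context) error {
		log.Println("syncing disk information")
		return nil
	})
	select {} // block forever; a real server would wait on shutdown signals
}
```
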
### 5.2 Event System
- **Event Hub**: Broadcasts events to subscribers
- **Metrics Broadcaster**: Broadcasts metrics to WebSocket clients
- **Alert Service**: Processes alerts and notifications

## 6. Data Flow Patterns

### 6.1 Read Operations
```
Frontend Query
    ↓
API Call
    ↓
Handler
    ↓
Service
    ↓
Database Query
    ↓
Response
    ↓
React Query Cache
    ↓
UI Update
```

### 6.2 Write Operations
```
Frontend Mutation
    ↓
API Call
    ↓
Handler (Validation)
    ↓
Service (Business Logic)
    ↓
Database Transaction
    ↓
Cache Invalidation
    ↓
Response
    ↓
React Query Invalidation
    ↓
UI Update
```

## 7. Integration Points

### 7.1 External System Integrations
- **ZFS**: Command-line tools (`zpool`, `zfs`)
- **SCST**: Configuration files and commands
- **Bacula/Bareos**: Database and `bconsole` commands
- **MHVTL**: Configuration and control
- **Systemd**: Service management

### 7.2 Integration Patterns
- **Command Execution**: Execute system commands
- **File Operations**: Read/write configuration files
- **Database Access**: Direct database queries (Bacula)
- **API Calls**: HTTP API calls (future)

## 8. Security Architecture

### 8.1 Authentication Flow
```
Login Request
    ↓
Credential Validation
    ↓
JWT Token Generation
    ↓
Token Response
    ↓
Token Storage (Frontend)
    ↓
Token in Request Headers
    ↓
Token Validation (Middleware)
    ↓
Request Processing
```

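The generation and validation steps map onto a small pair of helpers. A minimal sketch using `github.com/golang-jwt/jwt/v5`; the claim names, expiry, and secret handling are assumptions rather than the actual Calypso code:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

var secret = []byte("change-me") // would come from configuration, not a literal

// issueToken corresponds to the "JWT Token Generation" step.
func issueToken(userID string) (string, error) {
	claims := jwt.MapClaims{
		"sub": userID,
		"exp": time.Now().Add(time.Hour).Unix(),
	}
	return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
}

// validateToken corresponds to the "Token Validation (Middleware)" step.
func validateToken(raw string) (string, error) {
	tok, err := jwt.Parse(raw, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	if err != nil {
		return "", fmt.Errorf("invalid token: %w", err)
	}
	claims, ok := tok.Claims.(jwt.MapClaims)
	if !ok {
		return "", fmt.Errorf("unexpected claims type")
	}
	sub, _ := claims["sub"].(string)
	return sub, nil
}

func main() {
	tok, _ := issueToken("admin")
	user, err := validateToken(tok)
	fmt.Println(user, err)
}
```
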
### 8.2 Authorization Flow
```
Authenticated Request
    ↓
User Role Retrieval
    ↓
Permission Check
    ↓
Resource Access Check
    ↓
Request Processing or Denial
```

## 9. Caching Strategy

### 9.1 Response Caching
- **Cacheable Endpoints**: GET requests only
- **Cache Keys**: Based on URL and query parameters (see the sketch after this list)
- **TTL**: Configurable per endpoint
- **Invalidation**: On write operations

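Because keys are derived from the URL and query parameters, a stable key can be produced by hashing the path together with the canonically encoded query string. A minimal sketch; the exact hashing scheme is an assumption:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
)

// cacheKey derives a deterministic key from path + query parameters.
func cacheKey(r *http.Request) string {
	// url.Values.Encode sorts keys, so the key is stable regardless of
	// the order parameters appear in the request.
	sum := sha256.Sum256([]byte(r.URL.Path + "?" + r.URL.Query().Encode()))
	return hex.EncodeToString(sum[:])
}

func main() {
	r, _ := http.NewRequest(http.MethodGet, "/api/v1/shares?status=active&page=1", nil)
	fmt.Println(cacheKey(r))
}
```
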
### 9.2 Frontend Caching
- **React Query**: Automatic caching and invalidation
- **Stale Time**: 5 minutes default
- **Cache Time**: 30 minutes default

## 10. Logging Architecture

### 10.1 Log Levels
- **DEBUG**: Detailed debugging information
- **INFO**: General informational messages
- **WARN**: Warning messages
- **ERROR**: Error messages

### 10.2 Log Structure
- **Structured Logging**: JSON format
- **Fields**: Timestamp, level, message, context
- **Audit Logs**: Separate audit log table

## 11. Error Handling Architecture

### 11.1 Error Types
- **Validation Errors**: 400 Bad Request
- **Authentication Errors**: 401 Unauthorized
- **Authorization Errors**: 403 Forbidden
- **Not Found Errors**: 404 Not Found
- **Server Errors**: 500 Internal Server Error

### 11.2 Error Response Format
```json
{
  "error": "Error message",
  "code": "ERROR_CODE",
  "details": {}
}
```

385
docs/alpha/sds/SDS-02-Database-Design.md
Normal file
@@ -0,0 +1,385 @@

# SDS-02: Database Design

## 1. Database Overview

### 1.1 Database System
- **Type**: PostgreSQL 14+
- **Encoding**: UTF-8
- **Connection Pooling**: pgxpool (see the sketch after this list)
- **Migrations**: Custom migration system

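A minimal sketch of opening the pgxpool connection pool named above; the DSN is illustrative:

```go
package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	ctx := context.Background()
	// DSN shown for illustration only; credentials come from configuration.
	pool, err := pgxpool.New(ctx, "postgres://calypso:secret@localhost:5432/calypso")
	if err != nil {
		log.Fatalf("connect: %v", err)
	}
	defer pool.Close()

	var now string
	if err := pool.QueryRow(ctx, "SELECT now()::text").Scan(&now); err != nil {
		log.Fatalf("query: %v", err)
	}
	log.Println("database time:", now)
}
```
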
### 1.2 Database Schema Organization
- **Tables**: Organized by domain (users, storage, shares, etc.)
- **Indexes**: Performance indexes on foreign keys and frequently queried columns
- **Constraints**: Foreign keys, unique constraints, check constraints

## 2. Core Tables

### 2.1 Users & Authentication
```sql
users (
    id UUID PRIMARY KEY,
    username VARCHAR(255) UNIQUE NOT NULL,
    email VARCHAR(255) UNIQUE,
    password_hash VARCHAR(255) NOT NULL,
    is_active BOOLEAN DEFAULT true,
    created_at TIMESTAMP,
    updated_at TIMESTAMP
)

roles (
    id UUID PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    description TEXT,
    created_at TIMESTAMP,
    updated_at TIMESTAMP
)

permissions (
    id UUID PRIMARY KEY,
    resource VARCHAR(255) NOT NULL,
    action VARCHAR(255) NOT NULL,
    description TEXT,
    UNIQUE(resource, action)
)

user_roles (
    user_id UUID REFERENCES users(id),
    role_id UUID REFERENCES roles(id),
    PRIMARY KEY (user_id, role_id)
)

role_permissions (
    role_id UUID REFERENCES roles(id),
    permission_id UUID REFERENCES permissions(id),
    PRIMARY KEY (role_id, permission_id)
)

groups (
    id UUID PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    description TEXT,
    created_at TIMESTAMP,
    updated_at TIMESTAMP
)

user_groups (
    user_id UUID REFERENCES users(id),
    group_id UUID REFERENCES groups(id),
    PRIMARY KEY (user_id, group_id)
)
```

### 2.2 Storage Tables
```sql
zfs_pools (
    id UUID PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    description TEXT,
    raid_level VARCHAR(50),
    status VARCHAR(50),
    total_capacity_bytes BIGINT,
    used_capacity_bytes BIGINT,
    health_status VARCHAR(50),
    created_at TIMESTAMP,
    updated_at TIMESTAMP,
    created_by UUID REFERENCES users(id)
)

zfs_datasets (
    id UUID PRIMARY KEY,
    name VARCHAR(255) NOT NULL,
    pool_name VARCHAR(255) REFERENCES zfs_pools(name),
    type VARCHAR(50),
    mount_point VARCHAR(255),
    used_bytes BIGINT,
    available_bytes BIGINT,
    compression VARCHAR(50),
    quota BIGINT,
    reservation BIGINT,
    created_at TIMESTAMP,
    UNIQUE(pool_name, name)
)

physical_disks (
    id UUID PRIMARY KEY,
    device_path VARCHAR(255) UNIQUE NOT NULL,
    vendor VARCHAR(255),
    model VARCHAR(255),
    serial_number VARCHAR(255),
    size_bytes BIGINT,
    type VARCHAR(50),
    status VARCHAR(50),
    last_synced_at TIMESTAMP
)

storage_repositories (
    id UUID PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    type VARCHAR(50),
    path VARCHAR(255),
    capacity_bytes BIGINT,
    used_bytes BIGINT,
    created_at TIMESTAMP,
    updated_at TIMESTAMP
)
```

### 2.3 Shares Tables
```sql
shares (
    id UUID PRIMARY KEY,
    dataset_id UUID REFERENCES zfs_datasets(id),
    share_type VARCHAR(50),
    nfs_enabled BOOLEAN DEFAULT false,
    nfs_options TEXT,
    nfs_clients TEXT[],
    smb_enabled BOOLEAN DEFAULT false,
    smb_share_name VARCHAR(255),
    smb_path VARCHAR(255),
    smb_comment TEXT,
    smb_guest_ok BOOLEAN DEFAULT false,
    smb_read_only BOOLEAN DEFAULT false,
    smb_browseable BOOLEAN DEFAULT true,
    is_active BOOLEAN DEFAULT true,
    created_at TIMESTAMP,
    updated_at TIMESTAMP,
    created_by UUID REFERENCES users(id)
)
```

### 2.4 iSCSI Tables
```sql
iscsi_targets (
    id UUID PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    alias VARCHAR(255),
    enabled BOOLEAN DEFAULT true,
    created_at TIMESTAMP,
    updated_at TIMESTAMP,
    created_by UUID REFERENCES users(id)
)

iscsi_luns (
    id UUID PRIMARY KEY,
    target_id UUID REFERENCES iscsi_targets(id),
    lun_number INTEGER,
    device_path VARCHAR(255),
    size_bytes BIGINT,
    created_at TIMESTAMP
)

iscsi_initiators (
    id UUID PRIMARY KEY,
    iqn VARCHAR(255) UNIQUE NOT NULL,
    created_at TIMESTAMP
)

target_initiators (
    target_id UUID REFERENCES iscsi_targets(id),
    initiator_id UUID REFERENCES iscsi_initiators(id),
    PRIMARY KEY (target_id, initiator_id)
)
```

### 2.5 Tape Tables
```sql
vtl_libraries (
    id UUID PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    vendor VARCHAR(255),
    model VARCHAR(255),
    drive_count INTEGER,
    slot_count INTEGER,
    status VARCHAR(50),
    created_at TIMESTAMP,
    updated_at TIMESTAMP,
    created_by UUID REFERENCES users(id)
)

physical_libraries (
    id UUID PRIMARY KEY,
    vendor VARCHAR(255),
    model VARCHAR(255),
    serial_number VARCHAR(255),
    drive_count INTEGER,
    slot_count INTEGER,
    discovered_at TIMESTAMP
)
```

### 2.6 Backup Tables
```sql
backup_jobs (
    id UUID PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    client_id INTEGER,
    fileset_id INTEGER,
    schedule VARCHAR(255),
    storage_pool_id INTEGER,
    enabled BOOLEAN DEFAULT true,
    created_at TIMESTAMP,
    updated_at TIMESTAMP,
    created_by UUID REFERENCES users(id)
)
```

### 2.7 Monitoring Tables
```sql
alerts (
    id UUID PRIMARY KEY,
    rule_id UUID,
    severity VARCHAR(50),
    source VARCHAR(255),
    message TEXT,
    status VARCHAR(50),
    acknowledged_at TIMESTAMP,
    resolved_at TIMESTAMP,
    created_at TIMESTAMP
)

alert_rules (
    id UUID PRIMARY KEY,
    name VARCHAR(255) UNIQUE NOT NULL,
    description TEXT,
    source VARCHAR(255),
    condition_type VARCHAR(255),
    condition_config JSONB,
    severity VARCHAR(50),
    enabled BOOLEAN DEFAULT true,
    created_at TIMESTAMP,
    updated_at TIMESTAMP
)
```

### 2.8 Audit Tables
```sql
audit_logs (
    id UUID PRIMARY KEY,
    user_id UUID REFERENCES users(id),
    action VARCHAR(255),
    resource_type VARCHAR(255),
    resource_id VARCHAR(255),
    method VARCHAR(10),
    path VARCHAR(255),
    ip_address VARCHAR(45),
    user_agent TEXT,
    request_body JSONB,
    response_status INTEGER,
    created_at TIMESTAMP
)
```

### 2.9 Task Tables
```sql
tasks (
    id UUID PRIMARY KEY,
    type VARCHAR(255),
    status VARCHAR(50),
    progress INTEGER,
    result JSONB,
    error_message TEXT,
    created_at TIMESTAMP,
    updated_at TIMESTAMP,
    completed_at TIMESTAMP,
    created_by UUID REFERENCES users(id)
)
```

## 3. Indexes

### 3.1 Performance Indexes
```sql
-- Users
CREATE INDEX idx_users_username ON users(username);
CREATE INDEX idx_users_email ON users(email);

-- Storage
CREATE INDEX idx_zfs_pools_name ON zfs_pools(name);
CREATE INDEX idx_zfs_datasets_pool_name ON zfs_datasets(pool_name);

-- Shares
CREATE INDEX idx_shares_dataset_id ON shares(dataset_id);
CREATE INDEX idx_shares_created_by ON shares(created_by);

-- iSCSI
CREATE INDEX idx_iscsi_targets_name ON iscsi_targets(name);
CREATE INDEX idx_iscsi_luns_target_id ON iscsi_luns(target_id);

-- Monitoring
CREATE INDEX idx_alerts_status ON alerts(status);
CREATE INDEX idx_alerts_created_at ON alerts(created_at);
CREATE INDEX idx_audit_logs_user_id ON audit_logs(user_id);
CREATE INDEX idx_audit_logs_created_at ON audit_logs(created_at);
```

## 4. Migrations

### 4.1 Migration System
- **Location**: `db/migrations/`
- **Naming**: `NNN_description.sql`
- **Execution**: Sequential execution on startup (see the sketch after this list)
- **Version Tracking**: `schema_migrations` table

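A minimal sketch of such a runner under the conventions above; the `schema_migrations` layout and the use of pgxpool here are assumptions about the custom system, not its actual code:

```go
package migrations

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"sort"

	"github.com/jackc/pgx/v5/pgxpool"
)

// Run applies db/migrations/NNN_description.sql files in order and
// records each applied version in schema_migrations.
func Run(ctx context.Context, pool *pgxpool.Pool, dir string) error {
	if _, err := pool.Exec(ctx,
		`CREATE TABLE IF NOT EXISTS schema_migrations (version TEXT PRIMARY KEY)`); err != nil {
		return err
	}
	files, err := filepath.Glob(filepath.Join(dir, "*.sql"))
	if err != nil {
		return err
	}
	sort.Strings(files) // the NNN_ prefix makes lexicographic = sequential order
	for _, f := range files {
		version := filepath.Base(f)
		var applied bool
		if err := pool.QueryRow(ctx,
			`SELECT EXISTS(SELECT 1 FROM schema_migrations WHERE version=$1)`, version).Scan(&applied); err != nil {
			return err
		}
		if applied {
			continue
		}
		sqlBytes, err := os.ReadFile(f)
		if err != nil {
			return err
		}
		// No query arguments, so pgx sends this via the simple protocol,
		// which permits multi-statement migration files.
		if _, err := pool.Exec(ctx, string(sqlBytes)); err != nil {
			return fmt.Errorf("apply %s: %w", version, err)
		}
		if _, err := pool.Exec(ctx,
			`INSERT INTO schema_migrations(version) VALUES($1)`, version); err != nil {
			return err
		}
	}
	return nil
}
```
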
### 4.2 Migration Files
- `001_initial_schema.sql`: Core tables
- `002_storage_and_tape_schema.sql`: Storage and tape tables
- `003_performance_indexes.sql`: Performance indexes
- `004_add_zfs_pools_table.sql`: ZFS pools
- `005_add_zfs_datasets_table.sql`: ZFS datasets
- `006_add_zfs_shares_and_iscsi.sql`: Shares and iSCSI
- `007_add_vendor_to_vtl_libraries.sql`: VTL updates
- `008_add_user_groups.sql`: User groups
- `009_backup_jobs_schema.sql`: Backup jobs
- `010_add_backup_permissions.sql`: Backup permissions
- `011_sync_bacula_jobs_function.sql`: Bacula sync function

## 5. Data Relationships

### 5.1 Entity Relationships
- **Users** → **Roles** (many-to-many)
- **Roles** → **Permissions** (many-to-many)
- **Users** → **Groups** (many-to-many)
- **ZFS Pools** → **ZFS Datasets** (one-to-many)
- **ZFS Datasets** → **Shares** (one-to-many)
- **iSCSI Targets** → **LUNs** (one-to-many)
- **iSCSI Targets** → **Initiators** (many-to-many)

## 6. Data Integrity

### 6.1 Constraints
- **Primary Keys**: UUID primary keys for all tables
- **Foreign Keys**: Referential integrity
- **Unique Constraints**: Unique usernames, emails, names
- **Check Constraints**: Valid status values, positive numbers

### 6.2 Cascading Rules
- **ON DELETE CASCADE**: Child records deleted with parent
- **ON DELETE RESTRICT**: Prevent deletion if referenced
- **ON UPDATE CASCADE**: Update foreign keys on parent update

## 7. Query Optimization

### 7.1 Query Patterns
- **Eager Loading**: Join related data when needed
- **Pagination**: LIMIT and OFFSET for large result sets
- **Filtering**: WHERE clauses for filtering
- **Sorting**: ORDER BY for sorted results

### 7.2 Caching Strategy
- **Query Result Caching**: Cache frequently accessed queries
- **Cache Invalidation**: Invalidate on write operations
- **TTL**: Time-to-live for cached data

## 8. Backup & Recovery

### 8.1 Backup Strategy
- **Regular Backups**: Daily database backups
- **Point-in-Time Recovery**: WAL archiving
- **Backup Retention**: 30 days

### 8.2 Recovery Procedures
- **Full Restore**: Restore from backup
- **Point-in-Time**: Restore to a specific timestamp
- **Selective Restore**: Restore specific tables

286
docs/alpha/sds/SDS-03-API-Design.md
Normal file
@@ -0,0 +1,286 @@

# SDS-03: API Design

## 1. API Overview

### 1.1 API Style
- **RESTful**: Resource-based API design
- **Versioning**: `/api/v1/` prefix
- **Content-Type**: `application/json`
- **Authentication**: JWT Bearer tokens

### 1.2 API Base URL
```
http://localhost:8080/api/v1
```

## 2. Authentication API

### 2.1 Endpoints
```
POST /auth/login
POST /auth/logout
GET  /auth/me
```

### 2.2 Request/Response Examples

#### Login
```http
POST /api/v1/auth/login
Content-Type: application/json

{
  "username": "admin",
  "password": "password"
}

Response: 200 OK
{
  "token": "eyJhbGciOiJIUzI1NiIs...",
  "user": {
    "id": "uuid",
    "username": "admin",
    "email": "admin@example.com",
    "roles": ["admin"]
  }
}
```

#### Get Current User
```http
GET /api/v1/auth/me
Authorization: Bearer <token>

Response: 200 OK
{
  "id": "uuid",
  "username": "admin",
  "email": "admin@example.com",
  "roles": ["admin"],
  "permissions": ["storage:read", "storage:write", ...]
}
```

## 3. Storage API

### 3.1 ZFS Pools
```
GET    /storage/zfs/pools
GET    /storage/zfs/pools/:id
POST   /storage/zfs/pools
DELETE /storage/zfs/pools/:id
POST   /storage/zfs/pools/:id/spare
```

### 3.2 ZFS Datasets
```
GET    /storage/zfs/pools/:id/datasets
POST   /storage/zfs/pools/:id/datasets
DELETE /storage/zfs/pools/:id/datasets/:dataset
```

### 3.3 Request/Response Examples

#### Create ZFS Pool
```http
POST /api/v1/storage/zfs/pools
Content-Type: application/json

{
  "name": "tank",
  "raid_level": "mirror",
  "disks": ["/dev/sdb", "/dev/sdc"],
  "compression": "lz4",
  "deduplication": false
}

Response: 201 Created
{
  "id": "uuid",
  "name": "tank",
  "status": "online",
  "total_capacity_bytes": 1000000000000,
  "created_at": "2025-01-01T00:00:00Z"
}
```

## 4. Shares API

### 4.1 Endpoints
```
GET    /shares
GET    /shares/:id
POST   /shares
PUT    /shares/:id
DELETE /shares/:id
```

### 4.2 Request/Response Examples

#### Create Share
```http
POST /api/v1/shares
Content-Type: application/json

{
  "dataset_id": "uuid",
  "share_type": "both",
  "nfs_enabled": true,
  "nfs_clients": ["192.168.1.0/24"],
  "smb_enabled": true,
  "smb_share_name": "shared",
  "smb_path": "/mnt/tank/shared",
  "smb_guest_ok": false,
  "smb_read_only": false
}

Response: 201 Created
{
  "id": "uuid",
  "dataset_id": "uuid",
  "share_type": "both",
  "nfs_enabled": true,
  "smb_enabled": true,
  "created_at": "2025-01-01T00:00:00Z"
}
```

## 5. iSCSI API

### 5.1 Endpoints
```
GET    /scst/targets
GET    /scst/targets/:id
POST   /scst/targets
DELETE /scst/targets/:id
POST   /scst/targets/:id/luns
DELETE /scst/targets/:id/luns/:lunId
POST   /scst/targets/:id/initiators
GET    /scst/initiators
POST   /scst/config/apply
```

## 6. System API

### 6.1 Endpoints
```
GET  /system/services
GET  /system/services/:name
POST /system/services/:name/restart
GET  /system/services/:name/logs
GET  /system/interfaces
PUT  /system/interfaces/:name
GET  /system/ntp
POST /system/ntp
GET  /system/logs
GET  /system/network/throughput
POST /system/execute
POST /system/support-bundle
```

## 7. Monitoring API

### 7.1 Endpoints
```
GET  /monitoring/metrics
GET  /monitoring/health
GET  /monitoring/alerts
GET  /monitoring/alerts/:id
POST /monitoring/alerts/:id/acknowledge
POST /monitoring/alerts/:id/resolve
GET  /monitoring/rules
POST /monitoring/rules
```

## 8. IAM API

### 8.1 Endpoints
```
GET    /iam/users
GET    /iam/users/:id
POST   /iam/users
PUT    /iam/users/:id
DELETE /iam/users/:id

GET    /iam/roles
GET    /iam/roles/:id
POST   /iam/roles
PUT    /iam/roles/:id
DELETE /iam/roles/:id

GET    /iam/permissions
GET    /iam/groups
```

## 9. Error Responses

### 9.1 Error Format
```json
{
  "error": "Error message",
  "code": "ERROR_CODE",
  "details": {
    "field": "validation error"
  }
}
```

### 9.2 HTTP Status Codes
- **200 OK**: Success
- **201 Created**: Resource created
- **400 Bad Request**: Validation error
- **401 Unauthorized**: Authentication required
- **403 Forbidden**: Permission denied
- **404 Not Found**: Resource not found
- **500 Internal Server Error**: Server error

## 10. Pagination

### 10.1 Pagination Parameters
```
GET /api/v1/resource?page=1&limit=20
```

### 10.2 Pagination Response
```json
{
  "data": [...],
  "pagination": {
    "page": 1,
    "limit": 20,
    "total": 100,
    "total_pages": 5
  }
}
```

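On the server side, these parameters reduce to a small parsing helper. A minimal sketch in a Gin handler; the defaults and upper bound are assumptions:

```go
package main

import (
	"net/http"
	"strconv"

	"github.com/gin-gonic/gin"
)

// paginate reads ?page= and ?limit=, falling back to sane defaults and
// clamping out-of-range values.
func paginate(c *gin.Context) (page, limit int) {
	page, _ = strconv.Atoi(c.DefaultQuery("page", "1"))
	limit, _ = strconv.Atoi(c.DefaultQuery("limit", "20"))
	if page < 1 {
		page = 1
	}
	if limit < 1 || limit > 100 {
		limit = 20
	}
	return page, limit
}

func main() {
	r := gin.Default()
	r.GET("/api/v1/resource", func(c *gin.Context) {
		page, limit := paginate(c)
		c.JSON(http.StatusOK, gin.H{
			"data": []string{},
			"pagination": gin.H{"page": page, "limit": limit, "total": 0, "total_pages": 0},
		})
	})
	_ = r.Run(":8080")
}
```
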
## 11. Filtering & Sorting

### 11.1 Filtering
```
GET /api/v1/resource?status=active&type=filesystem
```

### 11.2 Sorting
```
GET /api/v1/resource?sort=name&order=asc
```

## 12. Rate Limiting

### 12.1 Rate Limits
- **Default**: 100 requests per minute per IP (see the sketch after this list)
- **Authenticated**: 200 requests per minute per user
- **Headers**: `X-RateLimit-Limit`, `X-RateLimit-Remaining`

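A minimal sketch of per-IP limiting with `golang.org/x/time/rate` at roughly the default rate above; eviction of idle entries and the per-user variant are omitted, and none of this is the actual Calypso limiter:

```go
package main

import (
	"net/http"
	"sync"

	"github.com/gin-gonic/gin"
	"golang.org/x/time/rate"
)

var (
	mu       sync.Mutex
	limiters = map[string]*rate.Limiter{}
)

// limiterFor returns (creating on first use) the limiter for one client IP.
func limiterFor(ip string) *rate.Limiter {
	mu.Lock()
	defer mu.Unlock()
	l, ok := limiters[ip]
	if !ok {
		l = rate.NewLimiter(rate.Limit(100.0/60.0), 100) // ~100 req/min, burst 100
		limiters[ip] = l
	}
	return l
}

func rateLimit() gin.HandlerFunc {
	return func(c *gin.Context) {
		if !limiterFor(c.ClientIP()).Allow() {
			c.AbortWithStatusJSON(http.StatusTooManyRequests, gin.H{"error": "rate limit exceeded"})
			return
		}
		c.Next()
	}
}

func main() {
	r := gin.New()
	r.Use(rateLimit())
	r.GET("/ping", func(c *gin.Context) { c.String(http.StatusOK, "pong") })
	_ = r.Run(":8080")
}
```
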
## 13. Caching

### 13.1 Cache Headers
- **Cache-Control**: `max-age=300` for GET requests
- **ETag**: Entity tags for cache validation
- **Last-Modified**: Last modification time

### 13.2 Cache Invalidation
- **On Write**: Invalidate related cache entries
- **Manual**: Clear cache via admin endpoint

224
docs/alpha/sds/SDS-04-Security-Design.md
Normal file
@@ -0,0 +1,224 @@

# SDS-04: Security Design

## 1. Security Overview

### 1.1 Security Principles
- **Defense in Depth**: Multiple layers of security
- **Principle of Least Privilege**: Minimum required permissions
- **Secure by Default**: Secure default configurations
- **Input Validation**: Validate all inputs
- **Output Encoding**: Encode all outputs

## 2. Authentication

### 2.1 Authentication Method
- **JWT Tokens**: JSON Web Tokens for stateless authentication
- **Token Expiration**: Configurable expiration time
- **Token Refresh**: Refresh token mechanism (future)

### 2.2 Password Security
- **Hashing**: bcrypt with cost factor 10 (see the sketch after this list)
- **Password Requirements**: Minimum length and complexity
- **Password Storage**: Hashed passwords only, never plaintext

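A minimal sketch of this hashing scheme with `golang.org/x/crypto/bcrypt`, whose `DefaultCost` is 10:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// Hash at registration time; only the hash is persisted.
	hash, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	// At login, comparison re-derives the hash from the candidate password.
	err = bcrypt.CompareHashAndPassword(hash, []byte("s3cret"))
	fmt.Println("match:", err == nil)
}
```
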
### 2.3 Session Management
- **Stateless**: No server-side session storage
- **Token Storage**: Secure storage in the frontend (localStorage/sessionStorage)
- **Token Validation**: Validate on every request

## 3. Authorization

### 3.1 Role-Based Access Control (RBAC)
- **Roles**: Admin, Operator, ReadOnly
- **Permissions**: Resource-based permissions (storage:read, storage:write)
- **Role Assignment**: Users assigned to roles
- **Permission Inheritance**: Permissions inherited from roles

### 3.2 Permission Model
```
Resource:Action

Examples:
- storage:read
- storage:write
- iscsi:read
- iscsi:write
- backup:read
- backup:write
- system:read
- system:write
```

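A `resource:action` string doubles as a set key, which keeps the check itself trivial. A minimal sketch; how the permission set is loaded from the user's roles is out of scope here:

```go
package main

import "fmt"

// PermissionSet holds "resource:action" strings granted via the user's roles.
type PermissionSet map[string]bool

func (p PermissionSet) Allows(resource, action string) bool {
	return p[resource+":"+action]
}

func main() {
	perms := PermissionSet{"storage:read": true, "storage:write": true}
	fmt.Println(perms.Allows("storage", "write")) // true
	fmt.Println(perms.Allows("system", "write"))  // false
}
```
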
### 3.3 Permission Checking
- **Middleware**: Permission middleware checks on protected routes
- **Handler Level**: Additional checks in handlers if needed
- **Service Level**: Business logic permission checks

## 4. Input Validation

### 4.1 Validation Layers
1. **Frontend**: Client-side validation
2. **Handler**: Request validation
3. **Service**: Business logic validation
4. **Database**: Constraint validation

### 4.2 Validation Rules
- **Required Fields**: Check for required fields
- **Type Validation**: Validate data types
- **Format Validation**: Validate formats (email, IP, etc.)
- **Range Validation**: Validate numeric ranges
- **Length Validation**: Validate string lengths

### 4.3 SQL Injection Prevention
- **Parameterized Queries**: Use parameterized queries only
- **No String Concatenation**: Never concatenate SQL strings
- **Input Sanitization**: Sanitize all inputs

## 5. Output Encoding

### 5.1 XSS Prevention
- **HTML Encoding**: Encode HTML in responses
- **JSON Encoding**: Proper JSON encoding
- **Content Security Policy**: CSP headers

### 5.2 Response Headers
```
Content-Security-Policy: default-src 'self'
X-Content-Type-Options: nosniff
X-Frame-Options: DENY
X-XSS-Protection: 1; mode=block
```

## 6. HTTPS & TLS

### 6.1 TLS Configuration
- **TLS Version**: TLS 1.2 minimum
- **Cipher Suites**: Strong cipher suites only
- **Certificate**: Valid SSL certificate

### 6.2 HTTPS Enforcement
- **Redirect HTTP to HTTPS**: Force HTTPS
- **HSTS**: HTTP Strict Transport Security

## 7. Rate Limiting

### 7.1 Rate Limit Strategy
- **IP-Based**: Rate limit by IP address
- **User-Based**: Rate limit by authenticated user
- **Endpoint-Based**: Different limits per endpoint

### 7.2 Rate Limit Configuration
- **Default**: 100 requests/minute
- **Authenticated**: 200 requests/minute
- **Strict Endpoints**: Lower limits for sensitive endpoints

## 8. Audit Logging

### 8.1 Audit Events
- **Authentication**: Login, logout, failed login
- **Authorization**: Permission denied events
- **Data Access**: Read operations (configurable)
- **Data Modification**: Create, update, delete operations
- **System Actions**: System configuration changes

### 8.2 Audit Log Format
```json
{
  "id": "uuid",
  "user_id": "uuid",
  "action": "CREATE_SHARE",
  "resource_type": "share",
  "resource_id": "uuid",
  "method": "POST",
  "path": "/api/v1/shares",
  "ip_address": "192.168.1.100",
  "user_agent": "Mozilla/5.0...",
  "request_body": {...},
  "response_status": 201,
  "created_at": "2025-01-01T00:00:00Z"
}
```

## 9. Error Handling

### 9.1 Error Information
- **Public Errors**: Safe error messages for users
- **Private Errors**: Detailed errors in logs only
- **No Stack Traces**: Never expose stack traces to users

### 9.2 Error Logging
- **Log All Errors**: Log all errors with context
- **Sensitive Data**: Never log passwords, tokens, or secrets
- **Error Tracking**: Track error patterns

## 10. File Upload Security

### 10.1 Upload Restrictions
- **File Types**: Whitelist allowed file types
- **File Size**: Maximum file size limits
- **File Validation**: Validate file contents

### 10.2 Storage Security
- **Secure Storage**: Store in a secure location
- **Access Control**: Restrict file access
- **Virus Scanning**: Scan uploaded files (future)

## 11. API Security

### 11.1 API Authentication
- **Bearer Tokens**: JWT in the Authorization header
- **Token Validation**: Validate on every request
- **Token Expiration**: Enforce token expiration

### 11.2 API Rate Limiting
- **Per IP**: Rate limit by IP address
- **Per User**: Rate limit by authenticated user
- **Per Endpoint**: Different limits per endpoint

## 12. Database Security

### 12.1 Database Access
- **Connection Security**: Encrypted connections
- **Credentials**: Secure credential storage
- **Least Privilege**: Database user with minimum privileges

### 12.2 Data Encryption
- **At Rest**: Database encryption (future)
- **In Transit**: TLS for database connections
- **Sensitive Data**: Encrypt sensitive fields

## 13. System Security

### 13.1 Command Execution
- **Whitelist**: Only allow whitelisted commands
- **Input Validation**: Validate command inputs
- **Output Sanitization**: Sanitize command outputs

### 13.2 File System Access
- **Path Validation**: Validate all file paths
- **Access Control**: Restrict file system access
- **Symlink Protection**: Prevent symlink attacks

## 14. Security Headers

### 14.1 HTTP Security Headers
```
X-Content-Type-Options: nosniff
X-Frame-Options: DENY
X-XSS-Protection: 1; mode=block
Content-Security-Policy: default-src 'self'
Strict-Transport-Security: max-age=31536000
Referrer-Policy: strict-origin-when-cross-origin
```

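A minimal sketch of applying these headers as a Gin middleware:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// securityHeaders sets the response headers listed in section 14.1 on
// every response before the handler runs.
func securityHeaders() gin.HandlerFunc {
	return func(c *gin.Context) {
		h := c.Writer.Header()
		h.Set("X-Content-Type-Options", "nosniff")
		h.Set("X-Frame-Options", "DENY")
		h.Set("X-XSS-Protection", "1; mode=block")
		h.Set("Content-Security-Policy", "default-src 'self'")
		h.Set("Strict-Transport-Security", "max-age=31536000")
		h.Set("Referrer-Policy", "strict-origin-when-cross-origin")
		c.Next()
	}
}

func main() {
	r := gin.New()
	r.Use(securityHeaders())
	r.GET("/ping", func(c *gin.Context) { c.String(http.StatusOK, "pong") })
	_ = r.Run(":8080")
}
```
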
## 15. Security Monitoring

### 15.1 Security Events
- **Failed Logins**: Monitor failed login attempts
- **Permission Denials**: Monitor permission denials
- **Suspicious Activity**: Detect suspicious patterns

### 15.2 Alerting
- **Security Alerts**: Alert on security events
- **Thresholds**: Alert thresholds for suspicious activity
- **Notification**: Notify administrators

294
docs/alpha/sds/SDS-05-Integration-Design.md
Normal file
@@ -0,0 +1,294 @@

# SDS-05: Integration Design

## 1. Integration Overview

### 1.1 External Systems
Calypso integrates with several external systems:
- **ZFS**: Zettabyte File System for storage management
- **SCST**: SCSI target subsystem for iSCSI
- **Bacula/Bareos**: Backup software
- **MHVTL**: Virtual Tape Library emulation
- **Systemd**: Service management
- **PostgreSQL**: Database system

## 2. ZFS Integration

### 2.1 Integration Method
- **Command Execution**: Execute `zpool` and `zfs` commands
- **Output Parsing**: Parse command output
- **Error Handling**: Handle command errors

### 2.2 ZFS Commands
```bash
# Pool operations
zpool create <pool> <disks>
zpool list
zpool status <pool>
zpool destroy <pool>

# Dataset operations
zfs create <dataset>
zfs list
zfs destroy <dataset>
zfs snapshot <dataset>@<snapshot>
zfs clone <snapshot> <clone>
zfs rollback <snapshot>
```

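A minimal sketch of the command-plus-parsing approach for pools, using `zpool list -H -o ...` (the `-H` flag suppresses headers and separates fields with tabs, which keeps parsing simple):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

type pool struct{ Name, Size, Alloc, Health string }

// listPools shells out to zpool and splits the tab-separated output.
func listPools() ([]pool, error) {
	out, err := exec.Command("zpool", "list", "-H", "-o", "name,size,alloc,health").Output()
	if err != nil {
		return nil, fmt.Errorf("zpool list: %w", err)
	}
	var pools []pool
	for _, line := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		f := strings.Split(line, "\t")
		if len(f) == 4 {
			pools = append(pools, pool{f[0], f[1], f[2], f[3]})
		}
	}
	return pools, nil
}

func main() {
	pools, err := listPools()
	fmt.Println(pools, err)
}
```
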
### 2.3 Data Synchronization
- **Pool Monitor**: Background service syncs pool status every 2 minutes
- **Dataset Monitor**: Real-time dataset information
- **ARC Stats**: Real-time ARC statistics

## 3. SCST Integration

### 3.1 Integration Method
- **Configuration Files**: Read/write SCST configuration files
- **Command Execution**: Execute SCST admin commands
- **Config Apply**: Apply configuration changes

### 3.2 SCST Operations
```bash
# Target management
scstadmin -add_target <target>
scstadmin -enable_target <target>
scstadmin -disable_target <target>
scstadmin -remove_target <target>

# LUN management
scstadmin -add_lun <lun> -driver <driver> -target <target>
scstadmin -remove_lun <lun> -driver <driver> -target <target>

# Initiator management
scstadmin -add_init <initiator> -driver <driver> -target <target>
scstadmin -remove_init <initiator> -driver <driver> -target <target>

# Config apply
scstadmin -write_config /etc/scst.conf
```

### 3.3 Configuration File Format
- **Location**: `/etc/scst.conf`
- **Format**: SCST configuration syntax
- **Backup**: Back up the file before modifications

## 4. Bacula/Bareos Integration

### 4.1 Integration Methods
- **Database Access**: Direct PostgreSQL access to the Bacula database
- **Bconsole Commands**: Execute commands via `bconsole`
- **Job Synchronization**: Sync jobs from the Bacula database

### 4.2 Database Schema
- **Tables**: Jobs, Clients, Filesets, Pools, Volumes, Media
- **Queries**: SQL queries to retrieve backup information
- **Updates**: Update job status and volume information

### 4.3 Bconsole Commands
```bash
# Job operations
run job=<job_name>
status job=<job_id>
list jobs
list files jobid=<job_id>

# Client operations
list clients
status client=<client_name>

# Pool operations
list pools
list volumes pool=<pool_name>

# Storage operations
list storage
status storage=<storage_name>
```

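`bconsole` can be driven non-interactively by piping commands to its standard input. A minimal sketch of that approach; the trailing `quit` and the timeout are assumptions, not the actual Calypso integration:

```go
package main

import (
	"context"
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// runBconsole feeds one command to bconsole over stdin and returns the
// combined output; the timeout guards against a hung console session.
func runBconsole(command string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	cmd := exec.CommandContext(ctx, "bconsole")
	cmd.Stdin = strings.NewReader(command + "\nquit\n")
	out, err := cmd.CombinedOutput()
	return string(out), err
}

func main() {
	out, err := runBconsole("list jobs")
	fmt.Println(out, err)
}
```
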
### 4.4 Job Synchronization
- **Background Sync**: Periodic sync from the Bacula database
- **Real-time Updates**: Update on job completion
- **Status Mapping**: Map Bacula status codes to Calypso status

## 5. MHVTL Integration

### 5.1 Integration Method
- **Configuration Files**: Read/write MHVTL configuration
- **Command Execution**: Execute MHVTL control commands
- **Status Monitoring**: Monitor VTL status

### 5.2 MHVTL Operations
```bash
# Library operations
vtlcmd -l <library> -s <status>
vtlcmd -l <library> -d <drive> -l <load>
vtlcmd -l <library> -d <drive> -u <unload>

# Media operations
vtlcmd -l <library> -m <media> -l <label>
```

### 5.3 Configuration Management
- **Library Configuration**: Create/update VTL library configs
- **Drive Configuration**: Configure virtual drives
- **Slot Configuration**: Configure virtual slots

## 6. Systemd Integration

### 6.1 Integration Method
- **DBus API**: Use the systemd DBus API
- **Command Execution**: Execute `systemctl` commands
- **Service Status**: Query service status

### 6.2 Systemd Operations
```bash
# Service control
systemctl start <service>
systemctl stop <service>
systemctl restart <service>
systemctl status <service>

# Service information
systemctl list-units --type=service
systemctl show <service>
```

### 6.3 Service Management
- **Service Discovery**: Discover available services
- **Status Monitoring**: Monitor service status
- **Log Access**: Access service logs via journalctl

## 7. Network Interface Integration

### 7.1 Integration Method
- **System Commands**: Execute network configuration commands
- **File Operations**: Read/write network configuration files
- **Status Queries**: Query interface status

### 7.2 Network Operations
```bash
# Interface information
ip addr show
ip link show
ethtool <interface>

# Interface configuration
ip addr add <ip>/<mask> dev <interface>
ip link set <interface> up
ip link set <interface> down
```

### 7.3 Configuration Files
- **Netplan**: Ubuntu network configuration
- **NetworkManager**: NetworkManager configuration
- **ifconfig**: Legacy configuration tooling

## 8. NTP Integration

### 8.1 Integration Method
- **Configuration Files**: Read/write NTP configuration
- **Command Execution**: Execute NTP commands
- **Status Queries**: Query NTP status

### 8.2 NTP Operations
```bash
# NTP status
ntpq -p
timedatectl status

# NTP configuration
timedatectl set-timezone <timezone>
timedatectl set-ntp <true|false>
```

### 8.3 Configuration Files
- **ntp.conf**: NTP daemon configuration
- **chrony.conf**: Chrony configuration (alternative)

## 9. Integration Patterns

### 9.1 Command Execution Pattern
```go
import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

// executeCommand runs an external tool with a timeout so a hung command
// cannot block the caller indefinitely; combined output is returned so
// stderr diagnostics are not lost.
func executeCommand(cmd string, args []string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	output, err := exec.CommandContext(ctx, cmd, args...).CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("command failed: %w: %s", err, output)
	}
	return string(output), nil
}
```

### 9.2 File Operation Pattern
```go
func readConfigFile(path string) ([]byte, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read config: %w", err)
	}
	return data, nil
}

func writeConfigFile(path string, data []byte) error {
	// 0644: owner read/write, group and world read-only.
	if err := os.WriteFile(path, data, 0644); err != nil {
		return fmt.Errorf("failed to write config: %w", err)
	}
	return nil
}
```

### 9.3 Database Integration Pattern
```go
// queryBaculaDB runs a query against the Bacula catalog and materializes
// each row into a map keyed by column name.
func queryBaculaDB(db *sql.DB, query string, args ...interface{}) ([]map[string]interface{}, error) {
	rows, err := db.Query(query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	cols, _ := rows.Columns()
	var results []map[string]interface{}
	for rows.Next() {
		vals := make([]interface{}, len(cols))
		for i := range vals {
			vals[i] = new(interface{})
		}
		if err := rows.Scan(vals...); err != nil {
			return nil, err
		}
		row := make(map[string]interface{}, len(cols))
		for i, c := range cols {
			row[c] = *(vals[i].(*interface{}))
		}
		results = append(results, row)
	}
	return results, rows.Err()
}
```

## 10. Error Handling

### 10.1 Command Execution Errors
- **Timeout**: Command execution timeouts
- **Exit Code**: Check command exit codes
- **Output Parsing**: Handle parsing errors

### 10.2 File Operation Errors
- **Permission Errors**: Handle permission denied
- **File Not Found**: Handle missing files
- **Write Errors**: Handle write failures

### 10.3 Database Integration Errors
- **Connection Errors**: Handle database connection failures
- **Query Errors**: Handle SQL query errors
- **Transaction Errors**: Handle transaction failures

## 11. Monitoring & Health Checks

### 11.1 Integration Health
- **ZFS Health**: Monitor ZFS pool health
- **SCST Health**: Monitor SCST service status
- **Bacula Health**: Monitor Bacula service status
- **MHVTL Health**: Monitor MHVTL service status

### 11.2 Health Check Endpoints
```
GET /api/v1/health
GET /api/v1/health/zfs
GET /api/v1/health/scst
GET /api/v1/health/bacula
GET /api/v1/health/vtl
```

## 12. Future Integrations

### 12.1 Planned Integrations
- **LDAP/AD**: Directory service integration
- **Cloud Storage**: Cloud backup integration
- **Monitoring Systems**: Prometheus and Grafana integration
- **Notification Systems**: Email, Slack, and PagerDuty integration

283
docs/alpha/srs/SRS-00-Overview.md
Normal file
@@ -0,0 +1,283 @@

# Software Requirements Specification (SRS)
## AtlasOS - Calypso Backup Appliance
### Alpha Release

**Version:** 1.0.0-alpha
**Date:** 2025-01-XX
**Status:** In Development

---

## 1. Introduction

### 1.1 Purpose
This document provides a comprehensive Software Requirements Specification (SRS) for AtlasOS - Calypso, an enterprise-grade backup appliance management system. The system provides unified management for storage, backup, tape libraries, and system administration through a modern web-based interface.

### 1.2 Scope
Calypso is designed to manage:
- ZFS storage pools and datasets
- File sharing (SMB/CIFS and NFS)
- iSCSI block storage targets
- Physical and Virtual Tape Libraries (VTL)
- Backup job management (Bacula/Bareos integration)
- System monitoring and alerting
- User and access management (IAM)
- Object storage services
- Snapshot and replication management

### 1.3 Definitions, Acronyms, and Abbreviations
- **ZFS**: Zettabyte File System
- **SMB/CIFS**: Server Message Block / Common Internet File System
- **NFS**: Network File System
- **iSCSI**: Internet Small Computer Systems Interface
- **VTL**: Virtual Tape Library
- **IAM**: Identity and Access Management
- **RBAC**: Role-Based Access Control
- **API**: Application Programming Interface
- **REST**: Representational State Transfer
- **JWT**: JSON Web Token
- **SNMP**: Simple Network Management Protocol
- **NTP**: Network Time Protocol

### 1.4 References
- ZFS Documentation: https://openzfs.github.io/openzfs-docs/
- SCST Documentation: http://scst.sourceforge.net/
- Bacula Documentation: https://www.bacula.org/documentation/
- React Documentation: https://react.dev/
- Go Documentation: https://go.dev/doc/

### 1.5 Overview
This SRS is organized into the following sections:
- **SRS-01**: Storage Management
- **SRS-02**: File Sharing (SMB/NFS)
- **SRS-03**: iSCSI Management
- **SRS-04**: Tape Library Management
- **SRS-05**: Backup Management
- **SRS-06**: Object Storage
- **SRS-07**: Snapshot & Replication
- **SRS-08**: System Management
- **SRS-09**: Monitoring & Alerting
- **SRS-10**: Identity & Access Management
- **SRS-11**: User Interface & Experience

---

## 2. System Overview

### 2.1 System Architecture
Calypso follows a client-server architecture:
- **Frontend**: React-based Single Page Application (SPA)
- **Backend**: Go-based REST API server
- **Database**: PostgreSQL for persistent storage
- **External Services**: ZFS, SCST, Bacula/Bareos, MHVTL

### 2.2 Technology Stack

#### Frontend
- React 18 with TypeScript
- Vite for build tooling
- TailwindCSS for styling
- TanStack Query for data fetching
- React Router for navigation
- Zustand for state management
- Axios for HTTP requests
- Lucide React for icons

#### Backend
- Go 1.21+
- Gin web framework
- PostgreSQL database
- JWT for authentication
- Structured logging (zerolog)

### 2.3 Deployment Model
- Single-server deployment
- Systemd service management
- Reverse proxy support (nginx/caddy)
- WebSocket support for real-time updates

---

## 3. Functional Requirements

### 3.1 Authentication & Authorization
- User login/logout
- JWT-based session management
- Role-based access control (Admin, Operator, ReadOnly)
- Permission-based feature access
- Session timeout and refresh

### 3.2 Storage Management
- ZFS pool creation, deletion, and monitoring
- Dataset management (filesystems and volumes)
- Disk discovery and monitoring
- Storage repository management
- ARC statistics monitoring

### 3.3 File Sharing
- SMB/CIFS share creation and configuration
- NFS share creation and client management
- Share access control
- Mount point management

### 3.4 iSCSI Management
- iSCSI target creation and management
- LUN mapping and configuration
- Initiator access control
- Portal configuration
- Extent management

### 3.5 Tape Library Management
- Physical tape library discovery
- Virtual Tape Library (VTL) management
- Tape drive and slot management
- Media inventory

### 3.6 Backup Management
- Backup job creation and scheduling
- Bacula/Bareos integration
- Storage pool and volume management
- Job history and monitoring
- Client management

### 3.7 Object Storage
- S3-compatible bucket management
- Access policy configuration
- User and key management
- Usage monitoring

### 3.8 Snapshot & Replication
- ZFS snapshot creation and management
- Snapshot rollback and cloning
- Replication task configuration
- Remote replication management

### 3.9 System Management
- Network interface configuration
- Service management (start/stop/restart)
- NTP configuration
- SNMP configuration
- System logs viewing
- Terminal console access
- Feature license management

### 3.10 Monitoring & Alerting
- Real-time system metrics
- Storage health monitoring
- Network throughput monitoring
- Alert rule configuration
- Alert history and management

### 3.11 Identity & Access Management
- User account management
- Role management
- Permission assignment
- Group management
- User profile management

---

## 4. Non-Functional Requirements

### 4.1 Performance
- API response time < 200 ms for read operations
- API response time < 1 s for write operations
- Support for 100+ concurrent users
- Real-time metrics updates every 5-30 seconds

### 4.2 Security
- HTTPS support
- JWT token expiration and refresh
- Password hashing (bcrypt)
- SQL injection prevention
- XSS protection
- CSRF protection
- Rate limiting
- Audit logging

### 4.3 Reliability
- Database transaction support
- Error handling and recovery
- Health check endpoints
- Graceful shutdown

### 4.4 Usability
- Responsive web design
- Dark theme support
- Intuitive navigation
- Real-time feedback
- Loading states
- Clear error messages

### 4.5 Maintainability
- Clean code architecture
- Comprehensive logging
- API documentation
- Code comments
- Modular design

---

## 5. System Constraints

### 5.1 Hardware Requirements
- Minimum: 4 GB RAM, 2 CPU cores, 100 GB storage
- Recommended: 8 GB+ RAM, 4+ CPU cores, 500 GB+ storage

### 5.2 Software Requirements
- Linux-based operating system (Ubuntu 24.04+)
- PostgreSQL 14+
- ZFS support
- SCST installed and configured
- Bacula/Bareos (optional, for backup features)

### 5.3 Network Requirements
- Network connectivity for remote access
- SSH access for system management
- Ports 8080 (API) and 3000 (Frontend) accessible

---

## 6. Assumptions and Dependencies

### 6.1 Assumptions
- The system has root/sudo access for ZFS and system operations
- Network interfaces are properly configured
- External services (Bacula, SCST) are installed and accessible
- Users have a basic understanding of storage and backup concepts

### 6.2 Dependencies
- PostgreSQL database
- ZFS kernel module and tools
- SCST kernel module and tools
- Bacula/Bareos (for backup features)
- MHVTL (for VTL features)

---

## 7. Future Enhancements

### 7.1 Planned Features
- LDAP/Active Directory integration
- Multi-site replication
- Cloud backup integration
- Advanced encryption at rest
- WebSocket real-time updates
- Mobile responsiveness improvements
- Advanced reporting and analytics

### 7.2 Potential Enhancements
- Multi-tenant support
- Per-user API rate limiting
- Advanced backup scheduling
- Disaster recovery features
- Performance optimization tools

---

## Document History

| Version | Date | Author | Changes |
|---------|------|--------|---------|
| 1.0.0-alpha | 2025-01-XX | Development Team | Initial SRS document |

127
docs/alpha/srs/SRS-01-Storage-Management.md
Normal file
@@ -0,0 +1,127 @@

# SRS-01: Storage Management

## 1. Overview
The Storage Management module provides comprehensive management of ZFS storage pools, datasets, disks, and storage repositories.

## 2. Functional Requirements

### 2.1 ZFS Pool Management
**FR-SM-001**: System shall allow users to create ZFS pools
- **Input**: Pool name, RAID level, disk selection, compression, deduplication options
- **Output**: Created pool with UUID
- **Validation**: Pool name uniqueness, disk availability, RAID level compatibility

**FR-SM-002**: System shall allow users to list all ZFS pools
- **Output**: List of pools with status, capacity, and health information
- **Refresh**: Auto-refresh every 2 minutes

**FR-SM-003**: System shall allow users to view ZFS pool details
- **Output**: Pool configuration, capacity, health, datasets, disk information

**FR-SM-004**: System shall allow users to delete ZFS pools
- **Validation**: Pool must be empty, or confirmation is required
- **Side Effect**: All datasets in the pool are destroyed

**FR-SM-005**: System shall allow users to add spare disks to pools
- **Input**: Pool ID, disk list
- **Validation**: Disk availability, compatibility

### 2.2 ZFS Dataset Management
**FR-SM-006**: System shall allow users to create ZFS datasets
- **Input**: Pool ID, dataset name, type (filesystem/volume), compression, quota, reservation, mount point
- **Output**: Created dataset with UUID
- **Validation**: Name uniqueness within pool, valid mount point

**FR-SM-007**: System shall allow users to list datasets in a pool
- **Input**: Pool ID
- **Output**: List of datasets with properties
- **Refresh**: Auto-refresh every 1 second

**FR-SM-008**: System shall allow users to delete ZFS datasets
- **Input**: Pool ID, dataset name
- **Validation**: Dataset must not be in use

### 2.3 Disk Management
**FR-SM-009**: System shall discover and list all physical disks
- **Output**: Disk list with size, type, status, mount information
- **Refresh**: Auto-refresh every 5 minutes

**FR-SM-010**: System shall allow users to manually trigger disk discovery
- **Action**: Trigger a disk rescan

**FR-SM-011**: System shall display disk details
- **Output**: Disk properties, partitions, usage, health status

### 2.4 Storage Repository Management
**FR-SM-012**: System shall allow users to create storage repositories
- **Input**: Name, type, path, capacity
- **Output**: Created repository with ID

**FR-SM-013**: System shall allow users to list storage repositories
- **Output**: Repository list with capacity, usage, status

**FR-SM-014**: System shall allow users to view repository details
- **Output**: Repository properties, usage statistics

**FR-SM-015**: System shall allow users to delete storage repositories
- **Validation**: Repository must not be in use

### 2.5 ARC Statistics
**FR-SM-016**: System shall display ZFS ARC statistics
- **Output**: Hit ratio, cache size, eviction statistics
- **Refresh**: Real-time updates

## 3. User Interface Requirements

### 3.1 Storage Dashboard
- Pool overview cards with capacity and health
- Dataset tree view
- Disk list with status indicators
- Quick actions (create pool, create dataset)

### 3.2 Pool Management
- Pool creation wizard
- Pool detail view with tabs (Overview, Datasets, Disks, Settings)
- Pool deletion confirmation dialog

### 3.3 Dataset Management
- Dataset creation form
- Dataset list with filtering and sorting
- Dataset detail view
- Dataset deletion confirmation

## 4. API Endpoints

```
GET    /api/v1/storage/zfs/pools
GET    /api/v1/storage/zfs/pools/:id
POST   /api/v1/storage/zfs/pools
DELETE /api/v1/storage/zfs/pools/:id
POST   /api/v1/storage/zfs/pools/:id/spare

GET    /api/v1/storage/zfs/pools/:id/datasets
POST   /api/v1/storage/zfs/pools/:id/datasets
DELETE /api/v1/storage/zfs/pools/:id/datasets/:dataset

GET    /api/v1/storage/disks
POST   /api/v1/storage/disks/sync

GET    /api/v1/storage/repositories
GET    /api/v1/storage/repositories/:id
POST   /api/v1/storage/repositories
DELETE /api/v1/storage/repositories/:id

GET    /api/v1/storage/zfs/arc/stats
```

## 5. Permissions
- **storage:read**: Required for all read operations
- **storage:write**: Required for create, update, and delete operations

## 6. Error Handling
- Invalid pool name format
- Disk not available
- Pool already exists
- Dataset in use
- Insufficient permissions

141
docs/alpha/srs/SRS-02-File-Sharing.md
Normal file
@@ -0,0 +1,141 @@

# SRS-02: File Sharing (SMB/NFS)

## 1. Overview
The File Sharing module provides management of SMB/CIFS and NFS shares for network file access.

## 2. Functional Requirements

### 2.1 Share Management
**FR-FS-001**: System shall allow users to create shares
- **Input**: Dataset ID, share type (SMB/NFS/Both), share name, mount point
- **Output**: Created share with UUID
- **Validation**: Dataset exists, share name uniqueness

**FR-FS-002**: System shall allow users to list all shares
- **Output**: Share list with type, dataset, status
- **Filtering**: By protocol, dataset, status

**FR-FS-003**: System shall allow users to view share details
- **Output**: Share configuration, protocol settings, access control

**FR-FS-004**: System shall allow users to update shares
- **Input**: Share ID, updated configuration
- **Validation**: Valid configuration values

**FR-FS-005**: System shall allow users to delete shares
- **Validation**: Share must not be actively accessed

### 2.2 SMB/CIFS Configuration
**FR-FS-006**: System shall allow users to configure the SMB share name
- **Input**: Share ID, SMB share name
- **Validation**: Valid SMB share name format

**FR-FS-007**: System shall allow users to configure the SMB path
- **Input**: Share ID, SMB path
- **Validation**: Path exists and is accessible

**FR-FS-008**: System shall allow users to configure the SMB comment
- **Input**: Share ID, comment text

**FR-FS-009**: System shall allow users to enable/disable guest access
- **Input**: Share ID, guest access flag

**FR-FS-010**: System shall allow users to configure read-only access
- **Input**: Share ID, read-only flag

**FR-FS-011**: System shall allow users to configure the browseable option
- **Input**: Share ID, browseable flag

### 2.3 NFS Configuration
**FR-FS-012**: System shall allow users to configure NFS clients
- **Input**: Share ID, client list (IP addresses or hostnames)
- **Validation**: Valid IP/hostname format

**FR-FS-013**: System shall allow users to add NFS clients
- **Input**: Share ID, client address
- **Validation**: Client not already in list

**FR-FS-014**: System shall allow users to remove NFS clients
- **Input**: Share ID, client address

**FR-FS-015**: System shall allow users to configure NFS options
- **Input**: Share ID, NFS options (ro, rw, sync, async, etc.)

### 2.4 Share Status
**FR-FS-016**: System shall display share status (enabled/disabled)
- **Output**: Current status for each protocol

**FR-FS-017**: System shall allow users to enable/disable the SMB protocol
- **Input**: Share ID, enabled flag

**FR-FS-018**: System shall allow users to enable/disable the NFS protocol
- **Input**: Share ID, enabled flag

## 3. User Interface Requirements

### 3.1 Share List View
- Master-detail layout
- Search and filter functionality
- Protocol indicators (SMB/NFS badges)
- Status indicators

### 3.2 Share Detail View
- Protocol tabs (SMB, NFS)
- Configuration forms
- Client management (for NFS)
- Quick actions (enable/disable protocols)

### 3.3 Create Share Modal
- Dataset selection
- Share name input
- Protocol selection
- Initial configuration

## 4. API Endpoints

```
GET /api/v1/shares
GET /api/v1/shares/:id
POST /api/v1/shares
PUT /api/v1/shares/:id
DELETE /api/v1/shares/:id
```

## 5. Data Model

### Share Object
```json
{
  "id": "uuid",
  "dataset_id": "uuid",
  "dataset_name": "string",
  "mount_point": "string",
  "share_type": "smb|nfs|both",
  "smb_enabled": boolean,
  "smb_share_name": "string",
  "smb_path": "string",
  "smb_comment": "string",
  "smb_guest_ok": boolean,
  "smb_read_only": boolean,
  "smb_browseable": boolean,
  "nfs_enabled": boolean,
  "nfs_clients": ["string"],
  "nfs_options": "string",
  "is_active": boolean,
  "created_at": "timestamp",
  "updated_at": "timestamp",
  "created_by": "uuid"
}
```
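
For implementers, a minimal sketch of how this object could map onto a Go struct with `encoding/json` tags. Field names are taken from the model above; the struct name and the concrete types for UUIDs and timestamps are illustrative assumptions:

```go
package shares

import "time"

// Share mirrors the Share Object data model above. Plain strings for
// UUIDs and time.Time for timestamps are assumptions, not SRS mandates.
type Share struct {
	ID            string    `json:"id"`
	DatasetID     string    `json:"dataset_id"`
	DatasetName   string    `json:"dataset_name"`
	MountPoint    string    `json:"mount_point"`
	ShareType     string    `json:"share_type"` // "smb", "nfs", or "both"
	SMBEnabled    bool      `json:"smb_enabled"`
	SMBShareName  string    `json:"smb_share_name"`
	SMBPath       string    `json:"smb_path"`
	SMBComment    string    `json:"smb_comment"`
	SMBGuestOK    bool      `json:"smb_guest_ok"`
	SMBReadOnly   bool      `json:"smb_read_only"`
	SMBBrowseable bool      `json:"smb_browseable"`
	NFSEnabled    bool      `json:"nfs_enabled"`
	NFSClients    []string  `json:"nfs_clients"`
	NFSOptions    string    `json:"nfs_options"`
	IsActive      bool      `json:"is_active"`
	CreatedAt     time.Time `json:"created_at"`
	UpdatedAt     time.Time `json:"updated_at"`
	CreatedBy     string    `json:"created_by"`
}
```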

## 6. Permissions
- **storage:read**: Required for viewing shares
- **storage:write**: Required for creating, updating, deleting shares

## 7. Error Handling
- Invalid dataset ID
- Duplicate share name
- Invalid client address format
- Share in use
- Insufficient permissions

163 docs/alpha/srs/SRS-03-iSCSI-Management.md Normal file
@@ -0,0 +1,163 @@

# SRS-03: iSCSI Management

## 1. Overview
The iSCSI Management module provides configuration and management of iSCSI targets, LUNs, initiators, and portals using SCST.

## 2. Functional Requirements

### 2.1 Target Management
**FR-ISCSI-001**: System shall allow users to create iSCSI targets
- **Input**: Target name, alias
- **Output**: Created target with ID
- **Validation**: Target name uniqueness, valid IQN format

**FR-ISCSI-002**: System shall allow users to list all iSCSI targets
- **Output**: Target list with status, LUN count, initiator count

**FR-ISCSI-003**: System shall allow users to view target details
- **Output**: Target configuration, LUNs, initiators, status

**FR-ISCSI-004**: System shall allow users to delete iSCSI targets
- **Validation**: Target must not be in use

**FR-ISCSI-005**: System shall allow users to enable/disable targets
- **Input**: Target ID, enabled flag

### 2.2 LUN Management
**FR-ISCSI-006**: System shall allow users to add LUNs to targets
- **Input**: Target ID, device path, LUN number
- **Validation**: Device exists, LUN number available

**FR-ISCSI-007**: System shall allow users to remove LUNs from targets
- **Input**: Target ID, LUN ID

**FR-ISCSI-008**: System shall display LUN information
- **Output**: LUN number, device, size, status

### 2.3 Initiator Management
**FR-ISCSI-009**: System shall allow users to add initiators to targets
- **Input**: Target ID, initiator IQN
- **Validation**: Valid IQN format

**FR-ISCSI-010**: System shall allow users to remove initiators from targets
- **Input**: Target ID, initiator ID

**FR-ISCSI-011**: System shall allow users to list all initiators
- **Output**: Initiator list with associated targets

**FR-ISCSI-012**: System shall allow users to create initiator groups
- **Input**: Group name, initiator list
- **Output**: Created group with ID

**FR-ISCSI-013**: System shall allow users to manage initiator groups
- **Actions**: Create, update, delete, add/remove initiators

### 2.4 Portal Management
**FR-ISCSI-014**: System shall allow users to create portals
- **Input**: IP address, port
- **Output**: Created portal with ID

**FR-ISCSI-015**: System shall allow users to list portals
- **Output**: Portal list with IP, port, status

**FR-ISCSI-016**: System shall allow users to update portals
- **Input**: Portal ID, updated configuration

**FR-ISCSI-017**: System shall allow users to delete portals
- **Input**: Portal ID

### 2.5 Extent Management
**FR-ISCSI-018**: System shall allow users to create extents
- **Input**: Device path, size, type
- **Output**: Created extent

**FR-ISCSI-019**: System shall allow users to list extents
- **Output**: Extent list with device, size, type

**FR-ISCSI-020**: System shall allow users to delete extents
- **Input**: Extent device

### 2.6 Configuration Management
**FR-ISCSI-021**: System shall allow users to view the SCST configuration file
- **Output**: Current SCST configuration

**FR-ISCSI-022**: System shall allow users to update the SCST configuration file
- **Input**: Configuration content
- **Validation**: Valid SCST configuration format

**FR-ISCSI-023**: System shall allow users to apply the SCST configuration
- **Action**: Reload SCST configuration
- **Side Effect**: Targets may be restarted
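
As a sketch of how FR-ISCSI-022/023 could be implemented, assuming the standard `scstadmin` CLI from the SCST tools is available; the `-check_config` and `-config` flag names vary between scstadmin versions, so treat them as assumptions:

```go
package scst

import (
	"context"
	"fmt"
	"os/exec"
)

// ApplyConfig validates and then reloads SCST from the given config file
// by shelling out to scstadmin. Checking the file first avoids applying a
// broken configuration (FR-ISCSI-022); applying it may briefly restart
// targets, as noted in FR-ISCSI-023.
func ApplyConfig(ctx context.Context, path string) error {
	if out, err := exec.CommandContext(ctx, "scstadmin", "-check_config", path).CombinedOutput(); err != nil {
		return fmt.Errorf("scst config validation failed: %w: %s", err, out)
	}
	if out, err := exec.CommandContext(ctx, "scstadmin", "-config", path).CombinedOutput(); err != nil {
		return fmt.Errorf("scst config apply failed: %w: %s", err, out)
	}
	return nil
}
```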

## 3. User Interface Requirements

### 3.1 Target List View
- Target cards with status indicators
- Quick actions (enable/disable, delete)
- Filter and search functionality

### 3.2 Target Detail View
- Overview tab (target info, status)
- LUNs tab (LUN list, add/remove)
- Initiators tab (initiator list, add/remove)
- Settings tab (target configuration)

### 3.3 Create Target Wizard
- Target name input
- Alias input
- Initial LUN assignment (optional)
- Initial initiator assignment (optional)

## 4. API Endpoints

```
GET /api/v1/scst/targets
GET /api/v1/scst/targets/:id
POST /api/v1/scst/targets
DELETE /api/v1/scst/targets/:id
POST /api/v1/scst/targets/:id/enable
POST /api/v1/scst/targets/:id/disable

POST /api/v1/scst/targets/:id/luns
DELETE /api/v1/scst/targets/:id/luns/:lunId

POST /api/v1/scst/targets/:id/initiators
GET /api/v1/scst/initiators
GET /api/v1/scst/initiators/:id
DELETE /api/v1/scst/initiators/:id

GET /api/v1/scst/initiator-groups
GET /api/v1/scst/initiator-groups/:id
POST /api/v1/scst/initiator-groups
PUT /api/v1/scst/initiator-groups/:id
DELETE /api/v1/scst/initiator-groups/:id
POST /api/v1/scst/initiator-groups/:id/initiators

GET /api/v1/scst/portals
GET /api/v1/scst/portals/:id
POST /api/v1/scst/portals
PUT /api/v1/scst/portals/:id
DELETE /api/v1/scst/portals/:id

GET /api/v1/scst/extents
POST /api/v1/scst/extents
DELETE /api/v1/scst/extents/:device

GET /api/v1/scst/config/file
PUT /api/v1/scst/config/file
POST /api/v1/scst/config/apply

GET /api/v1/scst/handlers
```

## 5. Permissions
- **iscsi:read**: Required for viewing targets, initiators, portals
- **iscsi:write**: Required for creating, updating, deleting

## 6. Error Handling
- Invalid IQN format
- Target name already exists
- Device not available
- SCST configuration errors
- Insufficient permissions

115 docs/alpha/srs/SRS-04-Tape-Library-Management.md Normal file
@@ -0,0 +1,115 @@

# SRS-04: Tape Library Management

## 1. Overview
The Tape Library Management module provides management of physical and virtual tape libraries, drives, slots, and media.

## 2. Functional Requirements

### 2.1 Physical Tape Library
**FR-TAPE-001**: System shall discover physical tape libraries
- **Action**: Scan for attached tape libraries
- **Output**: List of discovered libraries

**FR-TAPE-002**: System shall list physical tape libraries
- **Output**: Library list with vendor, model, serial number

**FR-TAPE-003**: System shall display physical library details
- **Output**: Library properties, drives, slots, media

**FR-TAPE-004**: System shall allow users to load media
- **Input**: Library ID, drive ID, slot ID
- **Action**: Load tape from slot to drive

**FR-TAPE-005**: System shall allow users to unload media
- **Input**: Library ID, drive ID, slot ID
- **Action**: Unload tape from drive to slot
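
Load/unload operations like FR-TAPE-004/005 are typically driven through the standard `mtx` changer utility. A minimal sketch, assuming the changer is addressed via its SCSI generic node (the device path is illustrative):

```go
package tape

import (
	"context"
	"fmt"
	"os/exec"
	"strconv"
)

// LoadMedia moves a tape from a storage slot into a drive via mtx
// (FR-TAPE-004). changerDev is the changer's sg node, e.g. "/dev/sg3".
func LoadMedia(ctx context.Context, changerDev string, slot, drive int) error {
	cmd := exec.CommandContext(ctx, "mtx", "-f", changerDev,
		"load", strconv.Itoa(slot), strconv.Itoa(drive))
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("mtx load failed: %w: %s", err, out)
	}
	return nil
}

// UnloadMedia returns a tape from a drive to a storage slot (FR-TAPE-005).
func UnloadMedia(ctx context.Context, changerDev string, slot, drive int) error {
	cmd := exec.CommandContext(ctx, "mtx", "-f", changerDev,
		"unload", strconv.Itoa(slot), strconv.Itoa(drive))
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("mtx unload failed: %w: %s", err, out)
	}
	return nil
}
```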

### 2.2 Virtual Tape Library (VTL)
**FR-TAPE-006**: System shall allow users to create VTL libraries
- **Input**: Library name, vendor, model, drive count, slot count
- **Output**: Created VTL with ID

**FR-TAPE-007**: System shall allow users to list VTL libraries
- **Output**: VTL list with status, drive count, slot count

**FR-TAPE-008**: System shall allow users to view VTL details
- **Output**: VTL configuration, drives, slots, media

**FR-TAPE-009**: System shall allow users to update VTL libraries
- **Input**: VTL ID, updated configuration

**FR-TAPE-010**: System shall allow users to delete VTL libraries
- **Input**: VTL ID
- **Validation**: VTL must not be in use

**FR-TAPE-011**: System shall allow users to start/stop VTL libraries
- **Input**: VTL ID, action (start/stop)

### 2.3 Drive Management
**FR-TAPE-012**: System shall display drive information
- **Output**: Drive status, media loaded, position

**FR-TAPE-013**: System shall allow users to control drives
- **Actions**: Load, unload, eject, rewind

### 2.4 Slot Management
**FR-TAPE-014**: System shall display slot information
- **Output**: Slot status, media present, media label

**FR-TAPE-015**: System shall allow users to manage slots
- **Actions**: View media, move media

### 2.5 Media Management
**FR-TAPE-016**: System shall display media inventory
- **Output**: Media list with label, type, status, location

**FR-TAPE-017**: System shall allow users to label media
- **Input**: Media ID, label
- **Validation**: Valid label format

## 3. User Interface Requirements

### 3.1 Library List View
- Physical and VTL library cards
- Status indicators
- Quick actions (discover, create VTL)

### 3.2 Library Detail View
- Overview tab (library info, status)
- Drives tab (drive list, controls)
- Slots tab (slot grid, media info)
- Media tab (media inventory)

### 3.3 VTL Creation Wizard
- Library name and configuration
- Drive and slot count
- Vendor and model selection

## 4. API Endpoints

```
GET /api/v1/tape/physical/libraries
POST /api/v1/tape/physical/libraries/discover
GET /api/v1/tape/physical/libraries/:id

GET /api/v1/tape/vtl/libraries
GET /api/v1/tape/vtl/libraries/:id
POST /api/v1/tape/vtl/libraries
PUT /api/v1/tape/vtl/libraries/:id
DELETE /api/v1/tape/vtl/libraries/:id
POST /api/v1/tape/vtl/libraries/:id/start
POST /api/v1/tape/vtl/libraries/:id/stop
```

## 5. Permissions
- **tape:read**: Required for viewing libraries
- **tape:write**: Required for creating, updating, deleting, controlling

## 6. Error Handling
- Library not found
- Drive not available
- Slot already occupied
- Media not found
- MHVTL service errors
- Insufficient permissions

130 docs/alpha/srs/SRS-05-Backup-Management.md Normal file
@@ -0,0 +1,130 @@

# SRS-05: Backup Management

## 1. Overview
The Backup Management module provides integration with Bacula/Bareos for backup job management, scheduling, and monitoring.

## 2. Functional Requirements

### 2.1 Backup Jobs
**FR-BACKUP-001**: System shall allow users to create backup jobs
- **Input**: Job name, client, fileset, schedule, storage pool
- **Output**: Created job with ID
- **Validation**: Valid client, fileset, schedule

**FR-BACKUP-002**: System shall allow users to list backup jobs
- **Output**: Job list with status, last run, next run
- **Filtering**: By status, client, schedule

**FR-BACKUP-003**: System shall allow users to view job details
- **Output**: Job configuration, history, statistics

**FR-BACKUP-004**: System shall allow users to run jobs manually
- **Input**: Job ID
- **Action**: Trigger immediate job execution

**FR-BACKUP-005**: System shall display job history
- **Output**: Job run history with status, duration, data transferred

### 2.2 Clients
**FR-BACKUP-006**: System shall list backup clients
- **Output**: Client list with status, last backup

**FR-BACKUP-007**: System shall display client details
- **Output**: Client configuration, job history

### 2.3 Storage Pools
**FR-BACKUP-008**: System shall allow users to create storage pools
- **Input**: Pool name, pool type, volume count
- **Output**: Created pool with ID

**FR-BACKUP-009**: System shall allow users to list storage pools
- **Output**: Pool list with type, volume count, usage

**FR-BACKUP-010**: System shall allow users to delete storage pools
- **Input**: Pool ID
- **Validation**: Pool must not be in use

### 2.4 Storage Volumes
**FR-BACKUP-011**: System shall allow users to create storage volumes
- **Input**: Pool ID, volume name, size
- **Output**: Created volume with ID

**FR-BACKUP-012**: System shall allow users to list storage volumes
- **Output**: Volume list with status, usage, expiration

**FR-BACKUP-013**: System shall allow users to update storage volumes
- **Input**: Volume ID, updated properties

**FR-BACKUP-014**: System shall allow users to delete storage volumes
- **Input**: Volume ID

### 2.5 Media Management
**FR-BACKUP-015**: System shall list backup media
- **Output**: Media list with label, type, status, location

**FR-BACKUP-016**: System shall display media details
- **Output**: Media properties, job history, usage

### 2.6 Dashboard Statistics
**FR-BACKUP-017**: System shall display backup dashboard statistics
- **Output**: Total jobs, running jobs, success rate, data backed up

### 2.7 Bconsole Integration
**FR-BACKUP-018**: System shall allow users to execute bconsole commands
- **Input**: Command string
- **Output**: Command output
- **Validation**: Allowed commands only
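
The "allowed commands only" validation is the security-critical piece of FR-BACKUP-018. A minimal allowlist sketch; the command set shown is illustrative, not the project's actual policy:

```go
package backup

import (
	"fmt"
	"strings"
)

// allowedBconsoleCommands is an illustrative allowlist; a real deployment
// would tune this to the operations the UI actually needs.
var allowedBconsoleCommands = map[string]bool{
	"status": true, "list": true, "show": true,
	"run": true, "messages": true, "version": true,
}

// ValidateBconsoleCommand rejects any command whose first word is not on
// the allowlist, implementing the FR-BACKUP-018 validation rule.
func ValidateBconsoleCommand(command string) error {
	fields := strings.Fields(strings.TrimSpace(command))
	if len(fields) == 0 {
		return fmt.Errorf("empty command")
	}
	if !allowedBconsoleCommands[strings.ToLower(fields[0])] {
		return fmt.Errorf("command %q is not allowed", fields[0])
	}
	return nil
}
```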

## 3. User Interface Requirements

### 3.1 Backup Dashboard
- Statistics cards (total jobs, running, success rate)
- Recent job activity
- Quick actions

### 3.2 Job Management
- Job list with filtering
- Job creation wizard
- Job detail view with history
- Job run controls

### 3.3 Storage Management
- Storage pool list and management
- Volume list and management
- Media inventory

## 4. API Endpoints

```
GET /api/v1/backup/dashboard/stats
GET /api/v1/backup/jobs
GET /api/v1/backup/jobs/:id
POST /api/v1/backup/jobs

GET /api/v1/backup/clients
GET /api/v1/backup/storage/pools
POST /api/v1/backup/storage/pools
DELETE /api/v1/backup/storage/pools/:id

GET /api/v1/backup/storage/volumes
POST /api/v1/backup/storage/volumes
PUT /api/v1/backup/storage/volumes/:id
DELETE /api/v1/backup/storage/volumes/:id

GET /api/v1/backup/media
GET /api/v1/backup/storage/daemons

POST /api/v1/backup/console/execute
```

## 5. Permissions
- **backup:read**: Required for viewing jobs, clients, storage
- **backup:write**: Required for creating, updating, deleting, executing

## 6. Error Handling
- Bacula/Bareos connection errors
- Invalid job configuration
- Job execution failures
- Storage pool/volume errors
- Insufficient permissions

111 docs/alpha/srs/SRS-06-Object-Storage.md Normal file
@@ -0,0 +1,111 @@

# SRS-06: Object Storage

## 1. Overview
The Object Storage module provides S3-compatible object storage service management, including buckets, access policies, and user/key management.

## 2. Functional Requirements

### 2.1 Bucket Management
**FR-OBJ-001**: System shall allow users to create buckets
- **Input**: Bucket name, access policy (private/public-read)
- **Output**: Created bucket with ID
- **Validation**: Bucket name uniqueness, valid S3 naming

**FR-OBJ-002**: System shall allow users to list buckets
- **Output**: Bucket list with name, type, usage, object count
- **Filtering**: By name, type, access policy

**FR-OBJ-003**: System shall allow users to view bucket details
- **Output**: Bucket configuration, usage statistics, access policy

**FR-OBJ-004**: System shall allow users to delete buckets
- **Input**: Bucket ID
- **Validation**: Bucket must be empty or confirmation required

**FR-OBJ-005**: System shall display bucket usage
- **Output**: Storage used, object count, last modified
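
As an illustration of how the backend might talk to the underlying S3-compatible service, a sketch using the MinIO Go SDK; the SDK choice, endpoint, and credentials are assumptions, since the SRS does not mandate a particular implementation:

```go
package objectstorage

import (
	"context"
	"fmt"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

// listBuckets enumerates buckets on an S3-compatible endpoint (FR-OBJ-002).
func listBuckets(ctx context.Context, endpoint, accessKey, secretKey string) ([]string, error) {
	client, err := minio.New(endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(accessKey, secretKey, ""),
		Secure: true,
	})
	if err != nil {
		return nil, fmt.Errorf("connect to object store: %w", err)
	}
	buckets, err := client.ListBuckets(ctx)
	if err != nil {
		return nil, fmt.Errorf("list buckets: %w", err)
	}
	names := make([]string, 0, len(buckets))
	for _, b := range buckets {
		names = append(names, b.Name)
	}
	return names, nil
}
```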

### 2.2 Access Policy Management
**FR-OBJ-006**: System shall allow users to configure bucket access policies
- **Input**: Bucket ID, access policy (private, public-read, public-read-write)
- **Output**: Updated access policy

**FR-OBJ-007**: System shall display the current access policy
- **Output**: Policy type, policy document

### 2.3 User & Key Management
**FR-OBJ-008**: System shall allow users to create S3 users
- **Input**: Username, access level
- **Output**: Created user with access keys

**FR-OBJ-009**: System shall allow users to list S3 users
- **Output**: User list with access level, key count

**FR-OBJ-010**: System shall allow users to generate access keys
- **Input**: User ID
- **Output**: Access key ID and secret key

**FR-OBJ-011**: System shall allow users to revoke access keys
- **Input**: User ID, key ID

### 2.4 Service Management
**FR-OBJ-012**: System shall display service status
- **Output**: Service status (running/stopped), uptime

**FR-OBJ-013**: System shall display service statistics
- **Output**: Total usage, object count, endpoint URL

**FR-OBJ-014**: System shall display the S3 endpoint URL
- **Output**: Endpoint URL with copy functionality

## 3. User Interface Requirements

### 3.1 Object Storage Dashboard
- Service status card
- Statistics cards (total usage, object count, uptime)
- S3 endpoint display with copy button

### 3.2 Bucket Management
- Bucket list with search and filter
- Bucket creation modal
- Bucket detail view with tabs (Overview, Settings, Access Policy)
- Bucket actions (delete, configure)

### 3.3 Tabs
- **Buckets**: Main bucket management
- **Users & Keys**: S3 user and access key management
- **Monitoring**: Usage statistics and monitoring
- **Settings**: Service configuration

## 4. API Endpoints

```
GET /api/v1/object-storage/buckets
GET /api/v1/object-storage/buckets/:id
POST /api/v1/object-storage/buckets
DELETE /api/v1/object-storage/buckets/:id
PUT /api/v1/object-storage/buckets/:id/policy

GET /api/v1/object-storage/users
POST /api/v1/object-storage/users
GET /api/v1/object-storage/users/:id/keys
POST /api/v1/object-storage/users/:id/keys
DELETE /api/v1/object-storage/users/:id/keys/:keyId

GET /api/v1/object-storage/service/status
GET /api/v1/object-storage/service/stats
GET /api/v1/object-storage/service/endpoint
```

## 5. Permissions
- **object-storage:read**: Required for viewing buckets, users
- **object-storage:write**: Required for creating, updating, deleting

## 6. Error Handling
- Invalid bucket name
- Bucket already exists
- Bucket not empty
- Invalid access policy
- Service not available
- Insufficient permissions

145 docs/alpha/srs/SRS-07-Snapshot-Replication.md Normal file
@@ -0,0 +1,145 @@

# SRS-07: Snapshot & Replication

## 1. Overview
The Snapshot & Replication module provides ZFS snapshot management and remote replication task configuration.

## 2. Functional Requirements

### 2.1 Snapshot Management
**FR-SNAP-001**: System shall allow users to create snapshots
- **Input**: Dataset name, snapshot name
- **Output**: Created snapshot with timestamp
- **Validation**: Dataset exists, snapshot name uniqueness

**FR-SNAP-002**: System shall allow users to list snapshots
- **Output**: Snapshot list with name, dataset, created date, referenced size
- **Filtering**: By dataset, date range, name

**FR-SNAP-003**: System shall allow users to view snapshot details
- **Output**: Snapshot properties, dataset, size, creation date

**FR-SNAP-004**: System shall allow users to delete snapshots
- **Input**: Snapshot ID
- **Validation**: Snapshot not in use

**FR-SNAP-005**: System shall allow users to roll back to a snapshot
- **Input**: Snapshot ID
- **Warning**: Data loss warning required
- **Action**: Roll back dataset to snapshot state

**FR-SNAP-006**: System shall allow users to clone snapshots
- **Input**: Snapshot ID, clone name
- **Output**: Created clone dataset

**FR-SNAP-007**: System shall display snapshot retention information
- **Output**: Snapshots marked for expiration, retention policy

### 2.2 Replication Management
**FR-SNAP-008**: System shall allow users to create replication tasks
- **Input**: Task name, source dataset, target host, target dataset, schedule, compression
- **Output**: Created replication task with ID
- **Validation**: Valid source dataset, target host reachable

**FR-SNAP-009**: System shall allow users to list replication tasks
- **Output**: Task list with status, last run, next run

**FR-SNAP-010**: System shall allow users to view replication task details
- **Output**: Task configuration, history, status

**FR-SNAP-011**: System shall allow users to update replication tasks
- **Input**: Task ID, updated configuration

**FR-SNAP-012**: System shall allow users to delete replication tasks
- **Input**: Task ID

**FR-SNAP-013**: System shall display replication status
- **Output**: Task status (idle, running, error), progress percentage

**FR-SNAP-014**: System shall allow users to run replication manually
- **Input**: Task ID
- **Action**: Trigger immediate replication

### 2.3 Replication Configuration
**FR-SNAP-015**: System shall allow users to configure the replication schedule
- **Input**: Schedule type (hourly, daily, weekly, monthly, custom cron)
- **Input**: Schedule time

**FR-SNAP-016**: System shall allow users to configure target settings
- **Input**: Target host, SSH port, target user, target dataset

**FR-SNAP-017**: System shall allow users to configure compression
- **Input**: Compression type (off, lz4, gzip, zstd)

**FR-SNAP-018**: System shall allow users to configure replication options
- **Input**: Recursive flag, auto-snapshot flag, encryption flag
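
Under the hood, ZFS replication of this kind is usually a `zfs send` piped into `zfs receive` over SSH. A minimal sketch of one replication run; the dataset names, host string, and use of a shell pipeline are illustrative:

```go
package replication

import (
	"context"
	"fmt"
	"os/exec"
)

// replicateOnce sends one snapshot to a remote host, e.g.
// srcSnap = "tank/data@2024-01-01", target = "backup@10.0.0.2",
// dstDataset = "backup/data". Inputs must be validated upstream,
// since they are interpolated into a shell pipeline; resume tokens
// and partial-stream handling are omitted for brevity.
func replicateOnce(ctx context.Context, srcSnap, target, dstDataset string, sshPort int) error {
	pipeline := fmt.Sprintf(
		"zfs send -R %s | ssh -p %d %s zfs receive -F %s",
		srcSnap, sshPort, target, dstDataset,
	)
	cmd := exec.CommandContext(ctx, "sh", "-c", pipeline)
	if out, err := cmd.CombinedOutput(); err != nil {
		return fmt.Errorf("replication failed: %w: %s", err, out)
	}
	return nil
}
```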

### 2.4 Restore Points
**FR-SNAP-019**: System shall display restore points
- **Output**: Available restore points from snapshots

**FR-SNAP-020**: System shall allow users to restore from a snapshot
- **Input**: Snapshot ID, restore target

## 3. User Interface Requirements

### 3.1 Snapshot & Replication Dashboard
- Statistics cards (total snapshots, last replication, next scheduled)
- Quick actions (create snapshot, view logs)

### 3.2 Tabs
- **Snapshots**: Snapshot list and management
- **Replication Tasks**: Replication task management
- **Restore Points**: Restore point management

### 3.3 Snapshot List
- Table view with columns (name, dataset, created, referenced, actions)
- Search and filter functionality
- Pagination
- Bulk actions (select multiple)

### 3.4 Replication Task Management
- Task list with status indicators
- Task creation wizard
- Task detail view with progress

### 3.5 Create Replication Modal
- Task name input
- Source dataset selection
- Target configuration (host, port, user, dataset)
- Schedule configuration
- Compression and options

## 4. API Endpoints

```
GET /api/v1/snapshots
GET /api/v1/snapshots/:id
POST /api/v1/snapshots
DELETE /api/v1/snapshots/:id
POST /api/v1/snapshots/:id/rollback
POST /api/v1/snapshots/:id/clone

GET /api/v1/replication/tasks
GET /api/v1/replication/tasks/:id
POST /api/v1/replication/tasks
PUT /api/v1/replication/tasks/:id
DELETE /api/v1/replication/tasks/:id
POST /api/v1/replication/tasks/:id/run
GET /api/v1/replication/tasks/:id/status

GET /api/v1/restore-points
POST /api/v1/restore-points/restore
```

## 5. Permissions
- **storage:read**: Required for viewing snapshots and replication tasks
- **storage:write**: Required for creating, updating, deleting, executing

## 6. Error Handling
- Invalid dataset
- Snapshot not found
- Replication target unreachable
- SSH authentication failure
- Replication task errors
- Insufficient permissions

167 docs/alpha/srs/SRS-08-System-Management.md Normal file
@@ -0,0 +1,167 @@

# SRS-08: System Management

## 1. Overview
The System Management module provides configuration and management of system services, network interfaces, time synchronization, and system administration features.

## 2. Functional Requirements

### 2.1 Network Interface Management
**FR-SYS-001**: System shall list network interfaces
- **Output**: Interface list with name, IP address, status, speed
- **Refresh**: Auto-refresh every 5 seconds

**FR-SYS-002**: System shall allow users to view interface details
- **Output**: Interface properties, IP configuration, statistics

**FR-SYS-003**: System shall allow users to update interface configuration
- **Input**: Interface name, IP address, subnet, gateway
- **Validation**: Valid IP configuration

**FR-SYS-004**: System shall display interface status
- **Output**: Connection status (Connected/Down), speed, role

### 2.2 Service Management
**FR-SYS-005**: System shall list system services
- **Output**: Service list with name, status, description
- **Refresh**: Auto-refresh every 5 seconds

**FR-SYS-006**: System shall allow users to view service status
- **Output**: Service status (active/inactive), enabled state

**FR-SYS-007**: System shall allow users to restart services
- **Input**: Service name
- **Action**: Restart service via systemd

**FR-SYS-008**: System shall allow users to start/stop services
- **Input**: Service name, action (start/stop)

**FR-SYS-009**: System shall display service logs
- **Input**: Service name
- **Output**: Recent service logs
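
Since the SRS specifies systemd as the service manager, FR-SYS-007 and FR-SYS-009 map naturally onto `systemctl` and `journalctl`. A minimal sketch; the unit-name validation shown is an assumption, but some guard like it is essential before shelling out:

```go
package system

import (
	"context"
	"fmt"
	"os/exec"
	"regexp"
)

// serviceName permits only plausible systemd unit names, guarding the shell-out.
var serviceName = regexp.MustCompile(`^[a-zA-Z0-9@._-]+$`)

// RestartService restarts a unit via systemd (FR-SYS-007).
func RestartService(ctx context.Context, name string) error {
	if !serviceName.MatchString(name) {
		return fmt.Errorf("invalid service name %q", name)
	}
	if out, err := exec.CommandContext(ctx, "systemctl", "restart", name).CombinedOutput(); err != nil {
		return fmt.Errorf("restart %s: %w: %s", name, err, out)
	}
	return nil
}

// ServiceLogs returns the most recent journal entries for a unit (FR-SYS-009).
func ServiceLogs(ctx context.Context, name string, lines int) (string, error) {
	if !serviceName.MatchString(name) {
		return "", fmt.Errorf("invalid service name %q", name)
	}
	out, err := exec.CommandContext(ctx, "journalctl", "-u", name,
		"-n", fmt.Sprint(lines), "--no-pager").CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("read logs for %s: %w", name, err)
	}
	return string(out), nil
}
```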

### 2.3 NTP Configuration
**FR-SYS-010**: System shall allow users to configure the timezone
- **Input**: Timezone string
- **Output**: Updated timezone

**FR-SYS-011**: System shall allow users to configure NTP servers
- **Input**: NTP server list
- **Output**: Updated NTP configuration

**FR-SYS-012**: System shall allow users to add NTP servers
- **Input**: NTP server address
- **Validation**: Valid NTP server address

**FR-SYS-013**: System shall allow users to remove NTP servers
- **Input**: NTP server address

**FR-SYS-014**: System shall display NTP server status
- **Output**: Server status, stratum, latency

### 2.4 SNMP Configuration
**FR-SYS-015**: System shall allow users to enable/disable SNMP
- **Input**: Enabled flag
- **Action**: Enable/disable SNMP service

**FR-SYS-016**: System shall allow users to configure the SNMP community string
- **Input**: Community string
- **Output**: Updated SNMP configuration

**FR-SYS-017**: System shall allow users to configure the SNMP trap receiver
- **Input**: Trap receiver IP address
- **Output**: Updated SNMP configuration

### 2.5 System Logs
**FR-SYS-018**: System shall allow users to view system logs
- **Output**: System log entries with timestamp, level, message
- **Filtering**: By level, time range, search

### 2.6 Terminal Console
**FR-SYS-019**: System shall provide terminal console access
- **Input**: Command string
- **Output**: Command output
- **Validation**: Allowed commands only (for security)

### 2.7 Feature License Management
**FR-SYS-020**: System shall display license status
- **Output**: License status (active/expired), expiration date, days remaining

**FR-SYS-021**: System shall display enabled features
- **Output**: Feature list with enabled/disabled status

**FR-SYS-022**: System shall allow users to update the license key
- **Input**: License key
- **Validation**: Valid license key format
- **Action**: Update and validate license

**FR-SYS-023**: System shall allow users to download license information
- **Output**: License information file

### 2.8 System Actions
**FR-SYS-024**: System shall allow users to reboot the system
- **Action**: System reboot (with confirmation)

**FR-SYS-025**: System shall allow users to shut down the system
- **Action**: System shutdown (with confirmation)

**FR-SYS-026**: System shall allow users to generate a support bundle
- **Output**: Support bundle archive

## 3. User Interface Requirements

### 3.1 System Configuration Dashboard
- Network interfaces card
- Service control card
- NTP configuration card
- Management & SNMP card
- Feature License card

### 3.2 Network Interface Management
- Interface list with status indicators
- Interface detail modal
- Edit interface modal

### 3.3 Service Control
- Service list with toggle switches
- Service status indicators
- Service log viewing

### 3.4 License Management
- License status display
- Enabled features list
- Update license key modal
- Download license info button

## 4. API Endpoints

```
GET /api/v1/system/interfaces
PUT /api/v1/system/interfaces/:name

GET /api/v1/system/services
GET /api/v1/system/services/:name
POST /api/v1/system/services/:name/restart
GET /api/v1/system/services/:name/logs

GET /api/v1/system/ntp
POST /api/v1/system/ntp

GET /api/v1/system/logs
GET /api/v1/system/network/throughput

POST /api/v1/system/execute
POST /api/v1/system/support-bundle
```

## 5. Permissions
- **system:read**: Required for viewing interfaces, services, logs
- **system:write**: Required for updating configuration, executing commands

## 6. Error Handling
- Invalid IP configuration
- Service not found
- Service restart failures
- Invalid NTP server
- License validation errors
- Insufficient permissions

127 docs/alpha/srs/SRS-09-Monitoring-Alerting.md Normal file
@@ -0,0 +1,127 @@

# SRS-09: Monitoring & Alerting

## 1. Overview
The Monitoring & Alerting module provides real-time system monitoring, metrics collection, alert management, and system health tracking.

## 2. Functional Requirements

### 2.1 System Metrics
**FR-MON-001**: System shall collect and display CPU metrics
- **Output**: CPU usage percentage, load average
- **Refresh**: Every 5 seconds

**FR-MON-002**: System shall collect and display memory metrics
- **Output**: Total memory, used memory, available memory, usage percentage
- **Refresh**: Every 5 seconds

**FR-MON-003**: System shall collect and display storage metrics
- **Output**: Total capacity, used capacity, available capacity, usage percentage
- **Refresh**: Every 5 seconds

**FR-MON-004**: System shall collect and display network throughput
- **Output**: Inbound/outbound throughput, historical data
- **Refresh**: Every 5 seconds

**FR-MON-005**: System shall display ZFS ARC statistics
- **Output**: ARC hit ratio, cache size, eviction statistics
- **Refresh**: Real-time

### 2.2 ZFS Health Monitoring
**FR-MON-006**: System shall display ZFS pool health
- **Output**: Pool status, health indicators, errors

**FR-MON-007**: System shall display ZFS dataset health
- **Output**: Dataset status, quota usage, compression ratio

### 2.3 System Logs
**FR-MON-008**: System shall display system logs
- **Output**: Log entries with timestamp, level, source, message
- **Filtering**: By level, time range, search
- **Refresh**: Every 10 minutes

**FR-MON-009**: System shall allow users to search logs
- **Input**: Search query
- **Output**: Filtered log entries

### 2.4 Active Jobs
**FR-MON-010**: System shall display active jobs
- **Output**: Job list with type, status, progress, start time

**FR-MON-011**: System shall allow users to view job details
- **Output**: Job configuration, progress, logs

### 2.5 Alert Management
**FR-MON-012**: System shall display active alerts
- **Output**: Alert list with severity, source, message, timestamp

**FR-MON-013**: System shall allow users to acknowledge alerts
- **Input**: Alert ID
- **Action**: Mark alert as acknowledged

**FR-MON-014**: System shall allow users to resolve alerts
- **Input**: Alert ID
- **Action**: Mark alert as resolved

**FR-MON-015**: System shall display alert history
- **Output**: Historical alerts with status, resolution

**FR-MON-016**: System shall allow users to configure alert rules
- **Input**: Rule name, condition, severity, enabled flag
- **Output**: Created alert rule

**FR-MON-017**: System shall evaluate alert rules
- **Action**: Automatic evaluation based on metrics
- **Output**: Generated alerts when conditions are met
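
A threshold-style evaluator is the simplest reading of FR-MON-016/017. A minimal sketch; the rule shape and operator set are illustrative, not the system's actual schema:

```go
package monitoring

import "fmt"

// AlertRule is an illustrative threshold rule: fire when the named metric
// compares true against Threshold using Op (">" or "<").
type AlertRule struct {
	Name      string
	Metric    string // e.g. "cpu_usage_percent"
	Op        string
	Threshold float64
	Severity  string
	Enabled   bool
}

// Evaluate checks every enabled rule against the latest metric samples and
// returns human-readable alert messages for rules whose condition holds.
func Evaluate(rules []AlertRule, metrics map[string]float64) []string {
	var alerts []string
	for _, r := range rules {
		v, ok := metrics[r.Metric]
		if !r.Enabled || !ok {
			continue
		}
		fired := (r.Op == ">" && v > r.Threshold) || (r.Op == "<" && v < r.Threshold)
		if fired {
			alerts = append(alerts, fmt.Sprintf("[%s] %s: %s %s %.2f (current %.2f)",
				r.Severity, r.Name, r.Metric, r.Op, r.Threshold, v))
		}
	}
	return alerts
}
```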

### 2.6 Health Checks
**FR-MON-018**: System shall perform health checks
- **Output**: Overall system health status (healthy/degraded/unhealthy)

**FR-MON-019**: System shall display health check details
- **Output**: Component health status, issues, recommendations

## 3. User Interface Requirements

### 3.1 Monitoring Dashboard
- Metrics cards (CPU, Memory, Storage, Network)
- Real-time charts (Network Throughput, ZFS ARC Hit Ratio)
- System health indicators

### 3.2 Tabs
- **Active Jobs**: Running jobs list
- **System Logs**: Log viewer with filtering
- **Alerts History**: Alert list and management

### 3.3 Alert Management
- Alert list with severity indicators
- Alert detail view
- Alert acknowledgment and resolution

## 4. API Endpoints

```
GET /api/v1/monitoring/metrics
GET /api/v1/monitoring/health
GET /api/v1/monitoring/alerts
GET /api/v1/monitoring/alerts/:id
POST /api/v1/monitoring/alerts/:id/acknowledge
POST /api/v1/monitoring/alerts/:id/resolve
GET /api/v1/monitoring/rules
POST /api/v1/monitoring/rules
PUT /api/v1/monitoring/rules/:id
DELETE /api/v1/monitoring/rules/:id

GET /api/v1/system/logs
GET /api/v1/system/network/throughput
```

## 5. Permissions
- **monitoring:read**: Required for viewing metrics, alerts, logs
- **monitoring:write**: Required for acknowledging/resolving alerts, configuring rules

## 6. Error Handling
- Metrics collection failures
- Alert rule evaluation errors
- Log access errors
- Insufficient permissions

191 docs/alpha/srs/SRS-10-IAM.md Normal file
@@ -0,0 +1,191 @@

# SRS-10: Identity & Access Management

## 1. Overview
The Identity & Access Management (IAM) module provides user account management, role-based access control (RBAC), permission management, and group management.

## 2. Functional Requirements

### 2.1 User Management
**FR-IAM-001**: System shall allow admins to create users
- **Input**: Username, email, password, roles
- **Output**: Created user with ID
- **Validation**: Username uniqueness, valid email, strong password

**FR-IAM-002**: System shall allow admins to list users
- **Output**: User list with username, email, roles, status
- **Filtering**: By role, status, search

**FR-IAM-003**: System shall allow admins to view user details
- **Output**: User properties, roles, groups, permissions

**FR-IAM-004**: System shall allow admins to update users
- **Input**: User ID, updated properties
- **Validation**: Valid updated values

**FR-IAM-005**: System shall allow admins to delete users
- **Input**: User ID
- **Validation**: Cannot delete own account

**FR-IAM-006**: System shall allow users to view their own profile
- **Output**: Own user properties, roles, permissions

**FR-IAM-007**: System shall allow users to update their own profile
- **Input**: Updated profile properties (email, password)
- **Validation**: Valid updated values

### 2.2 Role Management
**FR-IAM-008**: System shall allow admins to create roles
- **Input**: Role name, description, permissions
- **Output**: Created role with ID
- **Validation**: Role name uniqueness

**FR-IAM-009**: System shall allow admins to list roles
- **Output**: Role list with name, description, permission count

**FR-IAM-010**: System shall allow admins to view role details
- **Output**: Role properties, assigned permissions, users with role

**FR-IAM-011**: System shall allow admins to update roles
- **Input**: Role ID, updated properties

**FR-IAM-012**: System shall allow admins to delete roles
- **Input**: Role ID
- **Validation**: Role not assigned to users

**FR-IAM-013**: System shall allow admins to assign permissions to roles
- **Input**: Role ID, permission ID
- **Action**: Add permission to role

**FR-IAM-014**: System shall allow admins to remove permissions from roles
- **Input**: Role ID, permission ID
- **Action**: Remove permission from role

### 2.3 Permission Management
**FR-IAM-015**: System shall list available permissions
- **Output**: Permission list with resource, action, description

**FR-IAM-016**: System shall display permission details
- **Output**: Permission properties, roles with permission

### 2.4 Group Management
**FR-IAM-017**: System shall allow admins to create groups
- **Input**: Group name, description
- **Output**: Created group with ID

**FR-IAM-018**: System shall allow admins to list groups
- **Output**: Group list with name, description, member count

**FR-IAM-019**: System shall allow admins to view group details
- **Output**: Group properties, members, roles

**FR-IAM-020**: System shall allow admins to update groups
- **Input**: Group ID, updated properties

**FR-IAM-021**: System shall allow admins to delete groups
- **Input**: Group ID

**FR-IAM-022**: System shall allow admins to add users to groups
- **Input**: Group ID, user ID
- **Action**: Add user to group

**FR-IAM-023**: System shall allow admins to remove users from groups
- **Input**: Group ID, user ID
- **Action**: Remove user from group

### 2.5 User-Role Assignment
**FR-IAM-024**: System shall allow admins to assign roles to users
- **Input**: User ID, role ID
- **Action**: Assign role to user

**FR-IAM-025**: System shall allow admins to remove roles from users
- **Input**: User ID, role ID
- **Action**: Remove role from user

### 2.6 Authentication
**FR-IAM-026**: System shall authenticate users
- **Input**: Username, password
- **Output**: JWT token on success
- **Validation**: Valid credentials

**FR-IAM-027**: System shall manage user sessions
- **Output**: Current user information, session expiration

**FR-IAM-028**: System shall allow users to log out
- **Action**: Invalidate session token
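
Since FR-IAM-026 specifies a JWT on successful login, a minimal token-issuing sketch using the `github.com/golang-jwt/jwt/v5` library; the library choice, claim names, and 24-hour expiry are assumptions, not SRS mandates:

```go
package iam

import (
	"time"

	"github.com/golang-jwt/jwt/v5"
)

// IssueToken signs a JWT carrying the user's identity and permissions.
// The claim names and expiry below are illustrative.
func IssueToken(secret []byte, userID string, permissions []string) (string, error) {
	claims := jwt.MapClaims{
		"sub":         userID,
		"permissions": permissions,
		"iat":         time.Now().Unix(),
		"exp":         time.Now().Add(24 * time.Hour).Unix(),
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	return token.SignedString(secret)
}
```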

## 3. User Interface Requirements

### 3.1 IAM Dashboard
- User management tab
- Role management tab
- Group management tab
- Permission overview

### 3.2 User Management
- User list with filtering
- User creation modal
- User detail view
- User edit form

### 3.3 Role Management
- Role list with permission count
- Role creation modal
- Role detail view with permission assignment
- Role edit form

### 3.4 Group Management
- Group list with member count
- Group creation modal
- Group detail view with member management
- Group edit form

## 4. API Endpoints

```
GET /api/v1/iam/users
GET /api/v1/iam/users/:id
POST /api/v1/iam/users
PUT /api/v1/iam/users/:id
DELETE /api/v1/iam/users/:id

POST /api/v1/iam/users/:id/roles
DELETE /api/v1/iam/users/:id/roles
POST /api/v1/iam/users/:id/groups
DELETE /api/v1/iam/users/:id/groups

GET /api/v1/iam/roles
GET /api/v1/iam/roles/:id
POST /api/v1/iam/roles
PUT /api/v1/iam/roles/:id
DELETE /api/v1/iam/roles/:id

GET /api/v1/iam/roles/:id/permissions
POST /api/v1/iam/roles/:id/permissions
DELETE /api/v1/iam/roles/:id/permissions

GET /api/v1/iam/permissions

GET /api/v1/iam/groups
GET /api/v1/iam/groups/:id
POST /api/v1/iam/groups
PUT /api/v1/iam/groups/:id
DELETE /api/v1/iam/groups/:id

POST /api/v1/iam/groups/:id/users
DELETE /api/v1/iam/groups/:id/users/:user_id
```

## 5. Permissions
- **iam:read**: Required for viewing users, roles, groups
- **iam:write**: Required for creating, updating, deleting
- **admin role**: Required for all IAM operations
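
The `resource:action` permission strings used throughout these SRS documents lend themselves to a small route middleware. A sketch for a Gin-based API; the `"permissions"` context key and the 403 payload are assumptions about the app's conventions:

```go
package iam

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// RequirePermission gates a route on one permission string such as
// "iam:write". It assumes an earlier auth middleware parsed the JWT and
// stored the user's permission strings in the context under "permissions".
func RequirePermission(perm string) gin.HandlerFunc {
	return func(c *gin.Context) {
		val, exists := c.Get("permissions")
		perms, ok := val.([]string)
		if exists && ok {
			for _, p := range perms {
				if p == perm {
					c.Next()
					return
				}
			}
		}
		c.AbortWithStatusJSON(http.StatusForbidden, gin.H{"error": "insufficient permissions"})
	}
}
```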

## 6. Error Handling
- Username already exists
- Invalid email format
- Weak password
- Role not found
- Permission denied
- Cannot delete own account
- Insufficient permissions

179 docs/alpha/srs/SRS-11-User-Interface.md Normal file
@@ -0,0 +1,179 @@

# SRS-11: User Interface & Experience

## 1. Overview
The User Interface & Experience module defines the requirements for the web-based user interface, navigation, responsiveness, and user experience.

## 2. Functional Requirements

### 2.1 Layout & Navigation
**FR-UI-001**: System shall provide a consistent layout structure
- **Components**: Header, sidebar navigation, main content area, footer
- **Responsive**: Adapt to different screen sizes

**FR-UI-002**: System shall provide sidebar navigation
- **Features**: Collapsible sidebar, active route highlighting, icon-based navigation
- **Items**: Dashboard, Storage, Object Storage, Shares, Snapshots, Tape, iSCSI, Backup, Terminal, Monitoring, Alerts, System, IAM

**FR-UI-003**: System shall provide breadcrumb navigation
- **Features**: Hierarchical navigation path, clickable breadcrumbs

**FR-UI-004**: System shall provide a user profile menu
- **Features**: User info, logout option, profile link

### 2.2 Authentication UI
**FR-UI-005**: System shall provide a login page
- **Components**: Username input, password input, login button, error messages
- **Validation**: Real-time validation feedback

**FR-UI-006**: System shall handle authentication errors
- **Display**: Clear error messages for invalid credentials

**FR-UI-007**: System shall redirect authenticated users
- **Action**: Redirect to dashboard if already logged in

### 2.3 Dashboard
**FR-UI-008**: System shall provide a system overview dashboard
- **Components**: System status, metrics cards, recent activity, quick actions
- **Refresh**: Auto-refresh metrics

**FR-UI-009**: System shall display system health indicators
- **Components**: Health status badge, component status indicators

### 2.4 Data Display
**FR-UI-010**: System shall provide table views
- **Features**: Sorting, filtering, pagination, search
- **Responsive**: Mobile-friendly table layout

**FR-UI-011**: System shall provide card-based layouts
- **Features**: Status indicators, quick actions, hover effects

**FR-UI-012**: System shall provide master-detail views
- **Features**: List on left, details on right, selection highlighting

### 2.5 Forms & Modals
**FR-UI-013**: System shall provide form inputs
- **Types**: Text, number, select, checkbox, radio, textarea, file
- **Validation**: Real-time validation, error messages

**FR-UI-014**: System shall provide modal dialogs
- **Features**: Overlay, close button, form submission, loading states

**FR-UI-015**: System shall provide confirmation dialogs
- **Features**: Warning messages, confirm/cancel actions

### 2.6 Feedback & Notifications
**FR-UI-016**: System shall provide loading states
- **Components**: Spinners, skeleton loaders, progress indicators

**FR-UI-017**: System shall provide success notifications
- **Display**: Toast notifications, inline success messages

**FR-UI-018**: System shall provide error notifications
- **Display**: Toast notifications, inline error messages, error pages

**FR-UI-019**: System shall provide warning notifications
- **Display**: Warning dialogs, warning badges

### 2.7 Charts & Visualizations
**FR-UI-020**: System shall provide metric charts
- **Types**: Line charts, bar charts, pie charts, gauge charts
- **Libraries**: Recharts integration

**FR-UI-021**: System shall provide real-time chart updates
- **Refresh**: Auto-refresh chart data

### 2.8 Responsive Design
**FR-UI-022**: System shall be responsive
- **Breakpoints**: Mobile (< 640px), Tablet (640px - 1024px), Desktop (> 1024px)
- **Adaptation**: Layout adjustments, menu collapse, touch-friendly controls

**FR-UI-023**: System shall support a dark theme
- **Features**: Dark color scheme, theme persistence

### 2.9 Accessibility
**FR-UI-024**: System shall support keyboard navigation
- **Features**: Tab navigation, keyboard shortcuts, focus indicators

**FR-UI-025**: System shall provide ARIA labels
- **Features**: Screen reader support, semantic HTML

## 3. Design Requirements

### 3.1 Color Scheme
- **Primary**: #137fec (Blue)
- **Background Dark**: #101922
- **Surface Dark**: #18232e
- **Border Dark**: #2a3b4d
- **Text Primary**: White
- **Text Secondary**: #92adc9
- **Success**: Green (#10b981)
- **Warning**: Yellow (#f59e0b)
- **Error**: Red (#ef4444)

### 3.2 Typography
- **Font Family**: Manrope (Display), System fonts (Body)
- **Headings**: Bold, various sizes
- **Body**: Regular, readable sizes

### 3.3 Spacing
- **Consistent**: 4px base unit
- **Padding**: 16px, 24px, 32px
- **Gap**: 8px, 16px, 24px, 32px

### 3.4 Components
- **Buttons**: Primary, secondary, outline, danger variants
- **Cards**: Rounded corners, borders, shadows
- **Inputs**: Rounded, bordered, focus states
- **Badges**: Small, colored, with icons

## 4. User Experience Requirements

### 4.1 Performance
- **Page Load**: < 2 seconds initial load
- **Navigation**: < 100ms route transitions
- **API Calls**: Loading states during requests

### 4.2 Usability
- **Intuitive**: Clear navigation, obvious actions
- **Consistent**: Consistent patterns across pages
- **Feedback**: Immediate feedback for user actions
- **Error Handling**: Clear error messages and recovery options

### 4.3 Discoverability
- **Help**: Tooltips, help text, documentation links
- **Search**: Global search functionality (future)
- **Guides**: Onboarding flow (future)

## 5. Technology Stack

### 5.1 Frontend Framework
- React 18 with TypeScript
- Vite for build tooling
- React Router for navigation

### 5.2 Styling
- TailwindCSS for utility-first styling
- Custom CSS for specific components
- Dark theme support

### 5.3 State Management
- Zustand for global state
- TanStack Query for server state
- React hooks for local state

### 5.4 UI Libraries
- Lucide React for icons
- Recharts for charts
- Custom components

## 6. Browser Support
- Chrome/Edge: Latest 2 versions
- Firefox: Latest 2 versions
- Safari: Latest 2 versions

## 7. Error Handling
- Network errors: Retry mechanism, error messages
- Validation errors: Inline error messages
- Server errors: Error pages, error notifications
- 404 errors: Not found page

117 docs/on-progress/WEBSOCKET-PROXY-CONFIG.md Normal file
@@ -0,0 +1,117 @@

# WebSocket Proxy Configuration

For the terminal console WebSocket to work correctly, the reverse proxy (Nginx/Apache) must be configured to support WebSocket upgrades.

## Nginx Configuration

Add the following Nginx configuration to support WebSocket. Note that the `map` directive is only valid at the `http` level (e.g., in `nginx.conf` or a `conf.d` include), not inside a `server` block:

```nginx
# WebSocket upgrade headers (must be at http level, not inside server)
map $http_upgrade $connection_upgrade {
    default upgrade;
    ''      close;
}

server {
    listen 80;
    listen [::]:80;
    server_name atlas-demo.avt.data-center.id;

    location /api/v1/system/terminal/ws {
        proxy_pass http://localhost:8080;
        proxy_http_version 1.1;

        # WebSocket headers
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;

        # WebSocket timeouts
        proxy_read_timeout 86400s;
        proxy_send_timeout 86400s;
    }

    location /api {
        proxy_pass http://localhost:8080;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    location / {
        proxy_pass http://localhost:3000;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
```
## Apache Configuration (mod_proxy_wstunnel)
|
||||
|
||||
Jika menggunakan Apache, pastikan mod_proxy_wstunnel diaktifkan:
|
||||
|
||||
```apache
|
||||
LoadModule proxy_module modules/mod_proxy.so
|
||||
LoadModule proxy_http_module modules/mod_proxy_http.so
|
||||
LoadModule proxy_wstunnel_module modules/mod_proxy_wstunnel.so
|
||||
|
||||
<VirtualHost *:80>
|
||||
ServerName atlas-demo.avt.data-center.id
|
||||
|
||||
# WebSocket endpoint
|
||||
ProxyPass /api/v1/system/terminal/ws ws://localhost:8080/api/v1/system/terminal/ws
|
||||
ProxyPassReverse /api/v1/system/terminal/ws ws://localhost:8080/api/v1/system/terminal/ws
|
||||
|
||||
# Regular API
|
||||
ProxyPass /api http://localhost:8080/api
|
||||
ProxyPassReverse /api http://localhost:8080/api
|
||||
|
||||
# Frontend
|
||||
ProxyPass / http://localhost:3000/
|
||||
ProxyPassReverse / http://localhost:3000/
|
||||
</VirtualHost>
|
||||
```
|
||||
|
||||
## Testing WebSocket Connection

After configuring the proxy, test with:

```bash
# Test WebSocket connection (quote the URL so the shell does not glob the "?")
wscat -c "wss://atlas-demo.avt.data-center.id/api/v1/system/terminal/ws?token=YOUR_TOKEN"
```

or with curl:

```bash
curl -i -N \
  -H "Connection: Upgrade" \
  -H "Upgrade: websocket" \
  -H "Sec-WebSocket-Version: 13" \
  -H "Sec-WebSocket-Key: test" \
  "http://localhost:8080/api/v1/system/terminal/ws?token=YOUR_TOKEN"
```
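From the browser side, the same path can be exercised with a throwaway snippet in the devtools console; `YOUR_TOKEN` is a placeholder for a valid session token:

```ts
// Throwaway browser-console check for the proxied WebSocket path.
const token = 'YOUR_TOKEN'
const ws = new WebSocket(`wss://${location.host}/api/v1/system/terminal/ws?token=${token}`)
ws.onopen = () => console.log('WebSocket upgrade succeeded')
ws.onmessage = (event) => console.log('message:', event.data)
ws.onclose = (event) => console.log(`closed: code=${event.code} reason=${event.reason}`)
ws.onerror = () => console.log('WebSocket error (check the proxy and backend logs)')
```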
## Troubleshooting

1. **Error: WebSocket connection failed**
   - Make sure the reverse proxy is configured correctly
   - Check the backend logs for error details
   - Make sure port 8080 is accessible

2. **Connection closed immediately**
   - Check WriteTimeout in the server config (it must be 0 for WebSocket)
   - Check the proxy timeouts (they must be long enough)

3. **401 Unauthorized**
   - Make sure the token is valid and not expired
   - Check the authentication middleware
354 docs/on-progress/bacula-vtl-troubleshooting.md (Normal file)
@@ -0,0 +1,354 @@
# Bacula VTL Integration - Root Cause Analysis & Troubleshooting

## Issue Summary
The Bacula Storage Daemon was unable to read slots from the mhVTL (Virtual Tape Library) autochanger devices, reporting "Device has 0 slots" even though the mtx-changer script worked correctly when called manually.

## Environment
- **OS**: Ubuntu Linux
- **Bacula Version**: 13.0.4
- **VTL**: mhVTL (Virtual Tape Library)
- **Autochangers**:
  - Quantum Scalar i500 (4 drives, 43 slots)
  - Quantum Scalar i40 (4 drives, 44 slots)
- **Tape Drives**: 8x QUANTUM ULTRIUM-HH8 (LTO-8)

## Root Cause Analysis

### Primary Issues Identified

#### 1. Incorrect Tape Device Type
**Problem**: Using rewinding tape devices (`/dev/st*`) instead of non-rewinding devices (`/dev/nst*`)

**Impact**: The tape would rewind after each operation, causing data loss and operational failures

**Solution**: Changed all Archive Device directives from `/dev/st*` to `/dev/nst*`

```diff
Device {
  Name = Drive-0
- Archive Device = /dev/st0
+ Archive Device = /dev/nst0
}
```

#### 2. Missing Drive Index Parameter
**Problem**: Device configurations lacked the Drive Index parameter

**Impact**: Bacula couldn't properly identify which physical drive in the autochanger to use

**Solution**: Added Drive Index (0-3) to each Device resource

```diff
Device {
  Name = Drive-0
+ Drive Index = 0
  Archive Device = /dev/nst0
}
```

#### 3. Incorrect AlwaysOpen Setting
**Problem**: AlwaysOpen was set to `no`

**Impact**: The device wouldn't remain open, causing connection issues with the VTL

**Solution**: Changed AlwaysOpen to `yes` for all tape devices

```diff
Device {
  Name = Drive-0
- AlwaysOpen = no
+ AlwaysOpen = yes
}
```

#### 4. Wrong Changer Device Path
**Problem**: Using `/dev/sch*` (medium changer device) instead of `/dev/sg*` (generic SCSI device)

**Impact**: The bacula user couldn't access the changer due to permission issues (cdrom group vs tape group)

**Solution**: Changed Changer Device to use sg devices

```diff
Autochanger {
  Name = Scalar-i500
- Changer Device = /dev/sch0
+ Changer Device = /dev/sg7
}
```

**Device Mapping**:
- `/dev/sch0` → `/dev/sg7` (Scalar i500)
- `/dev/sch1` → `/dev/sg8` (Scalar i40)

#### 5. Missing User Permissions
**Problem**: The bacula user was not in the groups required for device access

**Impact**: "Permission denied" errors when accessing tape and changer devices

**Solution**: Added the bacula user to the tape and cdrom groups

```bash
usermod -a -G tape,cdrom bacula
systemctl restart bacula-sd
```

#### 6. Incorrect Storage Resource Configuration
**Problem**: The Storage resource in the Director config referenced the autochanger name instead of the individual drives

**Impact**: Bacula couldn't properly communicate with individual tape drives

**Solution**: Listed all drives explicitly in the Storage resource

```diff
Storage {
  Name = Scalar-i500
- Device = Scalar-i500
+ Device = Drive-0
+ Device = Drive-1
+ Device = Drive-2
+ Device = Drive-3
  Autochanger = Scalar-i500
}
```

#### 7. mtx-changer List Output Format
**Problem**: The script's output format didn't match Bacula's expected format

**Impact**: "Invalid Slot number" errors, preventing volume labeling

**Original Output**: `1 Full:VolumeTag=E01001L8`
**Expected Output**: `1:E01001L8`

**Solution**: Fixed the sed pattern in the list command

```bash
# Original (incorrect)
list)
  ${MTX} -f $ctl status | grep "Storage Element" | grep "Full" | awk '{print $3 $4}' | sed 's/:/ /'
  ;;

# Fixed
list)
  ${MTX} -f $ctl status | grep "Storage Element" | grep "Full" | awk '{print $3 $4}' | sed 's/:Full:VolumeTag=/:/'
  ;;
```
## Troubleshooting Steps

### Step 1: Verify mtx-changer Script Works Manually
```bash
# Test slots command
/usr/lib/bacula/scripts/mtx-changer /dev/sg7 slots
# Expected output: 43

# Test list command
/usr/lib/bacula/scripts/mtx-changer /dev/sg7 list
# Expected output: 1:E01001L8, 2:E01002L8, etc.
```

### Step 2: Test as bacula User
```bash
# Test if the bacula user can access the devices
su -s /bin/bash bacula -c "/usr/lib/bacula/scripts/mtx-changer /dev/sg7 slots"

# If permission denied, check groups
groups bacula
# Should include: bacula tape cdrom
```

### Step 3: Verify Device Permissions
```bash
# Check changer devices
ls -l /dev/sch* /dev/sg7 /dev/sg8
# sg devices should be in the tape group

# Check tape devices
ls -l /dev/nst*
# Should be in the tape group with rw permissions
```

### Step 4: Test Bacula Storage Daemon Connection
```bash
# From bconsole
echo "status storage=Scalar-i500" | bconsole

# Should show the autochanger and drives
```

### Step 5: Update Slots
```bash
echo -e "update slots storage=Scalar-i500\n0\n" | bconsole

# Should show: Device "Drive-0" has 43 slots
# NOT: Device has 0 slots
```

### Step 6: Label Tapes
```bash
echo -e "label barcodes storage=Scalar-i500 pool=Default\n0\nyes\n" | bconsole

# Should successfully label tapes using barcodes
```
## Configuration Files

### /etc/bacula/bacula-sd.conf (Storage Daemon)
```
Autochanger {
  Name = Scalar-i500
  Device = Drive-0, Drive-1, Drive-2, Drive-3
  Changer Command = "/usr/lib/bacula/scripts/mtx-changer %c %o %S %a %d"
  Changer Device = /dev/sg7
}

Device {
  Name = Drive-0
  Drive Index = 0
  Changer Device = /dev/sg7
  Media Type = LTO-8
  Archive Device = /dev/nst0
  AutomaticMount = yes
  AlwaysOpen = yes
  RemovableMedia = yes
  RandomAccess = no
  AutoChanger = yes
  Maximum Concurrent Jobs = 1
}
```

### /etc/bacula/bacula-dir.conf (Director)
```
Storage {
  Name = Scalar-i500
  Address = localhost
  SDPort = 9103
  Password = "QJQPnZ5Q5p6D73RcvR7ksrOm9UG3mAhvV"
  Device = Drive-0
  Device = Drive-1
  Device = Drive-2
  Device = Drive-3
  Media Type = LTO-8
  Autochanger = Scalar-i500
  Maximum Concurrent Jobs = 4
}
```

### /usr/lib/bacula/scripts/mtx-changer
```bash
#!/bin/sh
MTX=/usr/sbin/mtx

ctl=$1
cmd="$2"
slot=$3
device=$4
drive=$5

case "$cmd" in
  loaded)
    ${MTX} -f $ctl status | grep "Data Transfer Element $slot:Full" >/dev/null 2>&1
    if [ $? -eq 0 ]; then
      ${MTX} -f $ctl status | grep "Data Transfer Element $slot:Full" | awk '{print $7}' | sed 's/.*=//'
    else
      echo "0"
    fi
    ;;

  load)
    ${MTX} -f $ctl load $slot $drive
    ;;

  unload)
    ${MTX} -f $ctl unload $slot $drive
    ;;

  list)
    ${MTX} -f $ctl status | grep "Storage Element" | grep "Full" | awk '{print $3 $4}' | sed 's/:Full:VolumeTag=/:/'
    ;;

  slots)
    ${MTX} -f $ctl status | grep "Storage Changer" | awk '{print $5}'
    ;;

  *)
    echo "Invalid command: $cmd"
    exit 1
    ;;
esac

exit 0
```
## Verification Commands

### Check Device Mapping
```bash
lsscsi -g | grep -E "mediumx|tape"
```

### Check VTL Services
```bash
systemctl list-units 'vtl*'
```

### Test Manual Tape Load
```bash
# Load tape into drive
mtx -f /dev/sg7 load 1 0

# Check drive status
mt -f /dev/nst0 status

# Unload tape
mtx -f /dev/sg7 unload 1 0
```

### List Labeled Volumes
```bash
echo "list volumes pool=Default" | bconsole
```
## Common Errors and Solutions

### Error: "Device has 0 slots"
**Cause**: Wrong changer device or permission issues
**Solution**: Use /dev/sg* devices and verify the bacula user is in the tape/cdrom groups

### Error: "Permission denied" accessing /dev/sch0
**Cause**: bacula user not in the cdrom group
**Solution**: `usermod -a -G cdrom bacula && systemctl restart bacula-sd`

### Error: "Invalid Slot number"
**Cause**: mtx-changer list output format incorrect
**Solution**: Fix the sed pattern to output the `slot:volumetag` format

### Error: "No medium found" after successful load
**Cause**: Using rewinding devices (/dev/st*) or AlwaysOpen=no
**Solution**: Use /dev/nst* and set AlwaysOpen=yes

### Error: "READ ELEMENT STATUS Command Failed"
**Cause**: Permission issue or VTL service problem
**Solution**: Check user permissions and restart the vtllibrary service
## Results

### Scalar i500 (WORKING)
- ✅ 43 slots detected
- ✅ 20 tapes successfully labeled (E01001L8 - E01020L8)
- ✅ Autochanger operations functional
- ✅ Ready for backup jobs

### Scalar i40 (ISSUE)
- ⚠️ 44 slots detected
- ❌ Hardware Error during tape load operations
- ❌ 0 tapes labeled
- **Status**: Requires mhVTL configuration investigation or a system restart

## References
- Bacula Documentation: https://www.bacula.org/
- Article: "Using Bacula with mhVTL" - https://karellen.blogspot.com/2012/02/using-bacula-with-mhvtl.html
- mhVTL Project: https://github.com/markh794/mhvtl

## Date
Created: 2025-12-31
Author: Warp AI Agent
344 docs/on-progress/healthcheck-script.md (Normal file)
@@ -0,0 +1,344 @@
# Calypso Appliance Health Check Script

## Overview
Comprehensive health check script for all Calypso Appliance components. It performs automated checks across system resources, services, network, storage, and backup infrastructure.

## Installation
Script location: `/usr/local/bin/calypso-healthcheck`

## Usage

### Basic Usage
```bash
# Run health check (requires root)
calypso-healthcheck

# Run and save to a specific location
calypso-healthcheck 2>&1 | tee /root/healthcheck-$(date +%Y%m%d).log
```

### Exit Codes
- `0` - All checks passed (100% healthy)
- `1` - Healthy with warnings (some non-critical issues)
- `2` - Degraded (80%+ of checks passed, some failures)
- `3` - Critical (less than 80% of checks passed)
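For callers that need to branch on these codes programmatically, here is a minimal sketch using Node's child_process module (running Node on the appliance is an assumption, not something the script requires):

```ts
import { execFile } from 'node:child_process'

// Map the documented exit codes to status labels.
const labels: Record<number, string> = {
  0: 'healthy',
  1: 'healthy-with-warnings',
  2: 'degraded',
  3: 'critical',
}

execFile('/usr/local/bin/calypso-healthcheck', (error, stdout) => {
  // For a non-zero exit, Node reports the exit code on error.code.
  const code = error && typeof error.code === 'number' ? error.code : 0
  console.log(stdout)
  console.log(`status: ${labels[code] ?? 'unknown'} (exit code ${code})`)
})
```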
### Automated Checks

#### System Resources (4 checks)
- Root filesystem usage (threshold: 80%)
- /var filesystem usage (threshold: 80%)
- Memory usage (threshold: 90%)
- CPU load average

#### Database Services (2 checks)
- PostgreSQL service status
- Database presence (calypso, bacula)

#### Calypso Application (7 checks)
- calypso-api service
- calypso-frontend service
- calypso-logger service
- API port 8443
- Frontend port 3000
- API health endpoint
- Frontend health endpoint

#### Backup Services - Bacula (8 checks)
- bacula-director service
- bacula-fd service
- bacula-sd service
- Director bconsole connectivity
- Storage (Scalar-i500) accessibility
- Director port 9101
- FD port 9102
- SD port 9103

#### Virtual Tape Library - mhVTL (5 checks)
- mhvtl.target status
- vtllibrary@10 (Scalar i500)
- vtllibrary@30 (Scalar i40)
- VTL device count (2 changers, 8 tape drives)
- Scalar i500 slots detection

#### Storage Protocols (9 checks)
- NFS server service
- Samba (smbd) service
- NetBIOS (nmbd) service
- SCST service
- iSCSI target service
- NFS port 2049
- SMB port 445
- NetBIOS port 139
- iSCSI port 3260

#### Monitoring & Management (2 checks)
- SNMP daemon
- SNMP port 161

#### Network Connectivity (2 checks)
- Internet connectivity (ping 8.8.8.8)
- Network manager status

**Total: 39+ automated checks**
## Output Format

### Console Output
- Color-coded status indicators:
  - ✓ Green = Passed
  - ⚠ Yellow = Warning
  - ✗ Red = Failed

### Example Output
```
==========================================
  CALYPSO APPLIANCE HEALTH CHECK
==========================================
Date: 2025-12-31 01:46:27
Hostname: calypso
Uptime: up 6 days, 2 hours, 50 minutes
Log file: /var/log/calypso-healthcheck-20251231-014627.log

========================================
  SYSTEM RESOURCES
========================================
✓ Root filesystem (18% used)
✓ Var filesystem (18% used)
✓ Memory usage (49% used, 8206MB available)
✓ CPU load average (2.18, 8 cores)

...

========================================
  HEALTH CHECK SUMMARY
========================================

Total Checks: 39
Passed: 35
Warnings: 0
Failed: 4

⚠ OVERALL STATUS: DEGRADED (89%)
```

### Log Files
All checks are logged to: `/var/log/calypso-healthcheck-YYYYMMDD-HHMMSS.log`

Logs include:
- Timestamp and system information
- Detailed check results
- Summary statistics
- Overall health status
## Scheduling

### Manual Execution
```bash
# Run on demand
sudo calypso-healthcheck
```

### Cron Job (Recommended)
Add to crontab for automated checks:

```bash
# Daily health check at 2 AM
0 2 * * * /usr/local/bin/calypso-healthcheck > /dev/null 2>&1

# Weekly health check on Monday at 6 AM with email notification
0 6 * * 1 /usr/local/bin/calypso-healthcheck 2>&1 | mail -s "Calypso Health Check" admin@example.com
```

### Systemd Timer (Alternative)
Create `/etc/systemd/system/calypso-healthcheck.timer`:
```ini
[Unit]
Description=Daily Calypso Health Check
Requires=calypso-healthcheck.service

[Timer]
OnCalendar=daily
Persistent=true

[Install]
WantedBy=timers.target
```

Create `/etc/systemd/system/calypso-healthcheck.service`:
```ini
[Unit]
Description=Calypso Appliance Health Check

[Service]
Type=oneshot
ExecStart=/usr/local/bin/calypso-healthcheck
```

Enable:
```bash
systemctl enable --now calypso-healthcheck.timer
```
## Troubleshooting

### Common Failures

#### API/Frontend Health Endpoints Failing
```bash
# Check if services are running
systemctl status calypso-api calypso-frontend

# Check service logs
journalctl -u calypso-api -n 50
journalctl -u calypso-frontend -n 50

# Test manually
curl -k https://localhost:8443/health
curl -k https://localhost:3000/health
```

#### Bacula Director Not Responding
```bash
# Check service
systemctl status bacula-director

# Test bconsole
echo "status director" | bconsole

# Check logs
tail -50 /var/log/bacula/bacula.log
```

#### VTL Slots Not Detected
```bash
# Check VTL services
systemctl status mhvtl.target

# Check devices
lsscsi | grep -E "mediumx|tape"

# Test manually
mtx -f /dev/sg7 status
echo "update slots storage=Scalar-i500" | bconsole
```

#### Storage Protocols Port Not Listening
```bash
# Check service status
systemctl status nfs-server smbd nmbd scst iscsi-scstd

# Check listening ports
ss -tuln | grep -E "2049|445|139|3260"

# Restart services if needed
systemctl restart nfs-server
systemctl restart smbd nmbd
```
## Customization

### Modify Thresholds
Edit `/usr/local/bin/calypso-healthcheck`:

```bash
# Disk usage threshold (default: 80%)
check_disk "/" 80 "Root filesystem"

# Memory usage threshold (default: 90%)
if [ "$mem_percent" -lt 90 ]; then

# Change expected VTL devices
if [ "$changer_count" -ge 2 ] && [ "$tape_count" -ge 8 ]; then
```

### Add Custom Checks
Add new check functions:

```bash
check_custom() {
    TOTAL_CHECKS=$((TOTAL_CHECKS + 1))

    if [[ condition ]]; then
        echo -e "${GREEN}${CHECK}${NC} Custom check passed" | tee -a "$LOG_FILE"
        PASSED_CHECKS=$((PASSED_CHECKS + 1))
    else
        echo -e "${RED}${CROSS}${NC} Custom check failed" | tee -a "$LOG_FILE"
        FAILED_CHECKS=$((FAILED_CHECKS + 1))
    fi
}

# Call in main script
check_custom
```
## Integration

### Monitoring Systems
Export the result for monitoring. Note that `$?` must be captured once; otherwise the second test would read the exit status of the first `[` command instead of the health check:

```bash
# Nagios/Icinga format
calypso-healthcheck
rc=$?
if [ "$rc" -eq 0 ]; then
    echo "OK - All checks passed"
    exit 0
elif [ "$rc" -eq 1 ]; then
    echo "WARNING - Healthy with warnings"
    exit 1
else
    echo "CRITICAL - System degraded"
    exit 2
fi
```

### API Integration
If a JSON output option is added to the script, the results can be parsed programmatically:

```bash
# Add a JSON output option (not yet implemented)
calypso-healthcheck --json > /tmp/health.json
```
## Maintenance

### Log Rotation
Logs are stored in `/var/log/calypso-healthcheck-*.log`

Create `/etc/logrotate.d/calypso-healthcheck`:
```
/var/log/calypso-healthcheck-*.log {
    weekly
    rotate 12
    compress
    delaycompress
    missingok
    notifempty
}
```

### Cleanup Old Logs
```bash
# Remove logs older than 30 days
find /var/log -name "calypso-healthcheck-*.log" -mtime +30 -delete
```

## Best Practices

1. **Run after reboot** - Verify all services started correctly
2. **Schedule regular checks** - Daily or weekly automated runs
3. **Monitor exit codes** - Alert on degraded/critical status
4. **Review logs periodically** - Identify patterns or recurring issues
5. **Update checks** - Add new components as the system evolves
6. **Baseline health** - Establish normal operating parameters
7. **Document exceptions** - Note known warnings that are acceptable

## See Also
- `pre-reboot-checklist.md` - Pre-reboot verification
- `bacula-vtl-troubleshooting.md` - VTL troubleshooting guide
- System logs: `/var/log/syslog`, `/var/log/bacula/`

---

*Created: 2025-12-31*
*Script: `/usr/local/bin/calypso-healthcheck`*
225 docs/on-progress/pre-reboot-checklist.md (Normal file)
@@ -0,0 +1,225 @@
# Calypso Appliance - Pre-Reboot Checklist

**Date:** 2025-12-31
**Status:** ✅ READY FOR REBOOT

---

## Enabled Services (Auto-start on boot)

### Core Application Services
| Service | Status | Purpose |
|---------|--------|---------|
| postgresql.service | ✅ enabled | Database backend |
| calypso-api.service | ✅ enabled | REST API backend |
| calypso-frontend.service | ✅ enabled | Web UI (React) |
| calypso-logger.service | ✅ enabled | Application logging |

### Backup Services (Bacula)
| Service | Status | Purpose |
|---------|--------|---------|
| bacula-director.service | ✅ enabled | Backup orchestration |
| bacula-fd.service | ✅ enabled | File daemon (client) |
| bacula-sd.service | ✅ enabled | Storage daemon (VTL) |

### Virtual Tape Library (mhVTL)
| Service | Status | Purpose |
|---------|--------|---------|
| mhvtl.target | ✅ enabled | VTL master target |
| vtllibrary@10.service | ✅ enabled | Scalar i500 library |
| vtllibrary@30.service | ✅ enabled | Scalar i40 library |
| vtltape@11-14.service | ✅ enabled | i500 tape drives (4) |
| vtltape@31-34.service | ✅ enabled | i40 tape drives (4) |

### Storage Protocols
| Service | Status | Purpose |
|---------|--------|---------|
| nfs-server.service | ✅ enabled | NFS file sharing |
| nfs-blkmap.service | ✅ enabled | NFS block mapping |
| smbd.service | ✅ enabled | Samba/CIFS server |
| nmbd.service | ✅ enabled | NetBIOS name service |
| scst.service | ✅ enabled | SCSI target subsystem |
| iscsi-scstd.service | ✅ enabled | iSCSI target daemon |

### Monitoring & Management
| Service | Status | Purpose |
|---------|--------|---------|
| snmpd.service | ✅ enabled | SNMP monitoring |

---
## Boot Order & Dependencies

```
1. Network (systemd-networkd)
   ↓
2. Storage Foundation
   - NFS server
   - Samba (smbd/nmbd)
   - SCST/iSCSI
   ↓
3. PostgreSQL Database
   ↓
4. VTL Services (mhvtl.target)
   - vtllibrary services
   - vtltape services
   ↓
5. Bacula Services
   - bacula-director (after postgresql)
   - bacula-fd
   - bacula-sd (after VTL)
   ↓
6. Calypso Application
   - calypso-api (after postgresql)
   - calypso-frontend (wants calypso-api)
   - calypso-logger (wants api & frontend)
```

---
## Post-Reboot Verification

### 1. Check System Boot
```bash
# Check boot time
systemd-analyze
systemd-analyze blame | head -20
```

### 2. Check Core Services
```bash
# Calypso application
systemctl status calypso-api calypso-frontend calypso-logger

# Database
systemctl status postgresql

# Check API health
curl -k https://localhost:8443/health
curl -k https://localhost:3000/health
```

### 3. Check Backup Services
```bash
# Bacula status
systemctl status bacula-director bacula-fd bacula-sd

# Test bconsole connection
echo "status director" | bconsole

# Check VTL connection
echo "status storage=Scalar-i500" | bconsole
```

### 4. Check Storage Protocols
```bash
# NFS
systemctl status nfs-server
showmount -e localhost

# Samba
systemctl status smbd nmbd
smbstatus

# iSCSI/SCST
systemctl status scst iscsi-scstd
scstadmin -list_target
```

### 5. Check VTL Devices
```bash
# VTL services
systemctl status mhvtl.target

# Check devices
lsscsi | grep -E "mediumx|tape"

# Test autochanger
mtx -f /dev/sg7 status | head -10
```

### 6. Check Monitoring
```bash
# SNMP
systemctl status snmpd
snmpwalk -v2c -c public localhost system
```

---
## Network Access Points

| Service | URL/Port | Description |
|---------|----------|-------------|
| Web UI | https://[IP]:3000 | Calypso frontend |
| API | https://[IP]:8443 | REST API |
| Bacula Director | localhost:9101 | bconsole access |
| PostgreSQL | localhost:5432 | Database |
| NFS | tcp/2049 | NFS shares |
| Samba | tcp/445, tcp/139 | CIFS/SMB shares |
| iSCSI | tcp/3260 | iSCSI targets |
| SNMP | udp/161 | Monitoring |

---
## Important Notes

### Bacula VTL Configuration
- **Scalar i500**: 43 slots, 20 tapes labeled (E01001L8-E01020L8) ✅
- **Scalar i40**: 44 slots, needs investigation after reboot ⚠️
- Changer devices: /dev/sg7 (i500), /dev/sg8 (i40)
- Tape devices: /dev/nst0-7 (non-rewinding)
- User permissions: bacula in tape+cdrom groups

### Storage Paths
- Calypso working directory: `/development/calypso`
- Bacula configs: `/etc/bacula/`
- VTL configs: `/etc/mhvtl/`
- PostgreSQL data: `/var/lib/postgresql/`

### Known Issues
- Scalar i40 VTL: Hardware error during tape load (requires investigation)

---
## Emergency Recovery

If services fail to start after reboot:

```bash
# Check failed services
systemctl --failed

# View service logs
journalctl -xeu calypso-api
journalctl -xeu bacula-director
journalctl -xeu mhvtl.target

# Manual service restart
systemctl restart calypso-api
systemctl restart bacula-sd
systemctl restart mhvtl.target
```

---

## Checklist Summary

- [x] PostgreSQL database: enabled
- [x] Calypso services (api, frontend, logger): enabled
- [x] Bacula services (director, fd, sd): enabled
- [x] mhVTL services (libraries, tape drives): enabled
- [x] NFS server: enabled
- [x] Samba (smbd, nmbd): enabled
- [x] SCST/iSCSI: enabled
- [x] SNMP monitoring: enabled
- [x] Network services: configured
- [x] User permissions: configured
- [x] Service dependencies: verified

**Status: SAFE TO REBOOT** ✅

---

*Generated: 2025-12-31*
*Documentation: /development/calypso/docs/*
17 frontend/package-lock.json (generated)
@@ -9,6 +9,8 @@
      "version": "1.0.0",
      "dependencies": {
        "@tanstack/react-query": "^5.12.0",
        "@xterm/addon-fit": "^0.11.0",
        "@xterm/xterm": "^6.0.0",
        "axios": "^1.6.2",
        "clsx": "^2.0.0",
        "date-fns": "^2.30.0",
@@ -1764,6 +1766,21 @@
        "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0"
      }
    },
    "node_modules/@xterm/addon-fit": {
      "version": "0.11.0",
      "resolved": "https://registry.npmjs.org/@xterm/addon-fit/-/addon-fit-0.11.0.tgz",
      "integrity": "sha512-jYcgT6xtVYhnhgxh3QgYDnnNMYTcf8ElbxxFzX0IZo+vabQqSPAjC3c1wJrKB5E19VwQei89QCiZZP86DCPF7g==",
      "license": "MIT"
    },
    "node_modules/@xterm/xterm": {
      "version": "6.0.0",
      "resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-6.0.0.tgz",
      "integrity": "sha512-TQwDdQGtwwDt+2cgKDLn0IRaSxYu1tSUjgKarSDkUM0ZNiSRXFpjxEsvc/Zgc5kq5omJ+V0a8/kIM2WD3sMOYg==",
      "license": "MIT",
      "workspaces": [
        "addons/*"
      ]
    },
    "node_modules/acorn": {
      "version": "8.15.0",
      "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
@@ -11,6 +11,8 @@
  },
  "dependencies": {
    "@tanstack/react-query": "^5.12.0",
    "@xterm/addon-fit": "^0.11.0",
    "@xterm/xterm": "^6.0.0",
    "axios": "^1.6.2",
    "clsx": "^2.0.0",
    "date-fns": "^2.30.0",
@@ -12,8 +12,14 @@ import ISCSITargetsPage from '@/pages/ISCSITargets'
import ISCSITargetDetailPage from '@/pages/ISCSITargetDetail'
import SystemPage from '@/pages/System'
import BackupManagementPage from '@/pages/BackupManagement'
import TerminalConsolePage from '@/pages/TerminalConsole'
import SharesPage from '@/pages/Shares'
import IAMPage from '@/pages/IAM'
import ProfilePage from '@/pages/Profile'
import MonitoringPage from '@/pages/Monitoring'
import ObjectStoragePage from '@/pages/ObjectStorage'
import SnapshotReplicationPage from '@/pages/SnapshotReplication'
import ShareShieldPage from '@/pages/ShareShield'
import Layout from '@/components/Layout'

// Create a client
@@ -59,6 +65,12 @@ function App() {
        <Route path="iscsi" element={<ISCSITargetsPage />} />
        <Route path="iscsi/:id" element={<ISCSITargetDetailPage />} />
        <Route path="backup" element={<BackupManagementPage />} />
        <Route path="shares" element={<SharesPage />} />
        <Route path="terminal" element={<TerminalConsolePage />} />
        <Route path="object-storage" element={<ObjectStoragePage />} />
        <Route path="snapshots" element={<SnapshotReplicationPage />} />
        <Route path="share-shield" element={<ShareShieldPage />} />
        <Route path="monitoring" element={<MonitoringPage />} />
        <Route path="alerts" element={<AlertsPage />} />
        <Route path="system" element={<SystemPage />} />
        <Route path="iam" element={<IAMPage />} />
@@ -46,6 +46,45 @@ export interface CreateJobRequest {
  pool_name?: string
}

export interface BackupClient {
  client_id: number
  name: string
  uname?: string
  enabled: boolean
  auto_prune?: boolean
  file_retention?: number
  job_retention?: number
  last_backup_at?: string
  total_jobs?: number
  total_bytes?: number
  status?: 'online' | 'offline'
}

export interface ListClientsResponse {
  clients: BackupClient[]
  total: number
}

export interface ListClientsParams {
  enabled?: boolean
  search?: string
}

export interface PoolStats {
  name: string
  used_bytes: number
  total_bytes: number
  usage_percent: number
}

export interface DashboardStats {
  director_status: string
  director_uptime: string
  last_job?: BackupJob
  active_jobs_count: number
  default_pool?: PoolStats
}

export const backupAPI = {
  listJobs: async (params?: ListJobsParams): Promise<ListJobsResponse> => {
    const queryParams = new URLSearchParams()
@@ -71,5 +110,148 @@ export const backupAPI = {
    const response = await apiClient.post<BackupJob>('/backup/jobs', data)
    return response.data
  },

  executeBconsoleCommand: async (command: string): Promise<{ output: string }> => {
    const response = await apiClient.post<{ output: string }>('/backup/console/execute', { command })
    return response.data
  },

  listClients: async (params?: ListClientsParams): Promise<ListClientsResponse> => {
    const queryParams = new URLSearchParams()
    if (params?.enabled !== undefined) queryParams.append('enabled', params.enabled.toString())
    if (params?.search) queryParams.append('search', params.search)

    const response = await apiClient.get<ListClientsResponse>(
      `/backup/clients${queryParams.toString() ? `?${queryParams.toString()}` : ''}`
    )
    return response.data
  },

  getDashboardStats: async (): Promise<DashboardStats> => {
    const response = await apiClient.get<DashboardStats>('/backup/dashboard/stats')
    return response.data
  },

  listStoragePools: async (): Promise<{ pools: StoragePool[]; total: number }> => {
    const response = await apiClient.get<{ pools: StoragePool[]; total: number }>('/backup/storage/pools')
    return response.data
  },

  listStorageVolumes: async (poolName?: string): Promise<{ volumes: StorageVolume[]; total: number }> => {
    const queryParams = new URLSearchParams()
    if (poolName) queryParams.append('pool_name', poolName)
    const response = await apiClient.get<{ volumes: StorageVolume[]; total: number }>(
      `/backup/storage/volumes${queryParams.toString() ? `?${queryParams.toString()}` : ''}`
    )
    return response.data
  },

  listStorageDaemons: async (): Promise<{ daemons: StorageDaemon[]; total: number }> => {
    const response = await apiClient.get<{ daemons: StorageDaemon[]; total: number }>('/backup/storage/daemons')
    return response.data
  },

  createStoragePool: async (data: CreateStoragePoolRequest): Promise<StoragePool> => {
    const response = await apiClient.post<StoragePool>('/backup/storage/pools', data)
    return response.data
  },

  deleteStoragePool: async (poolId: number): Promise<void> => {
    await apiClient.delete(`/backup/storage/pools/${poolId}`)
  },

  createStorageVolume: async (data: CreateStorageVolumeRequest): Promise<StorageVolume> => {
    const response = await apiClient.post<StorageVolume>('/backup/storage/volumes', data)
    return response.data
  },

  updateStorageVolume: async (volumeId: number, data: UpdateStorageVolumeRequest): Promise<StorageVolume> => {
    const response = await apiClient.put<StorageVolume>(`/backup/storage/volumes/${volumeId}`, data)
    return response.data
  },

  deleteStorageVolume: async (volumeId: number): Promise<void> => {
    await apiClient.delete(`/backup/storage/volumes/${volumeId}`)
  },

  listMedia: async (): Promise<{ media: Media[]; total: number }> => {
    const response = await apiClient.get<{ media: Media[]; total: number }>('/backup/media')
    return response.data
  },
}

export interface CreateStoragePoolRequest {
  name: string
  pool_type?: string
  label_format?: string
  recycle?: boolean
  auto_prune?: boolean
}

export interface CreateStorageVolumeRequest {
  volume_name: string
  pool_name: string
  media_type?: string
  max_vol_bytes?: number
  vol_retention?: number
}

export interface UpdateStorageVolumeRequest {
  max_vol_bytes?: number
  vol_retention?: number
}

export interface Media {
  media_id: number
  volume_name: string
  pool_name: string
  media_type: string
  status: string
  vol_bytes: number
  max_vol_bytes: number
  vol_files: number
  last_written?: string
  recycle_count: number
  slot?: number
  in_changer?: number
  library_name?: string
}

export interface StoragePool {
  pool_id: number
  name: string
  pool_type: string
  label_format?: string
  recycle?: boolean
  auto_prune?: boolean
  volume_count: number
  used_bytes: number
  total_bytes: number
  usage_percent: number
}

export interface StorageVolume {
  volume_id: number
  media_id: number
  volume_name: string
  pool_name: string
  media_type: string
  vol_status: string
  vol_bytes: number
  max_vol_bytes: number
  vol_files: number
  vol_retention?: string
  last_written?: string
  recycle_count: number
}

export interface StorageDaemon {
  storage_id: number
  name: string
  address: string
  port: number
  device_name: string
  media_type: string
  status: string
}
@@ -5,6 +5,9 @@ const apiClient = axios.create({
  baseURL: '/api/v1',
  headers: {
    'Content-Type': 'application/json',
    'Cache-Control': 'no-cache, no-store, must-revalidate',
    'Pragma': 'no-cache',
    'Expires': '0',
  },
})
@@ -88,7 +88,14 @@ export interface AddInitiatorRequest {

export const scstAPI = {
  listTargets: async (): Promise<SCSTTarget[]> => {
    const response = await apiClient.get('/scst/targets')
    const response = await apiClient.get('/scst/targets', {
      headers: {
        'Cache-Control': 'no-cache',
      },
      params: {
        _t: Date.now(), // Add timestamp to prevent browser caching
      },
    })
    return response.data.targets || []
  },

@@ -97,7 +104,14 @@ export const scstAPI = {
    luns: SCSTLUN[]
    initiator_groups?: SCSTInitiatorGroup[]
  }> => {
    const response = await apiClient.get(`/scst/targets/${id}`)
    const response = await apiClient.get(`/scst/targets/${id}`, {
      headers: {
        'Cache-Control': 'no-cache',
      },
      params: {
        _t: Date.now(), // Add timestamp to prevent browser caching
      },
    })
    return response.data
  },

@@ -112,6 +126,11 @@ export const scstAPI = {
    return response.data
  },

  removeLUN: async (targetId: string, lunId: string): Promise<{ message: string }> => {
    const response = await apiClient.delete(`/scst/targets/${targetId}/luns/${lunId}`)
    return response.data
  },

  addInitiator: async (targetId: string, data: AddInitiatorRequest): Promise<{ task_id: string }> => {
    const response = await apiClient.post(`/scst/targets/${targetId}/initiators`, data)
    return response.data
@@ -123,17 +142,38 @@ export const scstAPI = {
  },

  listHandlers: async (): Promise<SCSTHandler[]> => {
    const response = await apiClient.get('/scst/handlers')
    const response = await apiClient.get('/scst/handlers', {
      headers: {
        'Cache-Control': 'no-cache',
      },
      params: {
        _t: Date.now(),
      },
    })
    return response.data.handlers || []
  },

  listPortals: async (): Promise<SCSTPortal[]> => {
    const response = await apiClient.get('/scst/portals')
    const response = await apiClient.get('/scst/portals', {
      headers: {
        'Cache-Control': 'no-cache',
      },
      params: {
        _t: Date.now(), // Add timestamp to prevent browser caching
      },
    })
    return response.data.portals || []
  },

  getPortal: async (id: string): Promise<SCSTPortal> => {
    const response = await apiClient.get(`/scst/portals/${id}`)
    const response = await apiClient.get(`/scst/portals/${id}`, {
      headers: {
        'Cache-Control': 'no-cache',
      },
      params: {
        _t: Date.now(),
      },
    })
    return response.data
  },

@@ -161,13 +201,32 @@ export const scstAPI = {
    return response.data
  },

  deleteTarget: async (targetId: string): Promise<{ message: string }> => {
    const response = await apiClient.delete(`/scst/targets/${targetId}`)
    return response.data
  },

  listInitiators: async (): Promise<SCSTInitiator[]> => {
    const response = await apiClient.get('/scst/initiators')
    const response = await apiClient.get('/scst/initiators', {
      headers: {
        'Cache-Control': 'no-cache',
      },
      params: {
        _t: Date.now(),
      },
    })
    return response.data.initiators || []
  },

  getInitiator: async (id: string): Promise<SCSTInitiator> => {
    const response = await apiClient.get(`/scst/initiators/${id}`)
    const response = await apiClient.get(`/scst/initiators/${id}`, {
      headers: {
        'Cache-Control': 'no-cache',
      },
      params: {
        _t: Date.now(),
      },
    })
    return response.data
  },

@@ -176,7 +235,14 @@ export const scstAPI = {
  },

  listExtents: async (): Promise<SCSTExtent[]> => {
    const response = await apiClient.get('/scst/extents')
    const response = await apiClient.get('/scst/extents', {
      headers: {
        'Cache-Control': 'no-cache',
      },
      params: {
        _t: Date.now(), // Add timestamp to prevent browser caching
      },
    })
    return response.data.extents || []
  },

@@ -188,6 +254,68 @@ export const scstAPI = {
  deleteExtent: async (deviceName: string): Promise<void> => {
    await apiClient.delete(`/scst/extents/${deviceName}`)
  },

  // Initiator Groups
  listInitiatorGroups: async (): Promise<SCSTInitiatorGroup[]> => {
    const response = await apiClient.get('/scst/initiator-groups', {
      headers: {
        'Cache-Control': 'no-cache',
      },
      params: {
        _t: Date.now(), // Add timestamp to prevent browser caching
      },
    })
    return response.data.groups || []
  },

  getInitiatorGroup: async (id: string): Promise<SCSTInitiatorGroup> => {
    const response = await apiClient.get(`/scst/initiator-groups/${id}`, {
      headers: {
        'Cache-Control': 'no-cache',
      },
      params: {
        _t: Date.now(),
      },
    })
    return response.data
  },

  createInitiatorGroup: async (data: { target_id: string; group_name: string }): Promise<SCSTInitiatorGroup> => {
    const response = await apiClient.post('/scst/initiator-groups', data)
    return response.data
  },

  updateInitiatorGroup: async (id: string, data: { group_name: string }): Promise<SCSTInitiatorGroup> => {
    const response = await apiClient.put(`/scst/initiator-groups/${id}`, data)
    return response.data
  },

  deleteInitiatorGroup: async (id: string): Promise<void> => {
    await apiClient.delete(`/scst/initiator-groups/${id}`)
  },

  addInitiatorToGroup: async (groupId: string, initiatorIQN: string): Promise<{ message: string }> => {
    const response = await apiClient.post(`/scst/initiator-groups/${groupId}/initiators`, {
      initiator_iqn: initiatorIQN,
    })
    return response.data
  },

  // Config file management
  getConfigFile: async (path?: string): Promise<{ content: string; path: string }> => {
    const response = await apiClient.get('/scst/config/file', {
      params: path ? { path } : {},
    })
    return response.data
  },

  updateConfigFile: async (content: string, path?: string): Promise<{ message: string; path: string }> => {
    const response = await apiClient.put('/scst/config/file', {
      content,
      path,
    })
    return response.data
  },
}

export interface SCSTExtent {
75 frontend/src/api/shares.ts (Normal file)
@@ -0,0 +1,75 @@
import apiClient from './client'

export interface Share {
  id: string
  dataset_id: string
  dataset_name: string
  mount_point: string
  share_type: 'nfs' | 'smb' | 'both' | 'none'
  nfs_enabled: boolean
  nfs_options?: string
  nfs_clients?: string[]
  smb_enabled: boolean
  smb_share_name?: string
  smb_path?: string
  smb_comment?: string
  smb_guest_ok: boolean
  smb_read_only: boolean
  smb_browseable: boolean
  is_active: boolean
  created_at: string
  updated_at: string
  created_by: string
}

export interface CreateShareRequest {
  dataset_id: string
  nfs_enabled: boolean
  nfs_options?: string
  nfs_clients?: string[]
  smb_enabled: boolean
  smb_share_name?: string
  smb_comment?: string
  smb_guest_ok?: boolean
  smb_read_only?: boolean
  smb_browseable?: boolean
}

export interface UpdateShareRequest {
  nfs_enabled?: boolean
  nfs_options?: string
  nfs_clients?: string[]
  smb_enabled?: boolean
  smb_share_name?: string
  smb_comment?: string
  smb_guest_ok?: boolean
  smb_read_only?: boolean
  smb_browseable?: boolean
  is_active?: boolean
}

export const sharesAPI = {
  listShares: async (): Promise<Share[]> => {
    const response = await apiClient.get<{ shares: Share[] }>('/shares')
    return response.data.shares || []
  },

  getShare: async (id: string): Promise<Share> => {
    const response = await apiClient.get<Share>(`/shares/${id}`)
    return response.data
  },

  createShare: async (data: CreateShareRequest): Promise<Share> => {
    const response = await apiClient.post<Share>('/shares', data)
    return response.data
  },

  updateShare: async (id: string, data: UpdateShareRequest): Promise<Share> => {
    const response = await apiClient.put<Share>(`/shares/${id}`, data)
    return response.data
  },

  deleteShare: async (id: string): Promise<void> => {
    await apiClient.delete(`/shares/${id}`)
  },
}
@@ -166,6 +166,7 @@ export const zfsApi = {
}

export interface ZFSDataset {
  id: string
  name: string
  pool: string
  type: string // filesystem, volume, snapshot
@@ -7,6 +7,50 @@ export interface NetworkInterface {
  status: string // "Connected" or "Down"
  speed: string // e.g., "10 Gbps", "1 Gbps"
  role: string // "Management", "ISCSI", or empty
  gateway?: string
  dns1?: string
  dns2?: string
}

export interface UpdateNetworkInterfaceRequest {
  ip_address: string
  subnet: string
  gateway?: string
  dns1?: string
  dns2?: string
  role?: string
}

export interface SaveNTPSettingsRequest {
  timezone: string
  ntp_servers: string[]
}

export interface NTPSettings {
  timezone: string
  ntp_servers: string[]
}

export interface ServiceStatus {
  name: string
  active_state: string // "active", "inactive", "activating", "deactivating", "failed"
  sub_state: string
  load_state: string
  description: string
  since?: string
}

export interface SystemLogEntry {
  time: string
  level: string
  source: string
  message: string
}

export interface NetworkDataPoint {
  time: string
  inbound: number // Mbps
  outbound: number // Mbps
}

export const systemAPI = {
@@ -14,5 +58,31 @@ export const systemAPI = {
    const response = await apiClient.get<{ interfaces: NetworkInterface[] | null }>('/system/interfaces')
    return response.data.interfaces || []
  },
  updateNetworkInterface: async (name: string, data: UpdateNetworkInterfaceRequest): Promise<NetworkInterface> => {
    const response = await apiClient.put<{ interface: NetworkInterface }>(`/system/interfaces/${name}`, data)
    return response.data.interface
  },
  getNTPSettings: async (): Promise<NTPSettings> => {
    const response = await apiClient.get<{ settings: NTPSettings }>('/system/ntp')
    return response.data.settings
  },
  saveNTPSettings: async (data: SaveNTPSettingsRequest): Promise<void> => {
    await apiClient.post('/system/ntp', data)
  },
  listServices: async (): Promise<ServiceStatus[]> => {
    const response = await apiClient.get<{ services: ServiceStatus[] }>('/system/services')
    return response.data.services || []
  },
  restartService: async (name: string): Promise<void> => {
    await apiClient.post(`/system/services/${name}/restart`)
  },
  getSystemLogs: async (limit: number = 30): Promise<SystemLogEntry[]> => {
    const response = await apiClient.get<{ logs: SystemLogEntry[] }>(`/system/logs?limit=${limit}`)
    return response.data.logs || []
  },
  getNetworkThroughput: async (duration: string = '5m'): Promise<NetworkDataPoint[]> => {
    const response = await apiClient.get<{ data: NetworkDataPoint[] }>(`/system/network/throughput?duration=${duration}`)
    return response.data.data || []
  },
}
@@ -7,11 +7,16 @@ import {
  HardDrive,
  Database,
  Network,
  Settings,
  Bell,
  Server,
  Users,
  Archive
  Archive,
  Terminal,
  Share,
  Activity,
  Box,
  Camera,
  Shield
} from 'lucide-react'
import { useState, useEffect } from 'react'

@@ -44,10 +49,15 @@ export default function Layout() {
  const navigation = [
    { name: 'Dashboard', href: '/', icon: LayoutDashboard },
    { name: 'Storage', href: '/storage', icon: HardDrive },
    { name: 'Object Storage', href: '/object-storage', icon: Box },
    { name: 'Shares', href: '/shares', icon: Share },
    { name: 'Snapshots & Replication', href: '/snapshots', icon: Camera },
    { name: 'Tape Libraries', href: '/tape', icon: Database },
    { name: 'iSCSI Management', href: '/iscsi', icon: Network },
    { name: 'Backup Management', href: '/backup', icon: Archive },
    { name: 'Tasks', href: '/tasks', icon: Settings },
    { name: 'Terminal Console', href: '/terminal', icon: Terminal },
    { name: 'Share Shield', href: '/share-shield', icon: Shield },
    { name: 'Monitoring & Logs', href: '/monitoring', icon: Activity },
    { name: 'Alerts', href: '/alerts', icon: Bell },
    { name: 'System', href: '/system', icon: Server },
  ]
File diff suppressed because it is too large
@@ -3,6 +3,7 @@ import { useState, useMemo, useEffect } from 'react'
|
||||
import apiClient from '@/api/client'
|
||||
import { monitoringApi } from '@/api/monitoring'
|
||||
import { storageApi } from '@/api/storage'
|
||||
import { systemAPI } from '@/api/system'
|
||||
import { formatBytes } from '@/lib/format'
|
||||
import {
|
||||
Cpu,
|
||||
@@ -46,17 +47,18 @@ const MOCK_ACTIVE_JOBS = [
|
||||
},
|
||||
]
|
||||
|
||||
const MOCK_SYSTEM_LOGS = [
|
||||
{ time: '10:45:22', level: 'INFO', source: 'systemd', message: 'Started User Manager for UID 1000.' },
|
||||
{ time: '10:45:15', level: 'WARN', source: 'smartd', message: 'Device: /dev/ada5, SMART Usage Attribute: 194 Temperature_Celsius changed from 38 to 41' },
|
||||
{ time: '10:44:58', level: 'INFO', source: 'kernel', message: 'ix0: link state changed to UP' },
|
||||
{ time: '10:42:10', level: 'INFO', source: 'zfs', message: 'zfs_arc_reclaim_thread: reclaiming 157286400 bytes ...' },
|
||||
]
|
||||
|
||||
export default function Dashboard() {
|
||||
const [activeTab, setActiveTab] = useState<'jobs' | 'logs' | 'alerts'>('jobs')
|
||||
const [networkDataPoints, setNetworkDataPoints] = useState<Array<{ time: string; inbound: number; outbound: number }>>([])
|
||||
const refreshInterval = 5
|
||||
|
||||
// Fetch system logs with auto-refresh every 10 minutes
|
||||
const { data: systemLogs = [], isLoading: logsLoading, refetch: refetchLogs } = useQuery({
|
||||
queryKey: ['system-logs'],
|
||||
queryFn: () => systemAPI.getSystemLogs(30),
|
||||
refetchInterval: 10 * 60 * 1000, // 10 minutes
|
||||
})
|
||||
|
||||
const { data: health } = useQuery({
|
||||
queryKey: ['health'],
|
||||
@@ -143,51 +145,25 @@ export default function Dashboard() {
    return { totalStorage: total, usedStorage: used, storagePercent: percent }
  }, [repositories])

  // Initialize network data
  // Fetch network throughput data from RRD
  const { data: networkThroughput = [] } = useQuery({
    queryKey: ['network-throughput'],
    queryFn: () => systemAPI.getNetworkThroughput('5m'),
    refetchInterval: 5 * 1000, // Refresh every 5 seconds
  })

  // Update network data points when new data arrives
  useEffect(() => {
    // Generate initial 30 data points
    const initialData = []
    const now = Date.now()
    for (let i = 29; i >= 0; i--) {
      const time = new Date(now - i * 5000)
      const minutes = time.getMinutes().toString().padStart(2, '0')
      const seconds = time.getSeconds().toString().padStart(2, '0')

      const baseInbound = 800 + Math.random() * 400
      const baseOutbound = 400 + Math.random() * 200

      initialData.push({
        time: `${minutes}:${seconds}`,
        inbound: Math.round(baseInbound),
        outbound: Math.round(baseOutbound),
      })
    if (networkThroughput.length > 0) {
      // Take last 30 points
      const points = networkThroughput.slice(-30).map((point) => ({
        time: point.time,
        inbound: Math.round(point.inbound),
        outbound: Math.round(point.outbound),
      }))
      setNetworkDataPoints(points)
    }
    setNetworkDataPoints(initialData)

    // Update data every 5 seconds
    const interval = setInterval(() => {
      setNetworkDataPoints((prev) => {
        const now = new Date()
        const minutes = now.getMinutes().toString().padStart(2, '0')
        const seconds = now.getSeconds().toString().padStart(2, '0')

        const baseInbound = 800 + Math.random() * 400
        const baseOutbound = 400 + Math.random() * 200

        const newPoint = {
          time: `${minutes}:${seconds}`,
          inbound: Math.round(baseInbound),
          outbound: Math.round(baseOutbound),
        }

        // Keep only last 30 points
        const updated = [...prev.slice(1), newPoint]
        return updated
      })
    }, 5000)

    return () => clearInterval(interval)
  }, [])
  }, [networkThroughput])

  // Calculate current and peak throughput
  const currentThroughput = useMemo(() => {
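The replacement effect swaps the simulated `Math.random()` series for real RRD samples: whenever the query delivers fresh data, the last 30 samples are rounded and pushed into chart state. A sketch of the sample shape this code assumes, with the field names taken from the mapping above (the units are an assumption):

```typescript
// Shape implied by the mapping above: point.time, point.inbound, point.outbound.
interface ThroughputPoint {
  time: string    // mm:ss label used on the chart's x-axis
  inbound: number // throughput; the unit (e.g. KB/s) is an assumption
  outbound: number
}

// Keep only the most recent 30 samples, rounded for display.
function toChartPoints(samples: ThroughputPoint[]): ThroughputPoint[] {
  return samples.slice(-30).map((p) => ({
    time: p.time,
    inbound: Math.round(p.inbound),
    outbound: Math.round(p.outbound),
  }))
}
```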
@@ -564,39 +540,59 @@ export default function Dashboard() {
  <h4 className="text-xs uppercase text-text-secondary font-bold tracking-wider">
    Recent System Events
  </h4>
  <button className="text-xs text-primary hover:text-white transition-colors">
    View All Logs
  </button>
  <div className="flex items-center gap-3">
    <button
      onClick={() => refetchLogs()}
      disabled={logsLoading}
      className="text-xs text-primary hover:text-white transition-colors flex items-center gap-1 disabled:opacity-50"
    >
      <RefreshCw size={14} className={logsLoading ? 'animate-spin' : ''} />
      Refresh
    </button>
    <button className="text-xs text-primary hover:text-white transition-colors">
      View All Logs
    </button>
  </div>
</div>
<div className="flex-1 overflow-y-auto custom-scrollbar bg-[#111a22]">
  <table className="w-full text-left border-collapse">
    <tbody className="text-sm font-mono divide-y divide-border-dark/50">
      {MOCK_SYSTEM_LOGS.map((log, idx) => (
        <tr key={idx} className="group hover:bg-[#233648] transition-colors">
          <td className="px-6 py-2 text-text-secondary w-32 whitespace-nowrap">
            {log.time}
          </td>
          <td className="px-6 py-2 w-24">
            <span
              className={
                log.level === 'INFO'
                  ? 'text-emerald-500'
                  : log.level === 'WARN'
                    ? 'text-yellow-500'
                    : 'text-red-500'
              }
            >
              {log.level}
            </span>
          </td>
          <td className="px-6 py-2 w-32 text-white">{log.source}</td>
          <td className="px-6 py-2 text-text-secondary truncate max-w-lg">
            {log.message}
          </td>
        </tr>
      ))}
    </tbody>
  </table>
  {logsLoading ? (
    <div className="flex items-center justify-center py-8">
      <span className="text-text-secondary">Loading logs...</span>
    </div>
  ) : systemLogs.length === 0 ? (
    <div className="flex items-center justify-center py-8">
      <span className="text-text-secondary">No logs available</span>
    </div>
  ) : (
    <table className="w-full text-left border-collapse">
      <tbody className="text-sm font-mono divide-y divide-border-dark/50">
        {systemLogs.map((log, idx) => (
          <tr key={idx} className="group hover:bg-[#233648] transition-colors">
            <td className="px-6 py-2 text-text-secondary w-32 whitespace-nowrap">
              {log.time}
            </td>
            <td className="px-6 py-2 w-24">
              <span
                className={
                  log.level === 'INFO' || log.level === 'NOTICE' || log.level === 'DEBUG'
                    ? 'text-emerald-500'
                    : log.level === 'WARN'
                      ? 'text-yellow-500'
                      : 'text-red-500'
                }
              >
                {log.level}
              </span>
            </td>
            <td className="px-6 py-2 w-32 text-white">{log.source}</td>
            <td className="px-6 py-2 text-text-secondary truncate max-w-lg">
              {log.message}
            </td>
          </tr>
        ))}
      </tbody>
    </table>
  )}
</div>
</>
)}

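The new table keeps the nested-ternary color logic but widens the "green" bucket to also cover `NOTICE` and `DEBUG`. A lookup table is an equivalent, flatter way to express the same mapping; this is a sketch, not the author's code:

```typescript
// Same mapping as the JSX above, expressed as a lookup with a red fallback.
const LEVEL_CLASSES: Record<string, string> = {
  INFO: 'text-emerald-500',
  NOTICE: 'text-emerald-500',
  DEBUG: 'text-emerald-500',
  WARN: 'text-yellow-500',
}

const levelClass = (level: string): string =>
  LEVEL_CLASSES[level] ?? 'text-red-500'
```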
@@ -696,10 +696,15 @@ function EditUserForm({ user, onClose, onSuccess }: EditUserFormProps) {
      iamApi.updateUser(user.id, data),
    onSuccess: async () => {
      onSuccess()
      // Invalidate all related queries to refresh counts
      queryClient.invalidateQueries({ queryKey: ['iam-users'] })
      await queryClient.refetchQueries({ queryKey: ['iam-users'] })
      queryClient.invalidateQueries({ queryKey: ['iam-user', user.id] })
      queryClient.invalidateQueries({ queryKey: ['iam-roles'] }) // Refresh role user counts
      queryClient.invalidateQueries({ queryKey: ['iam-groups'] }) // Refresh group user counts
      await queryClient.refetchQueries({ queryKey: ['iam-users'] })
      await queryClient.refetchQueries({ queryKey: ['iam-user', user.id] })
      await queryClient.refetchQueries({ queryKey: ['iam-roles'] })
      await queryClient.refetchQueries({ queryKey: ['iam-groups'] })
    },
    onError: (error: any) => {
      console.error('Failed to update user:', error)
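Assuming TanStack Query (which the `{ queryKey }` call signatures suggest), `invalidateQueries` marks matching cache entries stale and refetches only the actively observed ones, while `refetchQueries` also hits inactive entries and returns a promise that can be awaited, which is presumably why the handler does both. A condensed sketch of the same pattern; the helper name is illustrative and the keys mirror the diff:

```typescript
import type { QueryClient } from '@tanstack/react-query'

// Condensed restatement of the onSuccess logic above.
async function refreshIamCaches(queryClient: QueryClient, userId: string): Promise<void> {
  const keys = [['iam-users'], ['iam-user', userId], ['iam-roles'], ['iam-groups']]

  // Mark everything stale first...
  for (const queryKey of keys) {
    queryClient.invalidateQueries({ queryKey })
  }
  // ...then force refetches so user/role/group counts update immediately.
  await Promise.all(keys.map((queryKey) => queryClient.refetchQueries({ queryKey })))
}
```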
@@ -725,9 +730,11 @@ function EditUserForm({ user, onClose, onSuccess }: EditUserFormProps) {
    },
    onSuccess: async (_, roleName: string) => {
      // Don't overwrite state with server data - keep optimistic update
      // Just invalidate queries for other components
      // Invalidate queries to refresh counts
      queryClient.invalidateQueries({ queryKey: ['iam-users'] })
      queryClient.invalidateQueries({ queryKey: ['iam-user', user.id] })
      queryClient.invalidateQueries({ queryKey: ['iam-roles'] }) // Refresh role user count
      await queryClient.refetchQueries({ queryKey: ['iam-roles'] })
      // Use functional update to get current state
      setUserRoles(current => {
        console.log('assignRoleMutation onSuccess - roleName:', roleName, 'current userRoles:', current)
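This hunk and the three that follow repeat the same optimistic-update shape for role removal and group assignment/removal: the local `userRoles`/`userGroups` state is updated eagerly, deliberately not overwritten by the server response, and the functional form of the setter reads the latest state instead of a stale closure. A minimal sketch of that pattern under those assumptions (the list manipulation is illustrative):

```typescript
// Assign: add the role unless it is already present; functional update avoids stale state.
setUserRoles((current) =>
  current.includes(roleName) ? current : [...current, roleName],
)

// Remove: filter it out; same functional-update idea.
setUserRoles((current) => current.filter((r) => r !== roleName))
```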
@@ -753,9 +760,11 @@ function EditUserForm({ user, onClose, onSuccess }: EditUserFormProps) {
    },
    onSuccess: async (_, roleName: string) => {
      // Don't overwrite state with server data - keep optimistic update
      // Just invalidate queries for other components
      // Invalidate queries to refresh counts
      queryClient.invalidateQueries({ queryKey: ['iam-users'] })
      queryClient.invalidateQueries({ queryKey: ['iam-user', user.id] })
      queryClient.invalidateQueries({ queryKey: ['iam-roles'] }) // Refresh role user count
      await queryClient.refetchQueries({ queryKey: ['iam-roles'] })
      console.log('Role removed successfully:', roleName, 'Current userRoles:', userRoles)
    },
    onError: (error: any, _roleName: string, context: any) => {
@@ -785,9 +794,11 @@ function EditUserForm({ user, onClose, onSuccess }: EditUserFormProps) {
    },
    onSuccess: async (_, groupName: string) => {
      // Don't overwrite state with server data - keep optimistic update
      // Just invalidate queries for other components
      // Invalidate queries to refresh counts
      queryClient.invalidateQueries({ queryKey: ['iam-users'] })
      queryClient.invalidateQueries({ queryKey: ['iam-user', user.id] })
      queryClient.invalidateQueries({ queryKey: ['iam-groups'] }) // Refresh group user count
      await queryClient.refetchQueries({ queryKey: ['iam-groups'] })
      // Use functional update to get current state
      setUserGroups(current => {
        console.log('assignGroupMutation onSuccess - groupName:', groupName, 'current userGroups:', current)
@@ -813,9 +824,11 @@ function EditUserForm({ user, onClose, onSuccess }: EditUserFormProps) {
    },
    onSuccess: async (_, groupName: string) => {
      // Don't overwrite state with server data - keep optimistic update
      // Just invalidate queries for other components
      // Invalidate queries to refresh counts
      queryClient.invalidateQueries({ queryKey: ['iam-users'] })
      queryClient.invalidateQueries({ queryKey: ['iam-user', user.id] })
      queryClient.invalidateQueries({ queryKey: ['iam-groups'] }) // Refresh group user count
      await queryClient.refetchQueries({ queryKey: ['iam-groups'] })
      console.log('Group removed successfully:', groupName, 'Current userGroups:', userGroups)
    },
    onError: (error: any, _groupName: string, context: any) => {
Some files were not shown because too many files have changed in this diff