Compare commits
1 Commits
developmen
...
3c4cb03df4
| Author | SHA1 | Date | |
|---|---|---|---|
| 3c4cb03df4 |
Binary file not shown.
Binary file not shown.
@@ -1,383 +0,0 @@
|
|||||||
package backup
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/atlasos/calypso/internal/common/logger"
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Handler handles backup-related API requests
|
|
||||||
type Handler struct {
|
|
||||||
service *Service
|
|
||||||
logger *logger.Logger
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHandler creates a new backup handler
|
|
||||||
func NewHandler(service *Service, log *logger.Logger) *Handler {
|
|
||||||
return &Handler{
|
|
||||||
service: service,
|
|
||||||
logger: log,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListJobs lists backup jobs with optional filters
|
|
||||||
func (h *Handler) ListJobs(c *gin.Context) {
|
|
||||||
opts := ListJobsOptions{
|
|
||||||
Status: c.Query("status"),
|
|
||||||
JobType: c.Query("job_type"),
|
|
||||||
ClientName: c.Query("client_name"),
|
|
||||||
JobName: c.Query("job_name"),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse pagination
|
|
||||||
var limit, offset int
|
|
||||||
if limitStr := c.Query("limit"); limitStr != "" {
|
|
||||||
if _, err := fmt.Sscanf(limitStr, "%d", &limit); err == nil {
|
|
||||||
opts.Limit = limit
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if offsetStr := c.Query("offset"); offsetStr != "" {
|
|
||||||
if _, err := fmt.Sscanf(offsetStr, "%d", &offset); err == nil {
|
|
||||||
opts.Offset = offset
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
jobs, totalCount, err := h.service.ListJobs(c.Request.Context(), opts)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to list jobs", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list jobs"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if jobs == nil {
|
|
||||||
jobs = []Job{}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"jobs": jobs,
|
|
||||||
"total": totalCount,
|
|
||||||
"limit": opts.Limit,
|
|
||||||
"offset": opts.Offset,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetJob retrieves a job by ID
|
|
||||||
func (h *Handler) GetJob(c *gin.Context) {
|
|
||||||
id := c.Param("id")
|
|
||||||
|
|
||||||
job, err := h.service.GetJob(c.Request.Context(), id)
|
|
||||||
if err != nil {
|
|
||||||
if err.Error() == "job not found" {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "job not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to get job", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get job"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, job)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateJob creates a new backup job
|
|
||||||
func (h *Handler) CreateJob(c *gin.Context) {
|
|
||||||
var req CreateJobRequest
|
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate job type
|
|
||||||
validJobTypes := map[string]bool{
|
|
||||||
"Backup": true, "Restore": true, "Verify": true, "Copy": true, "Migrate": true,
|
|
||||||
}
|
|
||||||
if !validJobTypes[req.JobType] {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid job_type"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate job level
|
|
||||||
validJobLevels := map[string]bool{
|
|
||||||
"Full": true, "Incremental": true, "Differential": true, "Since": true,
|
|
||||||
}
|
|
||||||
if !validJobLevels[req.JobLevel] {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid job_level"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
job, err := h.service.CreateJob(c.Request.Context(), req)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to create job", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create job"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusCreated, job)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecuteBconsoleCommand executes a bconsole command
|
|
||||||
func (h *Handler) ExecuteBconsoleCommand(c *gin.Context) {
|
|
||||||
var req struct {
|
|
||||||
Command string `json:"command" binding:"required"`
|
|
||||||
}
|
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "command is required"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
output, err := h.service.ExecuteBconsoleCommand(c.Request.Context(), req.Command)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to execute bconsole command", "error", err, "command", req.Command)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
|
||||||
"error": "failed to execute command",
|
|
||||||
"output": output,
|
|
||||||
"details": err.Error(),
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"output": output,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListClients lists all backup clients with optional filters
|
|
||||||
func (h *Handler) ListClients(c *gin.Context) {
|
|
||||||
opts := ListClientsOptions{}
|
|
||||||
|
|
||||||
// Parse enabled filter
|
|
||||||
if enabledStr := c.Query("enabled"); enabledStr != "" {
|
|
||||||
enabled := enabledStr == "true"
|
|
||||||
opts.Enabled = &enabled
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse search query
|
|
||||||
opts.Search = c.Query("search")
|
|
||||||
|
|
||||||
clients, err := h.service.ListClients(c.Request.Context(), opts)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to list clients", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{
|
|
||||||
"error": "failed to list clients",
|
|
||||||
"details": err.Error(),
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if clients == nil {
|
|
||||||
clients = []Client{}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"clients": clients,
|
|
||||||
"total": len(clients),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetDashboardStats returns dashboard statistics
|
|
||||||
func (h *Handler) GetDashboardStats(c *gin.Context) {
|
|
||||||
stats, err := h.service.GetDashboardStats(c.Request.Context())
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to get dashboard stats", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get dashboard stats"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, stats)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListStoragePools lists all storage pools
|
|
||||||
func (h *Handler) ListStoragePools(c *gin.Context) {
|
|
||||||
pools, err := h.service.ListStoragePools(c.Request.Context())
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to list storage pools", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list storage pools"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if pools == nil {
|
|
||||||
pools = []StoragePool{}
|
|
||||||
}
|
|
||||||
|
|
||||||
h.logger.Info("Listed storage pools", "count", len(pools))
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"pools": pools,
|
|
||||||
"total": len(pools),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListStorageVolumes lists all storage volumes
|
|
||||||
func (h *Handler) ListStorageVolumes(c *gin.Context) {
|
|
||||||
poolName := c.Query("pool_name")
|
|
||||||
|
|
||||||
volumes, err := h.service.ListStorageVolumes(c.Request.Context(), poolName)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to list storage volumes", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list storage volumes"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if volumes == nil {
|
|
||||||
volumes = []StorageVolume{}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"volumes": volumes,
|
|
||||||
"total": len(volumes),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListStorageDaemons lists all storage daemons
|
|
||||||
func (h *Handler) ListStorageDaemons(c *gin.Context) {
|
|
||||||
daemons, err := h.service.ListStorageDaemons(c.Request.Context())
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to list storage daemons", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list storage daemons"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if daemons == nil {
|
|
||||||
daemons = []StorageDaemon{}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"daemons": daemons,
|
|
||||||
"total": len(daemons),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateStoragePool creates a new storage pool
|
|
||||||
func (h *Handler) CreateStoragePool(c *gin.Context) {
|
|
||||||
var req CreatePoolRequest
|
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
pool, err := h.service.CreateStoragePool(c.Request.Context(), req)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to create storage pool", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusCreated, pool)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteStoragePool deletes a storage pool
|
|
||||||
func (h *Handler) DeleteStoragePool(c *gin.Context) {
|
|
||||||
idStr := c.Param("id")
|
|
||||||
if idStr == "" {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "pool ID is required"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var poolID int
|
|
||||||
if _, err := fmt.Sscanf(idStr, "%d", &poolID); err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid pool ID"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err := h.service.DeleteStoragePool(c.Request.Context(), poolID)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to delete storage pool", "error", err, "pool_id", poolID)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "pool deleted successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateStorageVolume creates a new storage volume
|
|
||||||
func (h *Handler) CreateStorageVolume(c *gin.Context) {
|
|
||||||
var req CreateVolumeRequest
|
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
volume, err := h.service.CreateStorageVolume(c.Request.Context(), req)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to create storage volume", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusCreated, volume)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateStorageVolume updates a storage volume
|
|
||||||
func (h *Handler) UpdateStorageVolume(c *gin.Context) {
|
|
||||||
idStr := c.Param("id")
|
|
||||||
if idStr == "" {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "volume ID is required"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var volumeID int
|
|
||||||
if _, err := fmt.Sscanf(idStr, "%d", &volumeID); err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid volume ID"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var req UpdateVolumeRequest
|
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
volume, err := h.service.UpdateStorageVolume(c.Request.Context(), volumeID, req)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to update storage volume", "error", err, "volume_id", volumeID)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, volume)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteStorageVolume deletes a storage volume
|
|
||||||
func (h *Handler) DeleteStorageVolume(c *gin.Context) {
|
|
||||||
idStr := c.Param("id")
|
|
||||||
if idStr == "" {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "volume ID is required"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var volumeID int
|
|
||||||
if _, err := fmt.Sscanf(idStr, "%d", &volumeID); err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid volume ID"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err := h.service.DeleteStorageVolume(c.Request.Context(), volumeID)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to delete storage volume", "error", err, "volume_id", volumeID)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "volume deleted successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListMedia lists all media from bconsole "list media" command
|
|
||||||
func (h *Handler) ListMedia(c *gin.Context) {
|
|
||||||
media, err := h.service.ListMedia(c.Request.Context())
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to list media", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if media == nil {
|
|
||||||
media = []Media{}
|
|
||||||
}
|
|
||||||
|
|
||||||
h.logger.Info("Listed media", "count", len(media))
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
|
||||||
"media": media,
|
|
||||||
"total": len(media),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -59,7 +59,7 @@ func RunMigrations(ctx context.Context, db *DB) error {
|
|||||||
|
|
||||||
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
|
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
|
||||||
tx.Rollback()
|
tx.Rollback()
|
||||||
return fmt.Errorf("failed to execute migration %d: %w", migration.Version, err)
|
return fmt.Errorf("failed to execute migration %s: %w", migration.Version, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Record migration
|
// Record migration
|
||||||
@@ -68,11 +68,11 @@ func RunMigrations(ctx context.Context, db *DB) error {
|
|||||||
migration.Version,
|
migration.Version,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
tx.Rollback()
|
tx.Rollback()
|
||||||
return fmt.Errorf("failed to record migration %d: %w", migration.Version, err)
|
return fmt.Errorf("failed to record migration %s: %w", migration.Version, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := tx.Commit(); err != nil {
|
if err := tx.Commit(); err != nil {
|
||||||
return fmt.Errorf("failed to commit migration %d: %w", migration.Version, err)
|
return fmt.Errorf("failed to commit migration %s: %w", migration.Version, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Info("Migration applied successfully", "version", migration.Version)
|
log.Info("Migration applied successfully", "version", migration.Version)
|
||||||
|
|||||||
@@ -1,3 +0,0 @@
|
|||||||
-- Add vendor column to virtual_tape_libraries table
|
|
||||||
ALTER TABLE virtual_tape_libraries ADD COLUMN IF NOT EXISTS vendor VARCHAR(255);
|
|
||||||
|
|
||||||
@@ -1,45 +0,0 @@
|
|||||||
-- Add user groups feature
|
|
||||||
-- Groups table
|
|
||||||
CREATE TABLE IF NOT EXISTS groups (
|
|
||||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
||||||
name VARCHAR(255) NOT NULL UNIQUE,
|
|
||||||
description TEXT,
|
|
||||||
is_system BOOLEAN NOT NULL DEFAULT false,
|
|
||||||
created_at TIMESTAMP NOT NULL DEFAULT NOW(),
|
|
||||||
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
|
|
||||||
);
|
|
||||||
|
|
||||||
-- User groups junction table
|
|
||||||
CREATE TABLE IF NOT EXISTS user_groups (
|
|
||||||
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
|
||||||
group_id UUID NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
|
|
||||||
assigned_at TIMESTAMP NOT NULL DEFAULT NOW(),
|
|
||||||
assigned_by UUID REFERENCES users(id),
|
|
||||||
PRIMARY KEY (user_id, group_id)
|
|
||||||
);
|
|
||||||
|
|
||||||
-- Group roles junction table (groups can have roles)
|
|
||||||
CREATE TABLE IF NOT EXISTS group_roles (
|
|
||||||
group_id UUID NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
|
|
||||||
role_id UUID NOT NULL REFERENCES roles(id) ON DELETE CASCADE,
|
|
||||||
granted_at TIMESTAMP NOT NULL DEFAULT NOW(),
|
|
||||||
PRIMARY KEY (group_id, role_id)
|
|
||||||
);
|
|
||||||
|
|
||||||
-- Indexes
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_groups_name ON groups(name);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_user_groups_user_id ON user_groups(user_id);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_user_groups_group_id ON user_groups(group_id);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_group_roles_group_id ON group_roles(group_id);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_group_roles_role_id ON group_roles(role_id);
|
|
||||||
|
|
||||||
-- Insert default system groups
|
|
||||||
INSERT INTO groups (name, description, is_system) VALUES
|
|
||||||
('wheel', 'System administrators group', true),
|
|
||||||
('operators', 'System operators group', true),
|
|
||||||
('backup', 'Backup operators group', true),
|
|
||||||
('auditors', 'Auditors group', true),
|
|
||||||
('storage_admins', 'Storage administrators group', true),
|
|
||||||
('services', 'Service accounts group', true)
|
|
||||||
ON CONFLICT (name) DO NOTHING;
|
|
||||||
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
-- AtlasOS - Calypso
|
|
||||||
-- Backup Jobs Schema
|
|
||||||
-- Version: 9.0
|
|
||||||
|
|
||||||
-- Backup jobs table
|
|
||||||
CREATE TABLE IF NOT EXISTS backup_jobs (
|
|
||||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
|
||||||
job_id INTEGER NOT NULL UNIQUE, -- Bareos job ID
|
|
||||||
job_name VARCHAR(255) NOT NULL,
|
|
||||||
client_name VARCHAR(255) NOT NULL,
|
|
||||||
job_type VARCHAR(50) NOT NULL, -- 'Backup', 'Restore', 'Verify', 'Copy', 'Migrate'
|
|
||||||
job_level VARCHAR(50) NOT NULL, -- 'Full', 'Incremental', 'Differential', 'Since'
|
|
||||||
status VARCHAR(50) NOT NULL, -- 'Running', 'Completed', 'Failed', 'Canceled', 'Waiting'
|
|
||||||
bytes_written BIGINT NOT NULL DEFAULT 0,
|
|
||||||
files_written INTEGER NOT NULL DEFAULT 0,
|
|
||||||
duration_seconds INTEGER,
|
|
||||||
started_at TIMESTAMP,
|
|
||||||
ended_at TIMESTAMP,
|
|
||||||
error_message TEXT,
|
|
||||||
storage_name VARCHAR(255),
|
|
||||||
pool_name VARCHAR(255),
|
|
||||||
volume_name VARCHAR(255),
|
|
||||||
created_at TIMESTAMP NOT NULL DEFAULT NOW(),
|
|
||||||
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
|
|
||||||
);
|
|
||||||
|
|
||||||
-- Indexes for performance
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_id ON backup_jobs(job_id);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_name ON backup_jobs(job_name);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_backup_jobs_client_name ON backup_jobs(client_name);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_backup_jobs_status ON backup_jobs(status);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_backup_jobs_started_at ON backup_jobs(started_at DESC);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_type ON backup_jobs(job_type);
|
|
||||||
|
|
||||||
@@ -1,39 +0,0 @@
|
|||||||
-- AtlasOS - Calypso
|
|
||||||
-- Add Backup Permissions
|
|
||||||
-- Version: 10.0
|
|
||||||
|
|
||||||
-- Insert backup permissions
|
|
||||||
INSERT INTO permissions (name, resource, action, description) VALUES
|
|
||||||
('backup:read', 'backup', 'read', 'View backup jobs and history'),
|
|
||||||
('backup:write', 'backup', 'write', 'Create and manage backup jobs'),
|
|
||||||
('backup:manage', 'backup', 'manage', 'Full backup management')
|
|
||||||
ON CONFLICT (name) DO NOTHING;
|
|
||||||
|
|
||||||
-- Assign backup permissions to roles
|
|
||||||
|
|
||||||
-- Admin gets all backup permissions (explicitly assign since admin query in 001 only runs once)
|
|
||||||
INSERT INTO role_permissions (role_id, permission_id)
|
|
||||||
SELECT r.id, p.id
|
|
||||||
FROM roles r, permissions p
|
|
||||||
WHERE r.name = 'admin'
|
|
||||||
AND p.resource = 'backup'
|
|
||||||
ON CONFLICT DO NOTHING;
|
|
||||||
|
|
||||||
-- Operator gets read and write permissions for backup
|
|
||||||
INSERT INTO role_permissions (role_id, permission_id)
|
|
||||||
SELECT r.id, p.id
|
|
||||||
FROM roles r, permissions p
|
|
||||||
WHERE r.name = 'operator'
|
|
||||||
AND p.resource = 'backup'
|
|
||||||
AND p.action IN ('read', 'write')
|
|
||||||
ON CONFLICT DO NOTHING;
|
|
||||||
|
|
||||||
-- ReadOnly gets only read permission for backup
|
|
||||||
INSERT INTO role_permissions (role_id, permission_id)
|
|
||||||
SELECT r.id, p.id
|
|
||||||
FROM roles r, permissions p
|
|
||||||
WHERE r.name = 'readonly'
|
|
||||||
AND p.resource = 'backup'
|
|
||||||
AND p.action = 'read'
|
|
||||||
ON CONFLICT DO NOTHING;
|
|
||||||
|
|
||||||
@@ -1,209 +0,0 @@
|
|||||||
-- AtlasOS - Calypso
|
|
||||||
-- PostgreSQL Function to Sync Jobs from Bacula to Calypso
|
|
||||||
-- Version: 11.0
|
|
||||||
--
|
|
||||||
-- This function syncs jobs from Bacula database (Job table) to Calypso database (backup_jobs table)
|
|
||||||
-- Uses dblink extension to query Bacula database from Calypso database
|
|
||||||
--
|
|
||||||
-- Prerequisites:
|
|
||||||
-- 1. dblink extension must be installed: CREATE EXTENSION IF NOT EXISTS dblink;
|
|
||||||
-- 2. User must have access to both databases
|
|
||||||
-- 3. Connection parameters must be configured in the function
|
|
||||||
|
|
||||||
-- Create function to sync jobs from Bacula to Calypso
|
|
||||||
CREATE OR REPLACE FUNCTION sync_bacula_jobs(
|
|
||||||
bacula_db_name TEXT DEFAULT 'bacula',
|
|
||||||
bacula_host TEXT DEFAULT 'localhost',
|
|
||||||
bacula_port INTEGER DEFAULT 5432,
|
|
||||||
bacula_user TEXT DEFAULT 'calypso',
|
|
||||||
bacula_password TEXT DEFAULT ''
|
|
||||||
)
|
|
||||||
RETURNS TABLE(
|
|
||||||
jobs_synced INTEGER,
|
|
||||||
jobs_inserted INTEGER,
|
|
||||||
jobs_updated INTEGER,
|
|
||||||
errors INTEGER
|
|
||||||
) AS $$
|
|
||||||
DECLARE
|
|
||||||
conn_str TEXT;
|
|
||||||
jobs_count INTEGER := 0;
|
|
||||||
inserted_count INTEGER := 0;
|
|
||||||
updated_count INTEGER := 0;
|
|
||||||
error_count INTEGER := 0;
|
|
||||||
job_record RECORD;
|
|
||||||
BEGIN
|
|
||||||
-- Build dblink connection string
|
|
||||||
conn_str := format(
|
|
||||||
'dbname=%s host=%s port=%s user=%s password=%s',
|
|
||||||
bacula_db_name,
|
|
||||||
bacula_host,
|
|
||||||
bacula_port,
|
|
||||||
bacula_user,
|
|
||||||
bacula_password
|
|
||||||
);
|
|
||||||
|
|
||||||
-- Query jobs from Bacula database using dblink
|
|
||||||
FOR job_record IN
|
|
||||||
SELECT * FROM dblink(
|
|
||||||
conn_str,
|
|
||||||
$QUERY$
|
|
||||||
SELECT
|
|
||||||
j.JobId,
|
|
||||||
j.Name as job_name,
|
|
||||||
COALESCE(c.Name, 'unknown') as client_name,
|
|
||||||
CASE
|
|
||||||
WHEN j.Type = 'B' THEN 'Backup'
|
|
||||||
WHEN j.Type = 'R' THEN 'Restore'
|
|
||||||
WHEN j.Type = 'V' THEN 'Verify'
|
|
||||||
WHEN j.Type = 'C' THEN 'Copy'
|
|
||||||
WHEN j.Type = 'M' THEN 'Migrate'
|
|
||||||
ELSE 'Backup'
|
|
||||||
END as job_type,
|
|
||||||
CASE
|
|
||||||
WHEN j.Level = 'F' THEN 'Full'
|
|
||||||
WHEN j.Level = 'I' THEN 'Incremental'
|
|
||||||
WHEN j.Level = 'D' THEN 'Differential'
|
|
||||||
WHEN j.Level = 'S' THEN 'Since'
|
|
||||||
ELSE 'Full'
|
|
||||||
END as job_level,
|
|
||||||
CASE
|
|
||||||
WHEN j.JobStatus = 'T' THEN 'Running'
|
|
||||||
WHEN j.JobStatus = 'C' THEN 'Completed'
|
|
||||||
WHEN j.JobStatus = 'f' OR j.JobStatus = 'F' THEN 'Failed'
|
|
||||||
WHEN j.JobStatus = 'A' THEN 'Canceled'
|
|
||||||
WHEN j.JobStatus = 'W' THEN 'Waiting'
|
|
||||||
ELSE 'Waiting'
|
|
||||||
END as status,
|
|
||||||
COALESCE(j.JobBytes, 0) as bytes_written,
|
|
||||||
COALESCE(j.JobFiles, 0) as files_written,
|
|
||||||
j.StartTime as started_at,
|
|
||||||
j.EndTime as ended_at,
|
|
||||||
CASE
|
|
||||||
WHEN j.EndTime IS NOT NULL AND j.StartTime IS NOT NULL
|
|
||||||
THEN EXTRACT(EPOCH FROM (j.EndTime - j.StartTime))::INTEGER
|
|
||||||
ELSE NULL
|
|
||||||
END as duration_seconds
|
|
||||||
FROM Job j
|
|
||||||
LEFT JOIN Client c ON j.ClientId = c.ClientId
|
|
||||||
ORDER BY j.StartTime DESC
|
|
||||||
LIMIT 1000
|
|
||||||
$QUERY$
|
|
||||||
) AS t(
|
|
||||||
job_id INTEGER,
|
|
||||||
job_name TEXT,
|
|
||||||
client_name TEXT,
|
|
||||||
job_type TEXT,
|
|
||||||
job_level TEXT,
|
|
||||||
status TEXT,
|
|
||||||
bytes_written BIGINT,
|
|
||||||
files_written INTEGER,
|
|
||||||
started_at TIMESTAMP,
|
|
||||||
ended_at TIMESTAMP,
|
|
||||||
duration_seconds INTEGER
|
|
||||||
)
|
|
||||||
LOOP
|
|
||||||
BEGIN
|
|
||||||
-- Check if job already exists (before insert/update)
|
|
||||||
IF EXISTS (SELECT 1 FROM backup_jobs WHERE job_id = job_record.job_id) THEN
|
|
||||||
updated_count := updated_count + 1;
|
|
||||||
ELSE
|
|
||||||
inserted_count := inserted_count + 1;
|
|
||||||
END IF;
|
|
||||||
|
|
||||||
-- Upsert job to backup_jobs table
|
|
||||||
INSERT INTO backup_jobs (
|
|
||||||
job_id, job_name, client_name, job_type, job_level, status,
|
|
||||||
bytes_written, files_written, started_at, ended_at, duration_seconds,
|
|
||||||
updated_at
|
|
||||||
) VALUES (
|
|
||||||
job_record.job_id,
|
|
||||||
job_record.job_name,
|
|
||||||
job_record.client_name,
|
|
||||||
job_record.job_type,
|
|
||||||
job_record.job_level,
|
|
||||||
job_record.status,
|
|
||||||
job_record.bytes_written,
|
|
||||||
job_record.files_written,
|
|
||||||
job_record.started_at,
|
|
||||||
job_record.ended_at,
|
|
||||||
job_record.duration_seconds,
|
|
||||||
NOW()
|
|
||||||
)
|
|
||||||
ON CONFLICT (job_id) DO UPDATE SET
|
|
||||||
job_name = EXCLUDED.job_name,
|
|
||||||
client_name = EXCLUDED.client_name,
|
|
||||||
job_type = EXCLUDED.job_type,
|
|
||||||
job_level = EXCLUDED.job_level,
|
|
||||||
status = EXCLUDED.status,
|
|
||||||
bytes_written = EXCLUDED.bytes_written,
|
|
||||||
files_written = EXCLUDED.files_written,
|
|
||||||
started_at = EXCLUDED.started_at,
|
|
||||||
ended_at = EXCLUDED.ended_at,
|
|
||||||
duration_seconds = EXCLUDED.duration_seconds,
|
|
||||||
updated_at = NOW();
|
|
||||||
|
|
||||||
jobs_count := jobs_count + 1;
|
|
||||||
EXCEPTION
|
|
||||||
WHEN OTHERS THEN
|
|
||||||
error_count := error_count + 1;
|
|
||||||
-- Log error but continue with next job
|
|
||||||
RAISE WARNING 'Error syncing job %: %', job_record.job_id, SQLERRM;
|
|
||||||
END;
|
|
||||||
END LOOP;
|
|
||||||
|
|
||||||
-- Return summary
|
|
||||||
RETURN QUERY SELECT jobs_count, inserted_count, updated_count, error_count;
|
|
||||||
END;
|
|
||||||
$$ LANGUAGE plpgsql;
|
|
||||||
|
|
||||||
-- Create a simpler version that uses current database connection settings
|
|
||||||
-- This version assumes Bacula is on same host/port with same user
|
|
||||||
CREATE OR REPLACE FUNCTION sync_bacula_jobs_simple()
|
|
||||||
RETURNS TABLE(
|
|
||||||
jobs_synced INTEGER,
|
|
||||||
jobs_inserted INTEGER,
|
|
||||||
jobs_updated INTEGER,
|
|
||||||
errors INTEGER
|
|
||||||
) AS $$
|
|
||||||
DECLARE
|
|
||||||
current_user_name TEXT;
|
|
||||||
current_host TEXT;
|
|
||||||
current_port INTEGER;
|
|
||||||
current_db TEXT;
|
|
||||||
BEGIN
|
|
||||||
-- Get current connection info
|
|
||||||
SELECT
|
|
||||||
current_user,
|
|
||||||
COALESCE(inet_server_addr()::TEXT, 'localhost'),
|
|
||||||
COALESCE(inet_server_port(), 5432),
|
|
||||||
current_database()
|
|
||||||
INTO
|
|
||||||
current_user_name,
|
|
||||||
current_host,
|
|
||||||
current_port,
|
|
||||||
current_db;
|
|
||||||
|
|
||||||
-- Call main function with current connection settings
|
|
||||||
-- Note: password needs to be passed or configured in .pgpass
|
|
||||||
RETURN QUERY
|
|
||||||
SELECT * FROM sync_bacula_jobs(
|
|
||||||
'bacula', -- Try 'bacula' first
|
|
||||||
current_host,
|
|
||||||
current_port,
|
|
||||||
current_user_name,
|
|
||||||
'' -- Empty password - will use .pgpass or peer authentication
|
|
||||||
);
|
|
||||||
END;
|
|
||||||
$$ LANGUAGE plpgsql;
|
|
||||||
|
|
||||||
-- Grant execute permission to calypso user
|
|
||||||
GRANT EXECUTE ON FUNCTION sync_bacula_jobs(TEXT, TEXT, INTEGER, TEXT, TEXT) TO calypso;
|
|
||||||
GRANT EXECUTE ON FUNCTION sync_bacula_jobs_simple() TO calypso;
|
|
||||||
|
|
||||||
-- Create index if not exists (should already exist from migration 009)
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_id ON backup_jobs(job_id);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_backup_jobs_updated_at ON backup_jobs(updated_at);
|
|
||||||
|
|
||||||
COMMENT ON FUNCTION sync_bacula_jobs IS 'Syncs jobs from Bacula database to Calypso backup_jobs table using dblink';
|
|
||||||
COMMENT ON FUNCTION sync_bacula_jobs_simple IS 'Simplified version that uses current connection settings (requires .pgpass for password)';
|
|
||||||
|
|
||||||
@@ -1,209 +0,0 @@
|
|||||||
-- AtlasOS - Calypso
|
|
||||||
-- PostgreSQL Function to Sync Jobs from Bacula to Calypso
|
|
||||||
-- Version: 11.0
|
|
||||||
--
|
|
||||||
-- This function syncs jobs from Bacula database (Job table) to Calypso database (backup_jobs table)
|
|
||||||
-- Uses dblink extension to query Bacula database from Calypso database
|
|
||||||
--
|
|
||||||
-- Prerequisites:
|
|
||||||
-- 1. dblink extension must be installed: CREATE EXTENSION IF NOT EXISTS dblink;
|
|
||||||
-- 2. User must have access to both databases
|
|
||||||
-- 3. Connection parameters must be configured in the function
|
|
||||||
|
|
||||||
-- Create function to sync jobs from Bacula to Calypso
|
|
||||||
-- sync_bacula_jobs pulls up to 1000 of the most recent jobs from the
-- Bacula catalog (Job joined to Client) over dblink and upserts them into
-- the Calypso backup_jobs table.
--
-- Returns a single row of counters:
--   jobs_synced   - rows successfully upserted
--   jobs_inserted - rows that did not previously exist in backup_jobs
--   jobs_updated  - rows that already existed and were refreshed
--   errors        - rows skipped because the upsert raised an exception
--
-- BUG FIX: the remote query was dollar-quoted with $$ inside a function
-- body that is itself $$-quoted; the inner $$ terminates the outer quote
-- and the CREATE FUNCTION fails to parse. Distinct tags are used now:
-- $func$ for the body and $remote$ for the dblink query text.
CREATE OR REPLACE FUNCTION sync_bacula_jobs(
    bacula_db_name TEXT DEFAULT 'bacula',
    bacula_host TEXT DEFAULT 'localhost',
    bacula_port INTEGER DEFAULT 5432,
    bacula_user TEXT DEFAULT 'calypso',
    bacula_password TEXT DEFAULT ''
)
RETURNS TABLE(
    jobs_synced INTEGER,
    jobs_inserted INTEGER,
    jobs_updated INTEGER,
    errors INTEGER
) AS $func$
DECLARE
    conn_str TEXT;
    jobs_count INTEGER := 0;
    inserted_count INTEGER := 0;
    updated_count INTEGER := 0;
    error_count INTEGER := 0;
    job_record RECORD;
BEGIN
    -- Build the dblink/libpq connection string.
    -- NOTE(review): values containing spaces or quotes would need libpq
    -- quoting; callers are expected to pass simple identifiers.
    conn_str := format(
        'dbname=%s host=%s port=%s user=%s password=%s',
        bacula_db_name,
        bacula_host,
        bacula_port,
        bacula_user,
        bacula_password
    );

    -- Stream the remote rows; the CASE expressions normalize Bacula's
    -- single-character type/level/status codes into Calypso's enums,
    -- defaulting to a safe value for any unknown code.
    FOR job_record IN
        SELECT * FROM dblink(
            conn_str,
            $remote$
            SELECT
                j.JobId,
                j.Name as job_name,
                COALESCE(c.Name, 'unknown') as client_name,
                CASE
                    WHEN j.Type = 'B' THEN 'Backup'
                    WHEN j.Type = 'R' THEN 'Restore'
                    WHEN j.Type = 'V' THEN 'Verify'
                    WHEN j.Type = 'C' THEN 'Copy'
                    WHEN j.Type = 'M' THEN 'Migrate'
                    ELSE 'Backup'
                END as job_type,
                CASE
                    WHEN j.Level = 'F' THEN 'Full'
                    WHEN j.Level = 'I' THEN 'Incremental'
                    WHEN j.Level = 'D' THEN 'Differential'
                    WHEN j.Level = 'S' THEN 'Since'
                    ELSE 'Full'
                END as job_level,
                CASE
                    WHEN j.JobStatus = 'T' THEN 'Running'
                    WHEN j.JobStatus = 'C' THEN 'Completed'
                    WHEN j.JobStatus = 'f' OR j.JobStatus = 'F' THEN 'Failed'
                    WHEN j.JobStatus = 'A' THEN 'Canceled'
                    WHEN j.JobStatus = 'W' THEN 'Waiting'
                    ELSE 'Waiting'
                END as status,
                COALESCE(j.JobBytes, 0) as bytes_written,
                COALESCE(j.JobFiles, 0) as files_written,
                j.StartTime as started_at,
                j.EndTime as ended_at,
                CASE
                    WHEN j.EndTime IS NOT NULL AND j.StartTime IS NOT NULL
                    THEN EXTRACT(EPOCH FROM (j.EndTime - j.StartTime))::INTEGER
                    ELSE NULL
                END as duration_seconds
            FROM Job j
            LEFT JOIN Client c ON j.ClientId = c.ClientId
            ORDER BY j.StartTime DESC
            LIMIT 1000
            $remote$
        ) AS t(
            job_id INTEGER,
            job_name TEXT,
            client_name TEXT,
            job_type TEXT,
            job_level TEXT,
            status TEXT,
            bytes_written BIGINT,
            files_written INTEGER,
            started_at TIMESTAMP,
            ended_at TIMESTAMP,
            duration_seconds INTEGER
        )
    LOOP
        BEGIN
            -- Classify as insert vs update BEFORE the upsert so the
            -- counters reflect what the upsert is about to do.
            IF EXISTS (SELECT 1 FROM backup_jobs WHERE job_id = job_record.job_id) THEN
                updated_count := updated_count + 1;
            ELSE
                inserted_count := inserted_count + 1;
            END IF;

            -- Upsert keyed on job_id; every synced column is refreshed.
            INSERT INTO backup_jobs (
                job_id, job_name, client_name, job_type, job_level, status,
                bytes_written, files_written, started_at, ended_at, duration_seconds,
                updated_at
            ) VALUES (
                job_record.job_id,
                job_record.job_name,
                job_record.client_name,
                job_record.job_type,
                job_record.job_level,
                job_record.status,
                job_record.bytes_written,
                job_record.files_written,
                job_record.started_at,
                job_record.ended_at,
                job_record.duration_seconds,
                NOW()
            )
            ON CONFLICT (job_id) DO UPDATE SET
                job_name = EXCLUDED.job_name,
                client_name = EXCLUDED.client_name,
                job_type = EXCLUDED.job_type,
                job_level = EXCLUDED.job_level,
                status = EXCLUDED.status,
                bytes_written = EXCLUDED.bytes_written,
                files_written = EXCLUDED.files_written,
                started_at = EXCLUDED.started_at,
                ended_at = EXCLUDED.ended_at,
                duration_seconds = EXCLUDED.duration_seconds,
                updated_at = NOW();

            jobs_count := jobs_count + 1;
        EXCEPTION
            WHEN OTHERS THEN
                -- Per-row failures are counted but do not abort the sync.
                error_count := error_count + 1;
                RAISE WARNING 'Error syncing job %: %', job_record.job_id, SQLERRM;
        END;
    END LOOP;

    -- Return summary counters as the single result row.
    RETURN QUERY SELECT jobs_count, inserted_count, updated_count, error_count;
END;
$func$ LANGUAGE plpgsql;
|
|
||||||
|
|
||||||
-- Create a simpler version that uses current database connection settings
|
|
||||||
-- This version assumes Bacula is on same host/port with same user
|
|
||||||
-- sync_bacula_jobs_simple: no-argument wrapper over sync_bacula_jobs that
-- reuses the current session's connection settings. The password is left
-- empty, so .pgpass or peer authentication must be configured.
CREATE OR REPLACE FUNCTION sync_bacula_jobs_simple()
RETURNS TABLE(
    jobs_synced INTEGER,
    jobs_inserted INTEGER,
    jobs_updated INTEGER,
    errors INTEGER
) AS $$
DECLARE
    current_user_name TEXT;
    current_host TEXT;
    current_port INTEGER;
    current_db TEXT;
BEGIN
    -- Capture connection details of the session we are running in.
    -- inet_server_addr()/inet_server_port() are NULL on Unix-socket
    -- connections, hence the fallbacks.
    current_user_name := current_user;
    current_host      := COALESCE(inet_server_addr()::TEXT, 'localhost');
    current_port      := COALESCE(inet_server_port(), 5432);
    current_db        := current_database();

    -- Delegate to the parameterized sync against the 'bacula' catalog,
    -- with an empty password (resolved via .pgpass or peer auth).
    RETURN QUERY
    SELECT * FROM sync_bacula_jobs(
        'bacula',
        current_host,
        current_port,
        current_user_name,
        ''
    );
END;
$$ LANGUAGE plpgsql;
|
|
||||||
|
|
||||||
-- Allow the application role to call both sync entry points.
-- Grant execute permission to calypso user
GRANT EXECUTE ON FUNCTION sync_bacula_jobs(TEXT, TEXT, INTEGER, TEXT, TEXT) TO calypso;
GRANT EXECUTE ON FUNCTION sync_bacula_jobs_simple() TO calypso;

-- job_id backs the ON CONFLICT upsert key; updated_at supports
-- incremental-sync queries.
-- Create index if not exists (should already exist from migration 009)
CREATE INDEX IF NOT EXISTS idx_backup_jobs_job_id ON backup_jobs(job_id);
CREATE INDEX IF NOT EXISTS idx_backup_jobs_updated_at ON backup_jobs(updated_at);

COMMENT ON FUNCTION sync_bacula_jobs IS 'Syncs jobs from Bacula database to Calypso backup_jobs table using dblink';
COMMENT ON FUNCTION sync_bacula_jobs_simple IS 'Simplified version that uses current connection settings (requires .pgpass for password)';
|
|
||||||
|
|
||||||
@@ -51,13 +51,6 @@ func cacheMiddleware(cfg CacheConfig, cache *cache.Cache) gin.HandlerFunc {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Don't cache VTL endpoints - they change frequently
|
|
||||||
path := c.Request.URL.Path
|
|
||||||
if strings.HasPrefix(path, "/api/v1/tape/vtl/") {
|
|
||||||
c.Next()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate cache key from request path and query string
|
// Generate cache key from request path and query string
|
||||||
keyParts := []string{c.Request.URL.Path}
|
keyParts := []string{c.Request.URL.Path}
|
||||||
if c.Request.URL.RawQuery != "" {
|
if c.Request.URL.RawQuery != "" {
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
|
|
||||||
"github.com/atlasos/calypso/internal/audit"
|
"github.com/atlasos/calypso/internal/audit"
|
||||||
"github.com/atlasos/calypso/internal/auth"
|
"github.com/atlasos/calypso/internal/auth"
|
||||||
"github.com/atlasos/calypso/internal/backup"
|
|
||||||
"github.com/atlasos/calypso/internal/common/cache"
|
"github.com/atlasos/calypso/internal/common/cache"
|
||||||
"github.com/atlasos/calypso/internal/common/config"
|
"github.com/atlasos/calypso/internal/common/config"
|
||||||
"github.com/atlasos/calypso/internal/common/database"
|
"github.com/atlasos/calypso/internal/common/database"
|
||||||
@@ -206,32 +205,10 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng
|
|||||||
scstGroup.GET("/targets", scstHandler.ListTargets)
|
scstGroup.GET("/targets", scstHandler.ListTargets)
|
||||||
scstGroup.GET("/targets/:id", scstHandler.GetTarget)
|
scstGroup.GET("/targets/:id", scstHandler.GetTarget)
|
||||||
scstGroup.POST("/targets", scstHandler.CreateTarget)
|
scstGroup.POST("/targets", scstHandler.CreateTarget)
|
||||||
scstGroup.POST("/targets/:id/luns", requirePermission("iscsi", "write"), scstHandler.AddLUN)
|
scstGroup.POST("/targets/:id/luns", scstHandler.AddLUN)
|
||||||
scstGroup.DELETE("/targets/:id/luns/:lunId", requirePermission("iscsi", "write"), scstHandler.RemoveLUN)
|
|
||||||
scstGroup.POST("/targets/:id/initiators", scstHandler.AddInitiator)
|
scstGroup.POST("/targets/:id/initiators", scstHandler.AddInitiator)
|
||||||
scstGroup.POST("/targets/:id/enable", scstHandler.EnableTarget)
|
|
||||||
scstGroup.POST("/targets/:id/disable", scstHandler.DisableTarget)
|
|
||||||
scstGroup.DELETE("/targets/:id", requirePermission("iscsi", "write"), scstHandler.DeleteTarget)
|
|
||||||
scstGroup.GET("/initiators", scstHandler.ListAllInitiators)
|
|
||||||
scstGroup.GET("/initiators/:id", scstHandler.GetInitiator)
|
|
||||||
scstGroup.DELETE("/initiators/:id", scstHandler.RemoveInitiator)
|
|
||||||
scstGroup.GET("/extents", scstHandler.ListExtents)
|
|
||||||
scstGroup.POST("/extents", scstHandler.CreateExtent)
|
|
||||||
scstGroup.DELETE("/extents/:device", scstHandler.DeleteExtent)
|
|
||||||
scstGroup.POST("/config/apply", scstHandler.ApplyConfig)
|
scstGroup.POST("/config/apply", scstHandler.ApplyConfig)
|
||||||
scstGroup.GET("/handlers", scstHandler.ListHandlers)
|
scstGroup.GET("/handlers", scstHandler.ListHandlers)
|
||||||
scstGroup.GET("/portals", scstHandler.ListPortals)
|
|
||||||
scstGroup.GET("/portals/:id", scstHandler.GetPortal)
|
|
||||||
scstGroup.POST("/portals", scstHandler.CreatePortal)
|
|
||||||
scstGroup.PUT("/portals/:id", scstHandler.UpdatePortal)
|
|
||||||
scstGroup.DELETE("/portals/:id", scstHandler.DeletePortal)
|
|
||||||
// Initiator Groups routes
|
|
||||||
scstGroup.GET("/initiator-groups", scstHandler.ListAllInitiatorGroups)
|
|
||||||
scstGroup.GET("/initiator-groups/:id", scstHandler.GetInitiatorGroup)
|
|
||||||
scstGroup.POST("/initiator-groups", requirePermission("iscsi", "write"), scstHandler.CreateInitiatorGroup)
|
|
||||||
scstGroup.PUT("/initiator-groups/:id", requirePermission("iscsi", "write"), scstHandler.UpdateInitiatorGroup)
|
|
||||||
scstGroup.DELETE("/initiator-groups/:id", requirePermission("iscsi", "write"), scstHandler.DeleteInitiatorGroup)
|
|
||||||
scstGroup.POST("/initiator-groups/:id/initiators", requirePermission("iscsi", "write"), scstHandler.AddInitiatorToGroup)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Physical Tape Libraries
|
// Physical Tape Libraries
|
||||||
@@ -269,18 +246,7 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng
|
|||||||
}
|
}
|
||||||
|
|
||||||
// System Management
|
// System Management
|
||||||
systemService := system.NewService(log)
|
|
||||||
systemHandler := system.NewHandler(log, tasks.NewEngine(db, log))
|
systemHandler := system.NewHandler(log, tasks.NewEngine(db, log))
|
||||||
// Set service in handler (if handler needs direct access)
|
|
||||||
// Note: Handler already has service via NewHandler, but we need to ensure it's the same instance
|
|
||||||
|
|
||||||
// Start network monitoring with RRD
|
|
||||||
if err := systemService.StartNetworkMonitoring(context.Background()); err != nil {
|
|
||||||
log.Warn("Failed to start network monitoring", "error", err)
|
|
||||||
} else {
|
|
||||||
log.Info("Network monitoring started with RRD")
|
|
||||||
}
|
|
||||||
|
|
||||||
systemGroup := protected.Group("/system")
|
systemGroup := protected.Group("/system")
|
||||||
systemGroup.Use(requirePermission("system", "read"))
|
systemGroup.Use(requirePermission("system", "read"))
|
||||||
{
|
{
|
||||||
@@ -288,88 +254,19 @@ func NewRouter(cfg *config.Config, db *database.DB, log *logger.Logger) *gin.Eng
|
|||||||
systemGroup.GET("/services/:name", systemHandler.GetServiceStatus)
|
systemGroup.GET("/services/:name", systemHandler.GetServiceStatus)
|
||||||
systemGroup.POST("/services/:name/restart", systemHandler.RestartService)
|
systemGroup.POST("/services/:name/restart", systemHandler.RestartService)
|
||||||
systemGroup.GET("/services/:name/logs", systemHandler.GetServiceLogs)
|
systemGroup.GET("/services/:name/logs", systemHandler.GetServiceLogs)
|
||||||
systemGroup.GET("/logs", systemHandler.GetSystemLogs)
|
|
||||||
systemGroup.GET("/network/throughput", systemHandler.GetNetworkThroughput)
|
|
||||||
systemGroup.POST("/support-bundle", systemHandler.GenerateSupportBundle)
|
systemGroup.POST("/support-bundle", systemHandler.GenerateSupportBundle)
|
||||||
systemGroup.GET("/interfaces", systemHandler.ListNetworkInterfaces)
|
|
||||||
systemGroup.PUT("/interfaces/:name", systemHandler.UpdateNetworkInterface)
|
|
||||||
systemGroup.GET("/ntp", systemHandler.GetNTPSettings)
|
|
||||||
systemGroup.POST("/ntp", systemHandler.SaveNTPSettings)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// IAM routes - GetUser can be accessed by user viewing own profile or admin
|
// IAM (admin only)
|
||||||
iamHandler := iam.NewHandler(db, cfg, log)
|
iamHandler := iam.NewHandler(db, cfg, log)
|
||||||
protected.GET("/iam/users/:id", iamHandler.GetUser)
|
|
||||||
|
|
||||||
// IAM admin routes
|
|
||||||
iamGroup := protected.Group("/iam")
|
iamGroup := protected.Group("/iam")
|
||||||
iamGroup.Use(requireRole("admin"))
|
iamGroup.Use(requireRole("admin"))
|
||||||
{
|
{
|
||||||
iamGroup.GET("/users", iamHandler.ListUsers)
|
iamGroup.GET("/users", iamHandler.ListUsers)
|
||||||
|
iamGroup.GET("/users/:id", iamHandler.GetUser)
|
||||||
iamGroup.POST("/users", iamHandler.CreateUser)
|
iamGroup.POST("/users", iamHandler.CreateUser)
|
||||||
iamGroup.PUT("/users/:id", iamHandler.UpdateUser)
|
iamGroup.PUT("/users/:id", iamHandler.UpdateUser)
|
||||||
iamGroup.DELETE("/users/:id", iamHandler.DeleteUser)
|
iamGroup.DELETE("/users/:id", iamHandler.DeleteUser)
|
||||||
// Roles routes
|
|
||||||
iamGroup.GET("/roles", iamHandler.ListRoles)
|
|
||||||
iamGroup.GET("/roles/:id", iamHandler.GetRole)
|
|
||||||
iamGroup.POST("/roles", iamHandler.CreateRole)
|
|
||||||
iamGroup.PUT("/roles/:id", iamHandler.UpdateRole)
|
|
||||||
iamGroup.DELETE("/roles/:id", iamHandler.DeleteRole)
|
|
||||||
iamGroup.GET("/roles/:id/permissions", iamHandler.GetRolePermissions)
|
|
||||||
iamGroup.POST("/roles/:id/permissions", iamHandler.AssignPermissionToRole)
|
|
||||||
iamGroup.DELETE("/roles/:id/permissions", iamHandler.RemovePermissionFromRole)
|
|
||||||
|
|
||||||
// Permissions routes
|
|
||||||
iamGroup.GET("/permissions", iamHandler.ListPermissions)
|
|
||||||
|
|
||||||
// User role/group assignment
|
|
||||||
iamGroup.POST("/users/:id/roles", iamHandler.AssignRoleToUser)
|
|
||||||
iamGroup.DELETE("/users/:id/roles", iamHandler.RemoveRoleFromUser)
|
|
||||||
iamGroup.POST("/users/:id/groups", iamHandler.AssignGroupToUser)
|
|
||||||
iamGroup.DELETE("/users/:id/groups", iamHandler.RemoveGroupFromUser)
|
|
||||||
|
|
||||||
// Groups routes
|
|
||||||
iamGroup.GET("/groups", iamHandler.ListGroups)
|
|
||||||
iamGroup.GET("/groups/:id", iamHandler.GetGroup)
|
|
||||||
iamGroup.POST("/groups", iamHandler.CreateGroup)
|
|
||||||
iamGroup.PUT("/groups/:id", iamHandler.UpdateGroup)
|
|
||||||
iamGroup.DELETE("/groups/:id", iamHandler.DeleteGroup)
|
|
||||||
iamGroup.POST("/groups/:id/users", iamHandler.AddUserToGroup)
|
|
||||||
iamGroup.DELETE("/groups/:id/users/:user_id", iamHandler.RemoveUserFromGroup)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Backup Jobs
|
|
||||||
backupService := backup.NewService(db, log)
|
|
||||||
// Set up direct connection to Bacula database
|
|
||||||
// Try common Bacula database names
|
|
||||||
baculaDBName := "bacula" // Default
|
|
||||||
if err := backupService.SetBaculaDatabase(cfg.Database, baculaDBName); err != nil {
|
|
||||||
log.Warn("Failed to connect to Bacula database, trying 'bareos'", "error", err)
|
|
||||||
// Try 'bareos' as alternative
|
|
||||||
if err := backupService.SetBaculaDatabase(cfg.Database, "bareos"); err != nil {
|
|
||||||
log.Error("Failed to connect to Bacula database", "error", err, "tried", []string{"bacula", "bareos"})
|
|
||||||
// Continue anyway - will fallback to bconsole
|
|
||||||
}
|
|
||||||
}
|
|
||||||
backupHandler := backup.NewHandler(backupService, log)
|
|
||||||
backupGroup := protected.Group("/backup")
|
|
||||||
backupGroup.Use(requirePermission("backup", "read"))
|
|
||||||
{
|
|
||||||
backupGroup.GET("/dashboard/stats", backupHandler.GetDashboardStats)
|
|
||||||
backupGroup.GET("/jobs", backupHandler.ListJobs)
|
|
||||||
backupGroup.GET("/jobs/:id", backupHandler.GetJob)
|
|
||||||
backupGroup.POST("/jobs", requirePermission("backup", "write"), backupHandler.CreateJob)
|
|
||||||
backupGroup.GET("/clients", backupHandler.ListClients)
|
|
||||||
backupGroup.GET("/storage/pools", backupHandler.ListStoragePools)
|
|
||||||
backupGroup.POST("/storage/pools", requirePermission("backup", "write"), backupHandler.CreateStoragePool)
|
|
||||||
backupGroup.DELETE("/storage/pools/:id", requirePermission("backup", "write"), backupHandler.DeleteStoragePool)
|
|
||||||
backupGroup.GET("/storage/volumes", backupHandler.ListStorageVolumes)
|
|
||||||
backupGroup.POST("/storage/volumes", requirePermission("backup", "write"), backupHandler.CreateStorageVolume)
|
|
||||||
backupGroup.PUT("/storage/volumes/:id", requirePermission("backup", "write"), backupHandler.UpdateStorageVolume)
|
|
||||||
backupGroup.DELETE("/storage/volumes/:id", requirePermission("backup", "write"), backupHandler.DeleteStorageVolume)
|
|
||||||
backupGroup.GET("/media", backupHandler.ListMedia)
|
|
||||||
backupGroup.GET("/storage/daemons", backupHandler.ListStorageDaemons)
|
|
||||||
backupGroup.POST("/console/execute", requirePermission("backup", "write"), backupHandler.ExecuteBconsoleCommand)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Monitoring
|
// Monitoring
|
||||||
|
|||||||
@@ -1,221 +0,0 @@
|
|||||||
package iam
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/atlasos/calypso/internal/common/database"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Group represents a user group row from the groups table, optionally
// decorated with membership counts (see GetGroupByID).
type Group struct {
	ID          string    // primary key (groups.id)
	Name        string    // unique group name
	Description string    // human-readable description
	IsSystem    bool      // true for built-in groups that should not be deleted
	CreatedAt   time.Time // row creation timestamp
	UpdatedAt   time.Time // last modification timestamp
	UserCount   int       // number of users in the group; populated by GetGroupByID only
	RoleCount   int       // number of roles attached to the group; populated by GetGroupByID only
}
|
|
||||||
|
|
||||||
// GetGroupByID retrieves a single group by its primary key and decorates
// it with user/role membership counts. Returns the database error (e.g.
// sql.ErrNoRows) if the group itself cannot be loaded.
func GetGroupByID(db *database.DB, groupID string) (*Group, error) {
	query := `
		SELECT id, name, description, is_system, created_at, updated_at
		FROM groups
		WHERE id = $1
	`

	var group Group
	err := db.QueryRow(query, groupID).Scan(
		&group.ID, &group.Name, &group.Description, &group.IsSystem,
		&group.CreatedAt, &group.UpdatedAt,
	)
	if err != nil {
		return nil, err
	}

	// Get user count.
	// NOTE(review): the Scan error is deliberately ignored — the counts are
	// best-effort decoration and default to 0 on failure. Confirm callers
	// are happy with silently-zero counts.
	var userCount int
	db.QueryRow("SELECT COUNT(*) FROM user_groups WHERE group_id = $1", groupID).Scan(&userCount)
	group.UserCount = userCount

	// Get role count (same best-effort semantics as the user count).
	var roleCount int
	db.QueryRow("SELECT COUNT(*) FROM group_roles WHERE group_id = $1", groupID).Scan(&roleCount)
	group.RoleCount = roleCount

	return &group, nil
}
|
|
||||||
|
|
||||||
// GetGroupByName retrieves a group by name
|
|
||||||
func GetGroupByName(db *database.DB, name string) (*Group, error) {
|
|
||||||
query := `
|
|
||||||
SELECT id, name, description, is_system, created_at, updated_at
|
|
||||||
FROM groups
|
|
||||||
WHERE name = $1
|
|
||||||
`
|
|
||||||
|
|
||||||
var group Group
|
|
||||||
err := db.QueryRow(query, name).Scan(
|
|
||||||
&group.ID, &group.Name, &group.Description, &group.IsSystem,
|
|
||||||
&group.CreatedAt, &group.UpdatedAt,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &group, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUserGroups retrieves all groups for a user
|
|
||||||
func GetUserGroups(db *database.DB, userID string) ([]string, error) {
|
|
||||||
query := `
|
|
||||||
SELECT g.name
|
|
||||||
FROM groups g
|
|
||||||
INNER JOIN user_groups ug ON g.id = ug.group_id
|
|
||||||
WHERE ug.user_id = $1
|
|
||||||
ORDER BY g.name
|
|
||||||
`
|
|
||||||
|
|
||||||
rows, err := db.Query(query, userID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
var groups []string
|
|
||||||
for rows.Next() {
|
|
||||||
var groupName string
|
|
||||||
if err := rows.Scan(&groupName); err != nil {
|
|
||||||
return []string{}, err
|
|
||||||
}
|
|
||||||
groups = append(groups, groupName)
|
|
||||||
}
|
|
||||||
|
|
||||||
if groups == nil {
|
|
||||||
groups = []string{}
|
|
||||||
}
|
|
||||||
return groups, rows.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetGroupUsers retrieves all users in a group
|
|
||||||
func GetGroupUsers(db *database.DB, groupID string) ([]string, error) {
|
|
||||||
query := `
|
|
||||||
SELECT u.id
|
|
||||||
FROM users u
|
|
||||||
INNER JOIN user_groups ug ON u.id = ug.user_id
|
|
||||||
WHERE ug.group_id = $1
|
|
||||||
ORDER BY u.username
|
|
||||||
`
|
|
||||||
|
|
||||||
rows, err := db.Query(query, groupID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
var userIDs []string
|
|
||||||
for rows.Next() {
|
|
||||||
var userID string
|
|
||||||
if err := rows.Scan(&userID); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
userIDs = append(userIDs, userID)
|
|
||||||
}
|
|
||||||
|
|
||||||
return userIDs, rows.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetGroupRoles retrieves all roles for a group
|
|
||||||
func GetGroupRoles(db *database.DB, groupID string) ([]string, error) {
|
|
||||||
query := `
|
|
||||||
SELECT r.name
|
|
||||||
FROM roles r
|
|
||||||
INNER JOIN group_roles gr ON r.id = gr.role_id
|
|
||||||
WHERE gr.group_id = $1
|
|
||||||
ORDER BY r.name
|
|
||||||
`
|
|
||||||
|
|
||||||
rows, err := db.Query(query, groupID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
var roles []string
|
|
||||||
for rows.Next() {
|
|
||||||
var role string
|
|
||||||
if err := rows.Scan(&role); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
roles = append(roles, role)
|
|
||||||
}
|
|
||||||
|
|
||||||
return roles, rows.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddUserToGroup adds a user to a group
|
|
||||||
func AddUserToGroup(db *database.DB, userID, groupID, assignedBy string) error {
|
|
||||||
query := `
|
|
||||||
INSERT INTO user_groups (user_id, group_id, assigned_by)
|
|
||||||
VALUES ($1, $2, $3)
|
|
||||||
ON CONFLICT (user_id, group_id) DO NOTHING
|
|
||||||
`
|
|
||||||
_, err := db.Exec(query, userID, groupID, assignedBy)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveUserFromGroup removes a user from a group
|
|
||||||
func RemoveUserFromGroup(db *database.DB, userID, groupID string) error {
|
|
||||||
query := `DELETE FROM user_groups WHERE user_id = $1 AND group_id = $2`
|
|
||||||
_, err := db.Exec(query, userID, groupID)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddRoleToGroup adds a role to a group
|
|
||||||
func AddRoleToGroup(db *database.DB, groupID, roleID string) error {
|
|
||||||
query := `
|
|
||||||
INSERT INTO group_roles (group_id, role_id)
|
|
||||||
VALUES ($1, $2)
|
|
||||||
ON CONFLICT (group_id, role_id) DO NOTHING
|
|
||||||
`
|
|
||||||
_, err := db.Exec(query, groupID, roleID)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveRoleFromGroup removes a role from a group
|
|
||||||
func RemoveRoleFromGroup(db *database.DB, groupID, roleID string) error {
|
|
||||||
query := `DELETE FROM group_roles WHERE group_id = $1 AND role_id = $2`
|
|
||||||
_, err := db.Exec(query, groupID, roleID)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUserRolesFromGroups retrieves all roles for a user via groups
|
|
||||||
func GetUserRolesFromGroups(db *database.DB, userID string) ([]string, error) {
|
|
||||||
query := `
|
|
||||||
SELECT DISTINCT r.name
|
|
||||||
FROM roles r
|
|
||||||
INNER JOIN group_roles gr ON r.id = gr.role_id
|
|
||||||
INNER JOIN user_groups ug ON gr.group_id = ug.group_id
|
|
||||||
WHERE ug.user_id = $1
|
|
||||||
ORDER BY r.name
|
|
||||||
`
|
|
||||||
|
|
||||||
rows, err := db.Query(query, userID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
var roles []string
|
|
||||||
for rows.Next() {
|
|
||||||
var role string
|
|
||||||
if err := rows.Scan(&role); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
roles = append(roles, role)
|
|
||||||
}
|
|
||||||
|
|
||||||
return roles, rows.Err()
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,237 +0,0 @@
|
|||||||
package iam
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/atlasos/calypso/internal/common/database"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Role represents a system role row from the roles table.
type Role struct {
	ID          string    // primary key (roles.id)
	Name        string    // unique role name
	Description string    // human-readable description
	IsSystem    bool      // true for built-in roles that should not be deleted
	CreatedAt   time.Time // row creation timestamp
	UpdatedAt   time.Time // last modification timestamp
}
|
|
||||||
|
|
||||||
// GetRoleByID retrieves a role by ID
|
|
||||||
func GetRoleByID(db *database.DB, roleID string) (*Role, error) {
|
|
||||||
query := `
|
|
||||||
SELECT id, name, description, is_system, created_at, updated_at
|
|
||||||
FROM roles
|
|
||||||
WHERE id = $1
|
|
||||||
`
|
|
||||||
|
|
||||||
var role Role
|
|
||||||
err := db.QueryRow(query, roleID).Scan(
|
|
||||||
&role.ID, &role.Name, &role.Description, &role.IsSystem,
|
|
||||||
&role.CreatedAt, &role.UpdatedAt,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &role, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRoleByName retrieves a role by name
|
|
||||||
func GetRoleByName(db *database.DB, name string) (*Role, error) {
|
|
||||||
query := `
|
|
||||||
SELECT id, name, description, is_system, created_at, updated_at
|
|
||||||
FROM roles
|
|
||||||
WHERE name = $1
|
|
||||||
`
|
|
||||||
|
|
||||||
var role Role
|
|
||||||
err := db.QueryRow(query, name).Scan(
|
|
||||||
&role.ID, &role.Name, &role.Description, &role.IsSystem,
|
|
||||||
&role.CreatedAt, &role.UpdatedAt,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &role, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListRoles retrieves all roles
|
|
||||||
func ListRoles(db *database.DB) ([]*Role, error) {
|
|
||||||
query := `
|
|
||||||
SELECT id, name, description, is_system, created_at, updated_at
|
|
||||||
FROM roles
|
|
||||||
ORDER BY name
|
|
||||||
`
|
|
||||||
|
|
||||||
rows, err := db.Query(query)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
var roles []*Role
|
|
||||||
for rows.Next() {
|
|
||||||
var role Role
|
|
||||||
if err := rows.Scan(
|
|
||||||
&role.ID, &role.Name, &role.Description, &role.IsSystem,
|
|
||||||
&role.CreatedAt, &role.UpdatedAt,
|
|
||||||
); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
roles = append(roles, &role)
|
|
||||||
}
|
|
||||||
|
|
||||||
return roles, rows.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateRole creates a new role
|
|
||||||
func CreateRole(db *database.DB, name, description string) (*Role, error) {
|
|
||||||
query := `
|
|
||||||
INSERT INTO roles (name, description)
|
|
||||||
VALUES ($1, $2)
|
|
||||||
RETURNING id, name, description, is_system, created_at, updated_at
|
|
||||||
`
|
|
||||||
|
|
||||||
var role Role
|
|
||||||
err := db.QueryRow(query, name, description).Scan(
|
|
||||||
&role.ID, &role.Name, &role.Description, &role.IsSystem,
|
|
||||||
&role.CreatedAt, &role.UpdatedAt,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &role, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateRole updates an existing role
|
|
||||||
func UpdateRole(db *database.DB, roleID, name, description string) error {
|
|
||||||
query := `
|
|
||||||
UPDATE roles
|
|
||||||
SET name = $1, description = $2, updated_at = NOW()
|
|
||||||
WHERE id = $3
|
|
||||||
`
|
|
||||||
_, err := db.Exec(query, name, description, roleID)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteRole deletes a role
|
|
||||||
func DeleteRole(db *database.DB, roleID string) error {
|
|
||||||
query := `DELETE FROM roles WHERE id = $1`
|
|
||||||
_, err := db.Exec(query, roleID)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRoleUsers retrieves all users with a specific role
|
|
||||||
func GetRoleUsers(db *database.DB, roleID string) ([]string, error) {
|
|
||||||
query := `
|
|
||||||
SELECT u.id
|
|
||||||
FROM users u
|
|
||||||
INNER JOIN user_roles ur ON u.id = ur.user_id
|
|
||||||
WHERE ur.role_id = $1
|
|
||||||
ORDER BY u.username
|
|
||||||
`
|
|
||||||
|
|
||||||
rows, err := db.Query(query, roleID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
var userIDs []string
|
|
||||||
for rows.Next() {
|
|
||||||
var userID string
|
|
||||||
if err := rows.Scan(&userID); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
userIDs = append(userIDs, userID)
|
|
||||||
}
|
|
||||||
|
|
||||||
return userIDs, rows.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRolePermissions retrieves all permissions for a role
|
|
||||||
func GetRolePermissions(db *database.DB, roleID string) ([]string, error) {
|
|
||||||
query := `
|
|
||||||
SELECT p.name
|
|
||||||
FROM permissions p
|
|
||||||
INNER JOIN role_permissions rp ON p.id = rp.permission_id
|
|
||||||
WHERE rp.role_id = $1
|
|
||||||
ORDER BY p.name
|
|
||||||
`
|
|
||||||
|
|
||||||
rows, err := db.Query(query, roleID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
var permissions []string
|
|
||||||
for rows.Next() {
|
|
||||||
var perm string
|
|
||||||
if err := rows.Scan(&perm); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
permissions = append(permissions, perm)
|
|
||||||
}
|
|
||||||
|
|
||||||
return permissions, rows.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddPermissionToRole assigns a permission to a role
|
|
||||||
func AddPermissionToRole(db *database.DB, roleID, permissionID string) error {
|
|
||||||
query := `
|
|
||||||
INSERT INTO role_permissions (role_id, permission_id)
|
|
||||||
VALUES ($1, $2)
|
|
||||||
ON CONFLICT (role_id, permission_id) DO NOTHING
|
|
||||||
`
|
|
||||||
_, err := db.Exec(query, roleID, permissionID)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemovePermissionFromRole removes a permission from a role
|
|
||||||
func RemovePermissionFromRole(db *database.DB, roleID, permissionID string) error {
|
|
||||||
query := `DELETE FROM role_permissions WHERE role_id = $1 AND permission_id = $2`
|
|
||||||
_, err := db.Exec(query, roleID, permissionID)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPermissionIDByName retrieves a permission ID by name
|
|
||||||
func GetPermissionIDByName(db *database.DB, permissionName string) (string, error) {
|
|
||||||
var permissionID string
|
|
||||||
err := db.QueryRow("SELECT id FROM permissions WHERE name = $1", permissionName).Scan(&permissionID)
|
|
||||||
return permissionID, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListPermissions retrieves all permissions
|
|
||||||
func ListPermissions(db *database.DB) ([]map[string]interface{}, error) {
|
|
||||||
query := `
|
|
||||||
SELECT id, name, resource, action, description
|
|
||||||
FROM permissions
|
|
||||||
ORDER BY resource, action
|
|
||||||
`
|
|
||||||
|
|
||||||
rows, err := db.Query(query)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
var permissions []map[string]interface{}
|
|
||||||
for rows.Next() {
|
|
||||||
var id, name, resource, action, description string
|
|
||||||
if err := rows.Scan(&id, &name, &resource, &action, &description); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
permissions = append(permissions, map[string]interface{}{
|
|
||||||
"id": id,
|
|
||||||
"name": name,
|
|
||||||
"resource": resource,
|
|
||||||
"action": action,
|
|
||||||
"description": description,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return permissions, rows.Err()
|
|
||||||
}
|
|
||||||
@@ -2,7 +2,6 @@ package iam
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"fmt"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/atlasos/calypso/internal/common/database"
|
"github.com/atlasos/calypso/internal/common/database"
|
||||||
@@ -91,14 +90,11 @@ func GetUserRoles(db *database.DB, userID string) ([]string, error) {
|
|||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
var role string
|
var role string
|
||||||
if err := rows.Scan(&role); err != nil {
|
if err := rows.Scan(&role); err != nil {
|
||||||
return []string{}, err
|
return nil, err
|
||||||
}
|
}
|
||||||
roles = append(roles, role)
|
roles = append(roles, role)
|
||||||
}
|
}
|
||||||
|
|
||||||
if roles == nil {
|
|
||||||
roles = []string{}
|
|
||||||
}
|
|
||||||
return roles, rows.Err()
|
return roles, rows.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -122,53 +118,11 @@ func GetUserPermissions(db *database.DB, userID string) ([]string, error) {
|
|||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
var perm string
|
var perm string
|
||||||
if err := rows.Scan(&perm); err != nil {
|
if err := rows.Scan(&perm); err != nil {
|
||||||
return []string{}, err
|
return nil, err
|
||||||
}
|
}
|
||||||
permissions = append(permissions, perm)
|
permissions = append(permissions, perm)
|
||||||
}
|
}
|
||||||
|
|
||||||
if permissions == nil {
|
|
||||||
permissions = []string{}
|
|
||||||
}
|
|
||||||
return permissions, rows.Err()
|
return permissions, rows.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddUserRole assigns a role to a user
|
|
||||||
func AddUserRole(db *database.DB, userID, roleID, assignedBy string) error {
|
|
||||||
query := `
|
|
||||||
INSERT INTO user_roles (user_id, role_id, assigned_by)
|
|
||||||
VALUES ($1, $2, $3)
|
|
||||||
ON CONFLICT (user_id, role_id) DO NOTHING
|
|
||||||
`
|
|
||||||
result, err := db.Exec(query, userID, roleID, assignedBy)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to insert user role: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if row was actually inserted (not just skipped due to conflict)
|
|
||||||
rowsAffected, err := result.RowsAffected()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to get rows affected: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if rowsAffected == 0 {
|
|
||||||
// Row already exists, this is not an error but we should know about it
|
|
||||||
return nil // ON CONFLICT DO NOTHING means this is expected
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveUserRole removes a role from a user
|
|
||||||
func RemoveUserRole(db *database.DB, userID, roleID string) error {
|
|
||||||
query := `DELETE FROM user_roles WHERE user_id = $1 AND role_id = $2`
|
|
||||||
_, err := db.Exec(query, userID, roleID)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRoleIDByName retrieves a role ID by name
|
|
||||||
func GetRoleIDByName(db *database.DB, roleName string) (string, error) {
|
|
||||||
var roleID string
|
|
||||||
err := db.QueryRow("SELECT id FROM roles WHERE name = $1", roleName).Scan(&roleID)
|
|
||||||
return roleID, err
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,14 +1,10 @@
|
|||||||
package monitoring
|
package monitoring
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
|
||||||
"context"
|
"context"
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/atlasos/calypso/internal/common/database"
|
"github.com/atlasos/calypso/internal/common/database"
|
||||||
@@ -98,21 +94,6 @@ type MetricsService struct {
|
|||||||
db *database.DB
|
db *database.DB
|
||||||
logger *logger.Logger
|
logger *logger.Logger
|
||||||
startTime time.Time
|
startTime time.Time
|
||||||
lastCPU *cpuStats // For CPU usage calculation
|
|
||||||
lastCPUTime time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// cpuStats represents CPU statistics from /proc/stat
|
|
||||||
type cpuStats struct {
|
|
||||||
user uint64
|
|
||||||
nice uint64
|
|
||||||
system uint64
|
|
||||||
idle uint64
|
|
||||||
iowait uint64
|
|
||||||
irq uint64
|
|
||||||
softirq uint64
|
|
||||||
steal uint64
|
|
||||||
guest uint64
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewMetricsService creates a new metrics service
|
// NewMetricsService creates a new metrics service
|
||||||
@@ -134,8 +115,6 @@ func (s *MetricsService) CollectMetrics(ctx context.Context) (*Metrics, error) {
|
|||||||
sysMetrics, err := s.collectSystemMetrics(ctx)
|
sysMetrics, err := s.collectSystemMetrics(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Error("Failed to collect system metrics", "error", err)
|
s.logger.Error("Failed to collect system metrics", "error", err)
|
||||||
// Set default/zero values if collection fails
|
|
||||||
metrics.System = SystemMetrics{}
|
|
||||||
} else {
|
} else {
|
||||||
metrics.System = *sysMetrics
|
metrics.System = *sysMetrics
|
||||||
}
|
}
|
||||||
@@ -188,17 +167,21 @@ func (s *MetricsService) CollectMetrics(ctx context.Context) (*Metrics, error) {
|
|||||||
|
|
||||||
// collectSystemMetrics collects system-level metrics
|
// collectSystemMetrics collects system-level metrics
|
||||||
func (s *MetricsService) collectSystemMetrics(ctx context.Context) (*SystemMetrics, error) {
|
func (s *MetricsService) collectSystemMetrics(ctx context.Context) (*SystemMetrics, error) {
|
||||||
// Get system memory from /proc/meminfo
|
var m runtime.MemStats
|
||||||
memoryTotal, memoryUsed, memoryPercent := s.getSystemMemory()
|
runtime.ReadMemStats(&m)
|
||||||
|
|
||||||
// Get CPU usage from /proc/stat
|
// Get memory info
|
||||||
cpuUsage := s.getCPUUsage()
|
memoryUsed := int64(m.Alloc)
|
||||||
|
memoryTotal := int64(m.Sys)
|
||||||
|
memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100
|
||||||
|
|
||||||
// Get system uptime from /proc/uptime
|
// Uptime
|
||||||
uptime := s.getSystemUptime()
|
uptime := time.Since(s.startTime).Seconds()
|
||||||
|
|
||||||
|
// CPU and disk would require external tools or system calls
|
||||||
|
// For now, we'll use placeholders
|
||||||
metrics := &SystemMetrics{
|
metrics := &SystemMetrics{
|
||||||
CPUUsagePercent: cpuUsage,
|
CPUUsagePercent: 0.0, // Would need to read from /proc/stat
|
||||||
MemoryUsed: memoryUsed,
|
MemoryUsed: memoryUsed,
|
||||||
MemoryTotal: memoryTotal,
|
MemoryTotal: memoryTotal,
|
||||||
MemoryPercent: memoryPercent,
|
MemoryPercent: memoryPercent,
|
||||||
@@ -420,232 +403,3 @@ func (s *MetricsService) collectTaskMetrics(ctx context.Context) (*TaskMetrics,
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getSystemUptime reads system uptime from /proc/uptime
|
|
||||||
// Returns uptime in seconds, or service uptime as fallback
|
|
||||||
func (s *MetricsService) getSystemUptime() float64 {
|
|
||||||
file, err := os.Open("/proc/uptime")
|
|
||||||
if err != nil {
|
|
||||||
// Fallback to service uptime if /proc/uptime is not available
|
|
||||||
s.logger.Warn("Failed to read /proc/uptime, using service uptime", "error", err)
|
|
||||||
return time.Since(s.startTime).Seconds()
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
scanner := bufio.NewScanner(file)
|
|
||||||
if !scanner.Scan() {
|
|
||||||
// Fallback to service uptime if file is empty
|
|
||||||
s.logger.Warn("Failed to read /proc/uptime content, using service uptime")
|
|
||||||
return time.Since(s.startTime).Seconds()
|
|
||||||
}
|
|
||||||
|
|
||||||
line := strings.TrimSpace(scanner.Text())
|
|
||||||
fields := strings.Fields(line)
|
|
||||||
if len(fields) == 0 {
|
|
||||||
// Fallback to service uptime if no data
|
|
||||||
s.logger.Warn("No data in /proc/uptime, using service uptime")
|
|
||||||
return time.Since(s.startTime).Seconds()
|
|
||||||
}
|
|
||||||
|
|
||||||
// First field is system uptime in seconds
|
|
||||||
uptimeSeconds, err := strconv.ParseFloat(fields[0], 64)
|
|
||||||
if err != nil {
|
|
||||||
// Fallback to service uptime if parsing fails
|
|
||||||
s.logger.Warn("Failed to parse /proc/uptime, using service uptime", "error", err)
|
|
||||||
return time.Since(s.startTime).Seconds()
|
|
||||||
}
|
|
||||||
|
|
||||||
return uptimeSeconds
|
|
||||||
}
|
|
||||||
|
|
||||||
// getSystemMemory reads system memory from /proc/meminfo
|
|
||||||
// Returns total, used (in bytes), and usage percentage
|
|
||||||
func (s *MetricsService) getSystemMemory() (int64, int64, float64) {
|
|
||||||
file, err := os.Open("/proc/meminfo")
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Warn("Failed to read /proc/meminfo, using Go runtime memory", "error", err)
|
|
||||||
var m runtime.MemStats
|
|
||||||
runtime.ReadMemStats(&m)
|
|
||||||
memoryUsed := int64(m.Alloc)
|
|
||||||
memoryTotal := int64(m.Sys)
|
|
||||||
memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100
|
|
||||||
return memoryTotal, memoryUsed, memoryPercent
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
var memTotal, memAvailable, memFree, buffers, cached int64
|
|
||||||
scanner := bufio.NewScanner(file)
|
|
||||||
|
|
||||||
for scanner.Scan() {
|
|
||||||
line := strings.TrimSpace(scanner.Text())
|
|
||||||
if line == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse line like "MemTotal: 16375596 kB"
|
|
||||||
// or "MemTotal: 16375596" (some systems don't have unit)
|
|
||||||
colonIdx := strings.Index(line, ":")
|
|
||||||
if colonIdx == -1 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
key := strings.TrimSpace(line[:colonIdx])
|
|
||||||
valuePart := strings.TrimSpace(line[colonIdx+1:])
|
|
||||||
|
|
||||||
// Split value part to get number (ignore unit like "kB")
|
|
||||||
fields := strings.Fields(valuePart)
|
|
||||||
if len(fields) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
value, err := strconv.ParseInt(fields[0], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Values in /proc/meminfo are in KB, convert to bytes
|
|
||||||
valueBytes := value * 1024
|
|
||||||
|
|
||||||
switch key {
|
|
||||||
case "MemTotal":
|
|
||||||
memTotal = valueBytes
|
|
||||||
case "MemAvailable":
|
|
||||||
memAvailable = valueBytes
|
|
||||||
case "MemFree":
|
|
||||||
memFree = valueBytes
|
|
||||||
case "Buffers":
|
|
||||||
buffers = valueBytes
|
|
||||||
case "Cached":
|
|
||||||
cached = valueBytes
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
s.logger.Warn("Error scanning /proc/meminfo", "error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if memTotal == 0 {
|
|
||||||
s.logger.Warn("Failed to get MemTotal from /proc/meminfo, using Go runtime memory", "memTotal", memTotal)
|
|
||||||
var m runtime.MemStats
|
|
||||||
runtime.ReadMemStats(&m)
|
|
||||||
memoryUsed := int64(m.Alloc)
|
|
||||||
memoryTotal := int64(m.Sys)
|
|
||||||
memoryPercent := float64(memoryUsed) / float64(memoryTotal) * 100
|
|
||||||
return memoryTotal, memoryUsed, memoryPercent
|
|
||||||
}
|
|
||||||
|
|
||||||
// Calculate used memory
|
|
||||||
// If MemAvailable exists (kernel 3.14+), use it for more accurate calculation
|
|
||||||
var memoryUsed int64
|
|
||||||
if memAvailable > 0 {
|
|
||||||
memoryUsed = memTotal - memAvailable
|
|
||||||
} else {
|
|
||||||
// Fallback: MemTotal - MemFree - Buffers - Cached
|
|
||||||
memoryUsed = memTotal - memFree - buffers - cached
|
|
||||||
if memoryUsed < 0 {
|
|
||||||
memoryUsed = memTotal - memFree
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
memoryPercent := float64(memoryUsed) / float64(memTotal) * 100
|
|
||||||
|
|
||||||
s.logger.Debug("System memory stats",
|
|
||||||
"memTotal", memTotal,
|
|
||||||
"memAvailable", memAvailable,
|
|
||||||
"memoryUsed", memoryUsed,
|
|
||||||
"memoryPercent", memoryPercent)
|
|
||||||
|
|
||||||
return memTotal, memoryUsed, memoryPercent
|
|
||||||
}
|
|
||||||
|
|
||||||
// getCPUUsage reads CPU usage from /proc/stat
|
|
||||||
// Requires two readings to calculate percentage
|
|
||||||
func (s *MetricsService) getCPUUsage() float64 {
|
|
||||||
currentCPU, err := s.readCPUStats()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Warn("Failed to read CPU stats", "error", err)
|
|
||||||
return 0.0
|
|
||||||
}
|
|
||||||
|
|
||||||
// If this is the first reading, store it and return 0
|
|
||||||
if s.lastCPU == nil {
|
|
||||||
s.lastCPU = currentCPU
|
|
||||||
s.lastCPUTime = time.Now()
|
|
||||||
return 0.0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Calculate time difference
|
|
||||||
timeDiff := time.Since(s.lastCPUTime).Seconds()
|
|
||||||
if timeDiff < 0.1 {
|
|
||||||
// Too soon, return previous value or 0
|
|
||||||
return 0.0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Calculate total CPU time
|
|
||||||
prevTotal := s.lastCPU.user + s.lastCPU.nice + s.lastCPU.system + s.lastCPU.idle +
|
|
||||||
s.lastCPU.iowait + s.lastCPU.irq + s.lastCPU.softirq + s.lastCPU.steal + s.lastCPU.guest
|
|
||||||
currTotal := currentCPU.user + currentCPU.nice + currentCPU.system + currentCPU.idle +
|
|
||||||
currentCPU.iowait + currentCPU.irq + currentCPU.softirq + currentCPU.steal + currentCPU.guest
|
|
||||||
|
|
||||||
// Calculate idle time
|
|
||||||
prevIdle := s.lastCPU.idle + s.lastCPU.iowait
|
|
||||||
currIdle := currentCPU.idle + currentCPU.iowait
|
|
||||||
|
|
||||||
// Calculate used time
|
|
||||||
totalDiff := currTotal - prevTotal
|
|
||||||
idleDiff := currIdle - prevIdle
|
|
||||||
|
|
||||||
if totalDiff == 0 {
|
|
||||||
return 0.0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Calculate CPU usage percentage
|
|
||||||
usagePercent := 100.0 * (1.0 - float64(idleDiff)/float64(totalDiff))
|
|
||||||
|
|
||||||
// Update last CPU stats
|
|
||||||
s.lastCPU = currentCPU
|
|
||||||
s.lastCPUTime = time.Now()
|
|
||||||
|
|
||||||
return usagePercent
|
|
||||||
}
|
|
||||||
|
|
||||||
// readCPUStats reads CPU statistics from /proc/stat
|
|
||||||
func (s *MetricsService) readCPUStats() (*cpuStats, error) {
|
|
||||||
file, err := os.Open("/proc/stat")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to open /proc/stat: %w", err)
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
scanner := bufio.NewScanner(file)
|
|
||||||
if !scanner.Scan() {
|
|
||||||
return nil, fmt.Errorf("failed to read /proc/stat")
|
|
||||||
}
|
|
||||||
|
|
||||||
line := strings.TrimSpace(scanner.Text())
|
|
||||||
if !strings.HasPrefix(line, "cpu ") {
|
|
||||||
return nil, fmt.Errorf("invalid /proc/stat format")
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := strings.Fields(line)
|
|
||||||
if len(fields) < 8 {
|
|
||||||
return nil, fmt.Errorf("insufficient CPU stats fields")
|
|
||||||
}
|
|
||||||
|
|
||||||
stats := &cpuStats{}
|
|
||||||
stats.user, _ = strconv.ParseUint(fields[1], 10, 64)
|
|
||||||
stats.nice, _ = strconv.ParseUint(fields[2], 10, 64)
|
|
||||||
stats.system, _ = strconv.ParseUint(fields[3], 10, 64)
|
|
||||||
stats.idle, _ = strconv.ParseUint(fields[4], 10, 64)
|
|
||||||
stats.iowait, _ = strconv.ParseUint(fields[5], 10, 64)
|
|
||||||
stats.irq, _ = strconv.ParseUint(fields[6], 10, 64)
|
|
||||||
stats.softirq, _ = strconv.ParseUint(fields[7], 10, 64)
|
|
||||||
|
|
||||||
if len(fields) > 8 {
|
|
||||||
stats.steal, _ = strconv.ParseUint(fields[8], 10, 64)
|
|
||||||
}
|
|
||||||
if len(fields) > 9 {
|
|
||||||
stats.guest, _ = strconv.ParseUint(fields[9], 10, 64)
|
|
||||||
}
|
|
||||||
|
|
||||||
return stats, nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,15 +1,12 @@
|
|||||||
package scst
|
package scst
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/atlasos/calypso/internal/common/database"
|
"github.com/atlasos/calypso/internal/common/database"
|
||||||
"github.com/atlasos/calypso/internal/common/logger"
|
"github.com/atlasos/calypso/internal/common/logger"
|
||||||
"github.com/atlasos/calypso/internal/tasks"
|
"github.com/atlasos/calypso/internal/tasks"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
"github.com/go-playground/validator/v10"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Handler handles SCST-related API requests
|
// Handler handles SCST-related API requests
|
||||||
@@ -39,11 +36,6 @@ func (h *Handler) ListTargets(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure we return an empty array instead of null
|
|
||||||
if targets == nil {
|
|
||||||
targets = []Target{}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"targets": targets})
|
c.JSON(http.StatusOK, gin.H{"targets": targets})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -63,24 +55,11 @@ func (h *Handler) GetTarget(c *gin.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Get LUNs
|
// Get LUNs
|
||||||
luns, err := h.service.GetTargetLUNs(c.Request.Context(), targetID)
|
luns, _ := h.service.GetTargetLUNs(c.Request.Context(), targetID)
|
||||||
if err != nil {
|
|
||||||
h.logger.Warn("Failed to get LUNs", "target_id", targetID, "error", err)
|
|
||||||
// Return empty array instead of nil
|
|
||||||
luns = []LUN{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get initiator groups
|
|
||||||
groups, err2 := h.service.GetTargetInitiatorGroups(c.Request.Context(), targetID)
|
|
||||||
if err2 != nil {
|
|
||||||
h.logger.Warn("Failed to get initiator groups", "target_id", targetID, "error", err2)
|
|
||||||
groups = []InitiatorGroup{}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{
|
c.JSON(http.StatusOK, gin.H{
|
||||||
"target": target,
|
"target": target,
|
||||||
"luns": luns,
|
"luns": luns,
|
||||||
"initiator_groups": groups,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -119,11 +98,6 @@ func (h *Handler) CreateTarget(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set alias to name for frontend compatibility (same as ListTargets)
|
|
||||||
target.Alias = target.Name
|
|
||||||
// LUNCount will be 0 for newly created target
|
|
||||||
target.LUNCount = 0
|
|
||||||
|
|
||||||
c.JSON(http.StatusCreated, target)
|
c.JSON(http.StatusCreated, target)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -131,7 +105,7 @@ func (h *Handler) CreateTarget(c *gin.Context) {
|
|||||||
// AddLUNRequest represents a request to attach a LUN to a target.
type AddLUNRequest struct {
	DeviceName string `json:"device_name" binding:"required"`
	DevicePath string `json:"device_path" binding:"required"`
	// Note: binding:"required" must NOT be used on LUNNumber — Gin
	// treats the zero value of an int as "missing", and LUN number 0
	// is a valid (and common) LUN. Range validation happens in the
	// handler instead.
	LUNNumber   int    `json:"lun_number"`
	HandlerType string `json:"handler_type" binding:"required"`
}
|
||||||
|
|
||||||
@@ -147,43 +121,7 @@ func (h *Handler) AddLUN(c *gin.Context) {
|
|||||||
|
|
||||||
var req AddLUNRequest
|
var req AddLUNRequest
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
h.logger.Error("Failed to bind AddLUN request", "error", err)
|
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
||||||
// Provide more detailed error message
|
|
||||||
if validationErr, ok := err.(validator.ValidationErrors); ok {
|
|
||||||
var errorMessages []string
|
|
||||||
for _, fieldErr := range validationErr {
|
|
||||||
errorMessages = append(errorMessages, fmt.Sprintf("%s is required", fieldErr.Field()))
|
|
||||||
}
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("validation failed: %s", strings.Join(errorMessages, ", "))})
|
|
||||||
} else {
|
|
||||||
// Extract error message without full struct name
|
|
||||||
errMsg := err.Error()
|
|
||||||
if idx := strings.Index(errMsg, "Key: '"); idx >= 0 {
|
|
||||||
// Extract field name from error message
|
|
||||||
fieldStart := idx + 6 // Length of "Key: '"
|
|
||||||
if fieldEnd := strings.Index(errMsg[fieldStart:], "'"); fieldEnd >= 0 {
|
|
||||||
fieldName := errMsg[fieldStart : fieldStart+fieldEnd]
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("invalid or missing field: %s", fieldName)})
|
|
||||||
} else {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request format"})
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("invalid request: %v", err)})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate required fields (additional check in case binding doesn't catch it)
|
|
||||||
if req.DeviceName == "" || req.DevicePath == "" || req.HandlerType == "" {
|
|
||||||
h.logger.Error("Missing required fields in AddLUN request", "device_name", req.DeviceName, "device_path", req.DevicePath, "handler_type", req.HandlerType)
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "device_name, device_path, and handler_type are required"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate LUN number range
|
|
||||||
if req.LUNNumber < 0 || req.LUNNumber > 255 {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "lun_number must be between 0 and 255"})
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -196,48 +134,6 @@ func (h *Handler) AddLUN(c *gin.Context) {
|
|||||||
c.JSON(http.StatusOK, gin.H{"message": "LUN added successfully"})
|
c.JSON(http.StatusOK, gin.H{"message": "LUN added successfully"})
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveLUN removes a LUN from a target
|
|
||||||
func (h *Handler) RemoveLUN(c *gin.Context) {
|
|
||||||
targetID := c.Param("id")
|
|
||||||
lunID := c.Param("lunId")
|
|
||||||
|
|
||||||
// Get target
|
|
||||||
target, err := h.service.GetTarget(c.Request.Context(), targetID)
|
|
||||||
if err != nil {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "target not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get LUN to get the LUN number
|
|
||||||
var lunNumber int
|
|
||||||
err = h.db.QueryRowContext(c.Request.Context(),
|
|
||||||
"SELECT lun_number FROM scst_luns WHERE id = $1 AND target_id = $2",
|
|
||||||
lunID, targetID,
|
|
||||||
).Scan(&lunNumber)
|
|
||||||
if err != nil {
|
|
||||||
if strings.Contains(err.Error(), "no rows") {
|
|
||||||
// LUN already deleted from database - check if it still exists in SCST
|
|
||||||
// Try to get LUN number from URL or try common LUN numbers
|
|
||||||
// For now, return success since it's already deleted (idempotent)
|
|
||||||
h.logger.Info("LUN not found in database, may already be deleted", "lun_id", lunID, "target_id", targetID)
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "LUN already removed or not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to get LUN", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get LUN"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove LUN
|
|
||||||
if err := h.service.RemoveLUN(c.Request.Context(), target.IQN, lunNumber); err != nil {
|
|
||||||
h.logger.Error("Failed to remove LUN", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "LUN removed successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddInitiatorRequest represents an initiator addition request
|
// AddInitiatorRequest represents an initiator addition request
|
||||||
type AddInitiatorRequest struct {
|
type AddInitiatorRequest struct {
|
||||||
InitiatorIQN string `json:"initiator_iqn" binding:"required"`
|
InitiatorIQN string `json:"initiator_iqn" binding:"required"`
|
||||||
@@ -268,149 +164,6 @@ func (h *Handler) AddInitiator(c *gin.Context) {
|
|||||||
c.JSON(http.StatusOK, gin.H{"message": "Initiator added successfully"})
|
c.JSON(http.StatusOK, gin.H{"message": "Initiator added successfully"})
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddInitiatorToGroupRequest represents a request to add an initiator to a group
|
|
||||||
type AddInitiatorToGroupRequest struct {
|
|
||||||
InitiatorIQN string `json:"initiator_iqn" binding:"required"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddInitiatorToGroup adds an initiator to a specific group
|
|
||||||
func (h *Handler) AddInitiatorToGroup(c *gin.Context) {
|
|
||||||
groupID := c.Param("id")
|
|
||||||
|
|
||||||
var req AddInitiatorToGroupRequest
|
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
|
||||||
validationErrors := make(map[string]string)
|
|
||||||
if ve, ok := err.(validator.ValidationErrors); ok {
|
|
||||||
for _, fe := range ve {
|
|
||||||
field := strings.ToLower(fe.Field())
|
|
||||||
validationErrors[field] = fmt.Sprintf("Field '%s' is required", field)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{
|
|
||||||
"error": "invalid request",
|
|
||||||
"validation_errors": validationErrors,
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err := h.service.AddInitiatorToGroup(c.Request.Context(), groupID, req.InitiatorIQN)
|
|
||||||
if err != nil {
|
|
||||||
if strings.Contains(err.Error(), "not found") || strings.Contains(err.Error(), "already exists") || strings.Contains(err.Error(), "single initiator only") {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to add initiator to group", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to add initiator to group"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "Initiator added to group successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListAllInitiators lists all initiators across all targets
|
|
||||||
func (h *Handler) ListAllInitiators(c *gin.Context) {
|
|
||||||
initiators, err := h.service.ListAllInitiators(c.Request.Context())
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to list initiators", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list initiators"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if initiators == nil {
|
|
||||||
initiators = []InitiatorWithTarget{}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"initiators": initiators})
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveInitiator removes an initiator
|
|
||||||
func (h *Handler) RemoveInitiator(c *gin.Context) {
|
|
||||||
initiatorID := c.Param("id")
|
|
||||||
|
|
||||||
if err := h.service.RemoveInitiator(c.Request.Context(), initiatorID); err != nil {
|
|
||||||
if err.Error() == "initiator not found" {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "initiator not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to remove initiator", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "Initiator removed successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetInitiator retrieves an initiator by ID
|
|
||||||
func (h *Handler) GetInitiator(c *gin.Context) {
|
|
||||||
initiatorID := c.Param("id")
|
|
||||||
|
|
||||||
initiator, err := h.service.GetInitiator(c.Request.Context(), initiatorID)
|
|
||||||
if err != nil {
|
|
||||||
if err.Error() == "initiator not found" {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "initiator not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to get initiator", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get initiator"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, initiator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListExtents lists all device extents
|
|
||||||
func (h *Handler) ListExtents(c *gin.Context) {
|
|
||||||
extents, err := h.service.ListExtents(c.Request.Context())
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to list extents", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list extents"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if extents == nil {
|
|
||||||
extents = []Extent{}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"extents": extents})
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateExtentRequest represents a request to create an extent
|
|
||||||
type CreateExtentRequest struct {
|
|
||||||
DeviceName string `json:"device_name" binding:"required"`
|
|
||||||
DevicePath string `json:"device_path" binding:"required"`
|
|
||||||
HandlerType string `json:"handler_type" binding:"required"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateExtent creates a new device extent
|
|
||||||
func (h *Handler) CreateExtent(c *gin.Context) {
|
|
||||||
var req CreateExtentRequest
|
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.CreateExtent(c.Request.Context(), req.DeviceName, req.DevicePath, req.HandlerType); err != nil {
|
|
||||||
h.logger.Error("Failed to create extent", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusCreated, gin.H{"message": "Extent created successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteExtent deletes a device extent
|
|
||||||
func (h *Handler) DeleteExtent(c *gin.Context) {
|
|
||||||
deviceName := c.Param("device")
|
|
||||||
|
|
||||||
if err := h.service.DeleteExtent(c.Request.Context(), deviceName); err != nil {
|
|
||||||
h.logger.Error("Failed to delete extent", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "Extent deleted successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ApplyConfig applies SCST configuration
|
// ApplyConfig applies SCST configuration
|
||||||
func (h *Handler) ApplyConfig(c *gin.Context) {
|
func (h *Handler) ApplyConfig(c *gin.Context) {
|
||||||
userID, _ := c.Get("user_id")
|
userID, _ := c.Get("user_id")
|
||||||
@@ -456,292 +209,3 @@ func (h *Handler) ListHandlers(c *gin.Context) {
|
|||||||
c.JSON(http.StatusOK, gin.H{"handlers": handlers})
|
c.JSON(http.StatusOK, gin.H{"handlers": handlers})
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListPortals lists all iSCSI portals
|
|
||||||
func (h *Handler) ListPortals(c *gin.Context) {
|
|
||||||
portals, err := h.service.ListPortals(c.Request.Context())
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to list portals", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list portals"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure we return an empty array instead of null
|
|
||||||
if portals == nil {
|
|
||||||
portals = []Portal{}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"portals": portals})
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreatePortal creates a new portal
|
|
||||||
func (h *Handler) CreatePortal(c *gin.Context) {
|
|
||||||
var portal Portal
|
|
||||||
if err := c.ShouldBindJSON(&portal); err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.CreatePortal(c.Request.Context(), &portal); err != nil {
|
|
||||||
h.logger.Error("Failed to create portal", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusCreated, portal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdatePortal updates a portal
|
|
||||||
func (h *Handler) UpdatePortal(c *gin.Context) {
|
|
||||||
id := c.Param("id")
|
|
||||||
|
|
||||||
var portal Portal
|
|
||||||
if err := c.ShouldBindJSON(&portal); err != nil {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.UpdatePortal(c.Request.Context(), id, &portal); err != nil {
|
|
||||||
if err.Error() == "portal not found" {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to update portal", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, portal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnableTarget enables a target
|
|
||||||
func (h *Handler) EnableTarget(c *gin.Context) {
|
|
||||||
targetID := c.Param("id")
|
|
||||||
|
|
||||||
target, err := h.service.GetTarget(c.Request.Context(), targetID)
|
|
||||||
if err != nil {
|
|
||||||
if err.Error() == "target not found" {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "target not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to get target", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get target"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.EnableTarget(c.Request.Context(), target.IQN); err != nil {
|
|
||||||
h.logger.Error("Failed to enable target", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "Target enabled successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// DisableTarget disables a target
|
|
||||||
func (h *Handler) DisableTarget(c *gin.Context) {
|
|
||||||
targetID := c.Param("id")
|
|
||||||
|
|
||||||
target, err := h.service.GetTarget(c.Request.Context(), targetID)
|
|
||||||
if err != nil {
|
|
||||||
if err.Error() == "target not found" {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "target not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to get target", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get target"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.DisableTarget(c.Request.Context(), target.IQN); err != nil {
|
|
||||||
h.logger.Error("Failed to disable target", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "Target disabled successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteTarget deletes a target
|
|
||||||
func (h *Handler) DeleteTarget(c *gin.Context) {
|
|
||||||
targetID := c.Param("id")
|
|
||||||
|
|
||||||
if err := h.service.DeleteTarget(c.Request.Context(), targetID); err != nil {
|
|
||||||
if err.Error() == "target not found" {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "target not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to delete target", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "Target deleted successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeletePortal deletes a portal
|
|
||||||
func (h *Handler) DeletePortal(c *gin.Context) {
|
|
||||||
id := c.Param("id")
|
|
||||||
|
|
||||||
if err := h.service.DeletePortal(c.Request.Context(), id); err != nil {
|
|
||||||
if err.Error() == "portal not found" {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to delete portal", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "Portal deleted successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPortal retrieves a portal by ID
|
|
||||||
func (h *Handler) GetPortal(c *gin.Context) {
|
|
||||||
id := c.Param("id")
|
|
||||||
|
|
||||||
portal, err := h.service.GetPortal(c.Request.Context(), id)
|
|
||||||
if err != nil {
|
|
||||||
if err.Error() == "portal not found" {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "portal not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to get portal", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get portal"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, portal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateInitiatorGroupRequest represents a request to create an initiator group
|
|
||||||
type CreateInitiatorGroupRequest struct {
|
|
||||||
TargetID string `json:"target_id" binding:"required"`
|
|
||||||
GroupName string `json:"group_name" binding:"required"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateInitiatorGroup creates a new initiator group
|
|
||||||
func (h *Handler) CreateInitiatorGroup(c *gin.Context) {
|
|
||||||
var req CreateInitiatorGroupRequest
|
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
|
||||||
validationErrors := make(map[string]string)
|
|
||||||
if ve, ok := err.(validator.ValidationErrors); ok {
|
|
||||||
for _, fe := range ve {
|
|
||||||
field := strings.ToLower(fe.Field())
|
|
||||||
validationErrors[field] = fmt.Sprintf("Field '%s' is required", field)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{
|
|
||||||
"error": "invalid request",
|
|
||||||
"validation_errors": validationErrors,
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
group, err := h.service.CreateInitiatorGroup(c.Request.Context(), req.TargetID, req.GroupName)
|
|
||||||
if err != nil {
|
|
||||||
if strings.Contains(err.Error(), "already exists") || strings.Contains(err.Error(), "not found") {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to create initiator group", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to create initiator group"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, group)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateInitiatorGroupRequest represents a request to update an initiator group
|
|
||||||
type UpdateInitiatorGroupRequest struct {
|
|
||||||
GroupName string `json:"group_name" binding:"required"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateInitiatorGroup updates an initiator group
|
|
||||||
func (h *Handler) UpdateInitiatorGroup(c *gin.Context) {
|
|
||||||
groupID := c.Param("id")
|
|
||||||
|
|
||||||
var req UpdateInitiatorGroupRequest
|
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
|
||||||
validationErrors := make(map[string]string)
|
|
||||||
if ve, ok := err.(validator.ValidationErrors); ok {
|
|
||||||
for _, fe := range ve {
|
|
||||||
field := strings.ToLower(fe.Field())
|
|
||||||
validationErrors[field] = fmt.Sprintf("Field '%s' is required", field)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{
|
|
||||||
"error": "invalid request",
|
|
||||||
"validation_errors": validationErrors,
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
group, err := h.service.UpdateInitiatorGroup(c.Request.Context(), groupID, req.GroupName)
|
|
||||||
if err != nil {
|
|
||||||
if strings.Contains(err.Error(), "not found") || strings.Contains(err.Error(), "already exists") {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to update initiator group", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to update initiator group"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, group)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteInitiatorGroup deletes an initiator group
|
|
||||||
func (h *Handler) DeleteInitiatorGroup(c *gin.Context) {
|
|
||||||
groupID := c.Param("id")
|
|
||||||
|
|
||||||
err := h.service.DeleteInitiatorGroup(c.Request.Context(), groupID)
|
|
||||||
if err != nil {
|
|
||||||
if strings.Contains(err.Error(), "not found") {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if strings.Contains(err.Error(), "cannot delete") || strings.Contains(err.Error(), "contains") {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to delete initiator group", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to delete initiator group"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "initiator group deleted successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetInitiatorGroup retrieves an initiator group by ID
|
|
||||||
func (h *Handler) GetInitiatorGroup(c *gin.Context) {
|
|
||||||
groupID := c.Param("id")
|
|
||||||
|
|
||||||
group, err := h.service.GetInitiatorGroup(c.Request.Context(), groupID)
|
|
||||||
if err != nil {
|
|
||||||
if strings.Contains(err.Error(), "not found") {
|
|
||||||
c.JSON(http.StatusNotFound, gin.H{"error": "initiator group not found"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
h.logger.Error("Failed to get initiator group", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get initiator group"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, group)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListAllInitiatorGroups lists all initiator groups
|
|
||||||
func (h *Handler) ListAllInitiatorGroups(c *gin.Context) {
|
|
||||||
groups, err := h.service.ListAllInitiatorGroups(c.Request.Context())
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to list initiator groups", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list initiator groups"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if groups == nil {
|
|
||||||
groups = []InitiatorGroup{}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"groups": groups})
|
|
||||||
}
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -304,13 +304,6 @@ func (h *Handler) DeleteZFSPool(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Invalidate cache for pools list
|
|
||||||
if h.cache != nil {
|
|
||||||
cacheKey := "http:/api/v1/storage/zfs/pools:"
|
|
||||||
h.cache.Delete(cacheKey)
|
|
||||||
h.logger.Debug("Cache invalidated for pools list", "key", cacheKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "ZFS pool deleted successfully"})
|
c.JSON(http.StatusOK, gin.H{"message": "ZFS pool deleted successfully"})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -46,7 +45,6 @@ type ZFSPool struct {
|
|||||||
ScrubInterval int `json:"scrub_interval"` // days
|
ScrubInterval int `json:"scrub_interval"` // days
|
||||||
IsActive bool `json:"is_active"`
|
IsActive bool `json:"is_active"`
|
||||||
HealthStatus string `json:"health_status"` // online, degraded, faulted, offline
|
HealthStatus string `json:"health_status"` // online, degraded, faulted, offline
|
||||||
CompressRatio float64 `json:"compress_ratio"` // compression ratio (e.g., 1.45x)
|
|
||||||
CreatedAt time.Time `json:"created_at"`
|
CreatedAt time.Time `json:"created_at"`
|
||||||
UpdatedAt time.Time `json:"updated_at"`
|
UpdatedAt time.Time `json:"updated_at"`
|
||||||
CreatedBy string `json:"created_by"`
|
CreatedBy string `json:"created_by"`
|
||||||
@@ -361,26 +359,6 @@ func (s *ZFSService) getSpareDisks(ctx context.Context, poolName string) ([]stri
|
|||||||
return spareDisks, nil
|
return spareDisks, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getCompressRatio gets the compression ratio from ZFS
|
|
||||||
func (s *ZFSService) getCompressRatio(ctx context.Context, poolName string) (float64, error) {
|
|
||||||
cmd := exec.CommandContext(ctx, "zfs", "get", "-H", "-o", "value", "compressratio", poolName)
|
|
||||||
output, err := cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
return 1.0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ratioStr := strings.TrimSpace(string(output))
|
|
||||||
// Remove 'x' suffix if present (e.g., "1.45x" -> "1.45")
|
|
||||||
ratioStr = strings.TrimSuffix(ratioStr, "x")
|
|
||||||
|
|
||||||
ratio, err := strconv.ParseFloat(ratioStr, 64)
|
|
||||||
if err != nil {
|
|
||||||
return 1.0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return ratio, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListPools lists all ZFS pools
|
// ListPools lists all ZFS pools
|
||||||
func (s *ZFSService) ListPools(ctx context.Context) ([]*ZFSPool, error) {
|
func (s *ZFSService) ListPools(ctx context.Context) ([]*ZFSPool, error) {
|
||||||
query := `
|
query := `
|
||||||
@@ -429,17 +407,8 @@ func (s *ZFSService) ListPools(ctx context.Context) ([]*ZFSPool, error) {
|
|||||||
pool.SpareDisks = spareDisks
|
pool.SpareDisks = spareDisks
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get compressratio from ZFS system
|
|
||||||
compressRatio, err := s.getCompressRatio(ctx, pool.Name)
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Warn("Failed to get compressratio", "pool", pool.Name, "error", err)
|
|
||||||
pool.CompressRatio = 1.0 // Default to 1.0 if can't get ratio
|
|
||||||
} else {
|
|
||||||
pool.CompressRatio = compressRatio
|
|
||||||
}
|
|
||||||
|
|
||||||
pools = append(pools, &pool)
|
pools = append(pools, &pool)
|
||||||
s.logger.Debug("Added pool to list", "pool_id", pool.ID, "name", pool.Name, "compressratio", pool.CompressRatio)
|
s.logger.Debug("Added pool to list", "pool_id", pool.ID, "name", pool.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := rows.Err(); err != nil {
|
if err := rows.Err(); err != nil {
|
||||||
|
|||||||
@@ -218,7 +218,7 @@ func (m *ZFSPoolMonitor) updatePoolStatus(ctx context.Context, poolName string,
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// markMissingPoolsOffline marks pools that exist in database but not in system as offline or deletes them
|
// markMissingPoolsOffline marks pools that exist in database but not in system as offline
|
||||||
func (m *ZFSPoolMonitor) markMissingPoolsOffline(ctx context.Context, systemPools map[string]PoolInfo) error {
|
func (m *ZFSPoolMonitor) markMissingPoolsOffline(ctx context.Context, systemPools map[string]PoolInfo) error {
|
||||||
// Get all pools from database
|
// Get all pools from database
|
||||||
rows, err := m.zfsService.db.QueryContext(ctx, "SELECT id, name FROM zfs_pools WHERE is_active = true")
|
rows, err := m.zfsService.db.QueryContext(ctx, "SELECT id, name FROM zfs_pools WHERE is_active = true")
|
||||||
@@ -235,13 +235,17 @@ func (m *ZFSPoolMonitor) markMissingPoolsOffline(ctx context.Context, systemPool
|
|||||||
|
|
||||||
// Check if pool exists in system
|
// Check if pool exists in system
|
||||||
if _, exists := systemPools[poolName]; !exists {
|
if _, exists := systemPools[poolName]; !exists {
|
||||||
// Pool doesn't exist in system - delete from database (pool was destroyed)
|
// Pool doesn't exist in system, mark as offline
|
||||||
m.logger.Info("Pool not found in system, removing from database", "pool", poolName)
|
_, err = m.zfsService.db.ExecContext(ctx, `
|
||||||
_, err = m.zfsService.db.ExecContext(ctx, "DELETE FROM zfs_pools WHERE id = $1", poolID)
|
UPDATE zfs_pools SET
|
||||||
|
health_status = 'offline',
|
||||||
|
updated_at = NOW()
|
||||||
|
WHERE id = $1
|
||||||
|
`, poolID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
m.logger.Warn("Failed to delete missing pool from database", "pool", poolName, "error", err)
|
m.logger.Warn("Failed to mark pool as offline", "pool", poolName, "error", err)
|
||||||
} else {
|
} else {
|
||||||
m.logger.Info("Removed missing pool from database", "pool", poolName)
|
m.logger.Info("Marked pool as offline (not found in system)", "pool", poolName)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package system
|
|||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/atlasos/calypso/internal/common/logger"
|
"github.com/atlasos/calypso/internal/common/logger"
|
||||||
"github.com/atlasos/calypso/internal/tasks"
|
"github.com/atlasos/calypso/internal/tasks"
|
||||||
@@ -116,140 +115,3 @@ func (h *Handler) GenerateSupportBundle(c *gin.Context) {
|
|||||||
c.JSON(http.StatusAccepted, gin.H{"task_id": taskID})
|
c.JSON(http.StatusAccepted, gin.H{"task_id": taskID})
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListNetworkInterfaces lists all network interfaces
|
|
||||||
func (h *Handler) ListNetworkInterfaces(c *gin.Context) {
|
|
||||||
interfaces, err := h.service.ListNetworkInterfaces(c.Request.Context())
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to list network interfaces", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to list network interfaces"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure we return an empty array instead of null
|
|
||||||
if interfaces == nil {
|
|
||||||
interfaces = []NetworkInterface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"interfaces": interfaces})
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveNTPSettings saves NTP configuration to the OS
|
|
||||||
func (h *Handler) SaveNTPSettings(c *gin.Context) {
|
|
||||||
var settings NTPSettings
|
|
||||||
if err := c.ShouldBindJSON(&settings); err != nil {
|
|
||||||
h.logger.Error("Invalid request body", "error", err)
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate timezone
|
|
||||||
if settings.Timezone == "" {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "timezone is required"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate NTP servers
|
|
||||||
if len(settings.NTPServers) == 0 {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "at least one NTP server is required"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := h.service.SaveNTPSettings(c.Request.Context(), settings); err != nil {
|
|
||||||
h.logger.Error("Failed to save NTP settings", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"message": "NTP settings saved successfully"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetNTPSettings retrieves current NTP configuration
|
|
||||||
func (h *Handler) GetNTPSettings(c *gin.Context) {
|
|
||||||
settings, err := h.service.GetNTPSettings(c.Request.Context())
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to get NTP settings", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get NTP settings"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"settings": settings})
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateNetworkInterface updates a network interface configuration
|
|
||||||
func (h *Handler) UpdateNetworkInterface(c *gin.Context) {
|
|
||||||
ifaceName := c.Param("name")
|
|
||||||
if ifaceName == "" {
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "interface name is required"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var req struct {
|
|
||||||
IPAddress string `json:"ip_address" binding:"required"`
|
|
||||||
Subnet string `json:"subnet" binding:"required"`
|
|
||||||
Gateway string `json:"gateway,omitempty"`
|
|
||||||
DNS1 string `json:"dns1,omitempty"`
|
|
||||||
DNS2 string `json:"dns2,omitempty"`
|
|
||||||
Role string `json:"role,omitempty"`
|
|
||||||
}
|
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
|
||||||
h.logger.Error("Invalid request body", "error", err)
|
|
||||||
c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request body"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert to service request
|
|
||||||
serviceReq := UpdateNetworkInterfaceRequest{
|
|
||||||
IPAddress: req.IPAddress,
|
|
||||||
Subnet: req.Subnet,
|
|
||||||
Gateway: req.Gateway,
|
|
||||||
DNS1: req.DNS1,
|
|
||||||
DNS2: req.DNS2,
|
|
||||||
Role: req.Role,
|
|
||||||
}
|
|
||||||
|
|
||||||
updatedIface, err := h.service.UpdateNetworkInterface(c.Request.Context(), ifaceName, serviceReq)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to update network interface", "interface", ifaceName, "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"interface": updatedIface})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSystemLogs retrieves recent system logs
|
|
||||||
func (h *Handler) GetSystemLogs(c *gin.Context) {
|
|
||||||
limitStr := c.DefaultQuery("limit", "30")
|
|
||||||
limit, err := strconv.Atoi(limitStr)
|
|
||||||
if err != nil || limit <= 0 || limit > 100 {
|
|
||||||
limit = 30
|
|
||||||
}
|
|
||||||
|
|
||||||
logs, err := h.service.GetSystemLogs(c.Request.Context(), limit)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to get system logs", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get system logs"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"logs": logs})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetNetworkThroughput retrieves network throughput data from RRD
|
|
||||||
func (h *Handler) GetNetworkThroughput(c *gin.Context) {
|
|
||||||
// Default to last 5 minutes
|
|
||||||
durationStr := c.DefaultQuery("duration", "5m")
|
|
||||||
duration, err := time.ParseDuration(durationStr)
|
|
||||||
if err != nil {
|
|
||||||
duration = 5 * time.Minute
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := h.service.GetNetworkThroughput(c.Request.Context(), duration)
|
|
||||||
if err != nil {
|
|
||||||
h.logger.Error("Failed to get network throughput", "error", err)
|
|
||||||
c.JSON(http.StatusInternalServerError, gin.H{"error": "failed to get network throughput"})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.JSON(http.StatusOK, gin.H{"data": data})
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,292 +0,0 @@
|
|||||||
package system
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/atlasos/calypso/internal/common/logger"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RRDService handles RRD database operations for network monitoring
|
|
||||||
type RRDService struct {
|
|
||||||
logger *logger.Logger
|
|
||||||
rrdDir string
|
|
||||||
interfaceName string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRRDService creates a new RRD service
|
|
||||||
func NewRRDService(log *logger.Logger, rrdDir string, interfaceName string) *RRDService {
|
|
||||||
return &RRDService{
|
|
||||||
logger: log,
|
|
||||||
rrdDir: rrdDir,
|
|
||||||
interfaceName: interfaceName,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkStats represents network interface statistics
|
|
||||||
type NetworkStats struct {
|
|
||||||
Interface string `json:"interface"`
|
|
||||||
RxBytes uint64 `json:"rx_bytes"`
|
|
||||||
TxBytes uint64 `json:"tx_bytes"`
|
|
||||||
RxPackets uint64 `json:"rx_packets"`
|
|
||||||
TxPackets uint64 `json:"tx_packets"`
|
|
||||||
Timestamp time.Time `json:"timestamp"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetNetworkStats reads network statistics from /proc/net/dev
|
|
||||||
func (r *RRDService) GetNetworkStats(ctx context.Context, interfaceName string) (*NetworkStats, error) {
|
|
||||||
data, err := os.ReadFile("/proc/net/dev")
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to read /proc/net/dev: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
lines := strings.Split(string(data), "\n")
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if !strings.HasPrefix(line, interfaceName+":") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse line: interface: rx_bytes rx_packets ... tx_bytes tx_packets ...
|
|
||||||
parts := strings.Fields(line)
|
|
||||||
if len(parts) < 17 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract statistics
|
|
||||||
// Format: interface: rx_bytes rx_packets rx_errs rx_drop ... tx_bytes tx_packets ...
|
|
||||||
rxBytes, err := strconv.ParseUint(parts[1], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
rxPackets, err := strconv.ParseUint(parts[2], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
txBytes, err := strconv.ParseUint(parts[9], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
txPackets, err := strconv.ParseUint(parts[10], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
return &NetworkStats{
|
|
||||||
Interface: interfaceName,
|
|
||||||
RxBytes: rxBytes,
|
|
||||||
TxBytes: txBytes,
|
|
||||||
RxPackets: rxPackets,
|
|
||||||
TxPackets: txPackets,
|
|
||||||
Timestamp: time.Now(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("interface %s not found in /proc/net/dev", interfaceName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InitializeRRD creates RRD database if it doesn't exist
|
|
||||||
func (r *RRDService) InitializeRRD(ctx context.Context) error {
|
|
||||||
// Ensure RRD directory exists
|
|
||||||
if err := os.MkdirAll(r.rrdDir, 0755); err != nil {
|
|
||||||
return fmt.Errorf("failed to create RRD directory: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
rrdFile := filepath.Join(r.rrdDir, fmt.Sprintf("network-%s.rrd", r.interfaceName))
|
|
||||||
|
|
||||||
// Check if RRD file already exists
|
|
||||||
if _, err := os.Stat(rrdFile); err == nil {
|
|
||||||
r.logger.Info("RRD file already exists", "file", rrdFile)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create RRD database
|
|
||||||
// Use COUNTER type to track cumulative bytes, RRD will calculate rate automatically
|
|
||||||
// DS:inbound:COUNTER:20:0:U - inbound cumulative bytes, 20s heartbeat
|
|
||||||
// DS:outbound:COUNTER:20:0:U - outbound cumulative bytes, 20s heartbeat
|
|
||||||
// RRA:AVERAGE:0.5:1:600 - 1 sample per step, 600 steps (100 minutes at 10s interval)
|
|
||||||
// RRA:AVERAGE:0.5:6:700 - 6 samples per step, 700 steps (11.6 hours at 1min interval)
|
|
||||||
// RRA:AVERAGE:0.5:60:730 - 60 samples per step, 730 steps (5 days at 1hour interval)
|
|
||||||
// RRA:MAX:0.5:1:600 - Max values for same intervals
|
|
||||||
// RRA:MAX:0.5:6:700
|
|
||||||
// RRA:MAX:0.5:60:730
|
|
||||||
cmd := exec.CommandContext(ctx, "rrdtool", "create", rrdFile,
|
|
||||||
"--step", "10", // 10 second step
|
|
||||||
"DS:inbound:COUNTER:20:0:U", // Inbound cumulative bytes, 20s heartbeat
|
|
||||||
"DS:outbound:COUNTER:20:0:U", // Outbound cumulative bytes, 20s heartbeat
|
|
||||||
"RRA:AVERAGE:0.5:1:600", // 10s resolution, 100 minutes
|
|
||||||
"RRA:AVERAGE:0.5:6:700", // 1min resolution, 11.6 hours
|
|
||||||
"RRA:AVERAGE:0.5:60:730", // 1hour resolution, 5 days
|
|
||||||
"RRA:MAX:0.5:1:600", // Max values
|
|
||||||
"RRA:MAX:0.5:6:700",
|
|
||||||
"RRA:MAX:0.5:60:730",
|
|
||||||
)
|
|
||||||
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to create RRD: %s: %w", string(output), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
r.logger.Info("RRD database created", "file", rrdFile)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateRRD updates RRD database with new network statistics
|
|
||||||
func (r *RRDService) UpdateRRD(ctx context.Context, stats *NetworkStats) error {
|
|
||||||
rrdFile := filepath.Join(r.rrdDir, fmt.Sprintf("network-%s.rrd", stats.Interface))
|
|
||||||
|
|
||||||
// Update with cumulative byte counts (COUNTER type)
|
|
||||||
// RRD will automatically calculate the rate (bytes per second)
|
|
||||||
cmd := exec.CommandContext(ctx, "rrdtool", "update", rrdFile,
|
|
||||||
fmt.Sprintf("%d:%d:%d", stats.Timestamp.Unix(), stats.RxBytes, stats.TxBytes),
|
|
||||||
)
|
|
||||||
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to update RRD: %s: %w", string(output), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FetchRRDData fetches data from RRD database for graphing
|
|
||||||
func (r *RRDService) FetchRRDData(ctx context.Context, startTime time.Time, endTime time.Time, resolution string) ([]NetworkDataPoint, error) {
|
|
||||||
rrdFile := filepath.Join(r.rrdDir, fmt.Sprintf("network-%s.rrd", r.interfaceName))
|
|
||||||
|
|
||||||
// Check if RRD file exists
|
|
||||||
if _, err := os.Stat(rrdFile); os.IsNotExist(err) {
|
|
||||||
return []NetworkDataPoint{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch data using rrdtool fetch
|
|
||||||
// Use AVERAGE consolidation with appropriate resolution
|
|
||||||
cmd := exec.CommandContext(ctx, "rrdtool", "fetch", rrdFile,
|
|
||||||
"AVERAGE",
|
|
||||||
"--start", fmt.Sprintf("%d", startTime.Unix()),
|
|
||||||
"--end", fmt.Sprintf("%d", endTime.Unix()),
|
|
||||||
"--resolution", resolution,
|
|
||||||
)
|
|
||||||
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to fetch RRD data: %s: %w", string(output), err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse rrdtool fetch output
|
|
||||||
// Format:
|
|
||||||
// inbound outbound
|
|
||||||
// 1234567890: 1.2345678901e+06 2.3456789012e+06
|
|
||||||
points := []NetworkDataPoint{}
|
|
||||||
lines := strings.Split(string(output), "\n")
|
|
||||||
|
|
||||||
// Skip header lines
|
|
||||||
dataStart := false
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if line == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if this is the data section
|
|
||||||
if strings.Contains(line, "inbound") && strings.Contains(line, "outbound") {
|
|
||||||
dataStart = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if !dataStart {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse data line: timestamp: inbound_value outbound_value
|
|
||||||
parts := strings.Fields(line)
|
|
||||||
if len(parts) < 3 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse timestamp
|
|
||||||
timestampStr := strings.TrimSuffix(parts[0], ":")
|
|
||||||
timestamp, err := strconv.ParseInt(timestampStr, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse inbound (bytes per second from COUNTER, convert to Mbps)
|
|
||||||
inboundStr := parts[1]
|
|
||||||
inbound, err := strconv.ParseFloat(inboundStr, 64)
|
|
||||||
if err != nil || inbound < 0 {
|
|
||||||
// Skip NaN or negative values
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Convert bytes per second to Mbps (bytes/s * 8 / 1000000)
|
|
||||||
inboundMbps := inbound * 8 / 1000000
|
|
||||||
|
|
||||||
// Parse outbound
|
|
||||||
outboundStr := parts[2]
|
|
||||||
outbound, err := strconv.ParseFloat(outboundStr, 64)
|
|
||||||
if err != nil || outbound < 0 {
|
|
||||||
// Skip NaN or negative values
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
outboundMbps := outbound * 8 / 1000000
|
|
||||||
|
|
||||||
// Format time as MM:SS
|
|
||||||
t := time.Unix(timestamp, 0)
|
|
||||||
timeStr := fmt.Sprintf("%02d:%02d", t.Minute(), t.Second())
|
|
||||||
|
|
||||||
points = append(points, NetworkDataPoint{
|
|
||||||
Time: timeStr,
|
|
||||||
Inbound: inboundMbps,
|
|
||||||
Outbound: outboundMbps,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return points, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NetworkDataPoint represents a single data point for graphing
|
|
||||||
type NetworkDataPoint struct {
|
|
||||||
Time string `json:"time"`
|
|
||||||
Inbound float64 `json:"inbound"` // Mbps
|
|
||||||
Outbound float64 `json:"outbound"` // Mbps
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartCollector starts a background goroutine to periodically collect and update RRD
|
|
||||||
func (r *RRDService) StartCollector(ctx context.Context, interval time.Duration) error {
|
|
||||||
// Initialize RRD if needed
|
|
||||||
if err := r.InitializeRRD(ctx); err != nil {
|
|
||||||
return fmt.Errorf("failed to initialize RRD: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
ticker := time.NewTicker(interval)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return
|
|
||||||
case <-ticker.C:
|
|
||||||
// Get current stats
|
|
||||||
stats, err := r.GetNetworkStats(ctx, r.interfaceName)
|
|
||||||
if err != nil {
|
|
||||||
r.logger.Warn("Failed to get network stats", "error", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update RRD with cumulative byte counts
|
|
||||||
// RRD COUNTER type will automatically calculate rate
|
|
||||||
if err := r.UpdateRRD(ctx, stats); err != nil {
|
|
||||||
r.logger.Warn("Failed to update RRD", "error", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@@ -12,98 +11,18 @@ import (
|
|||||||
"github.com/atlasos/calypso/internal/common/logger"
|
"github.com/atlasos/calypso/internal/common/logger"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NTPSettings represents NTP configuration
|
|
||||||
type NTPSettings struct {
|
|
||||||
Timezone string `json:"timezone"`
|
|
||||||
NTPServers []string `json:"ntp_servers"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Service handles system management operations
|
// Service handles system management operations
|
||||||
type Service struct {
|
type Service struct {
|
||||||
logger *logger.Logger
|
logger *logger.Logger
|
||||||
rrdService *RRDService
|
|
||||||
}
|
|
||||||
|
|
||||||
// detectPrimaryInterface detects the primary network interface (first non-loopback with IP)
|
|
||||||
func detectPrimaryInterface(ctx context.Context) string {
|
|
||||||
// Try to get default route interface
|
|
||||||
cmd := exec.CommandContext(ctx, "ip", "route", "show", "default")
|
|
||||||
output, err := cmd.Output()
|
|
||||||
if err == nil {
|
|
||||||
lines := strings.Split(string(output), "\n")
|
|
||||||
for _, line := range lines {
|
|
||||||
if strings.Contains(line, "dev ") {
|
|
||||||
parts := strings.Fields(line)
|
|
||||||
for i, part := range parts {
|
|
||||||
if part == "dev" && i+1 < len(parts) {
|
|
||||||
iface := parts[i+1]
|
|
||||||
if iface != "lo" {
|
|
||||||
return iface
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback: get first non-loopback interface with IP
|
|
||||||
cmd = exec.CommandContext(ctx, "ip", "-4", "addr", "show")
|
|
||||||
output, err = cmd.Output()
|
|
||||||
if err == nil {
|
|
||||||
lines := strings.Split(string(output), "\n")
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
// Look for interface name line (e.g., "2: ens18: <BROADCAST...")
|
|
||||||
if len(line) > 0 && line[0] >= '0' && line[0] <= '9' && strings.Contains(line, ":") {
|
|
||||||
parts := strings.Fields(line)
|
|
||||||
if len(parts) >= 2 {
|
|
||||||
iface := strings.TrimSuffix(parts[1], ":")
|
|
||||||
if iface != "" && iface != "lo" {
|
|
||||||
// Check if this interface has an IP (next lines will have "inet")
|
|
||||||
// For simplicity, return first non-loopback interface
|
|
||||||
return iface
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Final fallback
|
|
||||||
return "eth0"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewService creates a new system service
|
// NewService creates a new system service
|
||||||
func NewService(log *logger.Logger) *Service {
|
func NewService(log *logger.Logger) *Service {
|
||||||
// Initialize RRD service for network monitoring
|
|
||||||
rrdDir := "/var/lib/calypso/rrd"
|
|
||||||
|
|
||||||
// Auto-detect primary interface
|
|
||||||
ctx := context.Background()
|
|
||||||
interfaceName := detectPrimaryInterface(ctx)
|
|
||||||
log.Info("Detected primary network interface", "interface", interfaceName)
|
|
||||||
|
|
||||||
rrdService := NewRRDService(log, rrdDir, interfaceName)
|
|
||||||
|
|
||||||
return &Service{
|
return &Service{
|
||||||
logger: log,
|
logger: log,
|
||||||
rrdService: rrdService,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// StartNetworkMonitoring starts the RRD collector for network monitoring
|
|
||||||
func (s *Service) StartNetworkMonitoring(ctx context.Context) error {
|
|
||||||
return s.rrdService.StartCollector(ctx, 10*time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetNetworkThroughput fetches network throughput data from RRD
|
|
||||||
func (s *Service) GetNetworkThroughput(ctx context.Context, duration time.Duration) ([]NetworkDataPoint, error) {
|
|
||||||
endTime := time.Now()
|
|
||||||
startTime := endTime.Add(-duration)
|
|
||||||
|
|
||||||
// Use 10 second resolution for recent data
|
|
||||||
return s.rrdService.FetchRRDData(ctx, startTime, endTime, "10")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServiceStatus represents a systemd service status
|
// ServiceStatus represents a systemd service status
|
||||||
type ServiceStatus struct {
|
type ServiceStatus struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
@@ -116,37 +35,31 @@ type ServiceStatus struct {
|
|||||||
|
|
||||||
// GetServiceStatus retrieves the status of a systemd service
|
// GetServiceStatus retrieves the status of a systemd service
|
||||||
func (s *Service) GetServiceStatus(ctx context.Context, serviceName string) (*ServiceStatus, error) {
|
func (s *Service) GetServiceStatus(ctx context.Context, serviceName string) (*ServiceStatus, error) {
|
||||||
status := &ServiceStatus{
|
cmd := exec.CommandContext(ctx, "systemctl", "show", serviceName,
|
||||||
Name: serviceName,
|
"--property=ActiveState,SubState,LoadState,Description,ActiveEnterTimestamp",
|
||||||
}
|
"--value", "--no-pager")
|
||||||
|
|
||||||
// Get each property individually to ensure correct parsing
|
|
||||||
properties := map[string]*string{
|
|
||||||
"ActiveState": &status.ActiveState,
|
|
||||||
"SubState": &status.SubState,
|
|
||||||
"LoadState": &status.LoadState,
|
|
||||||
"Description": &status.Description,
|
|
||||||
}
|
|
||||||
|
|
||||||
for prop, target := range properties {
|
|
||||||
cmd := exec.CommandContext(ctx, "systemctl", "show", serviceName, "--property", prop, "--value", "--no-pager")
|
|
||||||
output, err := cmd.Output()
|
output, err := cmd.Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Warn("Failed to get property", "service", serviceName, "property", prop, "error", err)
|
return nil, fmt.Errorf("failed to get service status: %w", err)
|
||||||
continue
|
|
||||||
}
|
|
||||||
*target = strings.TrimSpace(string(output))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get timestamp if available
|
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
|
||||||
cmd := exec.CommandContext(ctx, "systemctl", "show", serviceName, "--property", "ActiveEnterTimestamp", "--value", "--no-pager")
|
if len(lines) < 4 {
|
||||||
output, err := cmd.Output()
|
return nil, fmt.Errorf("invalid service status output")
|
||||||
if err == nil {
|
|
||||||
timestamp := strings.TrimSpace(string(output))
|
|
||||||
if timestamp != "" {
|
|
||||||
if t, err := time.Parse("Mon 2006-01-02 15:04:05 MST", timestamp); err == nil {
|
|
||||||
status.Since = t
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
status := &ServiceStatus{
|
||||||
|
Name: serviceName,
|
||||||
|
ActiveState: strings.TrimSpace(lines[0]),
|
||||||
|
SubState: strings.TrimSpace(lines[1]),
|
||||||
|
LoadState: strings.TrimSpace(lines[2]),
|
||||||
|
Description: strings.TrimSpace(lines[3]),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse timestamp if available
|
||||||
|
if len(lines) > 4 && lines[4] != "" {
|
||||||
|
if t, err := time.Parse("Mon 2006-01-02 15:04:05 MST", strings.TrimSpace(lines[4])); err == nil {
|
||||||
|
status.Since = t
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -156,15 +69,10 @@ func (s *Service) GetServiceStatus(ctx context.Context, serviceName string) (*Se
|
|||||||
// ListServices lists all Calypso-related services
|
// ListServices lists all Calypso-related services
|
||||||
func (s *Service) ListServices(ctx context.Context) ([]ServiceStatus, error) {
|
func (s *Service) ListServices(ctx context.Context) ([]ServiceStatus, error) {
|
||||||
services := []string{
|
services := []string{
|
||||||
"ssh",
|
|
||||||
"sshd",
|
|
||||||
"smbd",
|
|
||||||
"iscsi-scst",
|
|
||||||
"nfs-server",
|
|
||||||
"nfs",
|
|
||||||
"mhvtl",
|
|
||||||
"calypso-api",
|
"calypso-api",
|
||||||
"scst",
|
"scst",
|
||||||
|
"iscsi-scst",
|
||||||
|
"mhvtl",
|
||||||
"postgresql",
|
"postgresql",
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -220,108 +128,6 @@ func (s *Service) GetJournalLogs(ctx context.Context, serviceName string, lines
|
|||||||
return logs, nil
|
return logs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// SystemLogEntry represents a parsed system log entry
|
|
||||||
type SystemLogEntry struct {
|
|
||||||
Time string `json:"time"`
|
|
||||||
Level string `json:"level"`
|
|
||||||
Source string `json:"source"`
|
|
||||||
Message string `json:"message"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSystemLogs retrieves recent system logs from journalctl
|
|
||||||
func (s *Service) GetSystemLogs(ctx context.Context, limit int) ([]SystemLogEntry, error) {
|
|
||||||
if limit <= 0 || limit > 100 {
|
|
||||||
limit = 30 // Default to 30 logs
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := exec.CommandContext(ctx, "journalctl",
|
|
||||||
"-n", fmt.Sprintf("%d", limit),
|
|
||||||
"-o", "json",
|
|
||||||
"--no-pager",
|
|
||||||
"--since", "1 hour ago") // Only get logs from last hour
|
|
||||||
output, err := cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to get system logs: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var logs []SystemLogEntry
|
|
||||||
linesOutput := strings.Split(strings.TrimSpace(string(output)), "\n")
|
|
||||||
for _, line := range linesOutput {
|
|
||||||
if line == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var logEntry map[string]interface{}
|
|
||||||
if err := json.Unmarshal([]byte(line), &logEntry); err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse timestamp (__REALTIME_TIMESTAMP is in microseconds)
|
|
||||||
var timeStr string
|
|
||||||
if timestamp, ok := logEntry["__REALTIME_TIMESTAMP"].(float64); ok {
|
|
||||||
// Convert microseconds to nanoseconds for time.Unix (1 microsecond = 1000 nanoseconds)
|
|
||||||
t := time.Unix(0, int64(timestamp)*1000)
|
|
||||||
timeStr = t.Format("15:04:05")
|
|
||||||
} else if timestamp, ok := logEntry["_SOURCE_REALTIME_TIMESTAMP"].(float64); ok {
|
|
||||||
t := time.Unix(0, int64(timestamp)*1000)
|
|
||||||
timeStr = t.Format("15:04:05")
|
|
||||||
} else {
|
|
||||||
timeStr = time.Now().Format("15:04:05")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse log level (priority)
|
|
||||||
level := "INFO"
|
|
||||||
if priority, ok := logEntry["PRIORITY"].(float64); ok {
|
|
||||||
switch int(priority) {
|
|
||||||
case 0: // emerg
|
|
||||||
level = "EMERG"
|
|
||||||
case 1, 2, 3: // alert, crit, err
|
|
||||||
level = "ERROR"
|
|
||||||
case 4: // warning
|
|
||||||
level = "WARN"
|
|
||||||
case 5: // notice
|
|
||||||
level = "NOTICE"
|
|
||||||
case 6: // info
|
|
||||||
level = "INFO"
|
|
||||||
case 7: // debug
|
|
||||||
level = "DEBUG"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse source (systemd unit or syslog identifier)
|
|
||||||
source := "system"
|
|
||||||
if unit, ok := logEntry["_SYSTEMD_UNIT"].(string); ok && unit != "" {
|
|
||||||
// Remove .service suffix if present
|
|
||||||
source = strings.TrimSuffix(unit, ".service")
|
|
||||||
} else if ident, ok := logEntry["SYSLOG_IDENTIFIER"].(string); ok && ident != "" {
|
|
||||||
source = ident
|
|
||||||
} else if comm, ok := logEntry["_COMM"].(string); ok && comm != "" {
|
|
||||||
source = comm
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse message
|
|
||||||
message := ""
|
|
||||||
if msg, ok := logEntry["MESSAGE"].(string); ok {
|
|
||||||
message = msg
|
|
||||||
}
|
|
||||||
|
|
||||||
if message != "" {
|
|
||||||
logs = append(logs, SystemLogEntry{
|
|
||||||
Time: timeStr,
|
|
||||||
Level: level,
|
|
||||||
Source: source,
|
|
||||||
Message: message,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reverse to get newest first
|
|
||||||
for i, j := 0, len(logs)-1; i < j; i, j = i+1, j-1 {
|
|
||||||
logs[i], logs[j] = logs[j], logs[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
return logs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenerateSupportBundle generates a diagnostic support bundle
|
// GenerateSupportBundle generates a diagnostic support bundle
|
||||||
func (s *Service) GenerateSupportBundle(ctx context.Context, outputPath string) error {
|
func (s *Service) GenerateSupportBundle(ctx context.Context, outputPath string) error {
|
||||||
// Create bundle directory
|
// Create bundle directory
|
||||||
@@ -369,505 +175,3 @@ func (s *Service) GenerateSupportBundle(ctx context.Context, outputPath string)
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NetworkInterface represents a network interface
|
|
||||||
type NetworkInterface struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
IPAddress string `json:"ip_address"`
|
|
||||||
Subnet string `json:"subnet"`
|
|
||||||
Status string `json:"status"` // "Connected" or "Down"
|
|
||||||
Speed string `json:"speed"` // e.g., "10 Gbps", "1 Gbps"
|
|
||||||
Role string `json:"role"` // "Management", "ISCSI", or empty
|
|
||||||
Gateway string `json:"gateway,omitempty"`
|
|
||||||
DNS1 string `json:"dns1,omitempty"`
|
|
||||||
DNS2 string `json:"dns2,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListNetworkInterfaces lists all network interfaces
|
|
||||||
func (s *Service) ListNetworkInterfaces(ctx context.Context) ([]NetworkInterface, error) {
|
|
||||||
// First, get all interface names and their states
|
|
||||||
cmd := exec.CommandContext(ctx, "ip", "link", "show")
|
|
||||||
output, err := cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Error("Failed to list interfaces", "error", err)
|
|
||||||
return nil, fmt.Errorf("failed to list interfaces: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
interfaceMap := make(map[string]*NetworkInterface)
|
|
||||||
lines := strings.Split(string(output), "\n")
|
|
||||||
|
|
||||||
s.logger.Debug("Parsing network interfaces", "output_lines", len(lines))
|
|
||||||
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if line == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse interface name and state
|
|
||||||
// Format: "2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000"
|
|
||||||
// Look for lines that start with a number followed by ":" (interface definition line)
|
|
||||||
// Simple check: line starts with digit, contains ":", and contains "state"
|
|
||||||
if len(line) > 0 && line[0] >= '0' && line[0] <= '9' && strings.Contains(line, ":") && strings.Contains(line, "state") {
|
|
||||||
parts := strings.Fields(line)
|
|
||||||
if len(parts) < 2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract interface name (e.g., "ens18:" or "lo:")
|
|
||||||
ifaceName := strings.TrimSuffix(parts[1], ":")
|
|
||||||
if ifaceName == "" || ifaceName == "lo" {
|
|
||||||
continue // Skip loopback
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract state - look for "state UP" or "state DOWN" in the line
|
|
||||||
state := "Down"
|
|
||||||
if strings.Contains(line, "state UP") {
|
|
||||||
state = "Connected"
|
|
||||||
} else if strings.Contains(line, "state DOWN") {
|
|
||||||
state = "Down"
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Info("Found interface", "name", ifaceName, "state", state)
|
|
||||||
|
|
||||||
interfaceMap[ifaceName] = &NetworkInterface{
|
|
||||||
Name: ifaceName,
|
|
||||||
Status: state,
|
|
||||||
Speed: "Unknown",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Debug("Found interfaces from ip link", "count", len(interfaceMap))
|
|
||||||
|
|
||||||
// Get IP addresses for each interface
|
|
||||||
cmd = exec.CommandContext(ctx, "ip", "-4", "addr", "show")
|
|
||||||
output, err = cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Warn("Failed to get IP addresses", "error", err)
|
|
||||||
} else {
|
|
||||||
lines = strings.Split(string(output), "\n")
|
|
||||||
var currentIfaceName string
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if line == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse interface name (e.g., "2: ens18: <BROADCAST,MULTICAST,UP,LOWER_UP>")
|
|
||||||
if strings.Contains(line, ":") && !strings.Contains(line, "inet") && !strings.HasPrefix(line, "valid_lft") && !strings.HasPrefix(line, "altname") {
|
|
||||||
parts := strings.Fields(line)
|
|
||||||
if len(parts) >= 2 {
|
|
||||||
currentIfaceName = strings.TrimSuffix(parts[1], ":")
|
|
||||||
s.logger.Debug("Processing interface for IP", "name", currentIfaceName)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse IP address (e.g., "inet 10.10.14.16/24 brd 10.10.14.255 scope global ens18")
|
|
||||||
if strings.HasPrefix(line, "inet ") && currentIfaceName != "" && currentIfaceName != "lo" {
|
|
||||||
parts := strings.Fields(line)
|
|
||||||
if len(parts) >= 2 {
|
|
||||||
ipWithSubnet := parts[1] // e.g., "10.10.14.16/24"
|
|
||||||
ipParts := strings.Split(ipWithSubnet, "/")
|
|
||||||
if len(ipParts) == 2 {
|
|
||||||
ip := ipParts[0]
|
|
||||||
subnet := ipParts[1]
|
|
||||||
|
|
||||||
// Find or create interface
|
|
||||||
iface, exists := interfaceMap[currentIfaceName]
|
|
||||||
if !exists {
|
|
||||||
s.logger.Debug("Creating new interface entry", "name", currentIfaceName)
|
|
||||||
iface = &NetworkInterface{
|
|
||||||
Name: currentIfaceName,
|
|
||||||
Status: "Down",
|
|
||||||
Speed: "Unknown",
|
|
||||||
}
|
|
||||||
interfaceMap[currentIfaceName] = iface
|
|
||||||
}
|
|
||||||
|
|
||||||
iface.IPAddress = ip
|
|
||||||
iface.Subnet = subnet
|
|
||||||
s.logger.Debug("Set IP for interface", "name", currentIfaceName, "ip", ip, "subnet", subnet)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get default gateway for each interface
|
|
||||||
cmd = exec.CommandContext(ctx, "ip", "route", "show")
|
|
||||||
output, err = cmd.Output()
|
|
||||||
if err == nil {
|
|
||||||
lines = strings.Split(string(output), "\n")
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if line == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse default route: "default via 10.10.14.1 dev ens18"
|
|
||||||
if strings.HasPrefix(line, "default via ") {
|
|
||||||
parts := strings.Fields(line)
|
|
||||||
// Find "via" and "dev" in the parts
|
|
||||||
var gateway string
|
|
||||||
var ifaceName string
|
|
||||||
for i, part := range parts {
|
|
||||||
if part == "via" && i+1 < len(parts) {
|
|
||||||
gateway = parts[i+1]
|
|
||||||
}
|
|
||||||
if part == "dev" && i+1 < len(parts) {
|
|
||||||
ifaceName = parts[i+1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if gateway != "" && ifaceName != "" {
|
|
||||||
if iface, exists := interfaceMap[ifaceName]; exists {
|
|
||||||
iface.Gateway = gateway
|
|
||||||
s.logger.Info("Set default gateway for interface", "name", ifaceName, "gateway", gateway)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if strings.Contains(line, " via ") && strings.Contains(line, " dev ") {
|
|
||||||
// Parse network route: "10.10.14.0/24 via 10.10.14.1 dev ens18"
|
|
||||||
// Or: "192.168.1.0/24 via 192.168.1.1 dev eth0"
|
|
||||||
parts := strings.Fields(line)
|
|
||||||
var gateway string
|
|
||||||
var ifaceName string
|
|
||||||
for i, part := range parts {
|
|
||||||
if part == "via" && i+1 < len(parts) {
|
|
||||||
gateway = parts[i+1]
|
|
||||||
}
|
|
||||||
if part == "dev" && i+1 < len(parts) {
|
|
||||||
ifaceName = parts[i+1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Only set gateway if it's not already set (prefer default route)
|
|
||||||
if gateway != "" && ifaceName != "" {
|
|
||||||
if iface, exists := interfaceMap[ifaceName]; exists {
|
|
||||||
if iface.Gateway == "" {
|
|
||||||
iface.Gateway = gateway
|
|
||||||
s.logger.Info("Set gateway from network route for interface", "name", ifaceName, "gateway", gateway)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
s.logger.Warn("Failed to get routes", "error", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get DNS servers from systemd-resolved or /etc/resolv.conf
|
|
||||||
// Try systemd-resolved first
|
|
||||||
cmd = exec.CommandContext(ctx, "systemd-resolve", "--status")
|
|
||||||
output, err = cmd.Output()
|
|
||||||
dnsServers := []string{}
|
|
||||||
if err == nil {
|
|
||||||
// Parse DNS from systemd-resolve output
|
|
||||||
lines = strings.Split(string(output), "\n")
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if strings.HasPrefix(line, "DNS Servers:") {
|
|
||||||
// Format: "DNS Servers: 8.8.8.8 8.8.4.4"
|
|
||||||
parts := strings.Fields(line)
|
|
||||||
if len(parts) >= 3 {
|
|
||||||
dnsServers = parts[2:]
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Fallback to /etc/resolv.conf
|
|
||||||
data, err := os.ReadFile("/etc/resolv.conf")
|
|
||||||
if err == nil {
|
|
||||||
lines = strings.Split(string(data), "\n")
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if strings.HasPrefix(line, "nameserver ") {
|
|
||||||
dns := strings.TrimPrefix(line, "nameserver ")
|
|
||||||
dns = strings.TrimSpace(dns)
|
|
||||||
if dns != "" {
|
|
||||||
dnsServers = append(dnsServers, dns)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert map to slice
|
|
||||||
var interfaces []NetworkInterface
|
|
||||||
s.logger.Debug("Converting interface map to slice", "map_size", len(interfaceMap))
|
|
||||||
for _, iface := range interfaceMap {
|
|
||||||
// Get speed for each interface using ethtool
|
|
||||||
if iface.Name != "" && iface.Name != "lo" {
|
|
||||||
cmd := exec.CommandContext(ctx, "ethtool", iface.Name)
|
|
||||||
output, err := cmd.Output()
|
|
||||||
if err == nil {
|
|
||||||
// Parse speed from ethtool output
|
|
||||||
ethtoolLines := strings.Split(string(output), "\n")
|
|
||||||
for _, ethtoolLine := range ethtoolLines {
|
|
||||||
if strings.Contains(ethtoolLine, "Speed:") {
|
|
||||||
parts := strings.Fields(ethtoolLine)
|
|
||||||
if len(parts) >= 2 {
|
|
||||||
iface.Speed = parts[1]
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set DNS servers (use first two if available)
|
|
||||||
if len(dnsServers) > 0 {
|
|
||||||
iface.DNS1 = dnsServers[0]
|
|
||||||
}
|
|
||||||
if len(dnsServers) > 1 {
|
|
||||||
iface.DNS2 = dnsServers[1]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine role based on interface name or IP (simple heuristic)
|
|
||||||
// You can enhance this with configuration file or database lookup
|
|
||||||
if strings.Contains(iface.Name, "eth") || strings.Contains(iface.Name, "ens") {
|
|
||||||
// Default to Management for first interface, ISCSI for others
|
|
||||||
if iface.Name == "eth0" || iface.Name == "ens18" {
|
|
||||||
iface.Role = "Management"
|
|
||||||
} else {
|
|
||||||
// Check if IP is in typical iSCSI range (10.x.x.x)
|
|
||||||
if strings.HasPrefix(iface.IPAddress, "10.") && iface.IPAddress != "" {
|
|
||||||
iface.Role = "ISCSI"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
interfaces = append(interfaces, *iface)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If no interfaces found, return empty slice
|
|
||||||
if len(interfaces) == 0 {
|
|
||||||
s.logger.Warn("No network interfaces found")
|
|
||||||
return []NetworkInterface{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Info("Listed network interfaces", "count", len(interfaces))
|
|
||||||
return interfaces, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateNetworkInterfaceRequest represents the request to update a network interface
|
|
||||||
type UpdateNetworkInterfaceRequest struct {
|
|
||||||
IPAddress string `json:"ip_address"`
|
|
||||||
Subnet string `json:"subnet"`
|
|
||||||
Gateway string `json:"gateway,omitempty"`
|
|
||||||
DNS1 string `json:"dns1,omitempty"`
|
|
||||||
DNS2 string `json:"dns2,omitempty"`
|
|
||||||
Role string `json:"role,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateNetworkInterface updates network interface configuration
|
|
||||||
func (s *Service) UpdateNetworkInterface(ctx context.Context, ifaceName string, req UpdateNetworkInterfaceRequest) (*NetworkInterface, error) {
|
|
||||||
// Validate interface exists
|
|
||||||
cmd := exec.CommandContext(ctx, "ip", "link", "show", ifaceName)
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return nil, fmt.Errorf("interface %s not found: %w", ifaceName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove existing IP address if any
|
|
||||||
cmd = exec.CommandContext(ctx, "ip", "addr", "flush", "dev", ifaceName)
|
|
||||||
cmd.Run() // Ignore error, interface might not have IP
|
|
||||||
|
|
||||||
// Set new IP address and subnet
|
|
||||||
ipWithSubnet := fmt.Sprintf("%s/%s", req.IPAddress, req.Subnet)
|
|
||||||
cmd = exec.CommandContext(ctx, "ip", "addr", "add", ipWithSubnet, "dev", ifaceName)
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Error("Failed to set IP address", "interface", ifaceName, "error", err, "output", string(output))
|
|
||||||
return nil, fmt.Errorf("failed to set IP address: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove existing default route if any
|
|
||||||
cmd = exec.CommandContext(ctx, "ip", "route", "del", "default")
|
|
||||||
cmd.Run() // Ignore error, might not exist
|
|
||||||
|
|
||||||
// Set gateway if provided
|
|
||||||
if req.Gateway != "" {
|
|
||||||
cmd = exec.CommandContext(ctx, "ip", "route", "add", "default", "via", req.Gateway, "dev", ifaceName)
|
|
||||||
output, err = cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Error("Failed to set gateway", "interface", ifaceName, "error", err, "output", string(output))
|
|
||||||
return nil, fmt.Errorf("failed to set gateway: %w", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update DNS in systemd-resolved or /etc/resolv.conf
|
|
||||||
if req.DNS1 != "" || req.DNS2 != "" {
|
|
||||||
// Try using systemd-resolve first
|
|
||||||
cmd = exec.CommandContext(ctx, "systemd-resolve", "--status")
|
|
||||||
if cmd.Run() == nil {
|
|
||||||
// systemd-resolve is available, use it
|
|
||||||
dnsServers := []string{}
|
|
||||||
if req.DNS1 != "" {
|
|
||||||
dnsServers = append(dnsServers, req.DNS1)
|
|
||||||
}
|
|
||||||
if req.DNS2 != "" {
|
|
||||||
dnsServers = append(dnsServers, req.DNS2)
|
|
||||||
}
|
|
||||||
if len(dnsServers) > 0 {
|
|
||||||
// Use resolvectl to set DNS (newer systemd)
|
|
||||||
cmd = exec.CommandContext(ctx, "resolvectl", "dns", ifaceName, strings.Join(dnsServers, " "))
|
|
||||||
if cmd.Run() != nil {
|
|
||||||
// Fallback to systemd-resolve
|
|
||||||
cmd = exec.CommandContext(ctx, "systemd-resolve", "--interface", ifaceName, "--set-dns", strings.Join(dnsServers, " "))
|
|
||||||
output, err = cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Warn("Failed to set DNS via systemd-resolve", "error", err, "output", string(output))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Fallback: update /etc/resolv.conf
|
|
||||||
resolvContent := "# Generated by Calypso\n"
|
|
||||||
if req.DNS1 != "" {
|
|
||||||
resolvContent += fmt.Sprintf("nameserver %s\n", req.DNS1)
|
|
||||||
}
|
|
||||||
if req.DNS2 != "" {
|
|
||||||
resolvContent += fmt.Sprintf("nameserver %s\n", req.DNS2)
|
|
||||||
}
|
|
||||||
|
|
||||||
tmpPath := "/tmp/resolv.conf." + fmt.Sprintf("%d", time.Now().Unix())
|
|
||||||
if err := os.WriteFile(tmpPath, []byte(resolvContent), 0644); err != nil {
|
|
||||||
s.logger.Warn("Failed to write temporary resolv.conf", "error", err)
|
|
||||||
} else {
|
|
||||||
cmd = exec.CommandContext(ctx, "sh", "-c", fmt.Sprintf("mv %s /etc/resolv.conf", tmpPath))
|
|
||||||
output, err = cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Warn("Failed to update /etc/resolv.conf", "error", err, "output", string(output))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bring interface up
|
|
||||||
cmd = exec.CommandContext(ctx, "ip", "link", "set", ifaceName, "up")
|
|
||||||
output, err = cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Warn("Failed to bring interface up", "interface", ifaceName, "error", err, "output", string(output))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Return updated interface
|
|
||||||
updatedIface := &NetworkInterface{
|
|
||||||
Name: ifaceName,
|
|
||||||
IPAddress: req.IPAddress,
|
|
||||||
Subnet: req.Subnet,
|
|
||||||
Gateway: req.Gateway,
|
|
||||||
DNS1: req.DNS1,
|
|
||||||
DNS2: req.DNS2,
|
|
||||||
Role: req.Role,
|
|
||||||
Status: "Connected",
|
|
||||||
Speed: "Unknown", // Will be updated on next list
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Info("Updated network interface", "interface", ifaceName, "ip", req.IPAddress, "subnet", req.Subnet)
|
|
||||||
return updatedIface, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SaveNTPSettings saves NTP configuration to the OS
|
|
||||||
func (s *Service) SaveNTPSettings(ctx context.Context, settings NTPSettings) error {
|
|
||||||
// Set timezone using timedatectl
|
|
||||||
if settings.Timezone != "" {
|
|
||||||
cmd := exec.CommandContext(ctx, "timedatectl", "set-timezone", settings.Timezone)
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Error("Failed to set timezone", "timezone", settings.Timezone, "error", err, "output", string(output))
|
|
||||||
return fmt.Errorf("failed to set timezone: %w", err)
|
|
||||||
}
|
|
||||||
s.logger.Info("Timezone set", "timezone", settings.Timezone)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Configure NTP servers in systemd-timesyncd
|
|
||||||
if len(settings.NTPServers) > 0 {
|
|
||||||
configPath := "/etc/systemd/timesyncd.conf"
|
|
||||||
|
|
||||||
// Build config content
|
|
||||||
configContent := "[Time]\n"
|
|
||||||
configContent += "NTP="
|
|
||||||
for i, server := range settings.NTPServers {
|
|
||||||
if i > 0 {
|
|
||||||
configContent += " "
|
|
||||||
}
|
|
||||||
configContent += server
|
|
||||||
}
|
|
||||||
configContent += "\n"
|
|
||||||
|
|
||||||
// Write to temporary file first, then move to final location (requires root)
|
|
||||||
tmpPath := "/tmp/timesyncd.conf." + fmt.Sprintf("%d", time.Now().Unix())
|
|
||||||
if err := os.WriteFile(tmpPath, []byte(configContent), 0644); err != nil {
|
|
||||||
s.logger.Error("Failed to write temporary NTP config", "error", err)
|
|
||||||
return fmt.Errorf("failed to write temporary NTP configuration: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Move to final location using sudo (requires root privileges)
|
|
||||||
cmd := exec.CommandContext(ctx, "sh", "-c", fmt.Sprintf("mv %s %s", tmpPath, configPath))
|
|
||||||
output, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Error("Failed to move NTP config", "error", err, "output", string(output))
|
|
||||||
os.Remove(tmpPath) // Clean up temp file
|
|
||||||
return fmt.Errorf("failed to move NTP configuration: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Restart systemd-timesyncd to apply changes
|
|
||||||
cmd = exec.CommandContext(ctx, "systemctl", "restart", "systemd-timesyncd")
|
|
||||||
output, err = cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Error("Failed to restart systemd-timesyncd", "error", err, "output", string(output))
|
|
||||||
return fmt.Errorf("failed to restart systemd-timesyncd: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
s.logger.Info("NTP servers configured", "servers", settings.NTPServers)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetNTPSettings retrieves current NTP configuration from the OS
|
|
||||||
func (s *Service) GetNTPSettings(ctx context.Context) (*NTPSettings, error) {
|
|
||||||
settings := &NTPSettings{
|
|
||||||
NTPServers: []string{},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get current timezone using timedatectl
|
|
||||||
cmd := exec.CommandContext(ctx, "timedatectl", "show", "--property=Timezone", "--value")
|
|
||||||
output, err := cmd.Output()
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Warn("Failed to get timezone", "error", err)
|
|
||||||
settings.Timezone = "Etc/UTC" // Default fallback
|
|
||||||
} else {
|
|
||||||
settings.Timezone = strings.TrimSpace(string(output))
|
|
||||||
if settings.Timezone == "" {
|
|
||||||
settings.Timezone = "Etc/UTC"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read NTP servers from systemd-timesyncd config
|
|
||||||
configPath := "/etc/systemd/timesyncd.conf"
|
|
||||||
data, err := os.ReadFile(configPath)
|
|
||||||
if err != nil {
|
|
||||||
s.logger.Warn("Failed to read NTP config", "error", err)
|
|
||||||
// Default NTP servers if config file doesn't exist
|
|
||||||
settings.NTPServers = []string{"pool.ntp.org", "time.google.com"}
|
|
||||||
} else {
|
|
||||||
// Parse NTP servers from config file
|
|
||||||
lines := strings.Split(string(data), "\n")
|
|
||||||
for _, line := range lines {
|
|
||||||
line = strings.TrimSpace(line)
|
|
||||||
if strings.HasPrefix(line, "NTP=") {
|
|
||||||
ntpLine := strings.TrimPrefix(line, "NTP=")
|
|
||||||
if ntpLine != "" {
|
|
||||||
servers := strings.Fields(ntpLine)
|
|
||||||
settings.NTPServers = servers
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If no NTP servers found in config, use defaults
|
|
||||||
if len(settings.NTPServers) == 0 {
|
|
||||||
settings.NTPServers = []string{"pool.ntp.org", "time.google.com"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return settings, nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
package tape_vtl
|
package tape_vtl
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/atlasos/calypso/internal/common/database"
|
"github.com/atlasos/calypso/internal/common/database"
|
||||||
@@ -30,7 +29,6 @@ func NewHandler(db *database.DB, log *logger.Logger) *Handler {
|
|||||||
|
|
||||||
// ListLibraries lists all virtual tape libraries
|
// ListLibraries lists all virtual tape libraries
|
||||||
func (h *Handler) ListLibraries(c *gin.Context) {
|
func (h *Handler) ListLibraries(c *gin.Context) {
|
||||||
h.logger.Info("ListLibraries called")
|
|
||||||
libraries, err := h.service.ListLibraries(c.Request.Context())
|
libraries, err := h.service.ListLibraries(c.Request.Context())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logger.Error("Failed to list libraries", "error", err)
|
h.logger.Error("Failed to list libraries", "error", err)
|
||||||
@@ -38,36 +36,7 @@ func (h *Handler) ListLibraries(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
h.logger.Info("ListLibraries result", "count", len(libraries), "is_nil", libraries == nil)
|
c.JSON(http.StatusOK, gin.H{"libraries": libraries})
|
||||||
|
|
||||||
// Ensure we return an empty array instead of null
|
|
||||||
if libraries == nil {
|
|
||||||
h.logger.Warn("Libraries is nil, converting to empty array")
|
|
||||||
libraries = []VirtualTapeLibrary{}
|
|
||||||
}
|
|
||||||
|
|
||||||
h.logger.Info("Returning libraries", "count", len(libraries), "libraries", libraries)
|
|
||||||
|
|
||||||
// Ensure we always return an array, never null
|
|
||||||
if libraries == nil {
|
|
||||||
libraries = []VirtualTapeLibrary{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Force empty array if nil (double check)
|
|
||||||
if libraries == nil {
|
|
||||||
h.logger.Warn("Libraries is still nil in handler, forcing empty array")
|
|
||||||
libraries = []VirtualTapeLibrary{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use explicit JSON marshalling to ensure empty array, not null
|
|
||||||
response := map[string]interface{}{
|
|
||||||
"libraries": libraries,
|
|
||||||
}
|
|
||||||
|
|
||||||
h.logger.Info("Response payload", "count", len(libraries), "response_type", fmt.Sprintf("%T", libraries))
|
|
||||||
|
|
||||||
// Use JSON marshalling that handles empty slices correctly
|
|
||||||
c.JSON(http.StatusOK, response)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetLibrary retrieves a library by ID
|
// GetLibrary retrieves a library by ID
|
||||||
@@ -326,3 +295,4 @@ func (h *Handler) UnloadTape(c *gin.Context) {
|
|||||||
|
|
||||||
c.JSON(http.StatusAccepted, gin.H{"task_id": taskID})
|
c.JSON(http.StatusAccepted, gin.H{"task_id": taskID})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -67,7 +67,7 @@ func (m *MHVTLMonitor) Stop() {
|
|||||||
|
|
||||||
// syncMHVTL parses mhvtl configuration and syncs to database
|
// syncMHVTL parses mhvtl configuration and syncs to database
|
||||||
func (m *MHVTLMonitor) syncMHVTL(ctx context.Context) {
|
func (m *MHVTLMonitor) syncMHVTL(ctx context.Context) {
|
||||||
m.logger.Info("Running MHVTL configuration sync")
|
m.logger.Debug("Running MHVTL configuration sync")
|
||||||
|
|
||||||
deviceConfPath := filepath.Join(m.configPath, "device.conf")
|
deviceConfPath := filepath.Join(m.configPath, "device.conf")
|
||||||
if _, err := os.Stat(deviceConfPath); os.IsNotExist(err) {
|
if _, err := os.Stat(deviceConfPath); os.IsNotExist(err) {
|
||||||
@@ -84,11 +84,6 @@ func (m *MHVTLMonitor) syncMHVTL(ctx context.Context) {
|
|||||||
|
|
||||||
m.logger.Info("Parsed MHVTL configuration", "libraries", len(libraries), "drives", len(drives))
|
m.logger.Info("Parsed MHVTL configuration", "libraries", len(libraries), "drives", len(drives))
|
||||||
|
|
||||||
// Log parsed drives for debugging
|
|
||||||
for _, drive := range drives {
|
|
||||||
m.logger.Debug("Parsed drive", "drive_id", drive.DriveID, "library_id", drive.LibraryID, "slot", drive.Slot)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sync libraries to database
|
// Sync libraries to database
|
||||||
for _, lib := range libraries {
|
for _, lib := range libraries {
|
||||||
if err := m.syncLibrary(ctx, lib); err != nil {
|
if err := m.syncLibrary(ctx, lib); err != nil {
|
||||||
@@ -99,9 +94,7 @@ func (m *MHVTLMonitor) syncMHVTL(ctx context.Context) {
|
|||||||
// Sync drives to database
|
// Sync drives to database
|
||||||
for _, drive := range drives {
|
for _, drive := range drives {
|
||||||
if err := m.syncDrive(ctx, drive); err != nil {
|
if err := m.syncDrive(ctx, drive); err != nil {
|
||||||
m.logger.Error("Failed to sync drive", "drive_id", drive.DriveID, "library_id", drive.LibraryID, "slot", drive.Slot, "error", err)
|
m.logger.Error("Failed to sync drive", "drive_id", drive.DriveID, "error", err)
|
||||||
} else {
|
|
||||||
m.logger.Debug("Synced drive", "drive_id", drive.DriveID, "library_id", drive.LibraryID, "slot", drive.Slot)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -113,7 +106,7 @@ func (m *MHVTLMonitor) syncMHVTL(ctx context.Context) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
m.logger.Info("MHVTL configuration sync completed")
|
m.logger.Debug("MHVTL configuration sync completed")
|
||||||
}
|
}
|
||||||
|
|
||||||
// LibraryInfo represents a library from device.conf
|
// LibraryInfo represents a library from device.conf
|
||||||
@@ -196,7 +189,6 @@ func (m *MHVTLMonitor) parseDeviceConf(ctx context.Context, path string) ([]Libr
|
|||||||
Target: matches[3],
|
Target: matches[3],
|
||||||
LUN: matches[4],
|
LUN: matches[4],
|
||||||
}
|
}
|
||||||
// Library ID and Slot might be on the same line or next line
|
|
||||||
if matches := libraryIDRegex.FindStringSubmatch(line); matches != nil {
|
if matches := libraryIDRegex.FindStringSubmatch(line); matches != nil {
|
||||||
libID, _ := strconv.Atoi(matches[1])
|
libID, _ := strconv.Atoi(matches[1])
|
||||||
slot, _ := strconv.Atoi(matches[2])
|
slot, _ := strconv.Atoi(matches[2])
|
||||||
@@ -206,63 +198,34 @@ func (m *MHVTLMonitor) parseDeviceConf(ctx context.Context, path string) ([]Libr
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse library fields (only if we're in a library section and not in a drive section)
|
// Parse library fields
|
||||||
if currentLibrary != nil && currentDrive == nil {
|
if currentLibrary != nil {
|
||||||
// Handle both "Vendor identification:" and " Vendor identification:" (with leading space)
|
if strings.HasPrefix(line, "Vendor identification:") {
|
||||||
if strings.Contains(line, "Vendor identification:") {
|
currentLibrary.Vendor = strings.TrimSpace(strings.TrimPrefix(line, "Vendor identification:"))
|
||||||
parts := strings.Split(line, "Vendor identification:")
|
} else if strings.HasPrefix(line, "Product identification:") {
|
||||||
if len(parts) > 1 {
|
currentLibrary.Product = strings.TrimSpace(strings.TrimPrefix(line, "Product identification:"))
|
||||||
currentLibrary.Vendor = strings.TrimSpace(parts[1])
|
} else if strings.HasPrefix(line, "Unit serial number:") {
|
||||||
m.logger.Debug("Parsed vendor", "vendor", currentLibrary.Vendor, "library_id", currentLibrary.LibraryID)
|
currentLibrary.SerialNumber = strings.TrimSpace(strings.TrimPrefix(line, "Unit serial number:"))
|
||||||
}
|
} else if strings.HasPrefix(line, "Home directory:") {
|
||||||
} else if strings.Contains(line, "Product identification:") {
|
currentLibrary.HomeDirectory = strings.TrimSpace(strings.TrimPrefix(line, "Home directory:"))
|
||||||
parts := strings.Split(line, "Product identification:")
|
|
||||||
if len(parts) > 1 {
|
|
||||||
currentLibrary.Product = strings.TrimSpace(parts[1])
|
|
||||||
m.logger.Info("Parsed library product", "product", currentLibrary.Product, "library_id", currentLibrary.LibraryID)
|
|
||||||
}
|
|
||||||
} else if strings.Contains(line, "Unit serial number:") {
|
|
||||||
parts := strings.Split(line, "Unit serial number:")
|
|
||||||
if len(parts) > 1 {
|
|
||||||
currentLibrary.SerialNumber = strings.TrimSpace(parts[1])
|
|
||||||
}
|
|
||||||
} else if strings.Contains(line, "Home directory:") {
|
|
||||||
parts := strings.Split(line, "Home directory:")
|
|
||||||
if len(parts) > 1 {
|
|
||||||
currentLibrary.HomeDirectory = strings.TrimSpace(parts[1])
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse drive fields
|
// Parse drive fields
|
||||||
if currentDrive != nil {
|
if currentDrive != nil {
|
||||||
// Check for Library ID and Slot first (can be on separate line)
|
if strings.HasPrefix(line, "Vendor identification:") {
|
||||||
if strings.Contains(line, "Library ID:") && strings.Contains(line, "Slot:") {
|
currentDrive.Vendor = strings.TrimSpace(strings.TrimPrefix(line, "Vendor identification:"))
|
||||||
|
} else if strings.HasPrefix(line, "Product identification:") {
|
||||||
|
currentDrive.Product = strings.TrimSpace(strings.TrimPrefix(line, "Product identification:"))
|
||||||
|
} else if strings.HasPrefix(line, "Unit serial number:") {
|
||||||
|
currentDrive.SerialNumber = strings.TrimSpace(strings.TrimPrefix(line, "Unit serial number:"))
|
||||||
|
} else if strings.HasPrefix(line, "Library ID:") && strings.Contains(line, "Slot:") {
|
||||||
matches := libraryIDRegex.FindStringSubmatch(line)
|
matches := libraryIDRegex.FindStringSubmatch(line)
|
||||||
if matches != nil {
|
if matches != nil {
|
||||||
libID, _ := strconv.Atoi(matches[1])
|
libID, _ := strconv.Atoi(matches[1])
|
||||||
slot, _ := strconv.Atoi(matches[2])
|
slot, _ := strconv.Atoi(matches[2])
|
||||||
currentDrive.LibraryID = libID
|
currentDrive.LibraryID = libID
|
||||||
currentDrive.Slot = slot
|
currentDrive.Slot = slot
|
||||||
m.logger.Debug("Parsed drive Library ID and Slot", "drive_id", currentDrive.DriveID, "library_id", libID, "slot", slot)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Handle both "Vendor identification:" and " Vendor identification:" (with leading space)
|
|
||||||
if strings.Contains(line, "Vendor identification:") {
|
|
||||||
parts := strings.Split(line, "Vendor identification:")
|
|
||||||
if len(parts) > 1 {
|
|
||||||
currentDrive.Vendor = strings.TrimSpace(parts[1])
|
|
||||||
}
|
|
||||||
} else if strings.Contains(line, "Product identification:") {
|
|
||||||
parts := strings.Split(line, "Product identification:")
|
|
||||||
if len(parts) > 1 {
|
|
||||||
currentDrive.Product = strings.TrimSpace(parts[1])
|
|
||||||
}
|
|
||||||
} else if strings.Contains(line, "Unit serial number:") {
|
|
||||||
parts := strings.Split(line, "Unit serial number:")
|
|
||||||
if len(parts) > 1 {
|
|
||||||
currentDrive.SerialNumber = strings.TrimSpace(parts[1])
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -292,17 +255,9 @@ func (m *MHVTLMonitor) syncLibrary(ctx context.Context, libInfo LibraryInfo) err
|
|||||||
libInfo.LibraryID,
|
libInfo.LibraryID,
|
||||||
).Scan(&existingID)
|
).Scan(&existingID)
|
||||||
|
|
||||||
m.logger.Debug("Syncing library", "library_id", libInfo.LibraryID, "vendor", libInfo.Vendor, "product", libInfo.Product)
|
|
||||||
|
|
||||||
// Use product identification for library name (without library ID)
|
|
||||||
libraryName := fmt.Sprintf("VTL-%d", libInfo.LibraryID)
|
libraryName := fmt.Sprintf("VTL-%d", libInfo.LibraryID)
|
||||||
if libInfo.Product != "" {
|
if libInfo.Product != "" {
|
||||||
// Use only product name, without library ID
|
libraryName = fmt.Sprintf("%s-%d", libInfo.Product, libInfo.LibraryID)
|
||||||
libraryName = libInfo.Product
|
|
||||||
m.logger.Info("Using product for library name", "product", libInfo.Product, "library_id", libInfo.LibraryID, "name", libraryName)
|
|
||||||
} else if libInfo.Vendor != "" {
|
|
||||||
libraryName = libInfo.Vendor
|
|
||||||
m.logger.Info("Using vendor for library name (product not available)", "vendor", libInfo.Vendor, "library_id", libInfo.LibraryID)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err == sql.ErrNoRows {
|
if err == sql.ErrNoRows {
|
||||||
@@ -320,41 +275,23 @@ func (m *MHVTLMonitor) syncLibrary(ctx context.Context, libInfo LibraryInfo) err
|
|||||||
_, err = m.service.db.ExecContext(ctx, `
|
_, err = m.service.db.ExecContext(ctx, `
|
||||||
INSERT INTO virtual_tape_libraries (
|
INSERT INTO virtual_tape_libraries (
|
||||||
name, description, mhvtl_library_id, backing_store_path,
|
name, description, mhvtl_library_id, backing_store_path,
|
||||||
vendor, slot_count, drive_count, is_active
|
slot_count, drive_count, is_active
|
||||||
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
) VALUES ($1, $2, $3, $4, $5, $6, $7)
|
||||||
`, libraryName, fmt.Sprintf("MHVTL Library %d (%s)", libInfo.LibraryID, libInfo.Product),
|
`, libraryName, fmt.Sprintf("MHVTL Library %d (%s)", libInfo.LibraryID, libInfo.Product),
|
||||||
libInfo.LibraryID, backingStorePath, libInfo.Vendor, slotCount, driveCount, true)
|
libInfo.LibraryID, backingStorePath, slotCount, driveCount, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to insert library: %w", err)
|
return fmt.Errorf("failed to insert library: %w", err)
|
||||||
}
|
}
|
||||||
m.logger.Info("Created virtual library from MHVTL", "library_id", libInfo.LibraryID, "name", libraryName)
|
m.logger.Info("Created virtual library from MHVTL", "library_id", libInfo.LibraryID, "name", libraryName)
|
||||||
} else if err == nil {
|
} else if err == nil {
|
||||||
// Update existing library - also update name if product is available
|
// Update existing library
|
||||||
updateName := libraryName
|
|
||||||
// If product exists and current name doesn't match, update it
|
|
||||||
if libInfo.Product != "" {
|
|
||||||
var currentName string
|
|
||||||
err := m.service.db.QueryRowContext(ctx,
|
|
||||||
"SELECT name FROM virtual_tape_libraries WHERE id = $1", existingID,
|
|
||||||
).Scan(¤tName)
|
|
||||||
if err == nil {
|
|
||||||
// Use only product name, without library ID
|
|
||||||
expectedName := libInfo.Product
|
|
||||||
if currentName != expectedName {
|
|
||||||
updateName = expectedName
|
|
||||||
m.logger.Info("Updating library name", "old", currentName, "new", updateName, "product", libInfo.Product)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
m.logger.Info("Updating existing library", "library_id", libInfo.LibraryID, "product", libInfo.Product, "vendor", libInfo.Vendor, "old_name", libraryName, "new_name", updateName)
|
|
||||||
_, err = m.service.db.ExecContext(ctx, `
|
_, err = m.service.db.ExecContext(ctx, `
|
||||||
UPDATE virtual_tape_libraries SET
|
UPDATE virtual_tape_libraries SET
|
||||||
name = $1, description = $2, backing_store_path = $3,
|
name = $1, description = $2, backing_store_path = $3,
|
||||||
vendor = $4, is_active = $5, updated_at = NOW()
|
is_active = $4, updated_at = NOW()
|
||||||
WHERE id = $6
|
WHERE id = $5
|
||||||
`, updateName, fmt.Sprintf("MHVTL Library %d (%s)", libInfo.LibraryID, libInfo.Product),
|
`, libraryName, fmt.Sprintf("MHVTL Library %d (%s)", libInfo.LibraryID, libInfo.Product),
|
||||||
libInfo.HomeDirectory, libInfo.Vendor, true, existingID)
|
libInfo.HomeDirectory, true, existingID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to update library: %w", err)
|
return fmt.Errorf("failed to update library: %w", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -33,7 +33,6 @@ type VirtualTapeLibrary struct {
|
|||||||
Description string `json:"description"`
|
Description string `json:"description"`
|
||||||
MHVTLibraryID int `json:"mhvtl_library_id"`
|
MHVTLibraryID int `json:"mhvtl_library_id"`
|
||||||
BackingStorePath string `json:"backing_store_path"`
|
BackingStorePath string `json:"backing_store_path"`
|
||||||
Vendor string `json:"vendor,omitempty"`
|
|
||||||
SlotCount int `json:"slot_count"`
|
SlotCount int `json:"slot_count"`
|
||||||
DriveCount int `json:"drive_count"`
|
DriveCount int `json:"drive_count"`
|
||||||
IsActive bool `json:"is_active"`
|
IsActive bool `json:"is_active"`
|
||||||
@@ -224,83 +223,49 @@ func (s *Service) createTape(ctx context.Context, tape *VirtualTape) error {
|
|||||||
func (s *Service) ListLibraries(ctx context.Context) ([]VirtualTapeLibrary, error) {
|
func (s *Service) ListLibraries(ctx context.Context) ([]VirtualTapeLibrary, error) {
|
||||||
query := `
|
query := `
|
||||||
SELECT id, name, description, mhvtl_library_id, backing_store_path,
|
SELECT id, name, description, mhvtl_library_id, backing_store_path,
|
||||||
COALESCE(vendor, '') as vendor,
|
|
||||||
slot_count, drive_count, is_active, created_at, updated_at, created_by
|
slot_count, drive_count, is_active, created_at, updated_at, created_by
|
||||||
FROM virtual_tape_libraries
|
FROM virtual_tape_libraries
|
||||||
ORDER BY name
|
ORDER BY name
|
||||||
`
|
`
|
||||||
|
|
||||||
s.logger.Info("Executing query to list libraries")
|
|
||||||
rows, err := s.db.QueryContext(ctx, query)
|
rows, err := s.db.QueryContext(ctx, query)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Error("Failed to query libraries", "error", err)
|
|
||||||
return nil, fmt.Errorf("failed to list libraries: %w", err)
|
return nil, fmt.Errorf("failed to list libraries: %w", err)
|
||||||
}
|
}
|
||||||
s.logger.Info("Query executed successfully, got rows")
|
|
||||||
defer rows.Close()
|
defer rows.Close()
|
||||||
|
|
||||||
libraries := make([]VirtualTapeLibrary, 0) // Initialize as empty slice, not nil
|
var libraries []VirtualTapeLibrary
|
||||||
s.logger.Info("Starting to scan library rows", "query", query)
|
|
||||||
rowCount := 0
|
|
||||||
for rows.Next() {
|
for rows.Next() {
|
||||||
rowCount++
|
|
||||||
var lib VirtualTapeLibrary
|
var lib VirtualTapeLibrary
|
||||||
var description sql.NullString
|
|
||||||
var createdBy sql.NullString
|
|
||||||
err := rows.Scan(
|
err := rows.Scan(
|
||||||
&lib.ID, &lib.Name, &description, &lib.MHVTLibraryID, &lib.BackingStorePath,
|
&lib.ID, &lib.Name, &lib.Description, &lib.MHVTLibraryID, &lib.BackingStorePath,
|
||||||
&lib.Vendor,
|
|
||||||
&lib.SlotCount, &lib.DriveCount, &lib.IsActive,
|
&lib.SlotCount, &lib.DriveCount, &lib.IsActive,
|
||||||
&lib.CreatedAt, &lib.UpdatedAt, &createdBy,
|
&lib.CreatedAt, &lib.UpdatedAt, &lib.CreatedBy,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Error("Failed to scan library", "error", err, "row", rowCount)
|
s.logger.Error("Failed to scan library", "error", err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if description.Valid {
|
|
||||||
lib.Description = description.String
|
|
||||||
}
|
|
||||||
if createdBy.Valid {
|
|
||||||
lib.CreatedBy = createdBy.String
|
|
||||||
}
|
|
||||||
libraries = append(libraries, lib)
|
libraries = append(libraries, lib)
|
||||||
s.logger.Info("Added library to list", "library_id", lib.ID, "name", lib.Name, "mhvtl_id", lib.MHVTLibraryID)
|
|
||||||
}
|
|
||||||
s.logger.Info("Finished scanning library rows", "total_rows", rowCount, "libraries_added", len(libraries))
|
|
||||||
|
|
||||||
if err := rows.Err(); err != nil {
|
|
||||||
s.logger.Error("Error iterating library rows", "error", err)
|
|
||||||
return nil, fmt.Errorf("error iterating library rows: %w", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
s.logger.Info("Listed virtual tape libraries", "count", len(libraries), "is_nil", libraries == nil)
|
return libraries, rows.Err()
|
||||||
// Ensure we return an empty slice, not nil
|
|
||||||
if libraries == nil {
|
|
||||||
s.logger.Warn("Libraries is nil in service, converting to empty array")
|
|
||||||
libraries = []VirtualTapeLibrary{}
|
|
||||||
}
|
|
||||||
s.logger.Info("Returning from service", "count", len(libraries), "is_nil", libraries == nil)
|
|
||||||
return libraries, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetLibrary retrieves a library by ID
|
// GetLibrary retrieves a library by ID
|
||||||
func (s *Service) GetLibrary(ctx context.Context, id string) (*VirtualTapeLibrary, error) {
|
func (s *Service) GetLibrary(ctx context.Context, id string) (*VirtualTapeLibrary, error) {
|
||||||
query := `
|
query := `
|
||||||
SELECT id, name, description, mhvtl_library_id, backing_store_path,
|
SELECT id, name, description, mhvtl_library_id, backing_store_path,
|
||||||
COALESCE(vendor, '') as vendor,
|
|
||||||
slot_count, drive_count, is_active, created_at, updated_at, created_by
|
slot_count, drive_count, is_active, created_at, updated_at, created_by
|
||||||
FROM virtual_tape_libraries
|
FROM virtual_tape_libraries
|
||||||
WHERE id = $1
|
WHERE id = $1
|
||||||
`
|
`
|
||||||
|
|
||||||
var lib VirtualTapeLibrary
|
var lib VirtualTapeLibrary
|
||||||
var description sql.NullString
|
|
||||||
var createdBy sql.NullString
|
|
||||||
err := s.db.QueryRowContext(ctx, query, id).Scan(
|
err := s.db.QueryRowContext(ctx, query, id).Scan(
|
||||||
&lib.ID, &lib.Name, &description, &lib.MHVTLibraryID, &lib.BackingStorePath,
|
&lib.ID, &lib.Name, &lib.Description, &lib.MHVTLibraryID, &lib.BackingStorePath,
|
||||||
&lib.Vendor,
|
|
||||||
&lib.SlotCount, &lib.DriveCount, &lib.IsActive,
|
&lib.SlotCount, &lib.DriveCount, &lib.IsActive,
|
||||||
&lib.CreatedAt, &lib.UpdatedAt, &createdBy,
|
&lib.CreatedAt, &lib.UpdatedAt, &lib.CreatedBy,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == sql.ErrNoRows {
|
if err == sql.ErrNoRows {
|
||||||
@@ -309,13 +274,6 @@ func (s *Service) GetLibrary(ctx context.Context, id string) (*VirtualTapeLibrar
|
|||||||
return nil, fmt.Errorf("failed to get library: %w", err)
|
return nil, fmt.Errorf("failed to get library: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if description.Valid {
|
|
||||||
lib.Description = description.String
|
|
||||||
}
|
|
||||||
if createdBy.Valid {
|
|
||||||
lib.CreatedBy = createdBy.String
|
|
||||||
}
|
|
||||||
|
|
||||||
return &lib, nil
|
return &lib, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -542,3 +500,4 @@ func (s *Service) DeleteLibrary(ctx context.Context, id string) error {
|
|||||||
s.logger.Info("Virtual tape library deleted", "id", id, "name", lib.Name)
|
s.logger.Info("Virtual tape library deleted", "id", id, "name", lib.Name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
/etc/bacula
|
|
||||||
@@ -1,25 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Calypso Stack Log Aggregator
|
|
||||||
Documentation=https://github.com/atlasos/calypso
|
|
||||||
After=network.target
|
|
||||||
Wants=calypso-api.service calypso-frontend.service
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=simple
|
|
||||||
# Run as root to access journald and write to /var/syslog
|
|
||||||
# Format: timestamp [service] message
|
|
||||||
ExecStart=/bin/bash -c '/usr/bin/journalctl -u calypso-api.service -u calypso-frontend.service -f --no-pager -o short-iso >> /var/syslog/calypso.log 2>&1'
|
|
||||||
Restart=always
|
|
||||||
RestartSec=5
|
|
||||||
|
|
||||||
# Security hardening
|
|
||||||
NoNewPrivileges=false
|
|
||||||
PrivateTmp=true
|
|
||||||
ReadWritePaths=/var/syslog
|
|
||||||
|
|
||||||
# Resource limits
|
|
||||||
LimitNOFILE=65536
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
||||||
|
|
||||||
@@ -1,354 +0,0 @@
|
|||||||
# Bacula VTL Integration - Root Cause Analysis & Troubleshooting
|
|
||||||
|
|
||||||
## Issue Summary
|
|
||||||
Bacula Storage Daemon was unable to read slots from mhVTL (Virtual Tape Library) autochanger devices, reporting "Device has 0 slots" despite mtx-changer script working correctly when called manually.
|
|
||||||
|
|
||||||
## Environment
|
|
||||||
- **OS**: Ubuntu Linux
|
|
||||||
- **Bacula Version**: 13.0.4
|
|
||||||
- **VTL**: mhVTL (Virtual Tape Library)
|
|
||||||
- **Autochangers**:
|
|
||||||
- Quantum Scalar i500 (4 drives, 43 slots)
|
|
||||||
- Quantum Scalar i40 (4 drives, 44 slots)
|
|
||||||
- **Tape Drives**: 8x QUANTUM ULTRIUM-HH8 (LTO-8)
|
|
||||||
|
|
||||||
## Root Cause Analysis
|
|
||||||
|
|
||||||
### Primary Issues Identified
|
|
||||||
|
|
||||||
#### 1. **Incorrect Tape Device Type**
|
|
||||||
**Problem**: Using rewinding tape devices (`/dev/st*`) instead of non-rewinding devices (`/dev/nst*`)
|
|
||||||
|
|
||||||
**Impact**: Tape would rewind after each operation, causing data loss and operational failures
|
|
||||||
|
|
||||||
**Solution**: Changed all Archive Device directives from `/dev/st*` to `/dev/nst*`
|
|
||||||
|
|
||||||
```diff
|
|
||||||
Device {
|
|
||||||
Name = Drive-0
|
|
||||||
- Archive Device = /dev/st0
|
|
||||||
+ Archive Device = /dev/nst0
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 2. **Missing Drive Index Parameter**
|
|
||||||
**Problem**: Device configurations lacked Drive Index parameter
|
|
||||||
|
|
||||||
**Impact**: Bacula couldn't properly identify which physical drive in the autochanger to use
|
|
||||||
|
|
||||||
**Solution**: Added Drive Index (0-3) to each Device resource
|
|
||||||
|
|
||||||
```diff
|
|
||||||
Device {
|
|
||||||
Name = Drive-0
|
|
||||||
+ Drive Index = 0
|
|
||||||
Archive Device = /dev/nst0
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 3. **Incorrect AlwaysOpen Setting**
|
|
||||||
**Problem**: AlwaysOpen was set to `no`
|
|
||||||
|
|
||||||
**Impact**: Device wouldn't remain open, causing connection issues with VTL
|
|
||||||
|
|
||||||
**Solution**: Changed AlwaysOpen to `yes` for all tape devices
|
|
||||||
|
|
||||||
```diff
|
|
||||||
Device {
|
|
||||||
Name = Drive-0
|
|
||||||
- AlwaysOpen = no
|
|
||||||
+ AlwaysOpen = yes
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 4. **Wrong Changer Device Path**
|
|
||||||
**Problem**: Using `/dev/sch*` (medium changer device) instead of `/dev/sg*` (generic SCSI device)
|
|
||||||
|
|
||||||
**Impact**: bacula user couldn't access the changer due to permission issues (cdrom group vs tape group)
|
|
||||||
|
|
||||||
**Solution**: Changed Changer Device to use sg devices
|
|
||||||
|
|
||||||
```diff
|
|
||||||
Autochanger {
|
|
||||||
Name = Scalar-i500
|
|
||||||
- Changer Device = /dev/sch0
|
|
||||||
+ Changer Device = /dev/sg7
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
**Device Mapping**:
|
|
||||||
- `/dev/sch0` → `/dev/sg7` (Scalar i500)
|
|
||||||
- `/dev/sch1` → `/dev/sg8` (Scalar i40)
|
|
||||||
|
|
||||||
#### 5. **Missing User Permissions**
|
|
||||||
**Problem**: bacula user not in required groups for device access
|
|
||||||
|
|
||||||
**Impact**: "Permission denied" errors when accessing tape and changer devices
|
|
||||||
|
|
||||||
**Solution**: Added bacula user to tape and cdrom groups
|
|
||||||
|
|
||||||
```bash
|
|
||||||
usermod -a -G tape,cdrom bacula
|
|
||||||
systemctl restart bacula-sd
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 6. **Incorrect Storage Resource Configuration**
|
|
||||||
**Problem**: Storage resource in Director config referenced autochanger name instead of individual drives
|
|
||||||
|
|
||||||
**Impact**: Bacula couldn't properly communicate with individual tape drives
|
|
||||||
|
|
||||||
**Solution**: Listed all drives explicitly in Storage resource
|
|
||||||
|
|
||||||
```diff
|
|
||||||
Storage {
|
|
||||||
Name = Scalar-i500
|
|
||||||
- Device = Scalar-i500
|
|
||||||
+ Device = Drive-0
|
|
||||||
+ Device = Drive-1
|
|
||||||
+ Device = Drive-2
|
|
||||||
+ Device = Drive-3
|
|
||||||
Autochanger = Scalar-i500
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 7. **mtx-changer List Output Format**
|
|
||||||
**Problem**: Script output format didn't match Bacula's expected format
|
|
||||||
|
|
||||||
**Impact**: "Invalid Slot number" errors, preventing volume labeling
|
|
||||||
|
|
||||||
**Original Output**: `1 Full:VolumeTag=E01001L8`
|
|
||||||
**Expected Output**: `1:E01001L8`
|
|
||||||
|
|
||||||
**Solution**: Fixed sed pattern in list command
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Original (incorrect)
|
|
||||||
list)
|
|
||||||
${MTX} -f $ctl status | grep "Storage Element" | grep "Full" | awk '{print $3 $4}' | sed 's/:/ /'
|
|
||||||
;;
|
|
||||||
|
|
||||||
# Fixed
|
|
||||||
list)
|
|
||||||
${MTX} -f $ctl status | grep "Storage Element" | grep "Full" | awk '{print $3 $4}' | sed 's/:Full:VolumeTag=/:/'
|
|
||||||
;;
|
|
||||||
```
|
|
||||||
|
|
||||||
## Troubleshooting Steps
|
|
||||||
|
|
||||||
### Step 1: Verify mtx-changer Script Works Manually
|
|
||||||
```bash
|
|
||||||
# Test slots command
|
|
||||||
/usr/lib/bacula/scripts/mtx-changer /dev/sg7 slots
|
|
||||||
# Expected output: 43
|
|
||||||
|
|
||||||
# Test list command
|
|
||||||
/usr/lib/bacula/scripts/mtx-changer /dev/sg7 list
|
|
||||||
# Expected output: 1:E01001L8, 2:E01002L8, etc.
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 2: Test as bacula User
|
|
||||||
```bash
|
|
||||||
# Test if bacula user can access devices
|
|
||||||
su -s /bin/bash bacula -c "/usr/lib/bacula/scripts/mtx-changer /dev/sg7 slots"
|
|
||||||
|
|
||||||
# If permission denied, check groups
|
|
||||||
groups bacula
|
|
||||||
# Should include: bacula tape cdrom
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 3: Verify Device Permissions
|
|
||||||
```bash
|
|
||||||
# Check changer devices
|
|
||||||
ls -l /dev/sch* /dev/sg7 /dev/sg8
|
|
||||||
# sg devices should be in tape group
|
|
||||||
|
|
||||||
# Check tape devices
|
|
||||||
ls -l /dev/nst*
|
|
||||||
# Should be in tape group with rw permissions
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 4: Test Bacula Storage Daemon Connection
|
|
||||||
```bash
|
|
||||||
# From bconsole
|
|
||||||
echo "status storage=Scalar-i500" | bconsole
|
|
||||||
|
|
||||||
# Should show autochanger and drives
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 5: Update Slots
|
|
||||||
```bash
|
|
||||||
echo -e "update slots storage=Scalar-i500\n0\n" | bconsole
|
|
||||||
|
|
||||||
# Should show: Device "Drive-0" has 43 slots
|
|
||||||
# NOT: Device has 0 slots
|
|
||||||
```
|
|
||||||
|
|
||||||
### Step 6: Label Tapes
|
|
||||||
```bash
|
|
||||||
echo -e "label barcodes storage=Scalar-i500 pool=Default\n0\nyes\n" | bconsole
|
|
||||||
|
|
||||||
# Should successfully label tapes using barcodes
|
|
||||||
```
|
|
||||||
|
|
||||||
## Configuration Files
|
|
||||||
|
|
||||||
### /etc/bacula/bacula-sd.conf (Storage Daemon)
|
|
||||||
```bash
|
|
||||||
Autochanger {
|
|
||||||
Name = Scalar-i500
|
|
||||||
Device = Drive-0, Drive-1, Drive-2, Drive-3
|
|
||||||
Changer Command = "/usr/lib/bacula/scripts/mtx-changer %c %o %S %a %d"
|
|
||||||
Changer Device = /dev/sg7
|
|
||||||
}
|
|
||||||
|
|
||||||
Device {
|
|
||||||
Name = Drive-0
|
|
||||||
Drive Index = 0
|
|
||||||
Changer Device = /dev/sg7
|
|
||||||
Media Type = LTO-8
|
|
||||||
Archive Device = /dev/nst0
|
|
||||||
AutomaticMount = yes
|
|
||||||
AlwaysOpen = yes
|
|
||||||
RemovableMedia = yes
|
|
||||||
RandomAccess = no
|
|
||||||
AutoChanger = yes
|
|
||||||
Maximum Concurrent Jobs = 1
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### /etc/bacula/bacula-dir.conf (Director)
|
|
||||||
```bash
|
|
||||||
Storage {
|
|
||||||
Name = Scalar-i500
|
|
||||||
Address = localhost
|
|
||||||
SDPort = 9103
|
|
||||||
Password = "QJQPnZ5Q5p6D73RcvR7ksrOm9UG3mAhvV"
|
|
||||||
Device = Drive-0
|
|
||||||
Device = Drive-1
|
|
||||||
Device = Drive-2
|
|
||||||
Device = Drive-3
|
|
||||||
Media Type = LTO-8
|
|
||||||
Autochanger = Scalar-i500
|
|
||||||
Maximum Concurrent Jobs = 4
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### /usr/lib/bacula/scripts/mtx-changer
|
|
||||||
```bash
|
|
||||||
#!/bin/sh
|
|
||||||
MTX=/usr/sbin/mtx
|
|
||||||
|
|
||||||
ctl=$1
|
|
||||||
cmd="$2"
|
|
||||||
slot=$3
|
|
||||||
device=$4
|
|
||||||
drive=$5
|
|
||||||
|
|
||||||
case "$cmd" in
|
|
||||||
loaded)
|
|
||||||
${MTX} -f $ctl status | grep "Data Transfer Element $slot:Full" >/dev/null 2>&1
|
|
||||||
if [ $? -eq 0 ]; then
|
|
||||||
${MTX} -f $ctl status | grep "Data Transfer Element $slot:Full" | awk '{print $7}' | sed 's/.*=//'
|
|
||||||
else
|
|
||||||
echo "0"
|
|
||||||
fi
|
|
||||||
;;
|
|
||||||
|
|
||||||
load)
|
|
||||||
${MTX} -f $ctl load $slot $drive
|
|
||||||
;;
|
|
||||||
|
|
||||||
unload)
|
|
||||||
${MTX} -f $ctl unload $slot $drive
|
|
||||||
;;
|
|
||||||
|
|
||||||
list)
|
|
||||||
${MTX} -f $ctl status | grep "Storage Element" | grep "Full" | awk '{print $3 $4}' | sed 's/:Full:VolumeTag=/:/'
|
|
||||||
;;
|
|
||||||
|
|
||||||
slots)
|
|
||||||
${MTX} -f $ctl status | grep "Storage Changer" | awk '{print $5}'
|
|
||||||
;;
|
|
||||||
|
|
||||||
*)
|
|
||||||
echo "Invalid command: $cmd"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
exit 0
|
|
||||||
```
|
|
||||||
|
|
||||||
## Verification Commands
|
|
||||||
|
|
||||||
### Check Device Mapping
|
|
||||||
```bash
|
|
||||||
lsscsi -g | grep -E "mediumx|tape"
|
|
||||||
```
|
|
||||||
|
|
||||||
### Check VTL Services
|
|
||||||
```bash
|
|
||||||
systemctl list-units 'vtl*'
|
|
||||||
```
|
|
||||||
|
|
||||||
### Test Manual Tape Load
|
|
||||||
```bash
|
|
||||||
# Load tape to drive
|
|
||||||
mtx -f /dev/sg7 load 1 0
|
|
||||||
|
|
||||||
# Check drive status
|
|
||||||
mt -f /dev/nst0 status
|
|
||||||
|
|
||||||
# Unload tape
|
|
||||||
mtx -f /dev/sg7 unload 1 0
|
|
||||||
```
|
|
||||||
|
|
||||||
### List Labeled Volumes
|
|
||||||
```bash
|
|
||||||
echo "list volumes pool=Default" | bconsole
|
|
||||||
```
|
|
||||||
|
|
||||||
## Common Errors and Solutions
|
|
||||||
|
|
||||||
### Error: "Device has 0 slots"
|
|
||||||
**Cause**: Wrong changer device or permission issues
|
|
||||||
**Solution**: Use /dev/sg* devices and verify bacula user in tape/cdrom groups
|
|
||||||
|
|
||||||
### Error: "Permission denied" accessing /dev/sch0
|
|
||||||
**Cause**: bacula user not in cdrom group
|
|
||||||
**Solution**: `usermod -a -G cdrom bacula && systemctl restart bacula-sd`
|
|
||||||
|
|
||||||
### Error: "Invalid Slot number"
|
|
||||||
**Cause**: mtx-changer list output format incorrect
|
|
||||||
**Solution**: Fix sed pattern to output `slot:volumetag` format
|
|
||||||
|
|
||||||
### Error: "No medium found" after successful load
|
|
||||||
**Cause**: Using rewinding devices (/dev/st*) or AlwaysOpen=no
|
|
||||||
**Solution**: Use /dev/nst* and set AlwaysOpen=yes
|
|
||||||
|
|
||||||
### Error: "READ ELEMENT STATUS Command Failed"
|
|
||||||
**Cause**: Permission issue or VTL service problem
|
|
||||||
**Solution**: Check user permissions and restart vtllibrary service
|
|
||||||
|
|
||||||
## Results
|
|
||||||
|
|
||||||
### Scalar i500 (WORKING)
|
|
||||||
- ✅ 43 slots detected
|
|
||||||
- ✅ 20 tapes successfully labeled (E01001L8 - E01020L8)
|
|
||||||
- ✅ Autochanger operations functional
|
|
||||||
- ✅ Ready for backup jobs
|
|
||||||
|
|
||||||
### Scalar i40 (ISSUE)
|
|
||||||
- ⚠️ 44 slots detected
|
|
||||||
- ❌ Hardware Error during tape load operations
|
|
||||||
- ❌ 0 tapes labeled
|
|
||||||
- **Status**: Requires mhVTL configuration investigation or system restart
|
|
||||||
|
|
||||||
## References
|
|
||||||
- Bacula Documentation: https://www.bacula.org/
|
|
||||||
- Article: "Using Bacula with mhVTL" - https://karellen.blogspot.com/2012/02/using-bacula-with-mhvtl.html
|
|
||||||
- mhVTL Project: https://github.com/markh794/mhvtl
|
|
||||||
|
|
||||||
## Date
|
|
||||||
Created: 2025-12-31
|
|
||||||
Author: Warp AI Agent
|
|
||||||
@@ -1,344 +0,0 @@
|
|||||||
# Calypso Appliance Health Check Script
|
|
||||||
|
|
||||||
## Overview
|
|
||||||
Comprehensive health check script for all Calypso Appliance components. Performs automated checks across system resources, services, network, storage, and backup infrastructure.
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
Script location: `/usr/local/bin/calypso-healthcheck`
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
### Basic Usage
|
|
||||||
```bash
|
|
||||||
# Run health check (requires root)
|
|
||||||
calypso-healthcheck
|
|
||||||
|
|
||||||
# Run and save to specific location
|
|
||||||
calypso-healthcheck 2>&1 | tee /root/healthcheck-$(date +%Y%m%d).log
|
|
||||||
```
|
|
||||||
|
|
||||||
### Exit Codes
|
|
||||||
- `0` - All checks passed (100% healthy)
|
|
||||||
- `1` - Healthy with warnings (some non-critical issues)
|
|
||||||
- `2` - Degraded (80%+ checks passed, some failures)
|
|
||||||
- `3` - Critical (less than 80% checks passed)
|
|
||||||
|
|
||||||
### Automated Checks
|
|
||||||
|
|
||||||
#### System Resources (4 checks)
|
|
||||||
- Root filesystem usage (threshold: 80%)
|
|
||||||
- /var filesystem usage (threshold: 80%)
|
|
||||||
- Memory usage (threshold: 90%)
|
|
||||||
- CPU load average
|
|
||||||
|
|
||||||
#### Database Services (2 checks)
|
|
||||||
- PostgreSQL service status
|
|
||||||
- Database presence (calypso, bacula)
|
|
||||||
|
|
||||||
#### Calypso Application (7 checks)
|
|
||||||
- calypso-api service
|
|
||||||
- calypso-frontend service
|
|
||||||
- calypso-logger service
|
|
||||||
- API port 8443
|
|
||||||
- Frontend port 3000
|
|
||||||
- API health endpoint
|
|
||||||
- Frontend health endpoint
|
|
||||||
|
|
||||||
#### Backup Services - Bacula (8 checks)
|
|
||||||
- bacula-director service
|
|
||||||
- bacula-fd service
|
|
||||||
- bacula-sd service
|
|
||||||
- Director bconsole connectivity
|
|
||||||
- Storage (Scalar-i500) accessibility
|
|
||||||
- Director port 9101
|
|
||||||
- FD port 9102
|
|
||||||
- SD port 9103
|
|
||||||
|
|
||||||
#### Virtual Tape Library - mhVTL (4 checks)
|
|
||||||
- mhvtl.target status
|
|
||||||
- vtllibrary@10 (Scalar i500)
|
|
||||||
- vtllibrary@30 (Scalar i40)
|
|
||||||
- VTL device count (2 changers, 8 tape drives)
|
|
||||||
- Scalar i500 slots detection
|
|
||||||
|
|
||||||
#### Storage Protocols (9 checks)
|
|
||||||
- NFS server service
|
|
||||||
- Samba (smbd) service
|
|
||||||
- NetBIOS (nmbd) service
|
|
||||||
- SCST service
|
|
||||||
- iSCSI target service
|
|
||||||
- NFS port 2049
|
|
||||||
- SMB port 445
|
|
||||||
- NetBIOS port 139
|
|
||||||
- iSCSI port 3260
|
|
||||||
|
|
||||||
#### Monitoring & Management (2 checks)
|
|
||||||
- SNMP daemon
|
|
||||||
- SNMP port 161
|
|
||||||
|
|
||||||
#### Network Connectivity (2 checks)
|
|
||||||
- Internet connectivity (ping 8.8.8.8)
|
|
||||||
- Network manager status
|
|
||||||
|
|
||||||
**Total: 39+ automated checks**
|
|
||||||
|
|
||||||
## Output Format
|
|
||||||
|
|
||||||
### Console Output
|
|
||||||
- Color-coded status indicators:
|
|
||||||
- ✓ Green = Passed
|
|
||||||
- ⚠ Yellow = Warning
|
|
||||||
- ✗ Red = Failed
|
|
||||||
|
|
||||||
### Example Output
|
|
||||||
```
|
|
||||||
==========================================
|
|
||||||
CALYPSO APPLIANCE HEALTH CHECK
|
|
||||||
==========================================
|
|
||||||
Date: 2025-12-31 01:46:27
|
|
||||||
Hostname: calypso
|
|
||||||
Uptime: up 6 days, 2 hours, 50 minutes
|
|
||||||
Log file: /var/log/calypso-healthcheck-20251231-014627.log
|
|
||||||
|
|
||||||
========================================
|
|
||||||
SYSTEM RESOURCES
|
|
||||||
========================================
|
|
||||||
✓ Root filesystem (18% used)
|
|
||||||
✓ Var filesystem (18% used)
|
|
||||||
✓ Memory usage (49% used, 8206MB available)
|
|
||||||
✓ CPU load average (2.18, 8 cores)
|
|
||||||
|
|
||||||
...
|
|
||||||
|
|
||||||
========================================
|
|
||||||
HEALTH CHECK SUMMARY
|
|
||||||
========================================
|
|
||||||
|
|
||||||
Total Checks: 39
|
|
||||||
Passed: 35
|
|
||||||
Warnings: 0
|
|
||||||
Failed: 4
|
|
||||||
|
|
||||||
⚠ OVERALL STATUS: DEGRADED (89%)
|
|
||||||
```
|
|
||||||
|
|
||||||
### Log Files
|
|
||||||
All checks are logged to: `/var/log/calypso-healthcheck-YYYYMMDD-HHMMSS.log`
|
|
||||||
|
|
||||||
Logs include:
|
|
||||||
- Timestamp and system information
|
|
||||||
- Detailed check results
|
|
||||||
- Summary statistics
|
|
||||||
- Overall health status
|
|
||||||
|
|
||||||
## Scheduling
|
|
||||||
|
|
||||||
### Manual Execution
|
|
||||||
```bash
|
|
||||||
# Run on demand
|
|
||||||
sudo calypso-healthcheck
|
|
||||||
```
|
|
||||||
|
|
||||||
### Cron Job (Recommended)
|
|
||||||
Add to crontab for automated checks:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Daily health check at 2 AM
|
|
||||||
0 2 * * * /usr/local/bin/calypso-healthcheck > /dev/null 2>&1
|
|
||||||
|
|
||||||
# Weekly health check on Monday at 6 AM with email notification
|
|
||||||
0 6 * * 1 /usr/local/bin/calypso-healthcheck 2>&1 | mail -s "Calypso Health Check" admin@example.com
|
|
||||||
```
|
|
||||||
|
|
||||||
### Systemd Timer (Alternative)
|
|
||||||
Create `/etc/systemd/system/calypso-healthcheck.timer`:
|
|
||||||
```ini
|
|
||||||
[Unit]
|
|
||||||
Description=Daily Calypso Health Check
|
|
||||||
Requires=calypso-healthcheck.service
|
|
||||||
|
|
||||||
[Timer]
|
|
||||||
OnCalendar=daily
|
|
||||||
Persistent=true
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=timers.target
|
|
||||||
```
|
|
||||||
|
|
||||||
Create `/etc/systemd/system/calypso-healthcheck.service`:
|
|
||||||
```ini
|
|
||||||
[Unit]
|
|
||||||
Description=Calypso Appliance Health Check
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Type=oneshot
|
|
||||||
ExecStart=/usr/local/bin/calypso-healthcheck
|
|
||||||
```
|
|
||||||
|
|
||||||
Enable:
|
|
||||||
```bash
|
|
||||||
systemctl enable --now calypso-healthcheck.timer
|
|
||||||
```
|
|
||||||
|
|
||||||
## Troubleshooting
|
|
||||||
|
|
||||||
### Common Failures
|
|
||||||
|
|
||||||
#### API/Frontend Health Endpoints Failing
|
|
||||||
```bash
|
|
||||||
# Check if services are running
|
|
||||||
systemctl status calypso-api calypso-frontend
|
|
||||||
|
|
||||||
# Check service logs
|
|
||||||
journalctl -u calypso-api -n 50
|
|
||||||
journalctl -u calypso-frontend -n 50
|
|
||||||
|
|
||||||
# Test manually
|
|
||||||
curl -k https://localhost:8443/health
|
|
||||||
curl -k https://localhost:3000/health
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Bacula Director Not Responding
|
|
||||||
```bash
|
|
||||||
# Check service
|
|
||||||
systemctl status bacula-director
|
|
||||||
|
|
||||||
# Test bconsole
|
|
||||||
echo "status director" | bconsole
|
|
||||||
|
|
||||||
# Check logs
|
|
||||||
tail -50 /var/log/bacula/bacula.log
|
|
||||||
```
|
|
||||||
|
|
||||||
#### VTL Slots Not Detected
|
|
||||||
```bash
|
|
||||||
# Check VTL services
|
|
||||||
systemctl status mhvtl.target
|
|
||||||
|
|
||||||
# Check devices
|
|
||||||
lsscsi | grep -E "mediumx|tape"
|
|
||||||
|
|
||||||
# Test manually
|
|
||||||
mtx -f /dev/sg7 status
|
|
||||||
echo "update slots storage=Scalar-i500" | bconsole
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Storage Protocols Port Not Listening
|
|
||||||
```bash
|
|
||||||
# Check service status
|
|
||||||
systemctl status nfs-server smbd nmbd scst iscsi-scstd
|
|
||||||
|
|
||||||
# Check listening ports
|
|
||||||
ss -tuln | grep -E "2049|445|139|3260"
|
|
||||||
|
|
||||||
# Restart services if needed
|
|
||||||
systemctl restart nfs-server
|
|
||||||
systemctl restart smbd nmbd
|
|
||||||
```
|
|
||||||
|
|
||||||
## Customization
|
|
||||||
|
|
||||||
### Modify Thresholds
|
|
||||||
Edit `/usr/local/bin/calypso-healthcheck`:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Disk usage threshold (default: 80%)
|
|
||||||
check_disk "/" 80 "Root filesystem"
|
|
||||||
|
|
||||||
# Memory usage threshold (default: 90%)
|
|
||||||
if [ "$mem_percent" -lt 90 ]; then
|
|
||||||
|
|
||||||
# Change expected VTL devices
|
|
||||||
if [ "$changer_count" -ge 2 ] && [ "$tape_count" -ge 8 ]; then
|
|
||||||
```
|
|
||||||
|
|
||||||
### Add Custom Checks
|
|
||||||
Add new check functions:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
check_custom() {
|
|
||||||
TOTAL_CHECKS=$((TOTAL_CHECKS + 1))
|
|
||||||
|
|
||||||
if [[ condition ]]; then
|
|
||||||
echo -e "${GREEN}${CHECK}${NC} Custom check passed" | tee -a "$LOG_FILE"
|
|
||||||
PASSED_CHECKS=$((PASSED_CHECKS + 1))
|
|
||||||
else
|
|
||||||
echo -e "${RED}${CROSS}${NC} Custom check failed" | tee -a "$LOG_FILE"
|
|
||||||
FAILED_CHECKS=$((FAILED_CHECKS + 1))
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Call in main script
|
|
||||||
check_custom
|
|
||||||
```
|
|
||||||
|
|
||||||
## Integration
|
|
||||||
|
|
||||||
### Monitoring Systems
|
|
||||||
Export metrics for monitoring:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Nagios/Icinga format
|
|
||||||
calypso-healthcheck
|
|
||||||
if [ $? -eq 0 ]; then
|
|
||||||
echo "OK - All checks passed"
|
|
||||||
exit 0
|
|
||||||
elif [ $? -eq 1 ]; then
|
|
||||||
echo "WARNING - Healthy with warnings"
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
echo "CRITICAL - System degraded"
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
```
|
|
||||||
|
|
||||||
### API Integration
|
|
||||||
Parse JSON output:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Add JSON output option
|
|
||||||
calypso-healthcheck --json > /tmp/health.json
|
|
||||||
```
|
|
||||||
|
|
||||||
## Maintenance
|
|
||||||
|
|
||||||
### Log Rotation
|
|
||||||
Logs are stored in `/var/log/calypso-healthcheck-*.log`
|
|
||||||
|
|
||||||
Create `/etc/logrotate.d/calypso-healthcheck`:
|
|
||||||
```
|
|
||||||
/var/log/calypso-healthcheck-*.log {
|
|
||||||
weekly
|
|
||||||
rotate 12
|
|
||||||
compress
|
|
||||||
delaycompress
|
|
||||||
missingok
|
|
||||||
notifempty
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Cleanup Old Logs
|
|
||||||
```bash
|
|
||||||
# Remove logs older than 30 days
|
|
||||||
find /var/log -name "calypso-healthcheck-*.log" -mtime +30 -delete
|
|
||||||
```
|
|
||||||
|
|
||||||
## Best Practices
|
|
||||||
|
|
||||||
1. **Run after reboot** - Verify all services started correctly
|
|
||||||
2. **Schedule regular checks** - Daily or weekly automated runs
|
|
||||||
3. **Monitor exit codes** - Alert on degraded/critical status
|
|
||||||
4. **Review logs periodically** - Identify patterns or recurring issues
|
|
||||||
5. **Update checks** - Add new components as system evolves
|
|
||||||
6. **Baseline health** - Establish normal operating parameters
|
|
||||||
7. **Document exceptions** - Note known warnings that are acceptable
|
|
||||||
|
|
||||||
## See Also
|
|
||||||
- `pre-reboot-checklist.md` - Pre-reboot verification
|
|
||||||
- `bacula-vtl-troubleshooting.md` - VTL troubleshooting guide
|
|
||||||
- System logs: `/var/log/syslog`, `/var/log/bacula/`
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
*Created: 2025-12-31*
|
|
||||||
*Script: `/usr/local/bin/calypso-healthcheck`*
|
|
||||||
@@ -1,225 +0,0 @@
|
|||||||
# Calypso Appliance - Pre-Reboot Checklist
|
|
||||||
|
|
||||||
**Date:** 2025-12-31
|
|
||||||
**Status:** ✅ READY FOR REBOOT
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Enabled Services (Auto-start on boot)
|
|
||||||
|
|
||||||
### Core Application Services
|
|
||||||
| Service | Status | Purpose |
|
|
||||||
|---------|--------|---------|
|
|
||||||
| postgresql.service | ✅ enabled | Database backend |
|
|
||||||
| calypso-api.service | ✅ enabled | REST API backend |
|
|
||||||
| calypso-frontend.service | ✅ enabled | Web UI (React) |
|
|
||||||
| calypso-logger.service | ✅ enabled | Application logging |
|
|
||||||
|
|
||||||
### Backup Services (Bacula)
|
|
||||||
| Service | Status | Purpose |
|
|
||||||
|---------|--------|---------|
|
|
||||||
| bacula-director.service | ✅ enabled | Backup orchestration |
|
|
||||||
| bacula-fd.service | ✅ enabled | File daemon (client) |
|
|
||||||
| bacula-sd.service | ✅ enabled | Storage daemon (VTL) |
|
|
||||||
|
|
||||||
### Virtual Tape Library (mhVTL)
|
|
||||||
| Service | Status | Purpose |
|
|
||||||
|---------|--------|---------|
|
|
||||||
| mhvtl.target | ✅ enabled | VTL master target |
|
|
||||||
| vtllibrary@10.service | ✅ enabled | Scalar i500 library |
|
|
||||||
| vtllibrary@30.service | ✅ enabled | Scalar i40 library |
|
|
||||||
| vtltape@11-14.service | ✅ enabled | i500 tape drives (4) |
|
|
||||||
| vtltape@31-34.service | ✅ enabled | i40 tape drives (4) |
|
|
||||||
|
|
||||||
### Storage Protocols
|
|
||||||
| Service | Status | Purpose |
|
|
||||||
|---------|--------|---------|
|
|
||||||
| nfs-server.service | ✅ enabled | NFS file sharing |
|
|
||||||
| nfs-blkmap.service | ✅ enabled | NFS block mapping |
|
|
||||||
| smbd.service | ✅ enabled | Samba/CIFS server |
|
|
||||||
| nmbd.service | ✅ enabled | NetBIOS name service |
|
|
||||||
| scst.service | ✅ enabled | SCSI target subsystem |
|
|
||||||
| iscsi-scstd.service | ✅ enabled | iSCSI target daemon |
|
|
||||||
|
|
||||||
### Monitoring & Management
|
|
||||||
| Service | Status | Purpose |
|
|
||||||
|---------|--------|---------|
|
|
||||||
| snmpd.service | ✅ enabled | SNMP monitoring |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Boot Order & Dependencies
|
|
||||||
|
|
||||||
```
|
|
||||||
1. Network (systemd-networkd)
|
|
||||||
↓
|
|
||||||
2. Storage Foundation
|
|
||||||
- NFS server
|
|
||||||
- Samba (smbd/nmbd)
|
|
||||||
- SCST/iSCSI
|
|
||||||
↓
|
|
||||||
3. PostgreSQL Database
|
|
||||||
↓
|
|
||||||
4. VTL Services (mhvtl.target)
|
|
||||||
- vtllibrary services
|
|
||||||
- vtltape services
|
|
||||||
↓
|
|
||||||
5. Bacula Services
|
|
||||||
- bacula-director (after postgresql)
|
|
||||||
- bacula-fd
|
|
||||||
- bacula-sd (after VTL)
|
|
||||||
↓
|
|
||||||
6. Calypso Application
|
|
||||||
- calypso-api (after postgresql)
|
|
||||||
- calypso-frontend (wants calypso-api)
|
|
||||||
- calypso-logger (wants api & frontend)
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Post-Reboot Verification
|
|
||||||
|
|
||||||
### 1. Check System Boot
|
|
||||||
```bash
|
|
||||||
# Check boot time
|
|
||||||
systemd-analyze
|
|
||||||
systemd-analyze blame | head -20
|
|
||||||
```
|
|
||||||
|
|
||||||
### 2. Check Core Services
|
|
||||||
```bash
|
|
||||||
# Calypso application
|
|
||||||
systemctl status calypso-api calypso-frontend calypso-logger
|
|
||||||
|
|
||||||
# Database
|
|
||||||
systemctl status postgresql
|
|
||||||
|
|
||||||
# Check API health
|
|
||||||
curl -k https://localhost:8443/health
|
|
||||||
curl -k https://localhost:3000/health
|
|
||||||
```
|
|
||||||
|
|
||||||
### 3. Check Backup Services
|
|
||||||
```bash
|
|
||||||
# Bacula status
|
|
||||||
systemctl status bacula-director bacula-fd bacula-sd
|
|
||||||
|
|
||||||
# Test bconsole connection
|
|
||||||
echo "status director" | bconsole
|
|
||||||
|
|
||||||
# Check VTL connection
|
|
||||||
echo "status storage=Scalar-i500" | bconsole
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4. Check Storage Protocols
|
|
||||||
```bash
|
|
||||||
# NFS
|
|
||||||
systemctl status nfs-server
|
|
||||||
showmount -e localhost
|
|
||||||
|
|
||||||
# Samba
|
|
||||||
systemctl status smbd nmbd
|
|
||||||
smbstatus
|
|
||||||
|
|
||||||
# iSCSI/SCST
|
|
||||||
systemctl status scst iscsi-scstd
|
|
||||||
scstadmin -list_target
|
|
||||||
```
|
|
||||||
|
|
||||||
### 5. Check VTL Devices
|
|
||||||
```bash
|
|
||||||
# VTL services
|
|
||||||
systemctl status mhvtl.target
|
|
||||||
|
|
||||||
# Check devices
|
|
||||||
lsscsi | grep -E "mediumx|tape"
|
|
||||||
|
|
||||||
# Test autochanger
|
|
||||||
mtx -f /dev/sg7 status | head -10
|
|
||||||
```
|
|
||||||
|
|
||||||
### 6. Check Monitoring
|
|
||||||
```bash
|
|
||||||
# SNMP
|
|
||||||
systemctl status snmpd
|
|
||||||
snmpwalk -v2c -c public localhost system
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Network Access Points
|
|
||||||
|
|
||||||
| Service | URL/Port | Description |
|
|
||||||
|---------|----------|-------------|
|
|
||||||
| Web UI | https://[IP]:3000 | Calypso frontend |
|
|
||||||
| API | https://[IP]:8443 | REST API |
|
|
||||||
| Bacula Director | localhost:9101 | bconsole access |
|
|
||||||
| PostgreSQL | localhost:5432 | Database |
|
|
||||||
| NFS | tcp/2049 | NFS shares |
|
|
||||||
| Samba | tcp/445, tcp/139 | CIFS/SMB shares |
|
|
||||||
| iSCSI | tcp/3260 | iSCSI targets |
|
|
||||||
| SNMP | udp/161 | Monitoring |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Important Notes
|
|
||||||
|
|
||||||
### Bacula VTL Configuration
|
|
||||||
- **Scalar i500**: 43 slots, 20 tapes labeled (E01001L8-E01020L8) ✅
|
|
||||||
- **Scalar i40**: 44 slots, needs investigation after reboot ⚠️
|
|
||||||
- Changer devices: /dev/sg7 (i500), /dev/sg8 (i40)
|
|
||||||
- Tape devices: /dev/nst0-7 (non-rewinding)
|
|
||||||
- User permissions: bacula in tape+cdrom groups
|
|
||||||
|
|
||||||
### Storage Paths
|
|
||||||
- Calypso working directory: `/development/calypso`
|
|
||||||
- Bacula configs: `/etc/bacula/`
|
|
||||||
- VTL configs: `/etc/mhvtl/`
|
|
||||||
- PostgreSQL data: `/var/lib/postgresql/`
|
|
||||||
|
|
||||||
### Known Issues
|
|
||||||
- Scalar i40 VTL: Hardware error during tape load (requires investigation)
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Emergency Recovery
|
|
||||||
|
|
||||||
If services fail to start after reboot:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Check failed services
|
|
||||||
systemctl --failed
|
|
||||||
|
|
||||||
# View service logs
|
|
||||||
journalctl -xeu calypso-api
|
|
||||||
journalctl -xeu bacula-director
|
|
||||||
journalctl -xeu mhvtl.target
|
|
||||||
|
|
||||||
# Manual service restart
|
|
||||||
systemctl restart calypso-api
|
|
||||||
systemctl restart bacula-sd
|
|
||||||
systemctl restart mhvtl.target
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## Checklist Summary
|
|
||||||
|
|
||||||
- [x] PostgreSQL database: enabled
|
|
||||||
- [x] Calypso services (api, frontend, logger): enabled
|
|
||||||
- [x] Bacula services (director, fd, sd): enabled
|
|
||||||
- [x] mhVTL services (libraries, tape drives): enabled
|
|
||||||
- [x] NFS server: enabled
|
|
||||||
- [x] Samba (smbd, nmbd): enabled
|
|
||||||
- [x] SCST/iSCSI: enabled
|
|
||||||
- [x] SNMP monitoring: enabled
|
|
||||||
- [x] Network services: configured
|
|
||||||
- [x] User permissions: configured
|
|
||||||
- [x] Service dependencies: verified
|
|
||||||
|
|
||||||
**Status: SAFE TO REBOOT** ✅
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
*Generated: 2025-12-31*
|
|
||||||
*Documentation: /development/calypso/docs/*
|
|
||||||
126
docs/services.md
126
docs/services.md
@@ -1,126 +0,0 @@
|
|||||||
# Calypso Appliance Services Documentation
|
|
||||||
|
|
||||||
This document provides an overview of all services that form the Calypso backup appliance.
|
|
||||||
|
|
||||||
## Core Calypso Services
|
|
||||||
|
|
||||||
### calypso-api.service
|
|
||||||
**Status**: Running
|
|
||||||
**Description**: AtlasOS Calypso API Service (Development)
|
|
||||||
**Purpose**: Main REST API backend for the Calypso appliance, handles all business logic and database operations.
|
|
||||||
**Binary**: `/development/calypso/backend/bin/calypso-api`
|
|
||||||
**Config**: `/development/calypso/backend/config.yaml.example`
|
|
||||||
|
|
||||||
### calypso-frontend.service
|
|
||||||
**Status**: Running
|
|
||||||
**Description**: Calypso Frontend Development Server
|
|
||||||
**Purpose**: Web UI for managing backups, storage, and monitoring the appliance.
|
|
||||||
**Port**: 3000
|
|
||||||
**Technology**: React + Vite (development mode)
|
|
||||||
|
|
||||||
## Backup Services (Bacula)
|
|
||||||
|
|
||||||
### bacula-director.service
|
|
||||||
**Status**: Running
|
|
||||||
**Description**: Bacula Director Daemon
|
|
||||||
**Purpose**: Central management daemon that orchestrates all backup, restore, and verify operations.
|
|
||||||
**Config**: `/etc/bacula/bacula-dir.conf`
|
|
||||||
**Docs**: `man:bacula-dir(8)`
|
|
||||||
|
|
||||||
### bacula-sd.service
|
|
||||||
**Status**: Running
|
|
||||||
**Description**: Bacula Storage Daemon
|
|
||||||
**Purpose**: Manages physical backup storage devices (disks, tapes, virtual tape libraries).
|
|
||||||
**Config**: `/etc/bacula/bacula-sd.conf`
|
|
||||||
|
|
||||||
### bacula-fd.service
|
|
||||||
**Status**: Running
|
|
||||||
**Description**: Bacula File Daemon
|
|
||||||
**Purpose**: Runs on systems being backed up, manages file access and metadata.
|
|
||||||
**Config**: `/etc/bacula/bacula-fd.conf`
|
|
||||||
|
|
||||||
## Storage/iSCSI Services (SCST)
|
|
||||||
|
|
||||||
### scst.service
|
|
||||||
**Status**: Active (exited)
|
|
||||||
**Description**: SCST - A Generic SCSI Target Subsystem
|
|
||||||
**Purpose**: Kernel-level SCSI target framework providing high-performance storage exports.
|
|
||||||
**Type**: One-shot service that loads SCST kernel modules
|
|
||||||
|
|
||||||
### iscsi-scstd.service
|
|
||||||
**Status**: Running
|
|
||||||
**Description**: iSCSI SCST Target Daemon
|
|
||||||
**Purpose**: Provides iSCSI protocol support for SCST, allowing network block storage exports.
|
|
||||||
**Port**: 3260 (standard iSCSI port)
|
|
||||||
**Configured Targets**:
|
|
||||||
- `iqn.2025-12.id.atlas:lun01` (enabled)
|
|
||||||
|
|
||||||
### iscsid.service
|
|
||||||
**Status**: Inactive
|
|
||||||
**Description**: iSCSI initiator daemon
|
|
||||||
**Purpose**: Client-side iSCSI service (not currently in use)
|
|
||||||
|
|
||||||
### open-iscsi.service
|
|
||||||
**Status**: Inactive
|
|
||||||
**Description**: Login to default iSCSI targets
|
|
||||||
**Purpose**: Automatic iSCSI target login (not currently in use)
|
|
||||||
|
|
||||||
## Virtual Tape Library
|
|
||||||
|
|
||||||
### mhvtl-load-modules.service
|
|
||||||
**Status**: Active (exited)
|
|
||||||
**Description**: Load mhvtl modules
|
|
||||||
**Purpose**: Loads mhVTL (virtual tape library) kernel modules for tape emulation.
|
|
||||||
**Type**: One-shot service that runs at boot
|
|
||||||
**Docs**: `man:vtltape(1)`, `man:vtllibrary(1)`
|
|
||||||
|
|
||||||
## Database
|
|
||||||
|
|
||||||
### postgresql.service
|
|
||||||
**Status**: Active (exited)
|
|
||||||
**Description**: PostgreSQL RDBMS
|
|
||||||
**Purpose**: Parent service for PostgreSQL database management
|
|
||||||
|
|
||||||
### postgresql@16-main.service
|
|
||||||
**Status**: Running
|
|
||||||
**Description**: PostgreSQL Cluster 16-main
|
|
||||||
**Purpose**: Main database for Calypso API, stores configuration, jobs, and metadata.
|
|
||||||
**Version**: PostgreSQL 16
|
|
||||||
|
|
||||||
## Service Management
|
|
||||||
|
|
||||||
### Check All Services Status
|
|
||||||
```bash
|
|
||||||
systemctl status calypso-api calypso-frontend bacula-director bacula-sd bacula-fd scst iscsi-scstd mhvtl-load-modules postgresql
|
|
||||||
```
|
|
||||||
|
|
||||||
### Rebuild and Restart Core Services
|
|
||||||
```bash
|
|
||||||
/development/calypso/scripts/rebuild-and-restart.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
### Restart Individual Services
|
|
||||||
```bash
|
|
||||||
systemctl restart calypso-api.service
|
|
||||||
systemctl restart calypso-frontend.service
|
|
||||||
systemctl restart bacula-director.service
|
|
||||||
```
|
|
||||||
|
|
||||||
## Service Dependencies
|
|
||||||
|
|
||||||
```
|
|
||||||
PostgreSQL
|
|
||||||
└── Calypso API
|
|
||||||
└── Calypso Frontend
|
|
||||||
|
|
||||||
SCST
|
|
||||||
└── iSCSI SCST Target Daemon
|
|
||||||
|
|
||||||
mhVTL
|
|
||||||
└── Bacula Storage Daemon
|
|
||||||
└── Bacula Director
|
|
||||||
└── Bacula File Daemon
|
|
||||||
```
|
|
||||||
|
|
||||||
## Total Service Count
|
|
||||||
**11 services** forming the complete Calypso backup appliance stack.
|
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 22 KiB |
@@ -10,10 +10,6 @@ import TapeLibrariesPage from '@/pages/TapeLibraries'
|
|||||||
import VTLDetailPage from '@/pages/VTLDetail'
|
import VTLDetailPage from '@/pages/VTLDetail'
|
||||||
import ISCSITargetsPage from '@/pages/ISCSITargets'
|
import ISCSITargetsPage from '@/pages/ISCSITargets'
|
||||||
import ISCSITargetDetailPage from '@/pages/ISCSITargetDetail'
|
import ISCSITargetDetailPage from '@/pages/ISCSITargetDetail'
|
||||||
import SystemPage from '@/pages/System'
|
|
||||||
import BackupManagementPage from '@/pages/BackupManagement'
|
|
||||||
import IAMPage from '@/pages/IAM'
|
|
||||||
import ProfilePage from '@/pages/Profile'
|
|
||||||
import Layout from '@/components/Layout'
|
import Layout from '@/components/Layout'
|
||||||
|
|
||||||
// Create a client
|
// Create a client
|
||||||
@@ -58,12 +54,7 @@ function App() {
|
|||||||
<Route path="tape/vtl/:id" element={<VTLDetailPage />} />
|
<Route path="tape/vtl/:id" element={<VTLDetailPage />} />
|
||||||
<Route path="iscsi" element={<ISCSITargetsPage />} />
|
<Route path="iscsi" element={<ISCSITargetsPage />} />
|
||||||
<Route path="iscsi/:id" element={<ISCSITargetDetailPage />} />
|
<Route path="iscsi/:id" element={<ISCSITargetDetailPage />} />
|
||||||
<Route path="backup" element={<BackupManagementPage />} />
|
|
||||||
<Route path="alerts" element={<AlertsPage />} />
|
<Route path="alerts" element={<AlertsPage />} />
|
||||||
<Route path="system" element={<SystemPage />} />
|
|
||||||
<Route path="iam" element={<IAMPage />} />
|
|
||||||
<Route path="profile" element={<ProfilePage />} />
|
|
||||||
<Route path="profile/:id" element={<ProfilePage />} />
|
|
||||||
</Route>
|
</Route>
|
||||||
</Routes>
|
</Routes>
|
||||||
<Toaster />
|
<Toaster />
|
||||||
|
|||||||
@@ -1,257 +0,0 @@
|
|||||||
import apiClient from './client'
|
|
||||||
|
|
||||||
export interface BackupJob {
|
|
||||||
id: string
|
|
||||||
job_id: number
|
|
||||||
job_name: string
|
|
||||||
client_name: string
|
|
||||||
job_type: string
|
|
||||||
job_level: string
|
|
||||||
status: 'Running' | 'Completed' | 'Failed' | 'Canceled' | 'Waiting'
|
|
||||||
bytes_written: number
|
|
||||||
files_written: number
|
|
||||||
duration_seconds?: number
|
|
||||||
started_at?: string
|
|
||||||
ended_at?: string
|
|
||||||
error_message?: string
|
|
||||||
storage_name?: string
|
|
||||||
pool_name?: string
|
|
||||||
volume_name?: string
|
|
||||||
created_at: string
|
|
||||||
updated_at: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface ListJobsResponse {
|
|
||||||
jobs: BackupJob[]
|
|
||||||
total: number
|
|
||||||
limit: number
|
|
||||||
offset: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface ListJobsParams {
|
|
||||||
status?: string
|
|
||||||
job_type?: string
|
|
||||||
client_name?: string
|
|
||||||
job_name?: string
|
|
||||||
limit?: number
|
|
||||||
offset?: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface CreateJobRequest {
|
|
||||||
job_name: string
|
|
||||||
client_name: string
|
|
||||||
job_type: string
|
|
||||||
job_level: string
|
|
||||||
storage_name?: string
|
|
||||||
pool_name?: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface BackupClient {
|
|
||||||
client_id: number
|
|
||||||
name: string
|
|
||||||
uname?: string
|
|
||||||
enabled: boolean
|
|
||||||
auto_prune?: boolean
|
|
||||||
file_retention?: number
|
|
||||||
job_retention?: number
|
|
||||||
last_backup_at?: string
|
|
||||||
total_jobs?: number
|
|
||||||
total_bytes?: number
|
|
||||||
status?: 'online' | 'offline'
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface ListClientsResponse {
|
|
||||||
clients: BackupClient[]
|
|
||||||
total: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface ListClientsParams {
|
|
||||||
enabled?: boolean
|
|
||||||
search?: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface PoolStats {
|
|
||||||
name: string
|
|
||||||
used_bytes: number
|
|
||||||
total_bytes: number
|
|
||||||
usage_percent: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface DashboardStats {
|
|
||||||
director_status: string
|
|
||||||
director_uptime: string
|
|
||||||
last_job?: BackupJob
|
|
||||||
active_jobs_count: number
|
|
||||||
default_pool?: PoolStats
|
|
||||||
}
|
|
||||||
|
|
||||||
export const backupAPI = {
|
|
||||||
listJobs: async (params?: ListJobsParams): Promise<ListJobsResponse> => {
|
|
||||||
const queryParams = new URLSearchParams()
|
|
||||||
if (params?.status) queryParams.append('status', params.status)
|
|
||||||
if (params?.job_type) queryParams.append('job_type', params.job_type)
|
|
||||||
if (params?.client_name) queryParams.append('client_name', params.client_name)
|
|
||||||
if (params?.job_name) queryParams.append('job_name', params.job_name)
|
|
||||||
if (params?.limit) queryParams.append('limit', params.limit.toString())
|
|
||||||
if (params?.offset) queryParams.append('offset', params.offset.toString())
|
|
||||||
|
|
||||||
const response = await apiClient.get<ListJobsResponse>(
|
|
||||||
`/backup/jobs${queryParams.toString() ? `?${queryParams.toString()}` : ''}`
|
|
||||||
)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
getJob: async (id: string): Promise<BackupJob> => {
|
|
||||||
const response = await apiClient.get<BackupJob>(`/backup/jobs/${id}`)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
createJob: async (data: CreateJobRequest): Promise<BackupJob> => {
|
|
||||||
const response = await apiClient.post<BackupJob>('/backup/jobs', data)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
executeBconsoleCommand: async (command: string): Promise<{ output: string }> => {
|
|
||||||
const response = await apiClient.post<{ output: string }>('/backup/console/execute', { command })
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
listClients: async (params?: ListClientsParams): Promise<ListClientsResponse> => {
|
|
||||||
const queryParams = new URLSearchParams()
|
|
||||||
if (params?.enabled !== undefined) queryParams.append('enabled', params.enabled.toString())
|
|
||||||
if (params?.search) queryParams.append('search', params.search)
|
|
||||||
|
|
||||||
const response = await apiClient.get<ListClientsResponse>(
|
|
||||||
`/backup/clients${queryParams.toString() ? `?${queryParams.toString()}` : ''}`
|
|
||||||
)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
getDashboardStats: async (): Promise<DashboardStats> => {
|
|
||||||
const response = await apiClient.get<DashboardStats>('/backup/dashboard/stats')
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
listStoragePools: async (): Promise<{ pools: StoragePool[]; total: number }> => {
|
|
||||||
const response = await apiClient.get<{ pools: StoragePool[]; total: number }>('/backup/storage/pools')
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
listStorageVolumes: async (poolName?: string): Promise<{ volumes: StorageVolume[]; total: number }> => {
|
|
||||||
const queryParams = new URLSearchParams()
|
|
||||||
if (poolName) queryParams.append('pool_name', poolName)
|
|
||||||
const response = await apiClient.get<{ volumes: StorageVolume[]; total: number }>(
|
|
||||||
`/backup/storage/volumes${queryParams.toString() ? `?${queryParams.toString()}` : ''}`
|
|
||||||
)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
listStorageDaemons: async (): Promise<{ daemons: StorageDaemon[]; total: number }> => {
|
|
||||||
const response = await apiClient.get<{ daemons: StorageDaemon[]; total: number }>('/backup/storage/daemons')
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
createStoragePool: async (data: CreateStoragePoolRequest): Promise<StoragePool> => {
|
|
||||||
const response = await apiClient.post<StoragePool>('/backup/storage/pools', data)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
deleteStoragePool: async (poolId: number): Promise<void> => {
|
|
||||||
await apiClient.delete(`/backup/storage/pools/${poolId}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
createStorageVolume: async (data: CreateStorageVolumeRequest): Promise<StorageVolume> => {
|
|
||||||
const response = await apiClient.post<StorageVolume>('/backup/storage/volumes', data)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
updateStorageVolume: async (volumeId: number, data: UpdateStorageVolumeRequest): Promise<StorageVolume> => {
|
|
||||||
const response = await apiClient.put<StorageVolume>(`/backup/storage/volumes/${volumeId}`, data)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
deleteStorageVolume: async (volumeId: number): Promise<void> => {
|
|
||||||
await apiClient.delete(`/backup/storage/volumes/${volumeId}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
listMedia: async (): Promise<{ media: Media[]; total: number }> => {
|
|
||||||
const response = await apiClient.get<{ media: Media[]; total: number }>('/backup/media')
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface CreateStoragePoolRequest {
|
|
||||||
name: string
|
|
||||||
pool_type?: string
|
|
||||||
label_format?: string
|
|
||||||
recycle?: boolean
|
|
||||||
auto_prune?: boolean
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface CreateStorageVolumeRequest {
|
|
||||||
volume_name: string
|
|
||||||
pool_name: string
|
|
||||||
media_type?: string
|
|
||||||
max_vol_bytes?: number
|
|
||||||
vol_retention?: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface UpdateStorageVolumeRequest {
|
|
||||||
max_vol_bytes?: number
|
|
||||||
vol_retention?: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface Media {
|
|
||||||
media_id: number
|
|
||||||
volume_name: string
|
|
||||||
pool_name: string
|
|
||||||
media_type: string
|
|
||||||
status: string
|
|
||||||
vol_bytes: number
|
|
||||||
max_vol_bytes: number
|
|
||||||
vol_files: number
|
|
||||||
last_written?: string
|
|
||||||
recycle_count: number
|
|
||||||
slot?: number
|
|
||||||
in_changer?: number
|
|
||||||
library_name?: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface StoragePool {
|
|
||||||
pool_id: number
|
|
||||||
name: string
|
|
||||||
pool_type: string
|
|
||||||
label_format?: string
|
|
||||||
recycle?: boolean
|
|
||||||
auto_prune?: boolean
|
|
||||||
volume_count: number
|
|
||||||
used_bytes: number
|
|
||||||
total_bytes: number
|
|
||||||
usage_percent: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface StorageVolume {
|
|
||||||
volume_id: number
|
|
||||||
media_id: number
|
|
||||||
volume_name: string
|
|
||||||
pool_name: string
|
|
||||||
media_type: string
|
|
||||||
vol_status: string
|
|
||||||
vol_bytes: number
|
|
||||||
max_vol_bytes: number
|
|
||||||
vol_files: number
|
|
||||||
vol_retention?: string
|
|
||||||
last_written?: string
|
|
||||||
recycle_count: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface StorageDaemon {
|
|
||||||
storage_id: number
|
|
||||||
name: string
|
|
||||||
address: string
|
|
||||||
port: number
|
|
||||||
device_name: string
|
|
||||||
media_type: string
|
|
||||||
status: string
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -5,9 +5,6 @@ const apiClient = axios.create({
|
|||||||
baseURL: '/api/v1',
|
baseURL: '/api/v1',
|
||||||
headers: {
|
headers: {
|
||||||
'Content-Type': 'application/json',
|
'Content-Type': 'application/json',
|
||||||
'Cache-Control': 'no-cache, no-store, must-revalidate',
|
|
||||||
'Pragma': 'no-cache',
|
|
||||||
'Expires': '0',
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|||||||
@@ -1,191 +0,0 @@
|
|||||||
import apiClient from './client'
|
|
||||||
|
|
||||||
export interface User {
|
|
||||||
id: string
|
|
||||||
username: string
|
|
||||||
email: string
|
|
||||||
full_name: string
|
|
||||||
is_active: boolean
|
|
||||||
is_system: boolean
|
|
||||||
created_at: string
|
|
||||||
updated_at: string
|
|
||||||
last_login_at: string | null
|
|
||||||
roles?: string[]
|
|
||||||
permissions?: string[]
|
|
||||||
groups?: string[]
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface Group {
|
|
||||||
id: string
|
|
||||||
name: string
|
|
||||||
description?: string
|
|
||||||
is_system: boolean
|
|
||||||
user_count: number
|
|
||||||
role_count: number
|
|
||||||
created_at: string
|
|
||||||
updated_at: string
|
|
||||||
users?: string[]
|
|
||||||
roles?: string[]
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface CreateGroupRequest {
|
|
||||||
name: string
|
|
||||||
description?: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface UpdateGroupRequest {
|
|
||||||
name?: string
|
|
||||||
description?: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface AddUserToGroupRequest {
|
|
||||||
user_id: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface CreateUserRequest {
|
|
||||||
username: string
|
|
||||||
email: string
|
|
||||||
password: string
|
|
||||||
full_name?: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface UpdateUserRequest {
|
|
||||||
email?: string
|
|
||||||
full_name?: string
|
|
||||||
is_active?: boolean
|
|
||||||
roles?: string[]
|
|
||||||
groups?: string[]
|
|
||||||
}
|
|
||||||
|
|
||||||
export const iamApi = {
|
|
||||||
listUsers: async (): Promise<User[]> => {
|
|
||||||
const response = await apiClient.get<{ users: User[] }>('/iam/users')
|
|
||||||
return response.data.users || []
|
|
||||||
},
|
|
||||||
|
|
||||||
getUser: async (id: string): Promise<User> => {
|
|
||||||
const response = await apiClient.get<{
|
|
||||||
id: string
|
|
||||||
username: string
|
|
||||||
email: string
|
|
||||||
full_name: string
|
|
||||||
is_active: boolean
|
|
||||||
is_system: boolean
|
|
||||||
roles: string[]
|
|
||||||
permissions: string[]
|
|
||||||
groups: string[]
|
|
||||||
created_at: string
|
|
||||||
updated_at: string
|
|
||||||
last_login_at: string | null
|
|
||||||
}>(`/iam/users/${id}`)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
createUser: async (data: CreateUserRequest): Promise<{ id: string; username: string }> => {
|
|
||||||
const response = await apiClient.post<{ id: string; username: string }>('/iam/users', data)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
updateUser: async (id: string, data: UpdateUserRequest): Promise<void> => {
|
|
||||||
await apiClient.put(`/iam/users/${id}`, data)
|
|
||||||
},
|
|
||||||
|
|
||||||
deleteUser: async (id: string): Promise<void> => {
|
|
||||||
await apiClient.delete(`/iam/users/${id}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
// Groups API
|
|
||||||
listGroups: async (): Promise<Group[]> => {
|
|
||||||
const response = await apiClient.get<{ groups: Group[] }>('/iam/groups')
|
|
||||||
return response.data.groups || []
|
|
||||||
},
|
|
||||||
|
|
||||||
getGroup: async (id: string): Promise<Group> => {
|
|
||||||
const response = await apiClient.get<Group>(`/iam/groups/${id}`)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
createGroup: async (data: CreateGroupRequest): Promise<{ id: string; name: string }> => {
|
|
||||||
const response = await apiClient.post<{ id: string; name: string }>('/iam/groups', data)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
updateGroup: async (id: string, data: UpdateGroupRequest): Promise<void> => {
|
|
||||||
await apiClient.put(`/iam/groups/${id}`, data)
|
|
||||||
},
|
|
||||||
|
|
||||||
deleteGroup: async (id: string): Promise<void> => {
|
|
||||||
await apiClient.delete(`/iam/groups/${id}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
addUserToGroup: async (groupId: string, userId: string): Promise<void> => {
|
|
||||||
await apiClient.post(`/iam/groups/${groupId}/users`, { user_id: userId })
|
|
||||||
},
|
|
||||||
|
|
||||||
removeUserFromGroup: async (groupId: string, userId: string): Promise<void> => {
|
|
||||||
await apiClient.delete(`/iam/groups/${groupId}/users/${userId}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
// User role assignment
|
|
||||||
assignRoleToUser: async (userId: string, roleName: string): Promise<void> => {
|
|
||||||
await apiClient.post(`/iam/users/${userId}/roles`, { role_name: roleName })
|
|
||||||
},
|
|
||||||
|
|
||||||
removeRoleFromUser: async (userId: string, roleName: string): Promise<void> => {
|
|
||||||
await apiClient.delete(`/iam/users/${userId}/roles?role_name=${encodeURIComponent(roleName)}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
// User group assignment
|
|
||||||
assignGroupToUser: async (userId: string, groupName: string): Promise<void> => {
|
|
||||||
await apiClient.post(`/iam/users/${userId}/groups`, { group_name: groupName })
|
|
||||||
},
|
|
||||||
|
|
||||||
removeGroupFromUser: async (userId: string, groupName: string): Promise<void> => {
|
|
||||||
await apiClient.delete(`/iam/users/${userId}/groups?group_name=${encodeURIComponent(groupName)}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
// List all available roles
|
|
||||||
listRoles: async (): Promise<Array<{ id: string; name: string; description?: string; is_system: boolean; user_count?: number; created_at?: string; updated_at?: string }>> => {
|
|
||||||
const response = await apiClient.get<{ roles: Array<{ id: string; name: string; description?: string; is_system: boolean; user_count?: number; created_at?: string; updated_at?: string }> }>('/iam/roles')
|
|
||||||
return response.data.roles
|
|
||||||
},
|
|
||||||
|
|
||||||
getRole: async (id: string): Promise<{ id: string; name: string; description?: string; is_system: boolean; user_count?: number; created_at?: string; updated_at?: string }> => {
|
|
||||||
const response = await apiClient.get<{ id: string; name: string; description?: string; is_system: boolean; user_count?: number; created_at?: string; updated_at?: string }>(`/iam/roles/${id}`)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
createRole: async (data: { name: string; description?: string }): Promise<{ id: string; name: string }> => {
|
|
||||||
const response = await apiClient.post<{ id: string; name: string }>('/iam/roles', data)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
updateRole: async (id: string, data: { name?: string; description?: string }): Promise<void> => {
|
|
||||||
await apiClient.put(`/iam/roles/${id}`, data)
|
|
||||||
},
|
|
||||||
|
|
||||||
deleteRole: async (id: string): Promise<void> => {
|
|
||||||
await apiClient.delete(`/iam/roles/${id}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
// Role permissions
|
|
||||||
getRolePermissions: async (roleId: string): Promise<string[]> => {
|
|
||||||
const response = await apiClient.get<{ permissions: string[] }>(`/iam/roles/${roleId}/permissions`)
|
|
||||||
return response.data.permissions
|
|
||||||
},
|
|
||||||
|
|
||||||
assignPermissionToRole: async (roleId: string, permissionName: string): Promise<void> => {
|
|
||||||
await apiClient.post(`/iam/roles/${roleId}/permissions`, { permission_name: permissionName })
|
|
||||||
},
|
|
||||||
|
|
||||||
removePermissionFromRole: async (roleId: string, permissionName: string): Promise<void> => {
|
|
||||||
await apiClient.delete(`/iam/roles/${roleId}/permissions?permission_name=${encodeURIComponent(permissionName)}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
// Permissions
|
|
||||||
listPermissions: async (): Promise<Array<{ id: string; name: string; resource: string; action: string; description?: string }>> => {
|
|
||||||
const response = await apiClient.get<{ permissions: Array<{ id: string; name: string; resource: string; action: string; description?: string }> }>('/iam/permissions')
|
|
||||||
return response.data.permissions
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -9,7 +9,6 @@ export interface SCSTTarget {
|
|||||||
iqn: string
|
iqn: string
|
||||||
alias?: string
|
alias?: string
|
||||||
is_active: boolean
|
is_active: boolean
|
||||||
lun_count?: number
|
|
||||||
created_at: string
|
created_at: string
|
||||||
updated_at: string
|
updated_at: string
|
||||||
}
|
}
|
||||||
@@ -32,11 +31,7 @@ export interface SCSTInitiator {
|
|||||||
iqn: string
|
iqn: string
|
||||||
is_active: boolean
|
is_active: boolean
|
||||||
created_at: string
|
created_at: string
|
||||||
updated_at?: string
|
updated_at: string
|
||||||
target_id?: string
|
|
||||||
target_iqn?: string
|
|
||||||
target_name?: string
|
|
||||||
group_name?: string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface SCSTInitiatorGroup {
|
export interface SCSTInitiatorGroup {
|
||||||
@@ -50,19 +45,9 @@ export interface SCSTInitiatorGroup {
|
|||||||
|
|
||||||
export interface SCSTHandler {
|
export interface SCSTHandler {
|
||||||
name: string
|
name: string
|
||||||
label: string
|
|
||||||
description?: string
|
description?: string
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface SCSTPortal {
|
|
||||||
id: string
|
|
||||||
ip_address: string
|
|
||||||
port: number
|
|
||||||
is_active: boolean
|
|
||||||
created_at: string
|
|
||||||
updated_at: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface CreateTargetRequest {
|
export interface CreateTargetRequest {
|
||||||
iqn: string
|
iqn: string
|
||||||
target_type: string
|
target_type: string
|
||||||
@@ -88,37 +73,21 @@ export interface AddInitiatorRequest {
|
|||||||
|
|
||||||
export const scstAPI = {
|
export const scstAPI = {
|
||||||
listTargets: async (): Promise<SCSTTarget[]> => {
|
listTargets: async (): Promise<SCSTTarget[]> => {
|
||||||
const response = await apiClient.get('/scst/targets', {
|
const response = await apiClient.get('/scst/targets')
|
||||||
headers: {
|
|
||||||
'Cache-Control': 'no-cache',
|
|
||||||
},
|
|
||||||
params: {
|
|
||||||
_t: Date.now(), // Add timestamp to prevent browser caching
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return response.data.targets || []
|
return response.data.targets || []
|
||||||
},
|
},
|
||||||
|
|
||||||
getTarget: async (id: string): Promise<{
|
getTarget: async (id: string): Promise<{
|
||||||
target: SCSTTarget
|
target: SCSTTarget
|
||||||
luns: SCSTLUN[]
|
luns: SCSTLUN[]
|
||||||
initiator_groups?: SCSTInitiatorGroup[]
|
|
||||||
}> => {
|
}> => {
|
||||||
const response = await apiClient.get(`/scst/targets/${id}`, {
|
const response = await apiClient.get(`/scst/targets/${id}`)
|
||||||
headers: {
|
|
||||||
'Cache-Control': 'no-cache',
|
|
||||||
},
|
|
||||||
params: {
|
|
||||||
_t: Date.now(), // Add timestamp to prevent browser caching
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return response.data
|
return response.data
|
||||||
},
|
},
|
||||||
|
|
||||||
createTarget: async (data: CreateTargetRequest): Promise<SCSTTarget> => {
|
createTarget: async (data: CreateTargetRequest): Promise<SCSTTarget> => {
|
||||||
const response = await apiClient.post('/scst/targets', data)
|
const response = await apiClient.post('/scst/targets', data)
|
||||||
// Backend returns target directly, not wrapped in { target: ... }
|
return response.data.target
|
||||||
return response.data
|
|
||||||
},
|
},
|
||||||
|
|
||||||
addLUN: async (targetId: string, data: AddLUNRequest): Promise<{ task_id: string }> => {
|
addLUN: async (targetId: string, data: AddLUNRequest): Promise<{ task_id: string }> => {
|
||||||
@@ -126,11 +95,6 @@ export const scstAPI = {
|
|||||||
return response.data
|
return response.data
|
||||||
},
|
},
|
||||||
|
|
||||||
removeLUN: async (targetId: string, lunId: string): Promise<{ message: string }> => {
|
|
||||||
const response = await apiClient.delete(`/scst/targets/${targetId}/luns/${lunId}`)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
addInitiator: async (targetId: string, data: AddInitiatorRequest): Promise<{ task_id: string }> => {
|
addInitiator: async (targetId: string, data: AddInitiatorRequest): Promise<{ task_id: string }> => {
|
||||||
const response = await apiClient.post(`/scst/targets/${targetId}/initiators`, data)
|
const response = await apiClient.post(`/scst/targets/${targetId}/initiators`, data)
|
||||||
return response.data
|
return response.data
|
||||||
@@ -142,177 +106,8 @@ export const scstAPI = {
|
|||||||
},
|
},
|
||||||
|
|
||||||
listHandlers: async (): Promise<SCSTHandler[]> => {
|
listHandlers: async (): Promise<SCSTHandler[]> => {
|
||||||
const response = await apiClient.get('/scst/handlers', {
|
const response = await apiClient.get('/scst/handlers')
|
||||||
headers: {
|
|
||||||
'Cache-Control': 'no-cache',
|
|
||||||
},
|
|
||||||
params: {
|
|
||||||
_t: Date.now(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return response.data.handlers || []
|
return response.data.handlers || []
|
||||||
},
|
},
|
||||||
|
|
||||||
listPortals: async (): Promise<SCSTPortal[]> => {
|
|
||||||
const response = await apiClient.get('/scst/portals', {
|
|
||||||
headers: {
|
|
||||||
'Cache-Control': 'no-cache',
|
|
||||||
},
|
|
||||||
params: {
|
|
||||||
_t: Date.now(), // Add timestamp to prevent browser caching
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return response.data.portals || []
|
|
||||||
},
|
|
||||||
|
|
||||||
getPortal: async (id: string): Promise<SCSTPortal> => {
|
|
||||||
const response = await apiClient.get(`/scst/portals/${id}`, {
|
|
||||||
headers: {
|
|
||||||
'Cache-Control': 'no-cache',
|
|
||||||
},
|
|
||||||
params: {
|
|
||||||
_t: Date.now(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
createPortal: async (data: { ip_address: string; port?: number; is_active?: boolean }): Promise<SCSTPortal> => {
|
|
||||||
const response = await apiClient.post('/scst/portals', data)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
updatePortal: async (id: string, data: { ip_address: string; port?: number; is_active?: boolean }): Promise<SCSTPortal> => {
|
|
||||||
const response = await apiClient.put(`/scst/portals/${id}`, data)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
deletePortal: async (id: string): Promise<void> => {
|
|
||||||
await apiClient.delete(`/scst/portals/${id}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
enableTarget: async (targetId: string): Promise<{ message: string }> => {
|
|
||||||
const response = await apiClient.post(`/scst/targets/${targetId}/enable`)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
disableTarget: async (targetId: string): Promise<{ message: string }> => {
|
|
||||||
const response = await apiClient.post(`/scst/targets/${targetId}/disable`)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
deleteTarget: async (targetId: string): Promise<{ message: string }> => {
|
|
||||||
const response = await apiClient.delete(`/scst/targets/${targetId}`)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
listInitiators: async (): Promise<SCSTInitiator[]> => {
|
|
||||||
const response = await apiClient.get('/scst/initiators', {
|
|
||||||
headers: {
|
|
||||||
'Cache-Control': 'no-cache',
|
|
||||||
},
|
|
||||||
params: {
|
|
||||||
_t: Date.now(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return response.data.initiators || []
|
|
||||||
},
|
|
||||||
|
|
||||||
getInitiator: async (id: string): Promise<SCSTInitiator> => {
|
|
||||||
const response = await apiClient.get(`/scst/initiators/${id}`, {
|
|
||||||
headers: {
|
|
||||||
'Cache-Control': 'no-cache',
|
|
||||||
},
|
|
||||||
params: {
|
|
||||||
_t: Date.now(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
removeInitiator: async (id: string): Promise<void> => {
|
|
||||||
await apiClient.delete(`/scst/initiators/${id}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
listExtents: async (): Promise<SCSTExtent[]> => {
|
|
||||||
const response = await apiClient.get('/scst/extents', {
|
|
||||||
headers: {
|
|
||||||
'Cache-Control': 'no-cache',
|
|
||||||
},
|
|
||||||
params: {
|
|
||||||
_t: Date.now(), // Add timestamp to prevent browser caching
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return response.data.extents || []
|
|
||||||
},
|
|
||||||
|
|
||||||
createExtent: async (extent: CreateExtentRequest): Promise<{ message: string }> => {
|
|
||||||
const response = await apiClient.post('/scst/extents', extent)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
deleteExtent: async (deviceName: string): Promise<void> => {
|
|
||||||
await apiClient.delete(`/scst/extents/${deviceName}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
// Initiator Groups
|
|
||||||
listInitiatorGroups: async (): Promise<SCSTInitiatorGroup[]> => {
|
|
||||||
const response = await apiClient.get('/scst/initiator-groups', {
|
|
||||||
headers: {
|
|
||||||
'Cache-Control': 'no-cache',
|
|
||||||
},
|
|
||||||
params: {
|
|
||||||
_t: Date.now(), // Add timestamp to prevent browser caching
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return response.data.groups || []
|
|
||||||
},
|
|
||||||
|
|
||||||
getInitiatorGroup: async (id: string): Promise<SCSTInitiatorGroup> => {
|
|
||||||
const response = await apiClient.get(`/scst/initiator-groups/${id}`, {
|
|
||||||
headers: {
|
|
||||||
'Cache-Control': 'no-cache',
|
|
||||||
},
|
|
||||||
params: {
|
|
||||||
_t: Date.now(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
createInitiatorGroup: async (data: { target_id: string; group_name: string }): Promise<SCSTInitiatorGroup> => {
|
|
||||||
const response = await apiClient.post('/scst/initiator-groups', data)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
updateInitiatorGroup: async (id: string, data: { group_name: string }): Promise<SCSTInitiatorGroup> => {
|
|
||||||
const response = await apiClient.put(`/scst/initiator-groups/${id}`, data)
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
|
|
||||||
deleteInitiatorGroup: async (id: string): Promise<void> => {
|
|
||||||
await apiClient.delete(`/scst/initiator-groups/${id}`)
|
|
||||||
},
|
|
||||||
|
|
||||||
addInitiatorToGroup: async (groupId: string, initiatorIQN: string): Promise<{ message: string }> => {
|
|
||||||
const response = await apiClient.post(`/scst/initiator-groups/${groupId}/initiators`, {
|
|
||||||
initiator_iqn: initiatorIQN,
|
|
||||||
})
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface SCSTExtent {
|
|
||||||
handler_type: string
|
|
||||||
device_name: string
|
|
||||||
device_path: string
|
|
||||||
is_in_use: boolean
|
|
||||||
lun_count: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface CreateExtentRequest {
|
|
||||||
device_name: string
|
|
||||||
device_path: string
|
|
||||||
handler_type: string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -100,7 +100,6 @@ export interface ZFSPool {
|
|||||||
scrub_interval: number // days
|
scrub_interval: number // days
|
||||||
is_active: boolean
|
is_active: boolean
|
||||||
health_status: string // online, degraded, faulted, offline
|
health_status: string // online, degraded, faulted, offline
|
||||||
compress_ratio?: number // compression ratio (e.g., 1.45)
|
|
||||||
created_at: string
|
created_at: string
|
||||||
updated_at: string
|
updated_at: string
|
||||||
created_by: string
|
created_by: string
|
||||||
|
|||||||
@@ -1,88 +0,0 @@
|
|||||||
import apiClient from './client'
|
|
||||||
|
|
||||||
export interface NetworkInterface {
|
|
||||||
name: string
|
|
||||||
ip_address: string
|
|
||||||
subnet: string
|
|
||||||
status: string // "Connected" or "Down"
|
|
||||||
speed: string // e.g., "10 Gbps", "1 Gbps"
|
|
||||||
role: string // "Management", "ISCSI", or empty
|
|
||||||
gateway?: string
|
|
||||||
dns1?: string
|
|
||||||
dns2?: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface UpdateNetworkInterfaceRequest {
|
|
||||||
ip_address: string
|
|
||||||
subnet: string
|
|
||||||
gateway?: string
|
|
||||||
dns1?: string
|
|
||||||
dns2?: string
|
|
||||||
role?: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface SaveNTPSettingsRequest {
|
|
||||||
timezone: string
|
|
||||||
ntp_servers: string[]
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface NTPSettings {
|
|
||||||
timezone: string
|
|
||||||
ntp_servers: string[]
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface ServiceStatus {
|
|
||||||
name: string
|
|
||||||
active_state: string // "active", "inactive", "activating", "deactivating", "failed"
|
|
||||||
sub_state: string
|
|
||||||
load_state: string
|
|
||||||
description: string
|
|
||||||
since?: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface SystemLogEntry {
|
|
||||||
time: string
|
|
||||||
level: string
|
|
||||||
source: string
|
|
||||||
message: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface NetworkDataPoint {
|
|
||||||
time: string
|
|
||||||
inbound: number // Mbps
|
|
||||||
outbound: number // Mbps
|
|
||||||
}
|
|
||||||
|
|
||||||
export const systemAPI = {
|
|
||||||
listNetworkInterfaces: async (): Promise<NetworkInterface[]> => {
|
|
||||||
const response = await apiClient.get<{ interfaces: NetworkInterface[] | null }>('/system/interfaces')
|
|
||||||
return response.data.interfaces || []
|
|
||||||
},
|
|
||||||
updateNetworkInterface: async (name: string, data: UpdateNetworkInterfaceRequest): Promise<NetworkInterface> => {
|
|
||||||
const response = await apiClient.put<{ interface: NetworkInterface }>(`/system/interfaces/${name}`, data)
|
|
||||||
return response.data.interface
|
|
||||||
},
|
|
||||||
getNTPSettings: async (): Promise<NTPSettings> => {
|
|
||||||
const response = await apiClient.get<{ settings: NTPSettings }>('/system/ntp')
|
|
||||||
return response.data.settings
|
|
||||||
},
|
|
||||||
saveNTPSettings: async (data: SaveNTPSettingsRequest): Promise<void> => {
|
|
||||||
await apiClient.post('/system/ntp', data)
|
|
||||||
},
|
|
||||||
listServices: async (): Promise<ServiceStatus[]> => {
|
|
||||||
const response = await apiClient.get<{ services: ServiceStatus[] }>('/system/services')
|
|
||||||
return response.data.services || []
|
|
||||||
},
|
|
||||||
restartService: async (name: string): Promise<void> => {
|
|
||||||
await apiClient.post(`/system/services/${name}/restart`)
|
|
||||||
},
|
|
||||||
getSystemLogs: async (limit: number = 30): Promise<SystemLogEntry[]> => {
|
|
||||||
const response = await apiClient.get<{ logs: SystemLogEntry[] }>(`/system/logs?limit=${limit}`)
|
|
||||||
return response.data.logs || []
|
|
||||||
},
|
|
||||||
getNetworkThroughput: async (duration: string = '5m'): Promise<NetworkDataPoint[]> => {
|
|
||||||
const response = await apiClient.get<{ data: NetworkDataPoint[] }>(`/system/network/throughput?duration=${duration}`)
|
|
||||||
return response.data.data || []
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -26,7 +26,6 @@ export interface VirtualTapeLibrary {
|
|||||||
name: string
|
name: string
|
||||||
mhvtl_library_id: number
|
mhvtl_library_id: number
|
||||||
storage_path: string
|
storage_path: string
|
||||||
vendor?: string
|
|
||||||
slot_count: number
|
slot_count: number
|
||||||
drive_count: number
|
drive_count: number
|
||||||
is_active: boolean
|
is_active: boolean
|
||||||
|
|||||||
@@ -10,31 +10,15 @@ import {
|
|||||||
Settings,
|
Settings,
|
||||||
Bell,
|
Bell,
|
||||||
Server,
|
Server,
|
||||||
Users,
|
Users
|
||||||
Archive
|
|
||||||
} from 'lucide-react'
|
} from 'lucide-react'
|
||||||
import { useState, useEffect } from 'react'
|
import { useState } from 'react'
|
||||||
|
|
||||||
export default function Layout() {
|
export default function Layout() {
|
||||||
const { user, clearAuth } = useAuthStore()
|
const { user, clearAuth } = useAuthStore()
|
||||||
const navigate = useNavigate()
|
const navigate = useNavigate()
|
||||||
const location = useLocation()
|
const location = useLocation()
|
||||||
const [sidebarOpen, setSidebarOpen] = useState(false)
|
const [sidebarOpen, setSidebarOpen] = useState(true)
|
||||||
|
|
||||||
// Set sidebar open by default on desktop, closed on mobile
|
|
||||||
useEffect(() => {
|
|
||||||
const handleResize = () => {
|
|
||||||
if (window.innerWidth >= 1024) {
|
|
||||||
setSidebarOpen(true)
|
|
||||||
} else {
|
|
||||||
setSidebarOpen(false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
handleResize() // Set initial state
|
|
||||||
window.addEventListener('resize', handleResize)
|
|
||||||
return () => window.removeEventListener('resize', handleResize)
|
|
||||||
}, [])
|
|
||||||
|
|
||||||
const handleLogout = () => {
|
const handleLogout = () => {
|
||||||
clearAuth()
|
clearAuth()
|
||||||
@@ -45,15 +29,14 @@ export default function Layout() {
|
|||||||
{ name: 'Dashboard', href: '/', icon: LayoutDashboard },
|
{ name: 'Dashboard', href: '/', icon: LayoutDashboard },
|
||||||
{ name: 'Storage', href: '/storage', icon: HardDrive },
|
{ name: 'Storage', href: '/storage', icon: HardDrive },
|
||||||
{ name: 'Tape Libraries', href: '/tape', icon: Database },
|
{ name: 'Tape Libraries', href: '/tape', icon: Database },
|
||||||
{ name: 'iSCSI Management', href: '/iscsi', icon: Network },
|
{ name: 'iSCSI Targets', href: '/iscsi', icon: Network },
|
||||||
{ name: 'Backup Management', href: '/backup', icon: Archive },
|
|
||||||
{ name: 'Tasks', href: '/tasks', icon: Settings },
|
{ name: 'Tasks', href: '/tasks', icon: Settings },
|
||||||
{ name: 'Alerts', href: '/alerts', icon: Bell },
|
{ name: 'Alerts', href: '/alerts', icon: Bell },
|
||||||
{ name: 'System', href: '/system', icon: Server },
|
{ name: 'System', href: '/system', icon: Server },
|
||||||
]
|
]
|
||||||
|
|
||||||
if (user?.roles.includes('admin')) {
|
if (user?.roles.includes('admin')) {
|
||||||
navigation.push({ name: 'User Management', href: '/iam', icon: Users })
|
navigation.push({ name: 'IAM', href: '/iam', icon: Users })
|
||||||
}
|
}
|
||||||
|
|
||||||
const isActive = (href: string) => {
|
const isActive = (href: string) => {
|
||||||
@@ -64,15 +47,7 @@ export default function Layout() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="h-screen bg-background-dark flex overflow-hidden">
|
<div className="min-h-screen bg-background-dark">
|
||||||
{/* Mobile backdrop overlay */}
|
|
||||||
{sidebarOpen && (
|
|
||||||
<div
|
|
||||||
className="fixed inset-0 bg-black/50 z-40 lg:hidden"
|
|
||||||
onClick={() => setSidebarOpen(false)}
|
|
||||||
/>
|
|
||||||
)}
|
|
||||||
|
|
||||||
{/* Sidebar */}
|
{/* Sidebar */}
|
||||||
<div
|
<div
|
||||||
className={`fixed inset-y-0 left-0 z-50 w-64 bg-background-dark border-r border-border-dark text-white transition-transform duration-300 ${
|
className={`fixed inset-y-0 left-0 z-50 w-64 bg-background-dark border-r border-border-dark text-white transition-transform duration-300 ${
|
||||||
@@ -82,26 +57,11 @@ export default function Layout() {
|
|||||||
<div className="flex flex-col h-full">
|
<div className="flex flex-col h-full">
|
||||||
{/* Header */}
|
{/* Header */}
|
||||||
<div className="flex items-center justify-between px-6 py-5 border-b border-border-dark">
|
<div className="flex items-center justify-between px-6 py-5 border-b border-border-dark">
|
||||||
<div className="flex items-center gap-3">
|
<div className="flex items-center gap-2">
|
||||||
<img
|
<div className="w-8 h-8 bg-primary rounded-lg flex items-center justify-center">
|
||||||
src="/logo.png"
|
|
||||||
alt="Calypso Logo"
|
|
||||||
className="w-10 h-10 object-contain"
|
|
||||||
onError={(e) => {
|
|
||||||
// Fallback to text if image not found
|
|
||||||
const target = e.target as HTMLImageElement
|
|
||||||
target.style.display = 'none'
|
|
||||||
const fallback = target.nextElementSibling as HTMLElement
|
|
||||||
if (fallback) fallback.style.display = 'flex'
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
<div className="w-10 h-10 bg-primary rounded-lg flex items-center justify-center hidden">
|
|
||||||
<span className="text-white font-bold text-sm">C</span>
|
<span className="text-white font-bold text-sm">C</span>
|
||||||
</div>
|
</div>
|
||||||
<div className="flex flex-col">
|
|
||||||
<h1 className="text-xl font-black text-white font-display tracking-tight">Calypso</h1>
|
<h1 className="text-xl font-black text-white font-display tracking-tight">Calypso</h1>
|
||||||
<p className="text-[10px] text-text-secondary leading-tight">Dev Release V.1</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
</div>
|
||||||
<button
|
<button
|
||||||
onClick={() => setSidebarOpen(false)}
|
onClick={() => setSidebarOpen(false)}
|
||||||
@@ -137,15 +97,12 @@ export default function Layout() {
|
|||||||
|
|
||||||
{/* Footer */}
|
{/* Footer */}
|
||||||
<div className="p-4 border-t border-border-dark bg-[#0d1419]">
|
<div className="p-4 border-t border-border-dark bg-[#0d1419]">
|
||||||
<Link
|
<div className="mb-3 px-2">
|
||||||
to="/profile"
|
|
||||||
className="mb-3 px-2 py-2 rounded-lg hover:bg-card-dark transition-colors block"
|
|
||||||
>
|
|
||||||
<p className="text-sm font-semibold text-white mb-0.5">{user?.username}</p>
|
<p className="text-sm font-semibold text-white mb-0.5">{user?.username}</p>
|
||||||
<p className="text-xs text-text-secondary font-mono">
|
<p className="text-xs text-text-secondary font-mono">
|
||||||
{user?.roles.join(', ').toUpperCase()}
|
{user?.roles.join(', ').toUpperCase()}
|
||||||
</p>
|
</p>
|
||||||
</Link>
|
</div>
|
||||||
<button
|
<button
|
||||||
onClick={handleLogout}
|
onClick={handleLogout}
|
||||||
className="w-full flex items-center gap-2 px-4 py-2.5 rounded-lg text-text-secondary hover:bg-card-dark hover:text-white transition-colors border border-border-dark"
|
className="w-full flex items-center gap-2 px-4 py-2.5 rounded-lg text-text-secondary hover:bg-card-dark hover:text-white transition-colors border border-border-dark"
|
||||||
@@ -158,20 +115,11 @@ export default function Layout() {
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
{/* Main content */}
|
{/* Main content */}
|
||||||
<div className={`transition-all duration-300 flex-1 flex flex-col overflow-hidden ${sidebarOpen ? 'lg:ml-64' : 'ml-0'} bg-background-dark`}>
|
<div className={`transition-all duration-300 ${sidebarOpen ? 'lg:ml-64' : 'ml-0'} bg-background-dark`}>
|
||||||
{/* Top bar with burger menu button */}
|
{/* Top bar - removed for dashboard design */}
|
||||||
<div className="flex-none lg:hidden bg-background-dark border-b border-border-dark px-4 py-3">
|
|
||||||
<button
|
|
||||||
onClick={() => setSidebarOpen(true)}
|
|
||||||
className="text-text-secondary hover:text-white transition-colors"
|
|
||||||
aria-label="Open menu"
|
|
||||||
>
|
|
||||||
<Menu className="h-6 w-6" />
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Page content */}
|
{/* Page content */}
|
||||||
<main className="flex-1 overflow-y-auto">
|
<main className="min-h-screen">
|
||||||
<Outlet />
|
<Outlet />
|
||||||
</main>
|
</main>
|
||||||
</div>
|
</div>
|
||||||
|
|||||||
@@ -69,7 +69,6 @@
|
|||||||
|
|
||||||
.custom-scrollbar::-webkit-scrollbar-track {
|
.custom-scrollbar::-webkit-scrollbar-track {
|
||||||
background: #111a22;
|
background: #111a22;
|
||||||
border-radius: 4px;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
.custom-scrollbar::-webkit-scrollbar-thumb {
|
.custom-scrollbar::-webkit-scrollbar-thumb {
|
||||||
@@ -81,24 +80,6 @@
|
|||||||
background: #476685;
|
background: #476685;
|
||||||
}
|
}
|
||||||
|
|
||||||
.custom-scrollbar {
|
|
||||||
-webkit-overflow-scrolling: touch;
|
|
||||||
overscroll-behavior: contain;
|
|
||||||
scroll-behavior: smooth;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Ensure mouse wheel scrolling works */
|
|
||||||
.custom-scrollbar,
|
|
||||||
.custom-scrollbar * {
|
|
||||||
touch-action: pan-y;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Firefox scrollbar */
|
|
||||||
.custom-scrollbar {
|
|
||||||
scrollbar-width: thin;
|
|
||||||
scrollbar-color: #324d67 #111a22;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Electric glow animation for buttons */
|
/* Electric glow animation for buttons */
|
||||||
@keyframes electric-glow {
|
@keyframes electric-glow {
|
||||||
0%, 100% {
|
0%, 100% {
|
||||||
@@ -141,23 +122,3 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Custom Toggle Switch */
|
|
||||||
.toggle-checkbox:checked {
|
|
||||||
right: 0;
|
|
||||||
border-color: #137fec;
|
|
||||||
}
|
|
||||||
|
|
||||||
.toggle-checkbox:checked + .toggle-label {
|
|
||||||
background-color: #137fec;
|
|
||||||
}
|
|
||||||
|
|
||||||
.toggle-checkbox {
|
|
||||||
right: 0;
|
|
||||||
left: auto;
|
|
||||||
}
|
|
||||||
|
|
||||||
.toggle-checkbox:checked {
|
|
||||||
right: 0;
|
|
||||||
left: auto;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -3,7 +3,6 @@ import { useState, useMemo, useEffect } from 'react'
|
|||||||
import apiClient from '@/api/client'
|
import apiClient from '@/api/client'
|
||||||
import { monitoringApi } from '@/api/monitoring'
|
import { monitoringApi } from '@/api/monitoring'
|
||||||
import { storageApi } from '@/api/storage'
|
import { storageApi } from '@/api/storage'
|
||||||
import { systemAPI } from '@/api/system'
|
|
||||||
import { formatBytes } from '@/lib/format'
|
import { formatBytes } from '@/lib/format'
|
||||||
import {
|
import {
|
||||||
Cpu,
|
Cpu,
|
||||||
@@ -26,8 +25,45 @@ import {
|
|||||||
ResponsiveContainer,
|
ResponsiveContainer,
|
||||||
} from 'recharts'
|
} from 'recharts'
|
||||||
|
|
||||||
// Mock data - moved outside component to prevent re-creation on every render
|
export default function Dashboard() {
|
||||||
const MOCK_ACTIVE_JOBS = [
|
const [activeTab, setActiveTab] = useState<'jobs' | 'logs' | 'alerts'>('jobs')
|
||||||
|
const [networkDataPoints, setNetworkDataPoints] = useState<Array<{ time: string; inbound: number; outbound: number }>>([])
|
||||||
|
const refreshInterval = 5
|
||||||
|
|
||||||
|
const { data: health } = useQuery({
|
||||||
|
queryKey: ['health'],
|
||||||
|
queryFn: async () => {
|
||||||
|
const response = await apiClient.get('/health')
|
||||||
|
return response.data
|
||||||
|
},
|
||||||
|
refetchInterval: refreshInterval * 1000,
|
||||||
|
})
|
||||||
|
|
||||||
|
const { data: metrics } = useQuery({
|
||||||
|
queryKey: ['metrics'],
|
||||||
|
queryFn: monitoringApi.getMetrics,
|
||||||
|
refetchInterval: refreshInterval * 1000,
|
||||||
|
})
|
||||||
|
|
||||||
|
const { data: alerts } = useQuery({
|
||||||
|
queryKey: ['alerts', 'dashboard'],
|
||||||
|
queryFn: () => monitoringApi.listAlerts({ is_acknowledged: false, limit: 10 }),
|
||||||
|
refetchInterval: refreshInterval * 1000,
|
||||||
|
})
|
||||||
|
|
||||||
|
const { data: repositories = [] } = useQuery({
|
||||||
|
queryKey: ['storage', 'repositories'],
|
||||||
|
queryFn: storageApi.listRepositories,
|
||||||
|
})
|
||||||
|
|
||||||
|
// Calculate uptime (mock for now, would come from metrics)
|
||||||
|
const uptime = metrics?.system?.uptime_seconds || 0
|
||||||
|
const days = Math.floor(uptime / 86400)
|
||||||
|
const hours = Math.floor((uptime % 86400) / 3600)
|
||||||
|
const minutes = Math.floor((uptime % 3600) / 60)
|
||||||
|
|
||||||
|
// Mock active jobs (would come from tasks API)
|
||||||
|
const activeJobs = [
|
||||||
{
|
{
|
||||||
id: '1',
|
id: '1',
|
||||||
name: 'Daily Backup: VM-Cluster-01',
|
name: 'Daily Backup: VM-Cluster-01',
|
||||||
@@ -45,125 +81,65 @@ const MOCK_ACTIVE_JOBS = [
|
|||||||
speed: '1.2 GB/s',
|
speed: '1.2 GB/s',
|
||||||
status: 'running',
|
status: 'running',
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
|
|
||||||
|
// Mock system logs
|
||||||
|
const systemLogs = [
|
||||||
|
{ time: '10:45:22', level: 'INFO', source: 'systemd', message: 'Started User Manager for UID 1000.' },
|
||||||
|
{ time: '10:45:15', level: 'WARN', source: 'smartd', message: 'Device: /dev/ada5, SMART Usage Attribute: 194 Temperature_Celsius changed from 38 to 41' },
|
||||||
|
{ time: '10:44:58', level: 'INFO', source: 'kernel', message: 'ix0: link state changed to UP' },
|
||||||
|
{ time: '10:42:10', level: 'INFO', source: 'zfs', message: 'zfs_arc_reclaim_thread: reclaiming 157286400 bytes ...' },
|
||||||
|
]
|
||||||
|
|
||||||
export default function Dashboard() {
|
const totalStorage = Array.isArray(repositories) ? repositories.reduce((sum, repo) => sum + (repo?.size_bytes || 0), 0) : 0
|
||||||
const [activeTab, setActiveTab] = useState<'jobs' | 'logs' | 'alerts'>('jobs')
|
const usedStorage = Array.isArray(repositories) ? repositories.reduce((sum, repo) => sum + (repo?.used_bytes || 0), 0) : 0
|
||||||
const [networkDataPoints, setNetworkDataPoints] = useState<Array<{ time: string; inbound: number; outbound: number }>>([])
|
const storagePercent = totalStorage > 0 ? (usedStorage / totalStorage) * 100 : 0
|
||||||
const refreshInterval = 5
|
|
||||||
|
|
||||||
// Fetch system logs with auto-refresh every 10 minutes
|
// Initialize network data
|
||||||
const { data: systemLogs = [], isLoading: logsLoading, refetch: refetchLogs } = useQuery({
|
|
||||||
queryKey: ['system-logs'],
|
|
||||||
queryFn: () => systemAPI.getSystemLogs(30),
|
|
||||||
refetchInterval: 10 * 60 * 1000, // 10 minutes
|
|
||||||
})
|
|
||||||
|
|
||||||
const { data: health } = useQuery({
|
|
||||||
queryKey: ['health'],
|
|
||||||
queryFn: async () => {
|
|
||||||
const response = await apiClient.get('/health')
|
|
||||||
return response.data
|
|
||||||
},
|
|
||||||
refetchInterval: refreshInterval * 1000,
|
|
||||||
staleTime: refreshInterval * 1000 * 2, // Consider data fresh for 2x the interval
|
|
||||||
refetchOnWindowFocus: false, // Don't refetch on window focus
|
|
||||||
refetchOnMount: false, // Don't refetch on mount if data is fresh
|
|
||||||
notifyOnChangeProps: ['data', 'error'],
|
|
||||||
structuralSharing: (oldData, newData) => {
|
|
||||||
// Only update if data actually changed
|
|
||||||
if (JSON.stringify(oldData) === JSON.stringify(newData)) {
|
|
||||||
return oldData
|
|
||||||
}
|
|
||||||
return newData
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
const { data: metrics } = useQuery({
|
|
||||||
queryKey: ['metrics'],
|
|
||||||
queryFn: monitoringApi.getMetrics,
|
|
||||||
refetchInterval: refreshInterval * 1000,
|
|
||||||
staleTime: refreshInterval * 1000 * 2,
|
|
||||||
refetchOnWindowFocus: false,
|
|
||||||
refetchOnMount: false,
|
|
||||||
notifyOnChangeProps: ['data', 'error'],
|
|
||||||
structuralSharing: (oldData, newData) => {
|
|
||||||
if (JSON.stringify(oldData) === JSON.stringify(newData)) {
|
|
||||||
return oldData
|
|
||||||
}
|
|
||||||
return newData
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
const { data: alerts } = useQuery({
|
|
||||||
queryKey: ['alerts', 'dashboard'],
|
|
||||||
queryFn: () => monitoringApi.listAlerts({ is_acknowledged: false, limit: 10 }),
|
|
||||||
refetchInterval: refreshInterval * 1000,
|
|
||||||
staleTime: refreshInterval * 1000 * 2,
|
|
||||||
refetchOnWindowFocus: false,
|
|
||||||
refetchOnMount: false,
|
|
||||||
notifyOnChangeProps: ['data', 'error'],
|
|
||||||
structuralSharing: (oldData, newData) => {
|
|
||||||
if (JSON.stringify(oldData) === JSON.stringify(newData)) {
|
|
||||||
return oldData
|
|
||||||
}
|
|
||||||
return newData
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
const { data: repositories = [] } = useQuery({
|
|
||||||
queryKey: ['storage', 'repositories'],
|
|
||||||
queryFn: storageApi.listRepositories,
|
|
||||||
staleTime: 60 * 1000, // Consider repositories fresh for 60 seconds
|
|
||||||
refetchOnWindowFocus: false,
|
|
||||||
refetchOnMount: false,
|
|
||||||
notifyOnChangeProps: ['data', 'error'],
|
|
||||||
structuralSharing: (oldData, newData) => {
|
|
||||||
if (JSON.stringify(oldData) === JSON.stringify(newData)) {
|
|
||||||
return oldData
|
|
||||||
}
|
|
||||||
return newData
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
// Memoize uptime calculations to prevent recalculation on every render
|
|
||||||
const { days, hours, minutes } = useMemo(() => {
|
|
||||||
const uptimeValue = metrics?.system?.uptime_seconds || 0
|
|
||||||
return {
|
|
||||||
days: Math.floor(uptimeValue / 86400),
|
|
||||||
hours: Math.floor((uptimeValue % 86400) / 3600),
|
|
||||||
minutes: Math.floor((uptimeValue % 3600) / 60),
|
|
||||||
}
|
|
||||||
}, [metrics?.system?.uptime_seconds])
|
|
||||||
|
|
||||||
// Use memoized storage calculations to prevent unnecessary recalculations
|
|
||||||
const { totalStorage, usedStorage, storagePercent } = useMemo(() => {
|
|
||||||
const total = Array.isArray(repositories) ? repositories.reduce((sum, repo) => sum + (repo?.size_bytes || 0), 0) : 0
|
|
||||||
const used = Array.isArray(repositories) ? repositories.reduce((sum, repo) => sum + (repo?.used_bytes || 0), 0) : 0
|
|
||||||
const percent = total > 0 ? (used / total) * 100 : 0
|
|
||||||
return { totalStorage: total, usedStorage: used, storagePercent: percent }
|
|
||||||
}, [repositories])
|
|
||||||
|
|
||||||
// Fetch network throughput data from RRD
|
|
||||||
const { data: networkThroughput = [] } = useQuery({
|
|
||||||
queryKey: ['network-throughput'],
|
|
||||||
queryFn: () => systemAPI.getNetworkThroughput('5m'),
|
|
||||||
refetchInterval: 5 * 1000, // Refresh every 5 seconds
|
|
||||||
})
|
|
||||||
|
|
||||||
// Update network data points when new data arrives
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (networkThroughput.length > 0) {
|
// Generate initial 30 data points
|
||||||
// Take last 30 points
|
const initialData = []
|
||||||
const points = networkThroughput.slice(-30).map((point) => ({
|
const now = Date.now()
|
||||||
time: point.time,
|
for (let i = 29; i >= 0; i--) {
|
||||||
inbound: Math.round(point.inbound),
|
const time = new Date(now - i * 5000)
|
||||||
outbound: Math.round(point.outbound),
|
const minutes = time.getMinutes().toString().padStart(2, '0')
|
||||||
}))
|
const seconds = time.getSeconds().toString().padStart(2, '0')
|
||||||
setNetworkDataPoints(points)
|
|
||||||
|
const baseInbound = 800 + Math.random() * 400
|
||||||
|
const baseOutbound = 400 + Math.random() * 200
|
||||||
|
|
||||||
|
initialData.push({
|
||||||
|
time: `${minutes}:${seconds}`,
|
||||||
|
inbound: Math.round(baseInbound),
|
||||||
|
outbound: Math.round(baseOutbound),
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}, [networkThroughput])
|
setNetworkDataPoints(initialData)
|
||||||
|
|
||||||
|
// Update data every 5 seconds
|
||||||
|
const interval = setInterval(() => {
|
||||||
|
setNetworkDataPoints((prev) => {
|
||||||
|
const now = new Date()
|
||||||
|
const minutes = now.getMinutes().toString().padStart(2, '0')
|
||||||
|
const seconds = now.getSeconds().toString().padStart(2, '0')
|
||||||
|
|
||||||
|
const baseInbound = 800 + Math.random() * 400
|
||||||
|
const baseOutbound = 400 + Math.random() * 200
|
||||||
|
|
||||||
|
const newPoint = {
|
||||||
|
time: `${minutes}:${seconds}`,
|
||||||
|
inbound: Math.round(baseInbound),
|
||||||
|
outbound: Math.round(baseOutbound),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keep only last 30 points
|
||||||
|
const updated = [...prev.slice(1), newPoint]
|
||||||
|
return updated
|
||||||
|
})
|
||||||
|
}, 5000)
|
||||||
|
|
||||||
|
return () => clearInterval(interval)
|
||||||
|
}, [])
|
||||||
|
|
||||||
// Calculate current and peak throughput
|
// Calculate current and peak throughput
|
||||||
const currentThroughput = useMemo(() => {
|
const currentThroughput = useMemo(() => {
|
||||||
@@ -181,15 +157,11 @@ export default function Dashboard() {
|
|||||||
return Math.max(...networkDataPoints.map((d) => d.inbound + d.outbound))
|
return Math.max(...networkDataPoints.map((d) => d.inbound + d.outbound))
|
||||||
}, [networkDataPoints])
|
}, [networkDataPoints])
|
||||||
|
|
||||||
// Memoize system status to prevent recalculation
|
const systemStatus = health?.status === 'healthy' ? 'System Healthy' : 'System Degraded'
|
||||||
const { systemStatus, isHealthy } = useMemo(() => {
|
const isHealthy = health?.status === 'healthy'
|
||||||
const status = health?.status === 'healthy' ? 'System Healthy' : 'System Degraded'
|
|
||||||
const healthy = health?.status === 'healthy'
|
|
||||||
return { systemStatus: status, isHealthy: healthy }
|
|
||||||
}, [health?.status])
|
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="h-full bg-background-dark text-white">
|
<div className="min-h-screen bg-background-dark text-white overflow-hidden">
|
||||||
{/* Header */}
|
{/* Header */}
|
||||||
<header className="flex-none px-6 py-5 border-b border-border-dark bg-background-dark/95 backdrop-blur z-10">
|
<header className="flex-none px-6 py-5 border-b border-border-dark bg-background-dark/95 backdrop-blur z-10">
|
||||||
<div className="flex flex-wrap justify-between items-end gap-3 max-w-[1600px] mx-auto">
|
<div className="flex flex-wrap justify-between items-end gap-3 max-w-[1600px] mx-auto">
|
||||||
@@ -448,9 +420,9 @@ export default function Dashboard() {
|
|||||||
}`}
|
}`}
|
||||||
>
|
>
|
||||||
Active Jobs{' '}
|
Active Jobs{' '}
|
||||||
{MOCK_ACTIVE_JOBS.length > 0 && (
|
{activeJobs.length > 0 && (
|
||||||
<span className="ml-2 bg-primary/20 text-primary px-1.5 py-0.5 rounded text-xs">
|
<span className="ml-2 bg-primary/20 text-primary px-1.5 py-0.5 rounded text-xs">
|
||||||
{MOCK_ACTIVE_JOBS.length}
|
{activeJobs.length}
|
||||||
</span>
|
</span>
|
||||||
)}
|
)}
|
||||||
</button>
|
</button>
|
||||||
@@ -501,7 +473,7 @@ export default function Dashboard() {
|
|||||||
</tr>
|
</tr>
|
||||||
</thead>
|
</thead>
|
||||||
<tbody className="text-sm divide-y divide-border-dark">
|
<tbody className="text-sm divide-y divide-border-dark">
|
||||||
{MOCK_ACTIVE_JOBS.map((job) => (
|
{activeJobs.map((job) => (
|
||||||
<tr key={job.id} className="group hover:bg-[#233648] transition-colors">
|
<tr key={job.id} className="group hover:bg-[#233648] transition-colors">
|
||||||
<td className="px-6 py-4 font-medium text-white">{job.name}</td>
|
<td className="px-6 py-4 font-medium text-white">{job.name}</td>
|
||||||
<td className="px-6 py-4 text-text-secondary">{job.type}</td>
|
<td className="px-6 py-4 text-text-secondary">{job.type}</td>
|
||||||
@@ -540,30 +512,11 @@ export default function Dashboard() {
|
|||||||
<h4 className="text-xs uppercase text-text-secondary font-bold tracking-wider">
|
<h4 className="text-xs uppercase text-text-secondary font-bold tracking-wider">
|
||||||
Recent System Events
|
Recent System Events
|
||||||
</h4>
|
</h4>
|
||||||
<div className="flex items-center gap-3">
|
|
||||||
<button
|
|
||||||
onClick={() => refetchLogs()}
|
|
||||||
disabled={logsLoading}
|
|
||||||
className="text-xs text-primary hover:text-white transition-colors flex items-center gap-1 disabled:opacity-50"
|
|
||||||
>
|
|
||||||
<RefreshCw size={14} className={logsLoading ? 'animate-spin' : ''} />
|
|
||||||
Refresh
|
|
||||||
</button>
|
|
||||||
<button className="text-xs text-primary hover:text-white transition-colors">
|
<button className="text-xs text-primary hover:text-white transition-colors">
|
||||||
View All Logs
|
View All Logs
|
||||||
</button>
|
</button>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
|
||||||
<div className="flex-1 overflow-y-auto custom-scrollbar bg-[#111a22]">
|
<div className="flex-1 overflow-y-auto custom-scrollbar bg-[#111a22]">
|
||||||
{logsLoading ? (
|
|
||||||
<div className="flex items-center justify-center py-8">
|
|
||||||
<span className="text-text-secondary">Loading logs...</span>
|
|
||||||
</div>
|
|
||||||
) : systemLogs.length === 0 ? (
|
|
||||||
<div className="flex items-center justify-center py-8">
|
|
||||||
<span className="text-text-secondary">No logs available</span>
|
|
||||||
</div>
|
|
||||||
) : (
|
|
||||||
<table className="w-full text-left border-collapse">
|
<table className="w-full text-left border-collapse">
|
||||||
<tbody className="text-sm font-mono divide-y divide-border-dark/50">
|
<tbody className="text-sm font-mono divide-y divide-border-dark/50">
|
||||||
{systemLogs.map((log, idx) => (
|
{systemLogs.map((log, idx) => (
|
||||||
@@ -574,7 +527,7 @@ export default function Dashboard() {
|
|||||||
<td className="px-6 py-2 w-24">
|
<td className="px-6 py-2 w-24">
|
||||||
<span
|
<span
|
||||||
className={
|
className={
|
||||||
log.level === 'INFO' || log.level === 'NOTICE' || log.level === 'DEBUG'
|
log.level === 'INFO'
|
||||||
? 'text-emerald-500'
|
? 'text-emerald-500'
|
||||||
: log.level === 'WARN'
|
: log.level === 'WARN'
|
||||||
? 'text-yellow-500'
|
? 'text-yellow-500'
|
||||||
@@ -592,7 +545,6 @@ export default function Dashboard() {
|
|||||||
))}
|
))}
|
||||||
</tbody>
|
</tbody>
|
||||||
</table>
|
</table>
|
||||||
)}
|
|
||||||
</div>
|
</div>
|
||||||
</>
|
</>
|
||||||
)}
|
)}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,10 +1,10 @@
|
|||||||
import { useParams, useNavigate } from 'react-router-dom'
|
import { useParams, useNavigate } from 'react-router-dom'
|
||||||
import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query'
|
import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query'
|
||||||
import { scstAPI, type SCSTTarget, type SCSTExtent } from '@/api/scst'
|
import { scstAPI, type SCSTHandler } from '@/api/scst'
|
||||||
import { Card, CardContent, CardHeader, CardTitle, CardDescription } from '@/components/ui/card'
|
import { Card, CardContent, CardHeader, CardTitle, CardDescription } from '@/components/ui/card'
|
||||||
import { Button } from '@/components/ui/button'
|
import { Button } from '@/components/ui/button'
|
||||||
import { ArrowLeft, Plus, RefreshCw, HardDrive, Users, Trash2 } from 'lucide-react'
|
import { ArrowLeft, Plus, RefreshCw, HardDrive, Users } from 'lucide-react'
|
||||||
import { useState, useEffect } from 'react'
|
import { useState } from 'react'
|
||||||
|
|
||||||
export default function ISCSITargetDetail() {
|
export default function ISCSITargetDetail() {
|
||||||
const { id } = useParams<{ id: string }>()
|
const { id } = useParams<{ id: string }>()
|
||||||
@@ -13,74 +13,15 @@ export default function ISCSITargetDetail() {
|
|||||||
const [showAddLUN, setShowAddLUN] = useState(false)
|
const [showAddLUN, setShowAddLUN] = useState(false)
|
||||||
const [showAddInitiator, setShowAddInitiator] = useState(false)
|
const [showAddInitiator, setShowAddInitiator] = useState(false)
|
||||||
|
|
||||||
useEffect(() => {
|
|
||||||
console.log('showAddLUN state:', showAddLUN)
|
|
||||||
}, [showAddLUN])
|
|
||||||
|
|
||||||
const { data, isLoading } = useQuery({
|
const { data, isLoading } = useQuery({
|
||||||
queryKey: ['scst-target', id],
|
queryKey: ['scst-target', id],
|
||||||
queryFn: () => scstAPI.getTarget(id!),
|
queryFn: () => scstAPI.getTarget(id!),
|
||||||
enabled: !!id,
|
enabled: !!id,
|
||||||
})
|
})
|
||||||
|
|
||||||
const removeLUNMutation = useMutation({
|
const { data: handlers } = useQuery<SCSTHandler[]>({
|
||||||
mutationFn: ({ targetId, lunId }: { targetId: string; lunId: string }) =>
|
queryKey: ['scst-handlers'],
|
||||||
scstAPI.removeLUN(targetId, lunId),
|
queryFn: scstAPI.listHandlers,
|
||||||
onMutate: async ({ lunId }) => {
|
|
||||||
// Cancel any outgoing refetches
|
|
||||||
await queryClient.cancelQueries({ queryKey: ['scst-target', id] })
|
|
||||||
await queryClient.cancelQueries({ queryKey: ['scst-targets'] })
|
|
||||||
|
|
||||||
// Snapshot the previous value
|
|
||||||
const previousTarget = queryClient.getQueryData(['scst-target', id])
|
|
||||||
const previousTargets = queryClient.getQueryData<SCSTTarget[]>(['scst-targets'])
|
|
||||||
|
|
||||||
// Optimistically update to remove the LUN
|
|
||||||
queryClient.setQueryData(['scst-target', id], (old: any) => {
|
|
||||||
if (!old) return old
|
|
||||||
return {
|
|
||||||
...old,
|
|
||||||
luns: old.luns ? old.luns.filter((lun: any) => lun.id !== lunId) : []
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
// Optimistically update LUN count in targets list
|
|
||||||
queryClient.setQueryData<SCSTTarget[]>(['scst-targets'], (old) => {
|
|
||||||
if (!old) return old
|
|
||||||
return old.map(t =>
|
|
||||||
t.id === id
|
|
||||||
? { ...t, lun_count: Math.max(0, (t.lun_count || 0) - 1) }
|
|
||||||
: t
|
|
||||||
)
|
|
||||||
})
|
|
||||||
|
|
||||||
return { previousTarget, previousTargets }
|
|
||||||
},
|
|
||||||
onSuccess: () => {
|
|
||||||
// Invalidate queries to refetch data from the server.
|
|
||||||
// This is simpler and less prone to race conditions than the previous implementation.
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['scst-target', id] });
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['scst-targets'] });
|
|
||||||
},
|
|
||||||
onError: (error: any, _variables, context) => {
|
|
||||||
// If 404, treat as success (LUN already deleted)
|
|
||||||
if (error.response?.status === 404) {
|
|
||||||
// LUN already deleted, just refresh to sync UI
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['scst-target', id] });
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['scst-targets'] });
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rollback optimistic update
|
|
||||||
if (context?.previousTarget) {
|
|
||||||
queryClient.setQueryData(['scst-target', id], context.previousTarget)
|
|
||||||
}
|
|
||||||
if (context?.previousTargets) {
|
|
||||||
queryClient.setQueryData<SCSTTarget[]>(['scst-targets'], context.previousTargets)
|
|
||||||
}
|
|
||||||
|
|
||||||
alert(`Failed to remove LUN: ${error.response?.data?.error || error.message}`)
|
|
||||||
},
|
|
||||||
})
|
})
|
||||||
|
|
||||||
if (isLoading) {
|
if (isLoading) {
|
||||||
@@ -92,8 +33,6 @@ export default function ISCSITargetDetail() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
const { target, luns } = data
|
const { target, luns } = data
|
||||||
// Ensure luns is always an array, not null
|
|
||||||
const lunsArray = luns || []
|
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="space-y-6 min-h-screen bg-background-dark p-6">
|
<div className="space-y-6 min-h-screen bg-background-dark p-6">
|
||||||
@@ -152,12 +91,12 @@ export default function ISCSITargetDetail() {
|
|||||||
<div className="space-y-2">
|
<div className="space-y-2">
|
||||||
<div className="flex justify-between">
|
<div className="flex justify-between">
|
||||||
<span className="text-text-secondary">Total LUNs:</span>
|
<span className="text-text-secondary">Total LUNs:</span>
|
||||||
<span className="font-medium text-white">{lunsArray.length}</span>
|
<span className="font-medium text-white">{luns.length}</span>
|
||||||
</div>
|
</div>
|
||||||
<div className="flex justify-between">
|
<div className="flex justify-between">
|
||||||
<span className="text-text-secondary">Active:</span>
|
<span className="text-text-secondary">Active:</span>
|
||||||
<span className="font-medium text-white">
|
<span className="font-medium text-white">
|
||||||
{lunsArray.filter((l) => l.is_active).length}
|
{luns.filter((l) => l.is_active).length}
|
||||||
</span>
|
</span>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@@ -177,7 +116,7 @@ export default function ISCSITargetDetail() {
|
|||||||
onClick={() => setShowAddLUN(true)}
|
onClick={() => setShowAddLUN(true)}
|
||||||
>
|
>
|
||||||
<Plus className="h-4 w-4 mr-2" />
|
<Plus className="h-4 w-4 mr-2" />
|
||||||
Assign Extent
|
Add LUN
|
||||||
</Button>
|
</Button>
|
||||||
<Button
|
<Button
|
||||||
variant="outline"
|
variant="outline"
|
||||||
@@ -201,21 +140,14 @@ export default function ISCSITargetDetail() {
|
|||||||
<CardTitle>LUNs (Logical Unit Numbers)</CardTitle>
|
<CardTitle>LUNs (Logical Unit Numbers)</CardTitle>
|
||||||
<CardDescription>Storage devices exported by this target</CardDescription>
|
<CardDescription>Storage devices exported by this target</CardDescription>
|
||||||
</div>
|
</div>
|
||||||
<Button
|
<Button variant="outline" size="sm" onClick={() => setShowAddLUN(true)}>
|
||||||
variant="outline"
|
|
||||||
size="sm"
|
|
||||||
onClick={(e) => {
|
|
||||||
e.stopPropagation()
|
|
||||||
setShowAddLUN(true)
|
|
||||||
}}
|
|
||||||
>
|
|
||||||
<Plus className="h-4 w-4 mr-2" />
|
<Plus className="h-4 w-4 mr-2" />
|
||||||
Assign Extent
|
Add LUN
|
||||||
</Button>
|
</Button>
|
||||||
</div>
|
</div>
|
||||||
</CardHeader>
|
</CardHeader>
|
||||||
<CardContent>
|
<CardContent>
|
||||||
{lunsArray.length > 0 ? (
|
{luns.length > 0 ? (
|
||||||
<div className="overflow-x-auto">
|
<div className="overflow-x-auto">
|
||||||
<table className="min-w-full divide-y divide-gray-200">
|
<table className="min-w-full divide-y divide-gray-200">
|
||||||
<thead className="bg-[#1a2632]">
|
<thead className="bg-[#1a2632]">
|
||||||
@@ -235,13 +167,10 @@ export default function ISCSITargetDetail() {
|
|||||||
<th className="px-6 py-3 text-left text-xs font-medium text-text-secondary uppercase">
|
<th className="px-6 py-3 text-left text-xs font-medium text-text-secondary uppercase">
|
||||||
Status
|
Status
|
||||||
</th>
|
</th>
|
||||||
<th className="px-6 py-3 text-right text-xs font-medium text-text-secondary uppercase">
|
|
||||||
Actions
|
|
||||||
</th>
|
|
||||||
</tr>
|
</tr>
|
||||||
</thead>
|
</thead>
|
||||||
<tbody className="bg-card-dark divide-y divide-border-dark">
|
<tbody className="bg-card-dark divide-y divide-border-dark">
|
||||||
{lunsArray.map((lun) => (
|
{luns.map((lun) => (
|
||||||
<tr key={lun.id} className="hover:bg-[#233648]">
|
<tr key={lun.id} className="hover:bg-[#233648]">
|
||||||
<td className="px-6 py-4 whitespace-nowrap text-sm font-medium text-white">
|
<td className="px-6 py-4 whitespace-nowrap text-sm font-medium text-white">
|
||||||
{lun.lun_number}
|
{lun.lun_number}
|
||||||
@@ -266,21 +195,6 @@ export default function ISCSITargetDetail() {
|
|||||||
{lun.is_active ? 'Active' : 'Inactive'}
|
{lun.is_active ? 'Active' : 'Inactive'}
|
||||||
</span>
|
</span>
|
||||||
</td>
|
</td>
|
||||||
<td className="px-6 py-4 whitespace-nowrap text-right text-sm">
|
|
||||||
<button
|
|
||||||
onClick={(e) => {
|
|
||||||
e.stopPropagation()
|
|
||||||
if (confirm(`Remove LUN ${lun.lun_number} from this target?`)) {
|
|
||||||
removeLUNMutation.mutate({ targetId: target.id, lunId: lun.id })
|
|
||||||
}
|
|
||||||
}}
|
|
||||||
disabled={removeLUNMutation.isPending}
|
|
||||||
className="p-1.5 hover:bg-red-500/10 rounded text-text-secondary hover:text-red-400 transition-colors disabled:opacity-50"
|
|
||||||
title="Remove LUN"
|
|
||||||
>
|
|
||||||
<Trash2 size={16} />
|
|
||||||
</button>
|
|
||||||
</td>
|
|
||||||
</tr>
|
</tr>
|
||||||
))}
|
))}
|
||||||
</tbody>
|
</tbody>
|
||||||
@@ -290,32 +204,24 @@ export default function ISCSITargetDetail() {
|
|||||||
<div className="text-center py-8">
|
<div className="text-center py-8">
|
||||||
<HardDrive className="h-12 w-12 text-gray-400 mx-auto mb-4" />
|
<HardDrive className="h-12 w-12 text-gray-400 mx-auto mb-4" />
|
||||||
<p className="text-sm text-text-secondary mb-4">No LUNs configured</p>
|
<p className="text-sm text-text-secondary mb-4">No LUNs configured</p>
|
||||||
<Button
|
<Button variant="outline" onClick={() => setShowAddLUN(true)}>
|
||||||
variant="outline"
|
|
||||||
onClick={(e) => {
|
|
||||||
e.stopPropagation()
|
|
||||||
setShowAddLUN(true)
|
|
||||||
}}
|
|
||||||
>
|
|
||||||
<Plus className="h-4 w-4 mr-2" />
|
<Plus className="h-4 w-4 mr-2" />
|
||||||
Assign First Extent
|
Add First LUN
|
||||||
</Button>
|
</Button>
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
</CardContent>
|
</CardContent>
|
||||||
</Card>
|
</Card>
|
||||||
|
|
||||||
{/* Assign Extent Form */}
|
{/* Add LUN Form */}
|
||||||
{showAddLUN && (
|
{showAddLUN && (
|
||||||
<AssignExtentForm
|
<AddLUNForm
|
||||||
targetId={target.id}
|
targetId={target.id}
|
||||||
|
handlers={handlers || []}
|
||||||
onClose={() => setShowAddLUN(false)}
|
onClose={() => setShowAddLUN(false)}
|
||||||
onSuccess={async () => {
|
onSuccess={() => {
|
||||||
setShowAddLUN(false)
|
setShowAddLUN(false)
|
||||||
// Invalidate queries to refetch data.
|
queryClient.invalidateQueries({ queryKey: ['scst-target', id] })
|
||||||
// Invalidate extents since one is now in use.
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['scst-target', id] });
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['scst-extents'] });
|
|
||||||
}}
|
}}
|
||||||
/>
|
/>
|
||||||
)}
|
)}
|
||||||
@@ -335,129 +241,102 @@ export default function ISCSITargetDetail() {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
interface AssignExtentFormProps {
|
interface AddLUNFormProps {
|
||||||
targetId: string
|
targetId: string
|
||||||
|
handlers: SCSTHandler[]
|
||||||
onClose: () => void
|
onClose: () => void
|
||||||
onSuccess: () => Promise<void>
|
onSuccess: () => void
|
||||||
}
|
}
|
||||||
|
|
||||||
function AssignExtentForm({ targetId, onClose, onSuccess }: AssignExtentFormProps) {
|
function AddLUNForm({ targetId, handlers, onClose, onSuccess }: AddLUNFormProps) {
|
||||||
const [selectedExtent, setSelectedExtent] = useState('')
|
const [handlerType, setHandlerType] = useState('')
|
||||||
|
const [devicePath, setDevicePath] = useState('')
|
||||||
|
const [deviceName, setDeviceName] = useState('')
|
||||||
const [lunNumber, setLunNumber] = useState(0)
|
const [lunNumber, setLunNumber] = useState(0)
|
||||||
|
|
||||||
// Fetch available extents
|
|
||||||
const { data: extents = [], isLoading: extentsLoading } = useQuery<SCSTExtent[]>({
|
|
||||||
queryKey: ['scst-extents'],
|
|
||||||
queryFn: scstAPI.listExtents,
|
|
||||||
staleTime: 0,
|
|
||||||
refetchOnMount: true,
|
|
||||||
})
|
|
||||||
|
|
||||||
// Filter only extents that are not in use
|
|
||||||
const availableExtents = extents.filter(extent => !extent.is_in_use)
|
|
||||||
|
|
||||||
const addLUNMutation = useMutation({
|
const addLUNMutation = useMutation({
|
||||||
mutationFn: (data: { device_name: string; device_path: string; lun_number: number; handler_type: string }) =>
|
mutationFn: (data: { device_name: string; device_path: string; lun_number: number; handler_type: string }) =>
|
||||||
scstAPI.addLUN(targetId, data),
|
scstAPI.addLUN(targetId, data),
|
||||||
onSuccess: async () => {
|
onSuccess: () => {
|
||||||
await onSuccess()
|
onSuccess()
|
||||||
},
|
|
||||||
onError: (error: any) => {
|
|
||||||
const errorMessage = error.response?.data?.error || error.message || 'Failed to assign extent'
|
|
||||||
alert(errorMessage)
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
const handleSubmit = (e: React.FormEvent) => {
|
const handleSubmit = (e: React.FormEvent) => {
|
||||||
e.preventDefault()
|
e.preventDefault()
|
||||||
if (!selectedExtent || lunNumber < 0) {
|
if (!handlerType || !devicePath || !deviceName || lunNumber < 0) {
|
||||||
alert('Please select an extent and specify LUN number')
|
alert('All fields are required')
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
const extent = availableExtents.find(e => e.device_name === selectedExtent)
|
|
||||||
if (!extent) {
|
|
||||||
alert('Selected extent not found')
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
addLUNMutation.mutate({
|
addLUNMutation.mutate({
|
||||||
device_name: extent.device_name,
|
handler_type: handlerType.trim(),
|
||||||
device_path: extent.device_path,
|
device_path: devicePath.trim(),
|
||||||
handler_type: extent.handler_type,
|
device_name: deviceName.trim(),
|
||||||
lun_number: lunNumber,
|
lun_number: lunNumber,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="fixed inset-0 bg-black/50 z-50 flex items-center justify-center p-4">
|
<Card>
|
||||||
<div className="bg-card-dark border border-border-dark rounded-xl max-w-2xl w-full max-h-[90vh] overflow-y-auto">
|
<CardHeader>
|
||||||
<div className="p-6 border-b border-border-dark">
|
<CardTitle>Add LUN</CardTitle>
|
||||||
<h2 className="text-xl font-bold text-white">Assign Extent</h2>
|
<CardDescription>Add a storage device to this target</CardDescription>
|
||||||
<p className="text-sm text-text-secondary mt-1">Assign an existing extent to this target as a LUN</p>
|
</CardHeader>
|
||||||
</div>
|
<CardContent>
|
||||||
<form onSubmit={handleSubmit} className="p-6 space-y-4">
|
<form onSubmit={handleSubmit} className="space-y-4">
|
||||||
<div>
|
<div>
|
||||||
<label htmlFor="extent" className="block text-sm font-medium text-white mb-1">
|
<label htmlFor="handlerType" className="block text-sm font-medium text-gray-700 mb-1">
|
||||||
Available Extent *
|
Handler Type *
|
||||||
</label>
|
</label>
|
||||||
{extentsLoading ? (
|
|
||||||
<div className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-text-secondary text-sm">
|
|
||||||
Loading extents...
|
|
||||||
</div>
|
|
||||||
) : availableExtents.length === 0 ? (
|
|
||||||
<div className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-text-secondary text-sm">
|
|
||||||
No available extents. Please create an extent first in the Extents tab.
|
|
||||||
</div>
|
|
||||||
) : (
|
|
||||||
<select
|
<select
|
||||||
id="extent"
|
id="handlerType"
|
||||||
value={selectedExtent}
|
value={handlerType}
|
||||||
onChange={(e) => setSelectedExtent(e.target.value)}
|
onChange={(e) => setHandlerType(e.target.value)}
|
||||||
className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary"
|
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500"
|
||||||
required
|
required
|
||||||
>
|
>
|
||||||
<option value="">Select an extent...</option>
|
<option value="">Select a handler</option>
|
||||||
{availableExtents.map((extent) => (
|
{handlers.map((h) => (
|
||||||
<option key={extent.device_name} value={extent.device_name}>
|
<option key={h.name} value={h.name}>
|
||||||
{extent.device_name} ({extent.handler_type}) - {extent.device_path}
|
{h.name} {h.description && `- ${h.description}`}
|
||||||
</option>
|
</option>
|
||||||
))}
|
))}
|
||||||
</select>
|
</select>
|
||||||
)}
|
|
||||||
<p className="mt-1 text-xs text-text-secondary">
|
|
||||||
Select an extent that has been created in the Extents tab
|
|
||||||
</p>
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
{selectedExtent && (
|
|
||||||
<div className="p-4 bg-[#0f161d] border border-border-dark rounded-lg">
|
|
||||||
<p className="text-sm text-text-secondary mb-2">Extent Details:</p>
|
|
||||||
{(() => {
|
|
||||||
const extent = availableExtents.find(e => e.device_name === selectedExtent)
|
|
||||||
if (!extent) return null
|
|
||||||
return (
|
|
||||||
<div className="space-y-1 text-sm">
|
|
||||||
<div className="flex justify-between">
|
|
||||||
<span className="text-text-secondary">Device Name:</span>
|
|
||||||
<span className="text-white font-mono">{extent.device_name}</span>
|
|
||||||
</div>
|
|
||||||
<div className="flex justify-between">
|
|
||||||
<span className="text-text-secondary">Handler:</span>
|
|
||||||
<span className="text-white">{extent.handler_type}</span>
|
|
||||||
</div>
|
|
||||||
<div className="flex justify-between">
|
|
||||||
<span className="text-text-secondary">Path:</span>
|
|
||||||
<span className="text-white font-mono text-xs">{extent.device_path}</span>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})()}
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
|
|
||||||
<div>
|
<div>
|
||||||
<label htmlFor="lunNumber" className="block text-sm font-medium text-white mb-1">
|
<label htmlFor="deviceName" className="block text-sm font-medium text-gray-700 mb-1">
|
||||||
|
Device Name *
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
id="deviceName"
|
||||||
|
type="text"
|
||||||
|
value={deviceName}
|
||||||
|
onChange={(e) => setDeviceName(e.target.value)}
|
||||||
|
placeholder="device1"
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500"
|
||||||
|
required
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label htmlFor="devicePath" className="block text-sm font-medium text-gray-700 mb-1">
|
||||||
|
Device Path *
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
id="devicePath"
|
||||||
|
type="text"
|
||||||
|
value={devicePath}
|
||||||
|
onChange={(e) => setDevicePath(e.target.value)}
|
||||||
|
placeholder="/dev/sda or /dev/calypso/vg1/lv1"
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500 font-mono text-sm"
|
||||||
|
required
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label htmlFor="lunNumber" className="block text-sm font-medium text-gray-700 mb-1">
|
||||||
LUN Number *
|
LUN Number *
|
||||||
</label>
|
</label>
|
||||||
<input
|
<input
|
||||||
@@ -466,29 +345,22 @@ function AssignExtentForm({ targetId, onClose, onSuccess }: AssignExtentFormProp
|
|||||||
value={lunNumber}
|
value={lunNumber}
|
||||||
onChange={(e) => setLunNumber(parseInt(e.target.value) || 0)}
|
onChange={(e) => setLunNumber(parseInt(e.target.value) || 0)}
|
||||||
min="0"
|
min="0"
|
||||||
max="255"
|
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500"
|
||||||
className="w-full px-3 py-2 bg-[#0f161d] border border-border-dark rounded-lg text-white text-sm focus:outline-none focus:border-primary focus:ring-1 focus:ring-primary"
|
|
||||||
required
|
required
|
||||||
/>
|
/>
|
||||||
<p className="mt-1 text-xs text-text-secondary">
|
|
||||||
Logical Unit Number (0-255, typically start from 0)
|
|
||||||
</p>
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div className="flex justify-end gap-2 pt-4 border-t border-border-dark">
|
<div className="flex justify-end gap-2">
|
||||||
<Button type="button" variant="outline" onClick={onClose}>
|
<Button type="button" variant="outline" onClick={onClose}>
|
||||||
Cancel
|
Cancel
|
||||||
</Button>
|
</Button>
|
||||||
<Button
|
<Button type="submit" disabled={addLUNMutation.isPending}>
|
||||||
type="submit"
|
{addLUNMutation.isPending ? 'Adding...' : 'Add LUN'}
|
||||||
disabled={addLUNMutation.isPending || availableExtents.length === 0}
|
|
||||||
>
|
|
||||||
{addLUNMutation.isPending ? 'Assigning...' : 'Assign Extent'}
|
|
||||||
</Button>
|
</Button>
|
||||||
</div>
|
</div>
|
||||||
</form>
|
</form>
|
||||||
</div>
|
</CardContent>
|
||||||
</div>
|
</Card>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -29,46 +29,13 @@ export default function LoginPage() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<>
|
|
||||||
<style>{`
|
|
||||||
input:-webkit-autofill,
|
|
||||||
input:-webkit-autofill:hover,
|
|
||||||
input:-webkit-autofill:focus,
|
|
||||||
input:-webkit-autofill:active {
|
|
||||||
-webkit-box-shadow: 0 0 0 30px #111a22 inset !important;
|
|
||||||
-webkit-text-fill-color: #ffffff !important;
|
|
||||||
box-shadow: 0 0 0 30px #111a22 inset !important;
|
|
||||||
caret-color: #ffffff !important;
|
|
||||||
}
|
|
||||||
input:-webkit-autofill::first-line {
|
|
||||||
color: #ffffff !important;
|
|
||||||
}
|
|
||||||
`}</style>
|
|
||||||
<div className="min-h-screen flex items-center justify-center bg-background-dark">
|
<div className="min-h-screen flex items-center justify-center bg-background-dark">
|
||||||
<div className="max-w-md w-full space-y-8 p-8 bg-card-dark border border-border-dark rounded-lg shadow-md">
|
<div className="max-w-md w-full space-y-8 p-8 bg-card-dark border border-border-dark rounded-lg shadow-md">
|
||||||
<div className="flex flex-col items-center">
|
<div>
|
||||||
{/* Logo */}
|
<h2 className="mt-6 text-center text-3xl font-extrabold text-white">
|
||||||
<div className="mb-4">
|
AtlasOS - Calypso
|
||||||
<img
|
|
||||||
src="/logo.png"
|
|
||||||
alt="Calypso Logo"
|
|
||||||
className="w-16 h-16 object-contain"
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
{/* Title */}
|
|
||||||
<h2 className="text-center text-3xl font-extrabold text-white">
|
|
||||||
Calypso
|
|
||||||
</h2>
|
</h2>
|
||||||
{/* Version */}
|
|
||||||
<p className="mt-1 text-center text-xs text-text-secondary">
|
|
||||||
Dev Release V.1
|
|
||||||
</p>
|
|
||||||
{/* Subtitle */}
|
|
||||||
<p className="mt-2 text-center text-sm text-text-secondary">
|
<p className="mt-2 text-center text-sm text-text-secondary">
|
||||||
Adastra Backup Storage Appliance
|
|
||||||
</p>
|
|
||||||
{/* Sign in instruction */}
|
|
||||||
<p className="mt-4 text-center text-sm text-text-secondary">
|
|
||||||
Sign in to your account
|
Sign in to your account
|
||||||
</p>
|
</p>
|
||||||
</div>
|
</div>
|
||||||
@@ -88,11 +55,10 @@ export default function LoginPage() {
|
|||||||
name="username"
|
name="username"
|
||||||
type="text"
|
type="text"
|
||||||
required
|
required
|
||||||
className="appearance-none rounded-none relative block w-full px-3 py-2 border border-border-dark bg-[#111a22] placeholder-text-secondary text-white rounded-t-md focus:outline-none focus:ring-primary focus:border-primary focus:z-10 sm:text-sm autofill:bg-[#111a22] autofill:text-white"
|
className="appearance-none rounded-none relative block w-full px-3 py-2 border border-border-dark bg-[#111a22] placeholder-text-secondary text-white rounded-t-md focus:outline-none focus:ring-primary focus:border-primary focus:z-10 sm:text-sm"
|
||||||
placeholder="Username"
|
placeholder="Username"
|
||||||
value={username}
|
value={username}
|
||||||
onChange={(e) => setUsername(e.target.value)}
|
onChange={(e) => setUsername(e.target.value)}
|
||||||
autoComplete="username"
|
|
||||||
/>
|
/>
|
||||||
</div>
|
</div>
|
||||||
<div>
|
<div>
|
||||||
@@ -104,11 +70,10 @@ export default function LoginPage() {
|
|||||||
name="password"
|
name="password"
|
||||||
type="password"
|
type="password"
|
||||||
required
|
required
|
||||||
className="appearance-none rounded-none relative block w-full px-3 py-2 border border-border-dark bg-[#111a22] placeholder-text-secondary text-white rounded-b-md focus:outline-none focus:ring-primary focus:border-primary focus:z-10 sm:text-sm autofill:bg-[#111a22] autofill:text-white"
|
className="appearance-none rounded-none relative block w-full px-3 py-2 border border-border-dark bg-[#111a22] placeholder-text-secondary text-white rounded-b-md focus:outline-none focus:ring-primary focus:border-primary focus:z-10 sm:text-sm"
|
||||||
placeholder="Password"
|
placeholder="Password"
|
||||||
value={password}
|
value={password}
|
||||||
onChange={(e) => setPassword(e.target.value)}
|
onChange={(e) => setPassword(e.target.value)}
|
||||||
autoComplete="current-password"
|
|
||||||
/>
|
/>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
@@ -138,7 +103,6 @@ export default function LoginPage() {
|
|||||||
</form>
|
</form>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</>
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,376 +0,0 @@
|
|||||||
import { useState, useEffect } from 'react'
|
|
||||||
import { useParams, useNavigate } from 'react-router-dom'
|
|
||||||
import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query'
|
|
||||||
import { useAuthStore } from '@/store/auth'
|
|
||||||
import { iamApi, type User, type UpdateUserRequest } from '@/api/iam'
|
|
||||||
import { Button } from '@/components/ui/button'
|
|
||||||
import { ArrowLeft, Save, Mail, User as UserIcon, Shield, Calendar, Clock, Edit2, X } from 'lucide-react'
|
|
||||||
|
|
||||||
export default function Profile() {
|
|
||||||
const { id } = useParams<{ id?: string }>()
|
|
||||||
const navigate = useNavigate()
|
|
||||||
const { user: currentUser } = useAuthStore()
|
|
||||||
const queryClient = useQueryClient()
|
|
||||||
const [isEditing, setIsEditing] = useState(false)
|
|
||||||
const [editForm, setEditForm] = useState({
|
|
||||||
email: '',
|
|
||||||
full_name: '',
|
|
||||||
})
|
|
||||||
|
|
||||||
// Determine which user to show
|
|
||||||
const targetUserId = id || currentUser?.id
|
|
||||||
|
|
||||||
// Check permission: only allow if viewing own profile or user is admin
|
|
||||||
const canView = !!currentUser && !!targetUserId && (
|
|
||||||
targetUserId === currentUser.id ||
|
|
||||||
currentUser.roles.includes('admin')
|
|
||||||
)
|
|
||||||
|
|
||||||
const { data: profileUser, isLoading } = useQuery<User>({
|
|
||||||
queryKey: ['iam-user', targetUserId],
|
|
||||||
queryFn: () => iamApi.getUser(targetUserId!),
|
|
||||||
enabled: canView,
|
|
||||||
})
|
|
||||||
|
|
||||||
const updateMutation = useMutation({
|
|
||||||
mutationFn: (data: UpdateUserRequest) => iamApi.updateUser(targetUserId!, data),
|
|
||||||
onSuccess: () => {
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['iam-user', targetUserId] })
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['iam-users'] })
|
|
||||||
setIsEditing(false)
|
|
||||||
// If updating own profile, refresh auth store
|
|
||||||
if (targetUserId === currentUser?.id) {
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['auth-me'] })
|
|
||||||
}
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
useEffect(() => {
|
|
||||||
if (profileUser) {
|
|
||||||
setEditForm({
|
|
||||||
email: profileUser.email || '',
|
|
||||||
full_name: profileUser.full_name || '',
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}, [profileUser])
|
|
||||||
|
|
||||||
if (!canView) {
|
|
||||||
return (
|
|
||||||
<div className="flex-1 overflow-y-auto p-8">
|
|
||||||
<div className="max-w-[1200px] mx-auto">
|
|
||||||
<div className="bg-red-500/10 border border-red-500/20 rounded-lg p-6 text-center">
|
|
||||||
<p className="text-red-400 font-semibold">Access Denied</p>
|
|
||||||
<p className="text-text-secondary text-sm mt-2">
|
|
||||||
You don't have permission to view this profile.
|
|
||||||
</p>
|
|
||||||
<Button
|
|
||||||
variant="outline"
|
|
||||||
onClick={() => navigate(-1)}
|
|
||||||
className="mt-4"
|
|
||||||
>
|
|
||||||
<ArrowLeft className="h-4 w-4 mr-2" />
|
|
||||||
Go Back
|
|
||||||
</Button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
if (isLoading) {
|
|
||||||
return (
|
|
||||||
<div className="flex-1 overflow-y-auto p-8">
|
|
||||||
<div className="max-w-[1200px] mx-auto">
|
|
||||||
<p className="text-text-secondary">Loading profile...</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!profileUser) {
|
|
||||||
return (
|
|
||||||
<div className="flex-1 overflow-y-auto p-8">
|
|
||||||
<div className="max-w-[1200px] mx-auto">
|
|
||||||
<div className="bg-card-dark border border-border-dark rounded-lg p-6 text-center">
|
|
||||||
<p className="text-text-secondary">User not found</p>
|
|
||||||
<Button
|
|
||||||
variant="outline"
|
|
||||||
onClick={() => navigate(-1)}
|
|
||||||
className="mt-4"
|
|
||||||
>
|
|
||||||
<ArrowLeft className="h-4 w-4 mr-2" />
|
|
||||||
Go Back
|
|
||||||
</Button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
const isOwnProfile = targetUserId === currentUser?.id
|
|
||||||
const canEdit = isOwnProfile || currentUser?.roles.includes('admin')
|
|
||||||
|
|
||||||
const handleSave = () => {
|
|
||||||
updateMutation.mutate({
|
|
||||||
email: editForm.email,
|
|
||||||
full_name: editForm.full_name,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
const formatDate = (dateString: string) => {
|
|
||||||
return new Date(dateString).toLocaleString()
|
|
||||||
}
|
|
||||||
|
|
||||||
const formatLastLogin = (lastLoginAt: string | null) => {
|
|
||||||
if (!lastLoginAt) return 'Never'
|
|
||||||
return formatDate(lastLoginAt)
|
|
||||||
}
|
|
||||||
|
|
||||||
const getAvatarInitials = () => {
|
|
||||||
if (profileUser?.full_name) {
|
|
||||||
return profileUser.full_name
|
|
||||||
.split(' ')
|
|
||||||
.map((n: string) => n[0])
|
|
||||||
.join('')
|
|
||||||
.substring(0, 2)
|
|
||||||
.toUpperCase()
|
|
||||||
}
|
|
||||||
return profileUser?.username?.substring(0, 2).toUpperCase() || 'U'
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div className="flex-1 overflow-y-auto p-8">
|
|
||||||
<div className="max-w-[1200px] mx-auto flex flex-col gap-6">
|
|
||||||
{/* Header */}
|
|
||||||
<div className="flex items-center justify-between">
|
|
||||||
<div className="flex items-center gap-4">
|
|
||||||
<Button
|
|
||||||
variant="ghost"
|
|
||||||
size="sm"
|
|
||||||
onClick={() => navigate(-1)}
|
|
||||||
className="text-text-secondary hover:text-white"
|
|
||||||
>
|
|
||||||
<ArrowLeft className="h-4 w-4 mr-2" />
|
|
||||||
Back
|
|
||||||
</Button>
|
|
||||||
<div>
|
|
||||||
<h1 className="text-3xl font-black text-white leading-tight">User Profile</h1>
|
|
||||||
<p className="text-text-secondary text-sm mt-1">
|
|
||||||
{isOwnProfile ? 'Your profile information' : `Viewing profile for ${profileUser.username}`}
|
|
||||||
</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
{canEdit && (
|
|
||||||
<div className="flex gap-2">
|
|
||||||
{isEditing ? (
|
|
||||||
<>
|
|
||||||
<Button
|
|
||||||
variant="outline"
|
|
||||||
onClick={() => {
|
|
||||||
setIsEditing(false)
|
|
||||||
setEditForm({
|
|
||||||
email: profileUser.email || '',
|
|
||||||
full_name: profileUser.full_name || '',
|
|
||||||
})
|
|
||||||
}}
|
|
||||||
>
|
|
||||||
<X className="h-4 w-4 mr-2" />
|
|
||||||
Cancel
|
|
||||||
</Button>
|
|
||||||
<Button
|
|
||||||
onClick={handleSave}
|
|
||||||
disabled={updateMutation.isPending}
|
|
||||||
>
|
|
||||||
<Save className="h-4 w-4 mr-2" />
|
|
||||||
{updateMutation.isPending ? 'Saving...' : 'Save Changes'}
|
|
||||||
</Button>
|
|
||||||
</>
|
|
||||||
) : (
|
|
||||||
<Button onClick={() => setIsEditing(true)}>
|
|
||||||
<Edit2 className="h-4 w-4 mr-2" />
|
|
||||||
Edit Profile
|
|
||||||
</Button>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Profile Card */}
|
|
||||||
<div className="bg-card-dark border border-border-dark rounded-xl overflow-hidden">
|
|
||||||
{/* Profile Header */}
|
|
||||||
<div className="bg-gradient-to-r from-primary/20 to-blue-600/20 p-8 border-b border-border-dark">
|
|
||||||
<div className="flex items-center gap-6">
|
|
||||||
<div className="w-24 h-24 rounded-full bg-gradient-to-br from-blue-500 to-indigo-600 flex items-center justify-center text-white text-3xl font-bold">
|
|
||||||
{getAvatarInitials()}
|
|
||||||
</div>
|
|
||||||
<div className="flex-1">
|
|
||||||
<h2 className="text-2xl font-bold text-white">
|
|
||||||
{profileUser.full_name || profileUser.username}
|
|
||||||
</h2>
|
|
||||||
<p className="text-text-secondary mt-1">@{profileUser.username}</p>
|
|
||||||
<div className="flex items-center gap-4 mt-3">
|
|
||||||
<div className={`inline-flex items-center gap-2 px-3 py-1 rounded-full text-xs font-bold ${
|
|
||||||
profileUser.is_active
|
|
||||||
? 'bg-green-500/10 text-green-400 border border-green-500/20'
|
|
||||||
: 'bg-red-500/10 text-red-400 border border-red-500/20'
|
|
||||||
}`}>
|
|
||||||
<span className={`w-2 h-2 rounded-full ${profileUser.is_active ? 'bg-green-400' : 'bg-red-400'}`}></span>
|
|
||||||
{profileUser.is_active ? 'Active' : 'Inactive'}
|
|
||||||
</div>
|
|
||||||
{profileUser.is_system && (
|
|
||||||
<div className="inline-flex items-center gap-2 px-3 py-1 rounded-full text-xs font-bold bg-purple-500/10 text-purple-400 border border-purple-500/20">
|
|
||||||
<Shield size={12} />
|
|
||||||
System User
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Profile Content */}
|
|
||||||
<div className="p-8">
|
|
||||||
<div className="grid grid-cols-1 md:grid-cols-2 gap-6">
|
|
||||||
{/* Basic Information */}
|
|
||||||
<div className="space-y-6">
|
|
||||||
<div>
|
|
||||||
<h3 className="text-lg font-bold text-white mb-4 flex items-center gap-2">
|
|
||||||
<UserIcon className="h-5 w-5 text-primary" />
|
|
||||||
Basic Information
|
|
||||||
</h3>
|
|
||||||
<div className="space-y-4">
|
|
||||||
<div>
|
|
||||||
<label className="block text-xs font-bold text-text-secondary uppercase tracking-wider mb-2">
|
|
||||||
Username
|
|
||||||
</label>
|
|
||||||
<div className="bg-[#0f161d] border border-border-dark rounded-lg px-4 py-3 text-white font-mono">
|
|
||||||
{profileUser.username}
|
|
||||||
</div>
|
|
||||||
<p className="text-xs text-text-secondary mt-1">Username cannot be changed</p>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div>
|
|
||||||
<label className="block text-xs font-bold text-text-secondary uppercase tracking-wider mb-2">
|
|
||||||
Email Address
|
|
||||||
</label>
|
|
||||||
{isEditing ? (
|
|
||||||
<input
|
|
||||||
type="email"
|
|
||||||
value={editForm.email}
|
|
||||||
onChange={(e) => setEditForm({ ...editForm, email: e.target.value })}
|
|
||||||
className="w-full bg-[#0f161d] border border-border-dark rounded-lg px-4 py-3 text-white focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent"
|
|
||||||
placeholder="email@example.com"
|
|
||||||
/>
|
|
||||||
) : (
|
|
||||||
<div className="bg-[#0f161d] border border-border-dark rounded-lg px-4 py-3 text-white flex items-center gap-2">
|
|
||||||
<Mail className="h-4 w-4 text-text-secondary" />
|
|
||||||
{profileUser.email || '-'}
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div>
|
|
||||||
<label className="block text-xs font-bold text-text-secondary uppercase tracking-wider mb-2">
|
|
||||||
Full Name
|
|
||||||
</label>
|
|
||||||
{isEditing ? (
|
|
||||||
<input
|
|
||||||
type="text"
|
|
||||||
value={editForm.full_name}
|
|
||||||
onChange={(e) => setEditForm({ ...editForm, full_name: e.target.value })}
|
|
||||||
className="w-full bg-[#0f161d] border border-border-dark rounded-lg px-4 py-3 text-white focus:outline-none focus:ring-2 focus:ring-primary focus:border-transparent"
|
|
||||||
placeholder="Full Name"
|
|
||||||
/>
|
|
||||||
) : (
|
|
||||||
<div className="bg-[#0f161d] border border-border-dark rounded-lg px-4 py-3 text-white">
|
|
||||||
{profileUser.full_name || '-'}
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Account Details */}
|
|
||||||
<div className="space-y-6">
|
|
||||||
<div>
|
|
||||||
<h3 className="text-lg font-bold text-white mb-4 flex items-center gap-2">
|
|
||||||
<Shield className="h-5 w-5 text-primary" />
|
|
||||||
Account Details
|
|
||||||
</h3>
|
|
||||||
<div className="space-y-4">
|
|
||||||
<div>
|
|
||||||
<label className="block text-xs font-bold text-text-secondary uppercase tracking-wider mb-2">
|
|
||||||
Roles
|
|
||||||
</label>
|
|
||||||
<div className="bg-[#0f161d] border border-border-dark rounded-lg px-4 py-3">
|
|
||||||
{profileUser.roles && profileUser.roles.length > 0 ? (
|
|
||||||
<div className="flex flex-wrap gap-2">
|
|
||||||
{profileUser.roles.map((role) => (
|
|
||||||
<span
|
|
||||||
key={role}
|
|
||||||
className="inline-flex items-center gap-1.5 px-2.5 py-1 rounded-md bg-primary/10 text-primary text-xs font-medium border border-primary/20"
|
|
||||||
>
|
|
||||||
<Shield size={12} />
|
|
||||||
{role}
|
|
||||||
</span>
|
|
||||||
))}
|
|
||||||
</div>
|
|
||||||
) : (
|
|
||||||
<span className="text-text-secondary text-sm">No roles assigned</span>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div>
|
|
||||||
<label className="block text-xs font-bold text-text-secondary uppercase tracking-wider mb-2">
|
|
||||||
Permissions
|
|
||||||
</label>
|
|
||||||
<div className="bg-[#0f161d] border border-border-dark rounded-lg px-4 py-3">
|
|
||||||
{profileUser.permissions && profileUser.permissions.length > 0 ? (
|
|
||||||
<div className="flex flex-wrap gap-2">
|
|
||||||
{profileUser.permissions.map((perm) => (
|
|
||||||
<span
|
|
||||||
key={perm}
|
|
||||||
className="inline-flex items-center px-2 py-1 rounded-md bg-slate-700 text-slate-300 text-xs font-medium"
|
|
||||||
>
|
|
||||||
{perm}
|
|
||||||
</span>
|
|
||||||
))}
|
|
||||||
</div>
|
|
||||||
) : (
|
|
||||||
<span className="text-text-secondary text-sm">No permissions assigned</span>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div>
|
|
||||||
<label className="block text-xs font-bold text-text-secondary uppercase tracking-wider mb-2">
|
|
||||||
Last Login
|
|
||||||
</label>
|
|
||||||
<div className="bg-[#0f161d] border border-border-dark rounded-lg px-4 py-3 text-white flex items-center gap-2">
|
|
||||||
<Clock className="h-4 w-4 text-text-secondary" />
|
|
||||||
{formatLastLogin(profileUser.last_login_at)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div>
|
|
||||||
<label className="block text-xs font-bold text-text-secondary uppercase tracking-wider mb-2">
|
|
||||||
Account Created
|
|
||||||
</label>
|
|
||||||
<div className="bg-[#0f161d] border border-border-dark rounded-lg px-4 py-3 text-white flex items-center gap-2">
|
|
||||||
<Calendar className="h-4 w-4 text-text-secondary" />
|
|
||||||
{formatDate(profileUser.created_at)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -186,10 +186,8 @@ export default function StoragePage() {
|
|||||||
const { data: zfsPools = [], isLoading: poolsLoading } = useQuery({
|
const { data: zfsPools = [], isLoading: poolsLoading } = useQuery({
|
||||||
queryKey: ['storage', 'zfs', 'pools'],
|
queryKey: ['storage', 'zfs', 'pools'],
|
||||||
queryFn: zfsApi.listPools,
|
queryFn: zfsApi.listPools,
|
||||||
refetchInterval: 3000, // Auto-refresh every 3 seconds
|
refetchInterval: 2000, // Auto-refresh every 2 seconds
|
||||||
staleTime: 0, // Always consider data stale
|
staleTime: 0, // Always consider data stale
|
||||||
refetchOnWindowFocus: true,
|
|
||||||
refetchOnMount: true,
|
|
||||||
})
|
})
|
||||||
|
|
||||||
// Fetch ARC stats with auto-refresh every 2 seconds for live data
|
// Fetch ARC stats with auto-refresh every 2 seconds for live data
|
||||||
@@ -256,10 +254,8 @@ export default function StoragePage() {
|
|||||||
|
|
||||||
const deletePoolMutation = useMutation({
|
const deletePoolMutation = useMutation({
|
||||||
mutationFn: (poolId: string) => zfsApi.deletePool(poolId),
|
mutationFn: (poolId: string) => zfsApi.deletePool(poolId),
|
||||||
onSuccess: async () => {
|
onSuccess: () => {
|
||||||
// Invalidate and immediately refetch
|
queryClient.invalidateQueries({ queryKey: ['storage', 'zfs', 'pools'] })
|
||||||
await queryClient.invalidateQueries({ queryKey: ['storage', 'zfs', 'pools'] })
|
|
||||||
queryClient.refetchQueries({ queryKey: ['storage', 'zfs', 'pools'] })
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['storage', 'disks'] })
|
queryClient.invalidateQueries({ queryKey: ['storage', 'disks'] })
|
||||||
setSelectedPool(null)
|
setSelectedPool(null)
|
||||||
alert('Pool destroyed successfully!')
|
alert('Pool destroyed successfully!')
|
||||||
@@ -345,51 +341,20 @@ export default function StoragePage() {
|
|||||||
|
|
||||||
const healthyPools = allPools.filter((p) => {
|
const healthyPools = allPools.filter((p) => {
|
||||||
if ('health_status' in p) {
|
if ('health_status' in p) {
|
||||||
const health = (p as ZFSPool).health_status?.toLowerCase() || ''
|
return p.is_active && (p as ZFSPool).health_status === 'online'
|
||||||
return p.is_active && health === 'online'
|
|
||||||
}
|
}
|
||||||
return p.is_active
|
return p.is_active
|
||||||
}).length
|
}).length
|
||||||
const degradedPools = allPools.filter((p) => {
|
const degradedPools = allPools.filter((p) => {
|
||||||
if ('health_status' in p) {
|
if ('health_status' in p) {
|
||||||
const health = (p as ZFSPool).health_status?.toLowerCase() || ''
|
return !p.is_active || (p as ZFSPool).health_status !== 'online'
|
||||||
return !p.is_active || health !== 'online'
|
|
||||||
}
|
}
|
||||||
return !p.is_active
|
return !p.is_active
|
||||||
}).length
|
}).length
|
||||||
const healthStatus = degradedPools === 0 ? 'Optimal' : 'Degraded'
|
const healthStatus = degradedPools === 0 ? 'Optimal' : 'Degraded'
|
||||||
|
|
||||||
// Calculate efficiency ratio from ZFS pools
|
// Mock efficiency data (would come from backend)
|
||||||
// Efficiency = average compressratio across all active pools
|
const efficiencyRatio = 1.45
|
||||||
// Use actual compressratio from ZFS if available, otherwise estimate
|
|
||||||
const activeZFSPools = zfsPools.filter(p => p.is_active && p.health_status?.toLowerCase() === 'online')
|
|
||||||
const efficiencyRatio = activeZFSPools.length > 0
|
|
||||||
? activeZFSPools.reduce((sum, pool) => {
|
|
||||||
// Use actual compressratio from ZFS if available
|
|
||||||
if (pool.compress_ratio && pool.compress_ratio > 0) {
|
|
||||||
// Deduplication can add additional savings (typically 1.2-2x)
|
|
||||||
const dedupMultiplier = pool.deduplication ? 1.3 : 1.0
|
|
||||||
return sum + (pool.compress_ratio * dedupMultiplier)
|
|
||||||
}
|
|
||||||
// Fallback: estimate based on compression type
|
|
||||||
const compressionMultiplier: Record<string, number> = {
|
|
||||||
'lz4': 1.5,
|
|
||||||
'zstd': 2.5,
|
|
||||||
'gzip': 2.0,
|
|
||||||
'gzip-1': 1.8,
|
|
||||||
'gzip-9': 2.5,
|
|
||||||
'off': 1.0,
|
|
||||||
}
|
|
||||||
const baseRatio = compressionMultiplier[pool.compression?.toLowerCase() || 'lz4'] || 1.5
|
|
||||||
const dedupMultiplier = pool.deduplication ? 1.3 : 1.0
|
|
||||||
return sum + (baseRatio * dedupMultiplier)
|
|
||||||
}, 0) / activeZFSPools.length
|
|
||||||
: 1.0
|
|
||||||
|
|
||||||
// Get compression and deduplication status from pools
|
|
||||||
const hasCompression = activeZFSPools.some(p => p.compression && p.compression.toLowerCase() !== 'off')
|
|
||||||
const hasDedup = activeZFSPools.some(p => p.deduplication)
|
|
||||||
const compressionType = activeZFSPools.find(p => p.compression && p.compression.toLowerCase() !== 'off')?.compression?.toUpperCase() || 'LZ4'
|
|
||||||
// Use live ARC stats if available, otherwise fallback to 0
|
// Use live ARC stats if available, otherwise fallback to 0
|
||||||
const arcHitRatio = arcStats?.hit_ratio ?? 0
|
const arcHitRatio = arcStats?.hit_ratio ?? 0
|
||||||
const arcCacheUsage = arcStats?.cache_usage ?? 0
|
const arcCacheUsage = arcStats?.cache_usage ?? 0
|
||||||
@@ -513,21 +478,8 @@ export default function StoragePage() {
|
|||||||
<span className="text-xs text-white/70">Ratio</span>
|
<span className="text-xs text-white/70">Ratio</span>
|
||||||
</div>
|
</div>
|
||||||
<div className="flex gap-2 mt-3">
|
<div className="flex gap-2 mt-3">
|
||||||
{hasCompression && (
|
<span className="px-2 py-0.5 rounded bg-blue-500/10 text-blue-500 text-[10px] font-bold">LZ4</span>
|
||||||
<span className="px-2 py-0.5 rounded bg-blue-500/10 text-blue-500 text-[10px] font-bold">
|
<span className="px-2 py-0.5 rounded bg-purple-500/10 text-purple-500 text-[10px] font-bold">DEDUP ON</span>
|
||||||
{compressionType}
|
|
||||||
</span>
|
|
||||||
)}
|
|
||||||
{hasDedup && (
|
|
||||||
<span className="px-2 py-0.5 rounded bg-purple-500/10 text-purple-500 text-[10px] font-bold">
|
|
||||||
DEDUP ON
|
|
||||||
</span>
|
|
||||||
)}
|
|
||||||
{!hasCompression && !hasDedup && (
|
|
||||||
<span className="px-2 py-0.5 rounded bg-gray-500/10 text-gray-500 text-[10px] font-bold">
|
|
||||||
NO COMPRESSION
|
|
||||||
</span>
|
|
||||||
)}
|
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
@@ -606,7 +558,7 @@ export default function StoragePage() {
|
|||||||
|
|
||||||
// Check if it's a ZFS pool or LVM repository
|
// Check if it's a ZFS pool or LVM repository
|
||||||
const isZFSPool = 'raid_level' in pool
|
const isZFSPool = 'raid_level' in pool
|
||||||
const healthStatus = isZFSPool ? ((pool as ZFSPool).health_status?.toLowerCase() || 'online') : 'online'
|
const healthStatus = isZFSPool ? (pool as ZFSPool).health_status : 'online'
|
||||||
const isHealthy = pool.is_active && (healthStatus === 'online' || healthStatus === '')
|
const isHealthy = pool.is_active && (healthStatus === 'online' || healthStatus === '')
|
||||||
|
|
||||||
const statusColor = isHealthy
|
const statusColor = isHealthy
|
||||||
@@ -857,11 +809,11 @@ export default function StoragePage() {
|
|||||||
<div className="flex items-center gap-3 mb-3">
|
<div className="flex items-center gap-3 mb-3">
|
||||||
<span className="material-symbols-outlined text-primary text-[20px]">info</span>
|
<span className="material-symbols-outlined text-primary text-[20px]">info</span>
|
||||||
<span className="text-sm font-bold text-primary">
|
<span className="text-sm font-bold text-primary">
|
||||||
{selectedPool.is_active && selectedPool.health_status?.toLowerCase() === 'online' ? 'Healthy' : 'Degraded'}
|
{selectedPool.is_active && selectedPool.health_status === 'online' ? 'Healthy' : 'Degraded'}
|
||||||
</span>
|
</span>
|
||||||
</div>
|
</div>
|
||||||
<p className="text-xs text-white/90 leading-relaxed mb-3">
|
<p className="text-xs text-white/90 leading-relaxed mb-3">
|
||||||
{selectedPool.is_active && selectedPool.health_status?.toLowerCase() === 'online'
|
{selectedPool.is_active && selectedPool.health_status === 'online'
|
||||||
? 'This pool is operating normally.'
|
? 'This pool is operating normally.'
|
||||||
: 'This pool has issues and requires attention.'}
|
: 'This pool has issues and requires attention.'}
|
||||||
</p>
|
</p>
|
||||||
|
|||||||
@@ -1,840 +0,0 @@
|
|||||||
import { useState, useRef, useEffect } from 'react'
|
|
||||||
import { Link } from 'react-router-dom'
|
|
||||||
import { useQuery, useMutation, useQueryClient } from '@tanstack/react-query'
|
|
||||||
import { systemAPI, NetworkInterface } from '@/api/system'
|
|
||||||
|
|
||||||
export default function System() {
|
|
||||||
const [snmpEnabled, setSnmpEnabled] = useState(false)
|
|
||||||
const [openMenu, setOpenMenu] = useState<string | null>(null)
|
|
||||||
const [editingInterface, setEditingInterface] = useState<NetworkInterface | null>(null)
|
|
||||||
const [viewingInterface, setViewingInterface] = useState<NetworkInterface | null>(null)
|
|
||||||
const [timezone, setTimezone] = useState('Etc/UTC')
|
|
||||||
const [ntpServers, setNtpServers] = useState<string[]>(['pool.ntp.org', 'time.google.com'])
|
|
||||||
const [showAddNtpServer, setShowAddNtpServer] = useState(false)
|
|
||||||
const [newNtpServer, setNewNtpServer] = useState('')
|
|
||||||
const menuRef = useRef<HTMLDivElement>(null)
|
|
||||||
|
|
||||||
const queryClient = useQueryClient()
|
|
||||||
|
|
||||||
// Save NTP settings mutation
|
|
||||||
const saveNTPSettingsMutation = useMutation({
|
|
||||||
mutationFn: (data: { timezone: string; ntp_servers: string[] }) => systemAPI.saveNTPSettings(data),
|
|
||||||
onSuccess: () => {
|
|
||||||
// Refetch NTP settings to get the updated values
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['system', 'ntp'] })
|
|
||||||
// Show success message (you can add a toast notification here)
|
|
||||||
alert('NTP settings saved successfully!')
|
|
||||||
},
|
|
||||||
onError: (error: any) => {
|
|
||||||
alert(`Failed to save NTP settings: ${error.message || 'Unknown error'}`)
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
// Fetch network interfaces
|
|
||||||
const { data: interfaces = [], isLoading: interfacesLoading } = useQuery({
|
|
||||||
queryKey: ['system', 'interfaces'],
|
|
||||||
queryFn: () => systemAPI.listNetworkInterfaces(),
|
|
||||||
refetchInterval: 5000, // Refresh every 5 seconds
|
|
||||||
})
|
|
||||||
|
|
||||||
// Fetch services
|
|
||||||
const { data: services = [], isLoading: servicesLoading } = useQuery({
|
|
||||||
queryKey: ['system', 'services'],
|
|
||||||
queryFn: () => systemAPI.listServices(),
|
|
||||||
refetchInterval: 5000, // Refresh every 5 seconds
|
|
||||||
})
|
|
||||||
|
|
||||||
|
|
||||||
// Fetch NTP settings on mount
|
|
||||||
const { data: ntpSettings } = useQuery({
|
|
||||||
queryKey: ['system', 'ntp'],
|
|
||||||
queryFn: () => systemAPI.getNTPSettings(),
|
|
||||||
})
|
|
||||||
|
|
||||||
// Update state when NTP settings are loaded
|
|
||||||
useEffect(() => {
|
|
||||||
if (ntpSettings) {
|
|
||||||
setTimezone(ntpSettings.timezone)
|
|
||||||
setNtpServers(ntpSettings.ntp_servers)
|
|
||||||
}
|
|
||||||
}, [ntpSettings])
|
|
||||||
|
|
||||||
// Close menu when clicking outside
|
|
||||||
useEffect(() => {
|
|
||||||
const handleClickOutside = (event: MouseEvent) => {
|
|
||||||
if (menuRef.current && !menuRef.current.contains(event.target as Node)) {
|
|
||||||
setOpenMenu(null)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
document.addEventListener('mousedown', handleClickOutside)
|
|
||||||
return () => document.removeEventListener('mousedown', handleClickOutside)
|
|
||||||
}, [])
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div className="flex-1 flex flex-col h-full overflow-hidden relative bg-background-dark">
|
|
||||||
{/* Top Navigation */}
|
|
||||||
<header className="flex h-16 items-center justify-between border-b border-border-dark bg-background-dark px-6 lg:px-10 shrink-0 z-10">
|
|
||||||
<div className="flex items-center gap-4">
|
|
||||||
{/* Breadcrumbs */}
|
|
||||||
<div className="flex items-center gap-2 text-sm">
|
|
||||||
<Link to="/" className="text-text-secondary hover:text-white transition-colors">
|
|
||||||
System
|
|
||||||
</Link>
|
|
||||||
<span className="text-text-secondary">/</span>
|
|
||||||
<span className="text-white font-medium">Configuration</span>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div className="flex items-center gap-4">
|
|
||||||
<div className="hidden md:flex items-center gap-2 px-3 py-1.5 rounded-full bg-green-500/10 border border-green-500/20">
|
|
||||||
<div className="h-2 w-2 rounded-full bg-green-500 animate-pulse"></div>
|
|
||||||
<span className="text-xs font-medium text-green-500">System Healthy</span>
|
|
||||||
</div>
|
|
||||||
<div className="h-6 w-px bg-border-dark mx-2"></div>
|
|
||||||
<button className="flex items-center justify-center gap-2 rounded-lg bg-border-dark px-4 py-2 text-sm font-bold text-white hover:bg-[#2f455a] transition-colors">
|
|
||||||
<span className="material-symbols-outlined text-[18px]">restart_alt</span>
|
|
||||||
<span className="hidden sm:inline">Reboot</span>
|
|
||||||
</button>
|
|
||||||
<button className="flex items-center justify-center gap-2 rounded-lg bg-red-500/10 px-4 py-2 text-sm font-bold text-red-500 hover:bg-red-500/20 transition-colors border border-red-500/20">
|
|
||||||
<span className="material-symbols-outlined text-[18px]">power_settings_new</span>
|
|
||||||
<span className="hidden sm:inline">Shutdown</span>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</header>
|
|
||||||
|
|
||||||
{/* Scrollable Content */}
|
|
||||||
<div className="flex-1 overflow-y-auto p-4 md:p-8 lg:px-12 scroll-smooth">
|
|
||||||
<div className="mx-auto max-w-7xl">
|
|
||||||
{/* Page Header */}
|
|
||||||
<div className="mb-8 flex flex-col gap-4 sm:flex-row sm:items-center sm:justify-between">
|
|
||||||
<div>
|
|
||||||
<h1 className="text-3xl font-bold tracking-tight text-white mb-2">System Configuration</h1>
|
|
||||||
<p className="text-text-secondary text-sm max-w-2xl">
|
|
||||||
Manage network interfaces, time synchronization, service states, and remote management protocols.
|
|
||||||
</p>
|
|
||||||
</div>
|
|
||||||
<button className="flex items-center justify-center gap-2 rounded-lg bg-primary px-5 py-2.5 text-sm font-bold text-white hover:bg-blue-600 transition-all shadow-lg shadow-blue-500/20">
|
|
||||||
<span className="material-symbols-outlined text-[20px]">save</span>
|
|
||||||
Save Changes
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Grid Layout */}
|
|
||||||
<div className="grid grid-cols-1 gap-6 xl:grid-cols-2">
|
|
||||||
{/* Network Card */}
|
|
||||||
<div className="flex flex-col rounded-xl border border-border-dark bg-card-dark shadow-sm">
|
|
||||||
<div className="flex items-center justify-between border-b border-border-dark px-6 py-4">
|
|
||||||
<div className="flex items-center gap-3">
|
|
||||||
<span className="material-symbols-outlined text-primary">lan</span>
|
|
||||||
<h2 className="text-lg font-bold text-white">Network Interfaces</h2>
|
|
||||||
</div>
|
|
||||||
<button className="text-xs font-bold text-primary hover:text-blue-400">CONFIGURE DNS</button>
|
|
||||||
</div>
|
|
||||||
<div className="p-2">
|
|
||||||
{interfacesLoading ? (
|
|
||||||
<div className="flex items-center justify-center py-8">
|
|
||||||
<span className="text-text-secondary">Loading interfaces...</span>
|
|
||||||
</div>
|
|
||||||
) : interfaces.length === 0 ? (
|
|
||||||
<div className="flex items-center justify-center py-8">
|
|
||||||
<span className="text-text-secondary">No network interfaces found</span>
|
|
||||||
</div>
|
|
||||||
) : (
|
|
||||||
interfaces.map((iface: NetworkInterface) => {
|
|
||||||
const isConnected = iface.status === 'Connected'
|
|
||||||
const roleBgColor = iface.role === 'ISCSI' ? 'bg-purple-500/20' : 'bg-primary/20'
|
|
||||||
const roleTextColor = iface.role === 'ISCSI' ? 'text-purple-400' : 'text-primary'
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div
|
|
||||||
key={iface.name}
|
|
||||||
className={`group flex items-center justify-between rounded-lg p-3 hover:bg-border-dark/50 transition-colors ${!isConnected ? 'opacity-70' : ''}`}
|
|
||||||
>
|
|
||||||
<div className="flex items-center gap-4">
|
|
||||||
<div className={`flex h-10 w-10 items-center justify-center rounded-lg bg-border-dark ${isConnected ? 'text-white' : 'text-text-secondary'}`}>
|
|
||||||
<span className="material-symbols-outlined">settings_ethernet</span>
|
|
||||||
</div>
|
|
||||||
<div>
|
|
||||||
<div className="flex items-center gap-2">
|
|
||||||
<p className={`font-bold ${isConnected ? 'text-white' : 'text-text-secondary'}`}>{iface.name}</p>
|
|
||||||
{iface.role && (
|
|
||||||
<span className={`rounded ${roleBgColor} px-1.5 py-0.5 text-[10px] font-bold ${roleTextColor} uppercase`}>
|
|
||||||
{iface.role}
|
|
||||||
</span>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
{iface.ip_address ? (
|
|
||||||
<p className="font-mono text-xs text-text-secondary">
|
|
||||||
{iface.ip_address} <span className="opacity-50 mx-1">/</span> {iface.subnet}
|
|
||||||
</p>
|
|
||||||
) : (
|
|
||||||
<p className="font-mono text-xs text-text-secondary">No Carrier</p>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div className="flex items-center gap-4">
|
|
||||||
<div className="hidden sm:flex flex-col items-end">
|
|
||||||
{isConnected ? (
|
|
||||||
<>
|
|
||||||
<div className="flex items-center gap-1.5">
|
|
||||||
<div className="h-2 w-2 rounded-full bg-green-500"></div>
|
|
||||||
<span className="text-xs font-medium text-white">Connected</span>
|
|
||||||
</div>
|
|
||||||
{iface.speed && iface.speed !== 'Unknown' && (
|
|
||||||
<span className="text-xs text-text-secondary">{iface.speed}</span>
|
|
||||||
)}
|
|
||||||
</>
|
|
||||||
) : (
|
|
||||||
<div className="flex items-center gap-1.5">
|
|
||||||
<div className="h-2 w-2 rounded-full bg-red-500"></div>
|
|
||||||
<span className="text-xs font-medium text-red-500">Down</span>
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
<div className="relative" ref={menuRef}>
|
|
||||||
<button
|
|
||||||
onClick={() => setOpenMenu(openMenu === iface.name ? null : iface.name)}
|
|
||||||
className="h-8 w-8 rounded-full hover:bg-border-dark flex items-center justify-center text-text-secondary hover:text-white transition-colors"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined">more_vert</span>
|
|
||||||
</button>
|
|
||||||
{openMenu === iface.name && (
|
|
||||||
<div className="absolute right-0 mt-1 w-48 rounded-lg border border-border-dark bg-card-dark shadow-lg z-50">
|
|
||||||
<button
|
|
||||||
onClick={() => {
|
|
||||||
setEditingInterface(iface)
|
|
||||||
setOpenMenu(null)
|
|
||||||
}}
|
|
||||||
className="w-full flex items-center gap-2 px-4 py-2.5 text-sm text-white hover:bg-border-dark transition-colors first:rounded-t-lg"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[18px]">edit</span>
|
|
||||||
<span>Edit Connection</span>
|
|
||||||
</button>
|
|
||||||
<button
|
|
||||||
onClick={() => {
|
|
||||||
setViewingInterface(iface)
|
|
||||||
setOpenMenu(null)
|
|
||||||
}}
|
|
||||||
className="w-full flex items-center gap-2 px-4 py-2.5 text-sm text-white hover:bg-border-dark transition-colors"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[18px]">info</span>
|
|
||||||
<span>View Details</span>
|
|
||||||
</button>
|
|
||||||
<div className="border-t border-border-dark"></div>
|
|
||||||
<button
|
|
||||||
onClick={() => {
|
|
||||||
// TODO: Implement disable/enable
|
|
||||||
setOpenMenu(null)
|
|
||||||
}}
|
|
||||||
className="w-full flex items-center gap-2 px-4 py-2.5 text-sm text-white hover:bg-border-dark transition-colors last:rounded-b-lg"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[18px]">
|
|
||||||
{isConnected ? 'toggle_on' : 'toggle_off'}
|
|
||||||
</span>
|
|
||||||
<span>{isConnected ? 'Disable' : 'Enable'}</span>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Services Card */}
|
|
||||||
<div className="flex flex-col rounded-xl border border-border-dark bg-card-dark shadow-sm">
|
|
||||||
<div className="flex items-center justify-between border-b border-border-dark px-6 py-4">
|
|
||||||
<div className="flex items-center gap-3">
|
|
||||||
<span className="material-symbols-outlined text-primary">memory</span>
|
|
||||||
<h2 className="text-lg font-bold text-white">Service Control</h2>
|
|
||||||
</div>
|
|
||||||
<div className="flex items-center gap-2">
|
|
||||||
<span className="h-2 w-2 rounded-full bg-green-500"></span>
|
|
||||||
<span className="text-xs text-text-secondary">All Systems Normal</span>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div className="p-4 flex flex-col gap-1">
|
|
||||||
{servicesLoading ? (
|
|
||||||
<div className="flex items-center justify-center py-8">
|
|
||||||
<span className="text-text-secondary">Loading services...</span>
|
|
||||||
</div>
|
|
||||||
) : (
|
|
||||||
// Service configs to display - map backend service names to display configs
|
|
||||||
[
|
|
||||||
{ key: 'ssh', serviceNames: ['ssh', 'sshd'], displayName: 'SSH Service', description: 'Remote command line access', icon: 'terminal' },
|
|
||||||
{ key: 'smb', serviceNames: ['smbd', 'samba', 'smb'], displayName: 'SMB / CIFS', description: 'Windows file sharing', icon: 'folder_shared' },
|
|
||||||
{ key: 'iscsi', serviceNames: ['iscsi-scst', 'iscsi', 'scst'], displayName: 'iSCSI Target', description: 'Block storage sharing', icon: 'storage' },
|
|
||||||
{ key: 'nfs', serviceNames: ['nfs-server', 'nfs', 'nfsd'], displayName: 'NFS Service', description: 'Unix file sharing', icon: 'share' },
|
|
||||||
{ key: 'vtl', serviceNames: ['mhvtl', 'vtl'], displayName: 'VTL Service', description: 'Virtual tape library emulation', icon: 'album' },
|
|
||||||
].map((config) => {
|
|
||||||
const service = services.find(s => {
|
|
||||||
const serviceNameLower = s.name.toLowerCase()
|
|
||||||
return config.serviceNames.some(name => serviceNameLower.includes(name.toLowerCase()) || name.toLowerCase().includes(serviceNameLower))
|
|
||||||
})
|
|
||||||
const isActive = service?.active_state === 'active'
|
|
||||||
const status = isActive ? 'RUNNING' : 'STOPPED'
|
|
||||||
const statusColor = isActive ? 'bg-green-500/20 text-green-500 border-green-500/20' : 'bg-yellow-500/20 text-yellow-500 border-yellow-500/20'
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div key={config.key} className="flex items-center justify-between rounded-lg bg-[#111a22] p-3 border border-transparent hover:border-border-dark transition-colors">
|
|
||||||
<div className="flex items-center gap-3">
|
|
||||||
<div className="p-2 rounded bg-border-dark/50 text-white">
|
|
||||||
<span className="material-symbols-outlined text-[20px]">{config.icon}</span>
|
|
||||||
</div>
|
|
||||||
<div>
|
|
||||||
<p className="text-sm font-bold text-white">{config.displayName}</p>
|
|
||||||
<p className="text-xs text-text-secondary">{config.description}</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div className="flex items-center gap-4">
|
|
||||||
<span className={`px-2 py-0.5 rounded text-[10px] font-bold border ${statusColor}`}>{status}</span>
|
|
||||||
<label className="relative inline-block w-10 h-5 mr-2 align-middle select-none cursor-pointer">
|
|
||||||
<input
|
|
||||||
checked={isActive}
|
|
||||||
onChange={() => {
|
|
||||||
if (service) {
|
|
||||||
systemAPI.restartService(service.name).then(() => {
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['system', 'services'] })
|
|
||||||
}).catch((err) => {
|
|
||||||
alert(`Failed to ${isActive ? 'stop' : 'start'} service: ${err.message || 'Unknown error'}`)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}}
|
|
||||||
className="sr-only peer"
|
|
||||||
id={`${config.key}-toggle`}
|
|
||||||
name="toggle"
|
|
||||||
type="checkbox"
|
|
||||||
/>
|
|
||||||
<span className="absolute inset-0 rounded-full bg-border-dark transition-colors duration-300 peer-checked:bg-primary/20"></span>
|
|
||||||
<span className="absolute left-0.5 top-0.5 h-4 w-4 rounded-full bg-white transition-transform duration-300 peer-checked:translate-x-5"></span>
|
|
||||||
</label>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Date & Time Card */}
|
|
||||||
<div className="flex flex-col rounded-xl border border-border-dark bg-card-dark shadow-sm">
|
|
||||||
<div className="flex items-center justify-between border-b border-border-dark px-6 py-4">
|
|
||||||
<div className="flex items-center gap-3">
|
|
||||||
<span className="material-symbols-outlined text-primary">schedule</span>
|
|
||||||
<h2 className="text-lg font-bold text-white">Date & Time</h2>
|
|
||||||
</div>
|
|
||||||
<button
|
|
||||||
onClick={() => {
|
|
||||||
saveNTPSettingsMutation.mutate({
|
|
||||||
timezone,
|
|
||||||
ntp_servers: ntpServers,
|
|
||||||
})
|
|
||||||
}}
|
|
||||||
disabled={saveNTPSettingsMutation.isPending}
|
|
||||||
className="flex items-center justify-center gap-2 rounded-lg bg-primary px-4 py-2 text-sm font-bold text-white hover:bg-blue-600 transition-all disabled:opacity-50 disabled:cursor-not-allowed"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[16px]">save</span>
|
|
||||||
{saveNTPSettingsMutation.isPending ? 'Saving...' : 'Save'}
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
<div className="p-6 flex flex-col gap-6">
|
|
||||||
<div className="grid grid-cols-2 gap-4">
|
|
||||||
<div className="col-span-2">
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">System Timezone</label>
|
|
||||||
<div className="relative">
|
|
||||||
<select
|
|
||||||
value={timezone}
|
|
||||||
onChange={(e) => setTimezone(e.target.value)}
|
|
||||||
className="block w-full rounded-lg border-border-dark bg-[#111a22] py-2.5 pl-3 pr-10 text-sm text-white focus:border-primary focus:ring-1 focus:ring-primary appearance-none"
|
|
||||||
>
|
|
||||||
<option>Etc/UTC</option>
|
|
||||||
<option>Asia/Jakarta</option>
|
|
||||||
<option>Asia/Singapore</option>
|
|
||||||
<option>Asia/Bangkok</option>
|
|
||||||
<option>Asia/Manila</option>
|
|
||||||
<option>Asia/Tokyo</option>
|
|
||||||
<option>Asia/Shanghai</option>
|
|
||||||
<option>Asia/Hong_Kong</option>
|
|
||||||
<option>Europe/London</option>
|
|
||||||
<option>Europe/Paris</option>
|
|
||||||
<option>America/New_York</option>
|
|
||||||
<option>America/Los_Angeles</option>
|
|
||||||
</select>
|
|
||||||
<div className="pointer-events-none absolute inset-y-0 right-0 flex items-center px-2 text-white">
|
|
||||||
<span className="material-symbols-outlined text-sm">expand_more</span>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div>
|
|
||||||
<div className="flex items-center justify-between mb-2">
|
|
||||||
<label className="block text-xs font-medium text-text-secondary uppercase">NTP Servers</label>
|
|
||||||
<button
|
|
||||||
onClick={() => setShowAddNtpServer(true)}
|
|
||||||
className="text-xs text-primary font-bold hover:text-white flex items-center gap-1"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[14px]">add</span> Add Server
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
<div className="flex flex-col gap-2">
|
|
||||||
{showAddNtpServer && (
|
|
||||||
<div className="flex items-center gap-2 rounded-lg bg-[#111a22] p-3 border border-border-dark">
|
|
||||||
<input
|
|
||||||
type="text"
|
|
||||||
value={newNtpServer}
|
|
||||||
onChange={(e) => setNewNtpServer(e.target.value)}
|
|
||||||
onKeyDown={(e) => {
|
|
||||||
if (e.key === 'Enter' && newNtpServer.trim()) {
|
|
||||||
if (!ntpServers.includes(newNtpServer.trim())) {
|
|
||||||
setNtpServers([...ntpServers, newNtpServer.trim()])
|
|
||||||
setNewNtpServer('')
|
|
||||||
setShowAddNtpServer(false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (e.key === 'Escape') {
|
|
||||||
setNewNtpServer('')
|
|
||||||
setShowAddNtpServer(false)
|
|
||||||
}
|
|
||||||
}}
|
|
||||||
placeholder="Enter NTP server address (e.g., 0.pool.ntp.org)"
|
|
||||||
className="flex-1 bg-transparent text-sm text-white placeholder-gray-500 focus:outline-none"
|
|
||||||
autoFocus
|
|
||||||
/>
|
|
||||||
<button
|
|
||||||
onClick={() => {
|
|
||||||
if (newNtpServer.trim() && !ntpServers.includes(newNtpServer.trim())) {
|
|
||||||
setNtpServers([...ntpServers, newNtpServer.trim()])
|
|
||||||
setNewNtpServer('')
|
|
||||||
setShowAddNtpServer(false)
|
|
||||||
}
|
|
||||||
}}
|
|
||||||
className="text-green-500 hover:text-green-400"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[16px]">check</span>
|
|
||||||
</button>
|
|
||||||
<button
|
|
||||||
onClick={() => {
|
|
||||||
setNewNtpServer('')
|
|
||||||
setShowAddNtpServer(false)
|
|
||||||
}}
|
|
||||||
className="text-red-500 hover:text-red-400"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[16px]">close</span>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
{ntpServers.map((server, index) => (
|
|
||||||
<div key={index} className="flex items-center justify-between rounded-lg bg-[#111a22] p-3 border border-border-dark">
|
|
||||||
<div className="flex items-center gap-3">
|
|
||||||
<div className="h-2 w-2 rounded-full bg-green-500 animate-pulse"></div>
|
|
||||||
<span className="text-sm font-mono text-white">{server}</span>
|
|
||||||
</div>
|
|
||||||
<div className="flex items-center gap-2">
|
|
||||||
<span className="text-xs text-text-secondary">Stratum 2 • 12ms</span>
|
|
||||||
<button
|
|
||||||
onClick={() => {
|
|
||||||
setNtpServers(ntpServers.filter((_, i) => i !== index))
|
|
||||||
}}
|
|
||||||
className="text-red-500 hover:text-red-400"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[16px]">delete</span>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
))}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Management & SNMP Card */}
|
|
||||||
<div className="flex flex-col rounded-xl border border-border-dark bg-card-dark shadow-sm">
|
|
||||||
<div className="flex items-center justify-between border-b border-border-dark px-6 py-4">
|
|
||||||
<div className="flex items-center gap-3">
|
|
||||||
<span className="material-symbols-outlined text-primary">hub</span>
|
|
||||||
<h2 className="text-lg font-bold text-white">Management</h2>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div className="p-6 flex flex-col gap-6">
|
|
||||||
<div>
|
|
||||||
<div className="flex items-center justify-between mb-4">
|
|
||||||
<div>
|
|
||||||
<h3 className="text-sm font-bold text-white">SNMP Monitoring</h3>
|
|
||||||
<p className="text-xs text-text-secondary">Enable Simple Network Management Protocol</p>
|
|
||||||
</div>
|
|
||||||
<label className="relative inline-block w-10 h-5 align-middle select-none cursor-pointer">
|
|
||||||
<input
|
|
||||||
checked={snmpEnabled}
|
|
||||||
onChange={(e) => setSnmpEnabled(e.target.checked)}
|
|
||||||
className="sr-only peer"
|
|
||||||
id="snmp-toggle"
|
|
||||||
name="toggle"
|
|
||||||
type="checkbox"
|
|
||||||
/>
|
|
||||||
<span className="absolute inset-0 rounded-full bg-border-dark transition-colors duration-300 peer-checked:bg-primary/20"></span>
|
|
||||||
<span className="absolute left-0.5 top-0.5 h-4 w-4 rounded-full bg-white transition-transform duration-300 peer-checked:translate-x-5"></span>
|
|
||||||
</label>
|
|
||||||
</div>
|
|
||||||
<div className={`grid grid-cols-1 gap-4 transition-opacity ${snmpEnabled ? 'opacity-100' : 'opacity-50 pointer-events-none'}`}>
|
|
||||||
<div>
|
|
||||||
<label className="mb-1.5 block text-xs font-medium text-text-secondary uppercase">Community String</label>
|
|
||||||
<input
|
|
||||||
className="block w-full rounded-lg border-border-dark bg-[#111a22] p-2.5 text-sm text-white placeholder-gray-500 focus:border-primary focus:ring-1 focus:ring-primary"
|
|
||||||
placeholder="e.g. public"
|
|
||||||
type="text"
|
|
||||||
defaultValue="public"
|
|
||||||
disabled={!snmpEnabled}
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
<div>
|
|
||||||
<label className="mb-1.5 block text-xs font-medium text-text-secondary uppercase">Trap Receiver IP</label>
|
|
||||||
<input
|
|
||||||
className="block w-full rounded-lg border-border-dark bg-[#111a22] p-2.5 text-sm text-white placeholder-gray-500 focus:border-primary focus:ring-1 focus:ring-primary"
|
|
||||||
placeholder="e.g. 192.168.1.100"
|
|
||||||
type="text"
|
|
||||||
disabled={!snmpEnabled}
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<div className="border-t border-border-dark pt-4">
|
|
||||||
<h3 className="text-sm font-bold text-white mb-3">Syslog Forwarding</h3>
|
|
||||||
<div className="flex gap-2">
|
|
||||||
<input
|
|
||||||
className="flex-1 rounded-lg border-border-dark bg-[#111a22] p-2.5 text-sm text-white placeholder-gray-500 focus:border-primary focus:ring-1 focus:ring-primary"
|
|
||||||
placeholder="Syslog Server Address (UDP:514)"
|
|
||||||
type="text"
|
|
||||||
/>
|
|
||||||
<button className="rounded-lg bg-border-dark px-4 py-2 text-sm font-bold text-white hover:bg-[#2f455a] transition-colors">
|
|
||||||
Test
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Bottom Spacer */}
|
|
||||||
<div className="h-10"></div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Edit Connection Modal */}
|
|
||||||
{editingInterface && (
|
|
||||||
<EditConnectionModal
|
|
||||||
interface={editingInterface}
|
|
||||||
onClose={() => setEditingInterface(null)}
|
|
||||||
/>
|
|
||||||
)}
|
|
||||||
|
|
||||||
{/* View Details Modal */}
|
|
||||||
{viewingInterface && (
|
|
||||||
<ViewDetailsModal
|
|
||||||
interface={viewingInterface}
|
|
||||||
onClose={() => setViewingInterface(null)}
|
|
||||||
/>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Edit Connection Modal Component
|
|
||||||
interface EditConnectionModalProps {
|
|
||||||
interface: NetworkInterface
|
|
||||||
onClose: () => void
|
|
||||||
}
|
|
||||||
|
|
||||||
function EditConnectionModal({ interface: iface, onClose }: EditConnectionModalProps) {
|
|
||||||
const queryClient = useQueryClient()
|
|
||||||
const [formData, setFormData] = useState({
|
|
||||||
ip_address: iface.ip_address || '',
|
|
||||||
subnet: iface.subnet || '24',
|
|
||||||
gateway: iface.gateway || '',
|
|
||||||
dns1: iface.dns1 || '',
|
|
||||||
dns2: iface.dns2 || '',
|
|
||||||
role: iface.role || '',
|
|
||||||
})
|
|
||||||
|
|
||||||
const updateMutation = useMutation({
|
|
||||||
mutationFn: (data: { ip_address: string; subnet: string; gateway?: string; dns1?: string; dns2?: string; role?: string }) =>
|
|
||||||
systemAPI.updateNetworkInterface(iface.name, data),
|
|
||||||
onSuccess: () => {
|
|
||||||
queryClient.invalidateQueries({ queryKey: ['system', 'interfaces'] })
|
|
||||||
onClose()
|
|
||||||
},
|
|
||||||
onError: (error: any) => {
|
|
||||||
alert(`Failed to update interface: ${error.response?.data?.error || error.message}`)
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|
||||||
const handleSubmit = (e: React.FormEvent) => {
|
|
||||||
e.preventDefault()
|
|
||||||
updateMutation.mutate({
|
|
||||||
ip_address: formData.ip_address,
|
|
||||||
subnet: formData.subnet,
|
|
||||||
gateway: formData.gateway || undefined,
|
|
||||||
dns1: formData.dns1 || undefined,
|
|
||||||
dns2: formData.dns2 || undefined,
|
|
||||||
role: formData.role || undefined,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div className="fixed inset-0 z-50 flex items-center justify-center bg-black/50 backdrop-blur-sm">
|
|
||||||
<div className="w-full max-w-2xl rounded-xl border border-border-dark bg-card-dark shadow-xl">
|
|
||||||
<div className="flex items-center justify-between border-b border-border-dark px-6 py-4">
|
|
||||||
<div className="flex items-center gap-3">
|
|
||||||
<span className="material-symbols-outlined text-primary">settings_ethernet</span>
|
|
||||||
<h2 className="text-lg font-bold text-white">Edit Connection - {iface.name}</h2>
|
|
||||||
</div>
|
|
||||||
<button
|
|
||||||
onClick={onClose}
|
|
||||||
className="h-8 w-8 rounded-full hover:bg-border-dark flex items-center justify-center text-text-secondary hover:text-white transition-colors"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[20px]">close</span>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<form onSubmit={handleSubmit} className="p-6">
|
|
||||||
<div className="space-y-4">
|
|
||||||
{/* IP Address */}
|
|
||||||
<div>
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">
|
|
||||||
IP Address
|
|
||||||
</label>
|
|
||||||
<input
|
|
||||||
type="text"
|
|
||||||
value={formData.ip_address}
|
|
||||||
onChange={(e) => setFormData({ ...formData, ip_address: e.target.value })}
|
|
||||||
className="block w-full rounded-lg border-border-dark bg-[#111a22] p-2.5 text-sm text-white placeholder-gray-500 focus:border-primary focus:ring-1 focus:ring-primary"
|
|
||||||
placeholder="192.168.1.100"
|
|
||||||
required
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Subnet Mask */}
|
|
||||||
<div>
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">
|
|
||||||
Subnet Mask (CIDR)
|
|
||||||
</label>
|
|
||||||
<input
|
|
||||||
type="text"
|
|
||||||
value={formData.subnet}
|
|
||||||
onChange={(e) => setFormData({ ...formData, subnet: e.target.value })}
|
|
||||||
className="block w-full rounded-lg border-border-dark bg-[#111a22] p-2.5 text-sm text-white placeholder-gray-500 focus:border-primary focus:ring-1 focus:ring-primary"
|
|
||||||
placeholder="24"
|
|
||||||
required
|
|
||||||
/>
|
|
||||||
<p className="mt-1 text-xs text-text-secondary">Enter CIDR notation (e.g., 24 for 255.255.255.0)</p>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Gateway */}
|
|
||||||
<div>
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">
|
|
||||||
Default Gateway
|
|
||||||
</label>
|
|
||||||
<input
|
|
||||||
type="text"
|
|
||||||
value={formData.gateway}
|
|
||||||
onChange={(e) => setFormData({ ...formData, gateway: e.target.value })}
|
|
||||||
className="block w-full rounded-lg border-border-dark bg-[#111a22] p-2.5 text-sm text-white placeholder-gray-500 focus:border-primary focus:ring-1 focus:ring-primary"
|
|
||||||
placeholder="192.168.1.1"
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* DNS Servers */}
|
|
||||||
<div className="grid grid-cols-2 gap-4">
|
|
||||||
<div>
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">
|
|
||||||
Primary DNS
|
|
||||||
</label>
|
|
||||||
<input
|
|
||||||
type="text"
|
|
||||||
value={formData.dns1}
|
|
||||||
onChange={(e) => setFormData({ ...formData, dns1: e.target.value })}
|
|
||||||
className="block w-full rounded-lg border-border-dark bg-[#111a22] p-2.5 text-sm text-white placeholder-gray-500 focus:border-primary focus:ring-1 focus:ring-primary"
|
|
||||||
placeholder="8.8.8.8"
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
<div>
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">
|
|
||||||
Secondary DNS
|
|
||||||
</label>
|
|
||||||
<input
|
|
||||||
type="text"
|
|
||||||
value={formData.dns2}
|
|
||||||
onChange={(e) => setFormData({ ...formData, dns2: e.target.value })}
|
|
||||||
className="block w-full rounded-lg border-border-dark bg-[#111a22] p-2.5 text-sm text-white placeholder-gray-500 focus:border-primary focus:ring-1 focus:ring-primary"
|
|
||||||
placeholder="8.8.4.4"
|
|
||||||
/>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Role */}
|
|
||||||
<div>
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">
|
|
||||||
Interface Role
|
|
||||||
</label>
|
|
||||||
<select
|
|
||||||
value={formData.role}
|
|
||||||
onChange={(e) => setFormData({ ...formData, role: e.target.value })}
|
|
||||||
className="block w-full rounded-lg border-border-dark bg-[#111a22] py-2.5 pl-3 pr-10 text-sm text-white focus:border-primary focus:ring-1 focus:ring-primary appearance-none"
|
|
||||||
>
|
|
||||||
<option value="">None</option>
|
|
||||||
<option value="Management">Management</option>
|
|
||||||
<option value="ISCSI">iSCSI</option>
|
|
||||||
<option value="Storage">Storage</option>
|
|
||||||
</select>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div className="mt-6 flex items-center justify-end gap-3">
|
|
||||||
<button
|
|
||||||
type="button"
|
|
||||||
onClick={onClose}
|
|
||||||
className="px-4 py-2 text-sm font-bold text-text-secondary hover:text-white transition-colors"
|
|
||||||
>
|
|
||||||
Cancel
|
|
||||||
</button>
|
|
||||||
<button
|
|
||||||
type="submit"
|
|
||||||
disabled={updateMutation.isPending}
|
|
||||||
className="flex items-center gap-2 rounded-lg bg-primary px-5 py-2.5 text-sm font-bold text-white hover:bg-blue-600 transition-all disabled:opacity-50 disabled:cursor-not-allowed"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[18px]">save</span>
|
|
||||||
{updateMutation.isPending ? 'Saving...' : 'Save Changes'}
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</form>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// View Details Modal Component
|
|
||||||
interface ViewDetailsModalProps {
|
|
||||||
interface: NetworkInterface
|
|
||||||
onClose: () => void
|
|
||||||
}
|
|
||||||
|
|
||||||
function ViewDetailsModal({ interface: iface, onClose }: ViewDetailsModalProps) {
|
|
||||||
return (
|
|
||||||
<div className="fixed inset-0 z-50 flex items-center justify-center bg-black/50 backdrop-blur-sm">
|
|
||||||
<div className="w-full max-w-2xl rounded-xl border border-border-dark bg-card-dark shadow-xl">
|
|
||||||
<div className="flex items-center justify-between border-b border-border-dark px-6 py-4">
|
|
||||||
<div className="flex items-center gap-3">
|
|
||||||
<span className="material-symbols-outlined text-primary">info</span>
|
|
||||||
<h2 className="text-lg font-bold text-white">Interface Details - {iface.name}</h2>
|
|
||||||
</div>
|
|
||||||
<button
|
|
||||||
onClick={onClose}
|
|
||||||
className="h-8 w-8 rounded-full hover:bg-border-dark flex items-center justify-center text-text-secondary hover:text-white transition-colors"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[20px]">close</span>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div className="p-6">
|
|
||||||
<div className="space-y-4">
|
|
||||||
{/* Status */}
|
|
||||||
<div className="flex items-center justify-between p-4 rounded-lg bg-[#111a22] border border-border-dark">
|
|
||||||
<div className="flex items-center gap-3">
|
|
||||||
<div className={`h-3 w-3 rounded-full ${iface.status === 'Connected' ? 'bg-green-500' : 'bg-red-500'}`}></div>
|
|
||||||
<span className="text-sm font-medium text-text-secondary">Status</span>
|
|
||||||
</div>
|
|
||||||
<span className={`text-sm font-bold ${iface.status === 'Connected' ? 'text-green-500' : 'text-red-500'}`}>
|
|
||||||
{iface.status}
|
|
||||||
</span>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Network Configuration Grid */}
|
|
||||||
<div className="grid grid-cols-2 gap-4">
|
|
||||||
{/* IP Address */}
|
|
||||||
<div className="p-4 rounded-lg bg-[#111a22] border border-border-dark">
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">IP Address</label>
|
|
||||||
<p className="text-sm font-mono text-white">{iface.ip_address || 'Not configured'}</p>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Subnet */}
|
|
||||||
<div className="p-4 rounded-lg bg-[#111a22] border border-border-dark">
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">Subnet Mask (CIDR)</label>
|
|
||||||
<p className="text-sm font-mono text-white">/{iface.subnet || 'N/A'}</p>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Gateway */}
|
|
||||||
<div className="p-4 rounded-lg bg-[#111a22] border border-border-dark">
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">Default Gateway</label>
|
|
||||||
<p className="text-sm font-mono text-white">{iface.gateway || 'Not configured'}</p>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Speed */}
|
|
||||||
<div className="p-4 rounded-lg bg-[#111a22] border border-border-dark">
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">Link Speed</label>
|
|
||||||
<p className="text-sm font-mono text-white">{iface.speed || 'Unknown'}</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* DNS Servers */}
|
|
||||||
<div className="p-4 rounded-lg bg-[#111a22] border border-border-dark">
|
|
||||||
<label className="mb-3 block text-xs font-medium text-text-secondary uppercase">DNS Servers</label>
|
|
||||||
<div className="space-y-2">
|
|
||||||
{iface.dns1 ? (
|
|
||||||
<div className="flex items-center gap-2">
|
|
||||||
<span className="text-xs text-text-secondary">Primary:</span>
|
|
||||||
<span className="text-sm font-mono text-white">{iface.dns1}</span>
|
|
||||||
</div>
|
|
||||||
) : (
|
|
||||||
<p className="text-xs text-text-secondary">Primary DNS: Not configured</p>
|
|
||||||
)}
|
|
||||||
{iface.dns2 ? (
|
|
||||||
<div className="flex items-center gap-2">
|
|
||||||
<span className="text-xs text-text-secondary">Secondary:</span>
|
|
||||||
<span className="text-sm font-mono text-white">{iface.dns2}</span>
|
|
||||||
</div>
|
|
||||||
) : (
|
|
||||||
<p className="text-xs text-text-secondary">Secondary DNS: Not configured</p>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Interface Role */}
|
|
||||||
{iface.role && (
|
|
||||||
<div className="p-4 rounded-lg bg-[#111a22] border border-border-dark">
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">Interface Role</label>
|
|
||||||
<span className={`inline-block px-3 py-1 rounded text-xs font-bold uppercase ${
|
|
||||||
iface.role === 'ISCSI'
|
|
||||||
? 'bg-purple-500/20 text-purple-400 border border-purple-500/20'
|
|
||||||
: 'bg-primary/20 text-primary border border-primary/20'
|
|
||||||
}`}>
|
|
||||||
{iface.role}
|
|
||||||
</span>
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
|
|
||||||
{/* Full Network Address */}
|
|
||||||
{iface.ip_address && iface.subnet && (
|
|
||||||
<div className="p-4 rounded-lg bg-[#111a22] border border-border-dark">
|
|
||||||
<label className="mb-2 block text-xs font-medium text-text-secondary uppercase">Full Network Address</label>
|
|
||||||
<p className="text-sm font-mono text-white">{iface.ip_address}/{iface.subnet}</p>
|
|
||||||
</div>
|
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Close Button */}
|
|
||||||
<div className="mt-6 flex justify-end">
|
|
||||||
<button
|
|
||||||
onClick={onClose}
|
|
||||||
className="flex items-center justify-center gap-2 rounded-lg bg-primary px-6 py-2.5 text-sm font-bold text-white hover:bg-blue-600 transition-all"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-[18px]">close</span>
|
|
||||||
Close
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
@@ -365,11 +365,7 @@ export default function TapeLibraries() {
|
|||||||
</td>
|
</td>
|
||||||
<td className="py-4 px-6">
|
<td className="py-4 px-6">
|
||||||
<p className="text-white text-sm font-medium">
|
<p className="text-white text-sm font-medium">
|
||||||
{isVTL
|
{isVTL ? 'MHVTL' : 'physical' in library ? (library as PhysicalTapeLibrary).vendor : 'N/A'}
|
||||||
? (library as VirtualTapeLibrary).vendor || 'MHVTL'
|
|
||||||
: 'physical' in library
|
|
||||||
? (library as PhysicalTapeLibrary).vendor
|
|
||||||
: 'N/A'}
|
|
||||||
</p>
|
</p>
|
||||||
<p className="text-text-secondary text-xs">
|
<p className="text-text-secondary text-xs">
|
||||||
LTO-8 • {library.drive_count} {library.drive_count === 1 ? 'Drive' : 'Drives'}
|
LTO-8 • {library.drive_count} {library.drive_count === 1 ? 'Drive' : 'Drives'}
|
||||||
@@ -394,13 +390,7 @@ export default function TapeLibraries() {
|
|||||||
</div>
|
</div>
|
||||||
</td>
|
</td>
|
||||||
<td className="py-4 px-6">
|
<td className="py-4 px-6">
|
||||||
<div
|
<div className="flex items-center gap-2 group/copy cursor-pointer">
|
||||||
className="flex items-center gap-2 group/copy cursor-pointer"
|
|
||||||
onClick={() => {
|
|
||||||
const iqn = `iqn.2023-10.com.vtl:${library.name.toLowerCase().replace(/\s+/g, '')}`
|
|
||||||
navigator.clipboard.writeText(iqn)
|
|
||||||
}}
|
|
||||||
>
|
|
||||||
<code className="text-xs text-text-secondary font-mono bg-[#111a22] px-2 py-1 rounded border border-border-dark group-hover/copy:text-white transition-colors">
|
<code className="text-xs text-text-secondary font-mono bg-[#111a22] px-2 py-1 rounded border border-border-dark group-hover/copy:text-white transition-colors">
|
||||||
iqn.2023-10.com.vtl:{library.name.toLowerCase().replace(/\s+/g, '')}
|
iqn.2023-10.com.vtl:{library.name.toLowerCase().replace(/\s+/g, '')}
|
||||||
</code>
|
</code>
|
||||||
@@ -470,10 +460,10 @@ export default function TapeLibraries() {
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
{/* Tape Detail Drawer */}
|
{/* Tape Detail Drawer */}
|
||||||
{selectedLibrary && activeTab === 'vtl' && (
|
{selectedLibrary && activeTab === 'vtl' && libraryTapes.length > 0 && (
|
||||||
<div className="bg-surface-dark border-t border-border-dark p-6 absolute bottom-0 w-full transform translate-y-0 transition-transform z-30 shadow-2xl shadow-black max-h-[70vh] overflow-y-auto">
|
<div className="bg-surface-dark border-t border-border-dark p-6 absolute bottom-0 w-full transform translate-y-0 transition-transform z-30 shadow-2xl shadow-black">
|
||||||
<div className="max-w-[1400px] mx-auto">
|
<div className="max-w-[1400px] mx-auto">
|
||||||
<div className="flex flex-col sm:flex-row justify-between items-start sm:items-center gap-4 mb-6">
|
<div className="flex justify-between items-center mb-4">
|
||||||
<div className="flex items-center gap-3">
|
<div className="flex items-center gap-3">
|
||||||
<span className="material-symbols-outlined text-primary text-2xl">cable</span>
|
<span className="material-symbols-outlined text-primary text-2xl">cable</span>
|
||||||
<div>
|
<div>
|
||||||
@@ -485,7 +475,7 @@ export default function TapeLibraries() {
|
|||||||
</p>
|
</p>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
<div className="flex gap-3 flex-wrap">
|
<div className="flex gap-3">
|
||||||
<button className="px-3 py-2 bg-[#111a22] border border-border-dark rounded-lg text-text-secondary hover:text-white text-sm font-medium transition-colors">
|
<button className="px-3 py-2 bg-[#111a22] border border-border-dark rounded-lg text-text-secondary hover:text-white text-sm font-medium transition-colors">
|
||||||
Bulk Format
|
Bulk Format
|
||||||
</button>
|
</button>
|
||||||
@@ -498,37 +488,20 @@ export default function TapeLibraries() {
|
|||||||
</Link>
|
</Link>
|
||||||
<button
|
<button
|
||||||
onClick={() => setSelectedLibrary(null)}
|
onClick={() => setSelectedLibrary(null)}
|
||||||
className="p-2 text-text-secondary hover:text-white"
|
className="lg:hidden p-2 text-text-secondary hover:text-white"
|
||||||
>
|
>
|
||||||
<span className="material-symbols-outlined">close</span>
|
<span className="material-symbols-outlined">close</span>
|
||||||
</button>
|
</button>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
<div className="grid grid-cols-2 md:grid-cols-4 lg:grid-cols-6 gap-3">
|
||||||
{libraryTapes.length === 0 ? (
|
|
||||||
<div className="text-center py-12">
|
|
||||||
<span className="material-symbols-outlined text-6xl text-text-secondary mb-4 block">album</span>
|
|
||||||
<h3 className="text-lg font-medium text-white mb-2">No Tapes Found</h3>
|
|
||||||
<p className="text-sm text-text-secondary mb-4">
|
|
||||||
This library has no tapes yet. Create tapes to get started.
|
|
||||||
</p>
|
|
||||||
<Link
|
|
||||||
to={`/tape/vtl/${selectedLibrary}/tapes/create`}
|
|
||||||
className="inline-flex items-center gap-2 px-4 py-2 bg-primary hover:bg-blue-600 rounded-lg text-white text-sm font-bold"
|
|
||||||
>
|
|
||||||
<span className="material-symbols-outlined text-lg">add</span>
|
|
||||||
Add Tapes
|
|
||||||
</Link>
|
|
||||||
</div>
|
|
||||||
) : (
|
|
||||||
<div className="grid grid-cols-2 sm:grid-cols-3 md:grid-cols-4 lg:grid-cols-6 xl:grid-cols-8 gap-3">
|
|
||||||
{libraryTapes.map((tape) => (
|
{libraryTapes.map((tape) => (
|
||||||
<div
|
<div
|
||||||
key={tape.id}
|
key={tape.id}
|
||||||
className={`p-3 rounded-lg border flex flex-col gap-2 relative group hover:border-primary transition-all cursor-pointer min-h-[120px] ${
|
className={`p-3 rounded border flex flex-col gap-2 relative group hover:border-primary transition-colors cursor-pointer ${
|
||||||
tape.status === 'in_drive'
|
tape.status === 'in_drive'
|
||||||
? 'bg-[#111a22] border-green-500/30 shadow-lg shadow-green-500/10'
|
? 'bg-[#111a22] border-green-500/30'
|
||||||
: 'bg-[#111a22] border-border-dark hover:shadow-lg hover:shadow-primary/10'
|
: 'bg-[#111a22] border-border-dark'
|
||||||
}`}
|
}`}
|
||||||
>
|
>
|
||||||
<div className="flex justify-between items-start">
|
<div className="flex justify-between items-start">
|
||||||
@@ -539,30 +512,23 @@ export default function TapeLibraries() {
|
|||||||
>
|
>
|
||||||
album
|
album
|
||||||
</span>
|
</span>
|
||||||
<span className="text-[10px] uppercase font-bold text-text-secondary bg-[#1c2834] px-1.5 py-0.5 rounded">
|
<span className="text-[10px] uppercase font-bold text-text-secondary bg-[#1c2834] px-1 rounded">
|
||||||
SLOT {tape.slot_number}
|
Slot {tape.slot_number}
|
||||||
</span>
|
</span>
|
||||||
</div>
|
</div>
|
||||||
<div className="flex-1 flex flex-col justify-center">
|
<div>
|
||||||
<p className="text-white text-xs font-mono font-bold truncate" title={tape.barcode}>
|
<p className="text-white text-xs font-mono font-bold">{tape.barcode}</p>
|
||||||
{tape.barcode}
|
<p className="text-text-secondary text-[10px]">
|
||||||
</p>
|
|
||||||
<p className="text-text-secondary text-[10px] mt-1">
|
|
||||||
{formatBytes(tape.size_bytes, 1)} / {formatBytes(tape.size_bytes, 1)}
|
{formatBytes(tape.size_bytes, 1)} / {formatBytes(tape.size_bytes, 1)}
|
||||||
</p>
|
</p>
|
||||||
</div>
|
</div>
|
||||||
<div className="absolute inset-0 bg-black/70 hidden group-hover:flex items-center justify-center gap-2 backdrop-blur-sm rounded-lg">
|
<div className="absolute inset-0 bg-black/60 hidden group-hover:flex items-center justify-center gap-2 backdrop-blur-[1px] rounded">
|
||||||
<button className="p-2 text-white hover:text-primary hover:bg-primary/20 rounded transition-colors" title="Eject">
|
<span className="material-symbols-outlined text-white hover:text-primary text-lg">eject</span>
|
||||||
<span className="material-symbols-outlined text-lg">eject</span>
|
<span className="material-symbols-outlined text-white hover:text-red-400 text-lg">delete</span>
|
||||||
</button>
|
|
||||||
<button className="p-2 text-white hover:text-red-400 hover:bg-red-400/20 rounded transition-colors" title="Delete">
|
|
||||||
<span className="material-symbols-outlined text-lg">delete</span>
|
|
||||||
</button>
|
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
))}
|
))}
|
||||||
</div>
|
</div>
|
||||||
)}
|
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|||||||
@@ -42,12 +42,10 @@ export default {
|
|||||||
foreground: "hsl(var(--card-foreground))",
|
foreground: "hsl(var(--card-foreground))",
|
||||||
},
|
},
|
||||||
// Dark theme colors from example
|
// Dark theme colors from example
|
||||||
"background-dark": "#101922",
|
"background-dark": "#111a22",
|
||||||
"card-dark": "#192633",
|
"card-dark": "#1a2632",
|
||||||
"border-dark": "#233648",
|
"border-dark": "#324d67",
|
||||||
"text-secondary": "#92adc9",
|
"text-secondary": "#92adc9",
|
||||||
"surface-dark": "#111a22",
|
|
||||||
"surface-highlight": "#1c2936",
|
|
||||||
},
|
},
|
||||||
fontFamily: {
|
fontFamily: {
|
||||||
display: ["Manrope", "sans-serif"],
|
display: ["Manrope", "sans-serif"],
|
||||||
|
|||||||
@@ -13,11 +13,6 @@ export default defineConfig({
|
|||||||
server: {
|
server: {
|
||||||
host: '0.0.0.0', // Listen on all interfaces to allow access via IP address
|
host: '0.0.0.0', // Listen on all interfaces to allow access via IP address
|
||||||
port: 3000,
|
port: 3000,
|
||||||
allowedHosts: [
|
|
||||||
'atlas-demo.avt.data-center.id',
|
|
||||||
'localhost',
|
|
||||||
'.localhost',
|
|
||||||
],
|
|
||||||
proxy: {
|
proxy: {
|
||||||
'/api': {
|
'/api': {
|
||||||
target: 'http://localhost:8080',
|
target: 'http://localhost:8080',
|
||||||
|
|||||||
@@ -1,25 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
echo "🔨 Rebuilding Calypso API..."
|
|
||||||
cd /development/calypso/backend
|
|
||||||
make build
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "🔨 Rebuilding Calypso Frontend..."
|
|
||||||
cd /development/calypso/frontend
|
|
||||||
npm run build
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "🔄 Restarting API service..."
|
|
||||||
systemctl restart calypso-api.service
|
|
||||||
|
|
||||||
echo "🔄 Restarting Frontend service..."
|
|
||||||
systemctl restart calypso-frontend.service
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "✅ Build and restart complete!"
|
|
||||||
echo ""
|
|
||||||
|
|
||||||
systemctl status calypso-api.service calypso-frontend.service --no-pager
|
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 22 KiB |
@@ -336,5 +336,4 @@ Feature dianggap selesai jika:
|
|||||||
Jika ada konflik antar dokumen:
|
Jika ada konflik antar dokumen:
|
||||||
**FILE INI ADALAH SUMBER KEBENARAN.**
|
**FILE INI ADALAH SUMBER KEBENARAN.**
|
||||||
|
|
||||||
|
|
||||||
AtlasOS – Calypso adalah **tape & VTL appliance kelas enterprise**.
|
AtlasOS – Calypso adalah **tape & VTL appliance kelas enterprise**.
|
||||||
@@ -1,372 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
|
|
||||||
<html class="dark" lang="en"><head>
|
|
||||||
<meta charset="utf-8"/>
|
|
||||||
<meta content="width=device-width, initial-scale=1.0" name="viewport"/>
|
|
||||||
<title>Bacula Backup Management</title>
|
|
||||||
<!-- Google Fonts -->
|
|
||||||
<link href="https://fonts.googleapis.com" rel="preconnect"/>
|
|
||||||
<link crossorigin="" href="https://fonts.gstatic.com" rel="preconnect"/>
|
|
||||||
<link href="https://fonts.googleapis.com/css2?family=Manrope:wght@200..800&display=swap" rel="stylesheet"/>
|
|
||||||
<!-- Material Symbols -->
|
|
||||||
<link href="https://fonts.googleapis.com/css2?family=Material+Symbols+Outlined:wght,FILL@100..700,0..1&display=swap" rel="stylesheet"/>
|
|
||||||
<link href="https://fonts.googleapis.com/css2?family=Material+Symbols+Outlined:wght,FILL@100..700,0..1&display=swap" rel="stylesheet"/>
|
|
||||||
<!-- Tailwind CSS -->
|
|
||||||
<script src="https://cdn.tailwindcss.com?plugins=forms,container-queries"></script>
|
|
||||||
<!-- Theme Config -->
|
|
||||||
<script id="tailwind-config">
|
|
||||||
tailwind.config = {
|
|
||||||
darkMode: "class",
|
|
||||||
theme: {
|
|
||||||
extend: {
|
|
||||||
colors: {
|
|
||||||
"primary": "#137fec",
|
|
||||||
"background-light": "#f6f7f8",
|
|
||||||
"background-dark": "#101922",
|
|
||||||
"surface-dark": "#111a22",
|
|
||||||
"surface-highlight": "#1c2936",
|
|
||||||
"border-dark": "#324d67",
|
|
||||||
"text-secondary": "#92adc9",
|
|
||||||
},
|
|
||||||
fontFamily: {
|
|
||||||
"display": ["Manrope", "sans-serif"]
|
|
||||||
},
|
|
||||||
borderRadius: {
|
|
||||||
"DEFAULT": "0.25rem",
|
|
||||||
"lg": "0.5rem",
|
|
||||||
"xl": "0.75rem",
|
|
||||||
"full": "9999px"
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
</script>
|
|
||||||
<style>
|
|
||||||
/* Custom scrollbar for dark theme dashboard feel */
|
|
||||||
::-webkit-scrollbar {
|
|
||||||
width: 8px;
|
|
||||||
height: 8px;
|
|
||||||
}
|
|
||||||
::-webkit-scrollbar-track {
|
|
||||||
background: #101922;
|
|
||||||
}
|
|
||||||
::-webkit-scrollbar-thumb {
|
|
||||||
background: #324d67;
|
|
||||||
border-radius: 4px;
|
|
||||||
}
|
|
||||||
::-webkit-scrollbar-thumb:hover {
|
|
||||||
background: #4b6a88;
|
|
||||||
}
|
|
||||||
</style>
|
|
||||||
</head>
|
|
||||||
<body class="bg-background-light dark:bg-background-dark text-slate-900 dark:text-white font-display">
|
|
||||||
<div class="relative flex h-screen w-full overflow-hidden">
|
|
||||||
<!-- Side Navigation -->
|
|
||||||
<aside class="flex w-64 flex-col border-r border-border-dark bg-surface-dark flex-shrink-0">
|
|
||||||
<div class="flex h-full flex-col justify-between p-4">
|
|
||||||
<div class="flex flex-col gap-6">
|
|
||||||
<!-- Host Info -->
|
|
||||||
<div class="flex flex-col px-2">
|
|
||||||
<div class="flex items-center gap-2 mb-1">
|
|
||||||
<span class="material-symbols-outlined text-primary">dns</span>
|
|
||||||
<h1 class="text-white text-base font-bold leading-normal">System Admin</h1>
|
|
||||||
</div>
|
|
||||||
<p class="text-text-secondary text-xs font-mono pl-8">Host: backup-srv-01</p>
|
|
||||||
</div>
|
|
||||||
<!-- Nav Links -->
|
|
||||||
<nav class="flex flex-col gap-2">
|
|
||||||
<a class="flex items-center gap-3 px-3 py-2 rounded-lg text-text-secondary hover:bg-surface-highlight hover:text-white transition-colors" href="#">
|
|
||||||
<span class="material-symbols-outlined">speed</span>
|
|
||||||
<p class="text-sm font-medium leading-normal">Overview</p>
|
|
||||||
</a>
|
|
||||||
<a class="flex items-center gap-3 px-3 py-2 rounded-lg text-text-secondary hover:bg-surface-highlight hover:text-white transition-colors" href="#">
|
|
||||||
<span class="material-symbols-outlined">database</span>
|
|
||||||
<p class="text-sm font-medium leading-normal">Storage</p>
|
|
||||||
</a>
|
|
||||||
<a class="flex items-center gap-3 px-3 py-2 rounded-lg text-text-secondary hover:bg-surface-highlight hover:text-white transition-colors" href="#">
|
|
||||||
<span class="material-symbols-outlined">hub</span>
|
|
||||||
<p class="text-sm font-medium leading-normal">Networking</p>
|
|
||||||
</a>
|
|
||||||
<!-- Active Item -->
|
|
||||||
<a class="flex items-center gap-3 px-3 py-2 rounded-lg bg-[#233648] text-white shadow-sm border border-border-dark/30" href="#">
|
|
||||||
<span class="material-symbols-outlined text-primary fill-1">backup</span>
|
|
||||||
<p class="text-sm font-bold leading-normal">Bacula Backup</p>
|
|
||||||
</a>
|
|
||||||
<a class="flex items-center gap-3 px-3 py-2 rounded-lg text-text-secondary hover:bg-surface-highlight hover:text-white transition-colors" href="#">
|
|
||||||
<span class="material-symbols-outlined">description</span>
|
|
||||||
<p class="text-sm font-medium leading-normal">Logs</p>
|
|
||||||
</a>
|
|
||||||
<a class="flex items-center gap-3 px-3 py-2 rounded-lg text-text-secondary hover:bg-surface-highlight hover:text-white transition-colors" href="#">
|
|
||||||
<span class="material-symbols-outlined">settings</span>
|
|
||||||
<p class="text-sm font-medium leading-normal">System</p>
|
|
||||||
</a>
|
|
||||||
</nav>
|
|
||||||
</div>
|
|
||||||
<!-- Bottom User Profile -->
|
|
||||||
<div class="flex items-center gap-3 px-3 py-3 mt-auto rounded-lg bg-surface-highlight border border-border-dark">
|
|
||||||
<div class="h-8 w-8 rounded-full bg-primary/20 flex items-center justify-center text-primary font-bold text-xs">
|
|
||||||
AD
|
|
||||||
</div>
|
|
||||||
<div class="flex flex-col">
|
|
||||||
<p class="text-white text-xs font-bold">Admin User</p>
|
|
||||||
<p class="text-text-secondary text-[10px]">root@localhost</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</aside>
|
|
||||||
<!-- Main Content Area -->
|
|
||||||
<main class="flex-1 flex flex-col h-full overflow-y-auto bg-background-dark">
|
|
||||||
<div class="flex flex-col w-full max-w-[1200px] mx-auto p-6 md:p-8 gap-6">
|
|
||||||
<!-- Page Heading -->
|
|
||||||
<header class="flex flex-wrap justify-between items-end gap-4 border-b border-border-dark pb-6">
|
|
||||||
<div class="flex flex-col gap-2">
|
|
||||||
<div class="flex items-center gap-3">
|
|
||||||
<h1 class="text-white text-3xl md:text-4xl font-black leading-tight tracking-tight">Bacula Backup Manager</h1>
|
|
||||||
<span class="flex h-3 w-3 rounded-full bg-green-500 shadow-[0_0_8px_rgba(34,197,94,0.6)]"></span>
|
|
||||||
</div>
|
|
||||||
<p class="text-text-secondary text-base font-normal max-w-2xl">
|
|
||||||
Manage backup jobs, configure clients, and monitor storage pools from a central director console.
|
|
||||||
</p>
|
|
||||||
</div>
|
|
||||||
<div class="flex gap-3">
|
|
||||||
<button class="flex items-center gap-2 cursor-pointer justify-center rounded-lg h-10 px-4 bg-surface-highlight border border-border-dark text-white text-sm font-bold hover:bg-[#2a3c50] transition-colors">
|
|
||||||
<span class="material-symbols-outlined text-base">terminal</span>
|
|
||||||
<span>Console</span>
|
|
||||||
</button>
|
|
||||||
<button class="flex items-center gap-2 cursor-pointer justify-center rounded-lg h-10 px-4 bg-primary text-white text-sm font-bold shadow-lg shadow-primary/20 hover:bg-primary/90 transition-colors">
|
|
||||||
<span class="material-symbols-outlined text-base">refresh</span>
|
|
||||||
<span>Restart Director</span>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</header>
|
|
||||||
<!-- Navigation Tabs -->
|
|
||||||
<div class="w-full overflow-x-auto">
|
|
||||||
<div class="flex border-b border-border-dark gap-8 min-w-max">
|
|
||||||
<a class="flex items-center gap-2 border-b-[3px] border-primary text-white pb-3 pt-2" href="#">
|
|
||||||
<span class="material-symbols-outlined text-base">dashboard</span>
|
|
||||||
<p class="text-sm font-bold tracking-wide">Dashboard</p>
|
|
||||||
</a>
|
|
||||||
<a class="flex items-center gap-2 border-b-[3px] border-transparent text-text-secondary hover:text-white pb-3 pt-2 transition-colors" href="#">
|
|
||||||
<span class="material-symbols-outlined text-base">task</span>
|
|
||||||
<p class="text-sm font-bold tracking-wide">Jobs</p>
|
|
||||||
</a>
|
|
||||||
<a class="flex items-center gap-2 border-b-[3px] border-transparent text-text-secondary hover:text-white pb-3 pt-2 transition-colors" href="#">
|
|
||||||
<span class="material-symbols-outlined text-base">devices</span>
|
|
||||||
<p class="text-sm font-bold tracking-wide">Clients</p>
|
|
||||||
</a>
|
|
||||||
<a class="flex items-center gap-2 border-b-[3px] border-transparent text-text-secondary hover:text-white pb-3 pt-2 transition-colors" href="#">
|
|
||||||
<span class="material-symbols-outlined text-base">storage</span>
|
|
||||||
<p class="text-sm font-bold tracking-wide">Storage</p>
|
|
||||||
</a>
|
|
||||||
<a class="flex items-center gap-2 border-b-[3px] border-transparent text-text-secondary hover:text-white pb-3 pt-2 transition-colors" href="#">
|
|
||||||
<span class="material-symbols-outlined text-base">history</span>
|
|
||||||
<p class="text-sm font-bold tracking-wide">Restore</p>
|
|
||||||
</a>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<!-- Stats Dashboard -->
|
|
||||||
<div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-4 gap-4">
|
|
||||||
<!-- Service Status Card -->
|
|
||||||
<div class="flex flex-col justify-between rounded-lg p-5 bg-surface-highlight border border-border-dark relative overflow-hidden group">
|
|
||||||
<div class="absolute right-0 top-0 p-4 opacity-10 group-hover:opacity-20 transition-opacity">
|
|
||||||
<span class="material-symbols-outlined text-6xl">health_and_safety</span>
|
|
||||||
</div>
|
|
||||||
<div class="flex flex-col gap-1 z-10">
|
|
||||||
<p class="text-text-secondary text-sm font-medium uppercase tracking-wider">Director Status</p>
|
|
||||||
<div class="flex items-center gap-2 mt-1">
|
|
||||||
<span class="material-symbols-outlined text-green-500">check_circle</span>
|
|
||||||
<p class="text-white text-2xl font-bold">Active</p>
|
|
||||||
</div>
|
|
||||||
<p class="text-green-500 text-xs font-mono mt-1">Uptime: 14d 2h 12m</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<!-- Last Backup Card -->
|
|
||||||
<div class="flex flex-col justify-between rounded-lg p-5 bg-surface-highlight border border-border-dark relative overflow-hidden group">
|
|
||||||
<div class="absolute right-0 top-0 p-4 opacity-10 group-hover:opacity-20 transition-opacity">
|
|
||||||
<span class="material-symbols-outlined text-6xl">schedule</span>
|
|
||||||
</div>
|
|
||||||
<div class="flex flex-col gap-1 z-10">
|
|
||||||
<p class="text-text-secondary text-sm font-medium uppercase tracking-wider">Last Job</p>
|
|
||||||
<div class="flex items-center gap-2 mt-1">
|
|
||||||
<p class="text-white text-2xl font-bold">Success</p>
|
|
||||||
</div>
|
|
||||||
<p class="text-text-secondary text-xs mt-1">DailyBackup • 2h 15m ago</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<!-- Active Jobs Card -->
|
|
||||||
<div class="flex flex-col justify-between rounded-lg p-5 bg-surface-highlight border border-border-dark relative overflow-hidden group">
|
|
||||||
<div class="absolute right-0 top-0 p-4 opacity-10 group-hover:opacity-20 transition-opacity">
|
|
||||||
<span class="material-symbols-outlined text-6xl">pending_actions</span>
|
|
||||||
</div>
|
|
||||||
<div class="flex flex-col gap-1 z-10">
|
|
||||||
<p class="text-text-secondary text-sm font-medium uppercase tracking-wider">Active Jobs</p>
|
|
||||||
<div class="flex items-center gap-2 mt-1">
|
|
||||||
<p class="text-primary text-2xl font-bold">3 Running</p>
|
|
||||||
</div>
|
|
||||||
<div class="w-full bg-surface-dark h-1.5 rounded-full mt-3 overflow-hidden">
|
|
||||||
<div class="bg-primary h-full rounded-full animate-pulse w-2/3"></div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<!-- Storage Pool Card -->
|
|
||||||
<div class="flex flex-col justify-between rounded-lg p-5 bg-surface-highlight border border-border-dark relative overflow-hidden group">
|
|
||||||
<div class="absolute right-0 top-0 p-4 opacity-10 group-hover:opacity-20 transition-opacity">
|
|
||||||
<span class="material-symbols-outlined text-6xl">hard_drive</span>
|
|
||||||
</div>
|
|
||||||
<div class="flex flex-col gap-1 z-10 w-full">
|
|
||||||
<div class="flex justify-between items-center">
|
|
||||||
<p class="text-text-secondary text-sm font-medium uppercase tracking-wider">Default Pool</p>
|
|
||||||
<span class="text-white text-xs font-bold">78%</span>
|
|
||||||
</div>
|
|
||||||
<div class="flex items-end gap-1 mt-1">
|
|
||||||
<p class="text-white text-2xl font-bold">9.4 TB</p>
|
|
||||||
<p class="text-text-secondary text-sm mb-1">/ 12 TB</p>
|
|
||||||
</div>
|
|
||||||
<div class="w-full bg-surface-dark h-2 rounded-full mt-2 overflow-hidden">
|
|
||||||
<div class="bg-gradient-to-r from-primary to-blue-400 h-full rounded-full" style="width: 78%"></div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<!-- Recent Jobs Section -->
|
|
||||||
<div class="flex flex-col gap-4">
|
|
||||||
<div class="flex items-center justify-between px-1">
|
|
||||||
<h3 class="text-white text-lg font-bold">Recent Job History</h3>
|
|
||||||
<button class="text-primary text-sm font-bold hover:text-blue-300 transition-colors">View All History</button>
|
|
||||||
</div>
|
|
||||||
<div class="rounded-lg border border-border-dark bg-surface-highlight overflow-hidden shadow-sm">
|
|
||||||
<div class="overflow-x-auto">
|
|
||||||
<table class="w-full text-left border-collapse">
|
|
||||||
<thead>
|
|
||||||
<tr class="bg-surface-dark border-b border-border-dark text-text-secondary text-xs uppercase tracking-wider">
|
|
||||||
<th class="px-6 py-4 font-semibold">Status</th>
|
|
||||||
<th class="px-6 py-4 font-semibold">Job ID</th>
|
|
||||||
<th class="px-6 py-4 font-semibold">Job Name</th>
|
|
||||||
<th class="px-6 py-4 font-semibold">Client</th>
|
|
||||||
<th class="px-6 py-4 font-semibold">Type</th>
|
|
||||||
<th class="px-6 py-4 font-semibold">Level</th>
|
|
||||||
<th class="px-6 py-4 font-semibold">Duration</th>
|
|
||||||
<th class="px-6 py-4 font-semibold">Bytes</th>
|
|
||||||
<th class="px-6 py-4 font-semibold text-right">Actions</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody class="divide-y divide-border-dark text-sm">
|
|
||||||
<!-- Running Job -->
|
|
||||||
<tr class="hover:bg-surface-dark/50 transition-colors">
|
|
||||||
<td class="px-6 py-4">
|
|
||||||
<span class="inline-flex items-center gap-1.5 rounded px-2 py-1 text-xs font-medium bg-blue-500/10 text-blue-400 border border-blue-500/20">
|
|
||||||
<span class="block h-1.5 w-1.5 rounded-full bg-blue-400 animate-pulse"></span>
|
|
||||||
Running
|
|
||||||
</span>
|
|
||||||
</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">10423</td>
|
|
||||||
<td class="px-6 py-4 text-white font-medium">WeeklyArchive</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">filesrv-02</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">Backup</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">Full</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">00:45:12</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">142 GB</td>
|
|
||||||
<td class="px-6 py-4 text-right">
|
|
||||||
<button class="text-text-secondary hover:text-white p-1 rounded hover:bg-surface-dark transition-colors">
|
|
||||||
<span class="material-symbols-outlined text-[20px]">cancel</span>
|
|
||||||
</button>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<!-- Successful Job -->
|
|
||||||
<tr class="hover:bg-surface-dark/50 transition-colors">
|
|
||||||
<td class="px-6 py-4">
|
|
||||||
<span class="inline-flex items-center gap-1.5 rounded px-2 py-1 text-xs font-medium bg-green-500/10 text-green-400 border border-green-500/20">
|
|
||||||
<span class="material-symbols-outlined text-[14px]">check</span>
|
|
||||||
OK
|
|
||||||
</span>
|
|
||||||
</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">10422</td>
|
|
||||||
<td class="px-6 py-4 text-white font-medium">DailyBackup</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">web-srv-01</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">Backup</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">Incr</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">00:12:05</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">4.2 GB</td>
|
|
||||||
<td class="px-6 py-4 text-right">
|
|
||||||
<button class="text-text-secondary hover:text-white p-1 rounded hover:bg-surface-dark transition-colors">
|
|
||||||
<span class="material-symbols-outlined text-[20px]">more_vert</span>
|
|
||||||
</button>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<!-- Failed Job -->
|
|
||||||
<tr class="hover:bg-surface-dark/50 transition-colors">
|
|
||||||
<td class="px-6 py-4">
|
|
||||||
<span class="inline-flex items-center gap-1.5 rounded px-2 py-1 text-xs font-medium bg-red-500/10 text-red-400 border border-red-500/20">
|
|
||||||
<span class="material-symbols-outlined text-[14px]">error</span>
|
|
||||||
Error
|
|
||||||
</span>
|
|
||||||
</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">10421</td>
|
|
||||||
<td class="px-6 py-4 text-white font-medium">DB_Snapshot</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">db-prod-01</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">Backup</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">Diff</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">00:00:04</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">0 B</td>
|
|
||||||
<td class="px-6 py-4 text-right">
|
|
||||||
<button class="text-text-secondary hover:text-white p-1 rounded hover:bg-surface-dark transition-colors">
|
|
||||||
<span class="material-symbols-outlined text-[20px]">replay</span>
|
|
||||||
</button>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
<!-- Another Success -->
|
|
||||||
<tr class="hover:bg-surface-dark/50 transition-colors">
|
|
||||||
<td class="px-6 py-4">
|
|
||||||
<span class="inline-flex items-center gap-1.5 rounded px-2 py-1 text-xs font-medium bg-green-500/10 text-green-400 border border-green-500/20">
|
|
||||||
<span class="material-symbols-outlined text-[14px]">check</span>
|
|
||||||
OK
|
|
||||||
</span>
|
|
||||||
</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">10420</td>
|
|
||||||
<td class="px-6 py-4 text-white font-medium">CatalogBackup</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">backup-srv-01</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">Backup</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary">Full</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">00:05:30</td>
|
|
||||||
<td class="px-6 py-4 text-text-secondary font-mono">850 MB</td>
|
|
||||||
<td class="px-6 py-4 text-right">
|
|
||||||
<button class="text-text-secondary hover:text-white p-1 rounded hover:bg-surface-dark transition-colors">
|
|
||||||
<span class="material-symbols-outlined text-[20px]">more_vert</span>
|
|
||||||
</button>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
<!-- Pagination/Footer -->
|
|
||||||
<div class="bg-surface-dark border-t border-border-dark px-6 py-3 flex items-center justify-between">
|
|
||||||
<p class="text-text-secondary text-xs">Showing 4 of 128 jobs</p>
|
|
||||||
<div class="flex gap-2">
|
|
||||||
<button class="p-1 rounded text-text-secondary hover:text-white disabled:opacity-50 hover:bg-surface-highlight">
|
|
||||||
<span class="material-symbols-outlined text-base">chevron_left</span>
|
|
||||||
</button>
|
|
||||||
<button class="p-1 rounded text-text-secondary hover:text-white hover:bg-surface-highlight">
|
|
||||||
<span class="material-symbols-outlined text-base">chevron_right</span>
|
|
||||||
</button>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
<!-- Footer Console Widget (Collapsed/Small) -->
|
|
||||||
<div class="mt-auto pt-8">
|
|
||||||
<div class="rounded-lg bg-[#0d131a] border border-border-dark p-4 font-mono text-xs text-text-secondary shadow-inner h-32 overflow-y-auto">
|
|
||||||
<div class="flex items-center justify-between mb-2 text-gray-500 border-b border-white/5 pb-1">
|
|
||||||
<span>Console Log (tail -f)</span>
|
|
||||||
<span class="flex items-center gap-1"><span class="w-2 h-2 bg-green-500 rounded-full animate-pulse"></span> Connected</span>
|
|
||||||
</div>
|
|
||||||
<p class="text-blue-400">[14:22:01] bareos-dir: Connected to Storage at backup-srv-01:9103</p>
|
|
||||||
<p>[14:22:02] bareos-sd: Volume "Vol-0012" selected for appending</p>
|
|
||||||
<p>[14:22:05] bareos-fd: Client "filesrv-02" starting backup of /var/www/html</p>
|
|
||||||
<p class="text-yellow-500">[14:23:10] warning: /var/www/html/cache/tmp locked by another process, skipping</p>
|
|
||||||
<p>[14:23:45] bareos-dir: JobId 10423: Sending Accurate information.</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</main>
|
|
||||||
</div>
|
|
||||||
</body></html>
|
|
||||||
Binary file not shown.
|
Before Width: | Height: | Size: 289 KiB |
Reference in New Issue
Block a user