diff --git a/appliance b/appliance
new file mode 100755
index 0000000..b5a95e4
Binary files /dev/null and b/appliance differ
diff --git a/internal/http/admin_handlers.go b/internal/http/admin_handlers.go
new file mode 100644
index 0000000..99d002e
--- /dev/null
+++ b/internal/http/admin_handlers.go
@@ -0,0 +1,258 @@
+package http
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/example/storage-appliance/internal/auth"
+ "github.com/go-chi/chi/v5"
+)
+
+// UsersHandler shows the users management page
+func (a *App) UsersHandler(w http.ResponseWriter, r *http.Request) {
+ data := templateData(r, map[string]interface{}{
+ "Title": "User Management",
+ })
+ if err := templates.ExecuteTemplate(w, "users", data); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+// HXUsersHandler returns HTMX partial for users list
+func (a *App) HXUsersHandler(w http.ResponseWriter, r *http.Request) {
+ rbacStore := auth.NewRBACStore(a.DB)
+
+ // Get all users (simplified - in production, you'd want pagination)
+ rows, err := a.DB.QueryContext(r.Context(), `SELECT id, username, created_at FROM users ORDER BY username`)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ defer rows.Close()
+
+ type UserWithRoles struct {
+ ID string
+ Username string
+ CreatedAt string
+ Roles []auth.Role
+ }
+
+ var users []UserWithRoles
+ for rows.Next() {
+ var u UserWithRoles
+ if err := rows.Scan(&u.ID, &u.Username, &u.CreatedAt); err != nil {
+ continue
+ }
+ roles, _ := rbacStore.GetUserRoles(r.Context(), u.ID) // best-effort: a user whose roles cannot be loaded still renders
+ u.Roles = roles
+ users = append(users, u)
+ }
+ if err := rows.Err(); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ data := map[string]interface{}{
+ "Users": users,
+ }
+ if err := templates.ExecuteTemplate(w, "hx_users", data); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+// CreateUserHandler creates a new user
+func (a *App) CreateUserHandler(w http.ResponseWriter, r *http.Request) {
+ username := r.FormValue("username")
+ password := r.FormValue("password")
+
+ if username == "" || password == "" {
+ http.Error(w, "username and password required", http.StatusBadRequest)
+ return
+ }
+
+ userStore := auth.NewUserStore(a.DB)
+ _, err := userStore.CreateUser(r.Context(), username, password)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ // Return HTMX partial or redirect
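+ // An HX-Refresh: true response header tells HTMX to perform a full page reload,
+ // so the freshly created user appears without any client-side templating.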
+ if r.Header.Get("HX-Request") == "true" {
+ w.Header().Set("HX-Refresh", "true")
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Redirect(w, r, "/admin/users", http.StatusFound)
+ }
+}
+
+// DeleteUserHandler deletes a user
+func (a *App) DeleteUserHandler(w http.ResponseWriter, r *http.Request) {
+ userID := chi.URLParam(r, "id")
+
+ _, err := a.DB.ExecContext(r.Context(), `DELETE FROM users WHERE id = ?`, userID)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if r.Header.Get("HX-Request") == "true" {
+ w.Header().Set("HX-Refresh", "true")
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Redirect(w, r, "/admin/users", http.StatusFound)
+ }
+}
+
+// UpdateUserRolesHandler updates roles for a user
+func (a *App) UpdateUserRolesHandler(w http.ResponseWriter, r *http.Request) {
+ userID := chi.URLParam(r, "id")
+
+ var req struct {
+ RoleIDs []string `json:"role_ids"`
+ }
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ http.Error(w, "invalid request", http.StatusBadRequest)
+ return
+ }
+
+ rbacStore := auth.NewRBACStore(a.DB)
+
+ // Replace the user's roles: remove everything currently assigned, then add the requested set.
+ currentRoles, err := rbacStore.GetUserRoles(r.Context(), userID)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // Remove all current roles, then add the new ones. Errors from individual
+ // assignments are not surfaced here; the subsequent refresh shows the resulting state.
+ for _, role := range currentRoles {
+ rbacStore.RemoveRoleFromUser(r.Context(), userID, role.ID)
+ }
+ for _, roleID := range req.RoleIDs {
+ rbacStore.AssignRoleToUser(r.Context(), userID, roleID)
+ }
+
+ if r.Header.Get("HX-Request") == "true" {
+ w.Header().Set("HX-Refresh", "true")
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Redirect(w, r, "/admin/users", http.StatusFound)
+ }
+}
+
+// RolesHandler shows the roles management page
+func (a *App) RolesHandler(w http.ResponseWriter, r *http.Request) {
+ data := templateData(r, map[string]interface{}{
+ "Title": "Role Management",
+ })
+ if err := templates.ExecuteTemplate(w, "roles", data); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+// HXRolesHandler returns HTMX partial for roles list
+func (a *App) HXRolesHandler(w http.ResponseWriter, r *http.Request) {
+ rbacStore := auth.NewRBACStore(a.DB)
+
+ roles, err := rbacStore.GetAllRoles(r.Context())
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ type RoleWithPermissions struct {
+ auth.Role
+ Permissions []auth.Permission
+ }
+
+ var rolesWithPerms []RoleWithPermissions
+ for _, role := range roles {
+ rwp := RoleWithPermissions{Role: role}
+ perms, _ := rbacStore.GetRolePermissions(r.Context(), role.ID)
+ rwp.Permissions = perms
+ rolesWithPerms = append(rolesWithPerms, rwp)
+ }
+
+ data := map[string]interface{}{
+ "Roles": rolesWithPerms,
+ }
+ if err := templates.ExecuteTemplate(w, "hx_roles", data); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+// CreateRoleHandler creates a new role
+func (a *App) CreateRoleHandler(w http.ResponseWriter, r *http.Request) {
+ name := r.FormValue("name")
+ description := r.FormValue("description")
+
+ if name == "" {
+ http.Error(w, "name required", http.StatusBadRequest)
+ return
+ }
+
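+ // The role name doubles as its ID; assuming roles.id carries a primary-key or
+ // uniqueness constraint, creating a second role with the same name fails the INSERT below.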
+ roleID := name // Using name as ID for simplicity
+ _, err := a.DB.ExecContext(r.Context(),
+ `INSERT INTO roles (id, name, description) VALUES (?, ?, ?)`,
+ roleID, name, description)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ if r.Header.Get("HX-Request") == "true" {
+ w.Header().Set("HX-Refresh", "true")
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Redirect(w, r, "/admin/roles", http.StatusFound)
+ }
+}
+
+// DeleteRoleHandler deletes a role
+func (a *App) DeleteRoleHandler(w http.ResponseWriter, r *http.Request) {
+ roleID := chi.URLParam(r, "id")
+
+ _, err := a.DB.ExecContext(r.Context(), `DELETE FROM roles WHERE id = ?`, roleID)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if r.Header.Get("HX-Request") == "true" {
+ w.Header().Set("HX-Refresh", "true")
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Redirect(w, r, "/admin/roles", http.StatusFound)
+ }
+}
+
+// UpdateRolePermissionsHandler updates permissions for a role
+func (a *App) UpdateRolePermissionsHandler(w http.ResponseWriter, r *http.Request) {
+ roleID := chi.URLParam(r, "id")
+
+ var req struct {
+ PermissionIDs []string `json:"permission_ids"`
+ }
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ http.Error(w, "invalid request", http.StatusBadRequest)
+ return
+ }
+
+ rbacStore := auth.NewRBACStore(a.DB)
+
+ // Replace the role's permissions: remove the current set, then assign the requested one.
+ currentPerms, err := rbacStore.GetRolePermissions(r.Context(), roleID)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // Remove all current permissions, then add the new ones. Errors from individual
+ // assignments are not surfaced here; the subsequent refresh shows the resulting state.
+ for _, perm := range currentPerms {
+ rbacStore.RemovePermissionFromRole(r.Context(), roleID, perm.ID)
+ }
+ for _, permID := range req.PermissionIDs {
+ rbacStore.AssignPermissionToRole(r.Context(), roleID, permID)
+ }
+
+ if r.Header.Get("HX-Request") == "true" {
+ w.Header().Set("HX-Refresh", "true")
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Redirect(w, r, "/admin/roles", http.StatusFound)
+ }
+}
+
diff --git a/internal/http/auth_handlers.go b/internal/http/auth_handlers.go
new file mode 100644
index 0000000..87ae1da
--- /dev/null
+++ b/internal/http/auth_handlers.go
@@ -0,0 +1,125 @@
+package http
+
+import (
+ "encoding/json"
+ "net/http"
+
+ "github.com/example/storage-appliance/internal/auth"
+)
+
+// LoginHandler handles user login
+func (a *App) LoginHandler(w http.ResponseWriter, r *http.Request) {
+ if r.Method == "GET" {
+ // Show login page
+ data := map[string]interface{}{
+ "Title": "Login",
+ }
+ if err := templates.ExecuteTemplate(w, "login", data); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+ return
+ }
+
+ // Handle POST login
+ var req struct {
+ Username string `json:"username"`
+ Password string `json:"password"`
+ }
+
+ if r.Header.Get("Content-Type") == "application/json" {
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ http.Error(w, "invalid request", http.StatusBadRequest)
+ return
+ }
+ } else {
+ req.Username = r.FormValue("username")
+ req.Password = r.FormValue("password")
+ }
+
+ // Authenticate user
+ userStore := auth.NewUserStore(a.DB)
+ user, err := userStore.Authenticate(r.Context(), req.Username, req.Password)
+ if err != nil {
+ if r.Header.Get("HX-Request") == "true" {
+ w.Header().Set("Content-Type", "text/html")
+ w.WriteHeader(http.StatusUnauthorized)
+ w.Write([]byte(`Invalid username or password`))
+ } else {
+ http.Error(w, "invalid credentials", http.StatusUnauthorized)
+ }
+ return
+ }
+
+ // Create session
+ sessionStore := auth.NewSessionStore(a.DB)
+ session, err := sessionStore.CreateSession(r.Context(), user.ID)
+ if err != nil {
+ http.Error(w, "failed to create session", http.StatusInternalServerError)
+ return
+ }
+
+ // Set session cookie
+ http.SetCookie(w, &http.Cookie{
+ Name: auth.SessionCookieName,
+ Value: session.Token,
+ Path: "/",
+ HttpOnly: true,
+ Secure: false, // Set to true in production with HTTPS
+ SameSite: http.SameSiteStrictMode,
+ MaxAge: int(auth.SessionDuration.Seconds()),
+ })
+
+ // Set CSRF token cookie
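+ // Double-submit pattern: the token is deliberately readable by client-side code
+ // (HttpOnly: false) so HTMX can echo it back with each request; the server-side
+ // comparison is assumed to live in middleware outside this diff.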
+ csrfToken := generateCSRFToken()
+ http.SetCookie(w, &http.Cookie{
+ Name: "csrf_token",
+ Value: csrfToken,
+ Path: "/",
+ HttpOnly: false, // Needed for HTMX to read it
+ Secure: false,
+ SameSite: http.SameSiteStrictMode,
+ MaxAge: int(auth.SessionDuration.Seconds()),
+ })
+
+ // Redirect or return success
+ if r.Header.Get("HX-Request") == "true" {
+ w.Header().Set("HX-Redirect", "/dashboard")
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Redirect(w, r, "/dashboard", http.StatusFound)
+ }
+}
+
+// LogoutHandler handles user logout
+func (a *App) LogoutHandler(w http.ResponseWriter, r *http.Request) {
+ // Get session token from cookie
+ cookie, err := r.Cookie(auth.SessionCookieName)
+ if err == nil {
+ // Delete session
+ sessionStore := auth.NewSessionStore(a.DB)
+ sessionStore.DeleteSession(r.Context(), cookie.Value)
+ }
+
+ // Clear cookies
+ http.SetCookie(w, &http.Cookie{
+ Name: auth.SessionCookieName,
+ Value: "",
+ Path: "/",
+ HttpOnly: true,
+ MaxAge: -1,
+ })
+ http.SetCookie(w, &http.Cookie{
+ Name: "csrf_token",
+ Value: "",
+ Path: "/",
+ HttpOnly: false,
+ MaxAge: -1,
+ })
+
+ if r.Header.Get("HX-Request") == "true" {
+ w.Header().Set("HX-Redirect", "/login")
+ w.WriteHeader(http.StatusOK)
+ } else {
+ http.Redirect(w, r, "/login", http.StatusFound)
+ }
+}
diff --git a/internal/http/monitoring_handlers.go b/internal/http/monitoring_handlers.go
new file mode 100644
index 0000000..40bc724
--- /dev/null
+++ b/internal/http/monitoring_handlers.go
@@ -0,0 +1,121 @@
+package http
+
+import (
+ "net/http"
+ "strings"
+
+ "github.com/example/storage-appliance/internal/monitoring"
+)
+
+// MetricsHandler serves Prometheus metrics
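+// The response uses the plain-text exposition format (Content-Type
+// "text/plain; version=0.0.4"), so a stock Prometheus scrape_config pointed at
+// whatever route this handler is mounted on (wiring assumed elsewhere) can consume it.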
+func (a *App) MetricsHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ // Create collectors
+ collectors := []monitoring.Collector{
+ monitoring.NewZFSCollector(a.ZFSSvc, a.Runner),
+ monitoring.NewSMARTCollector(a.Runner),
+ monitoring.NewServiceCollector(a.Runner),
+ monitoring.NewHostCollector(),
+ }
+
+ // Export metrics
+ exporter := monitoring.NewPrometheusExporter(collectors...)
+ metrics := exporter.Export(ctx)
+
+ w.Header().Set("Content-Type", "text/plain; version=0.0.4")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(metrics))
+}
+
+// MonitoringHandler shows the monitoring dashboard
+func (a *App) MonitoringHandler(w http.ResponseWriter, r *http.Request) {
+ data := templateData(r, map[string]interface{}{
+ "Title": "Monitoring",
+ })
+ if err := templates.ExecuteTemplate(w, "monitoring", data); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+// HXMonitoringHandler returns HTMX partial for monitoring metrics
+func (a *App) HXMonitoringHandler(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ // Create collectors
+ collectors := []monitoring.Collector{
+ monitoring.NewZFSCollector(a.ZFSSvc, a.Runner),
+ monitoring.NewSMARTCollector(a.Runner),
+ monitoring.NewServiceCollector(a.Runner),
+ monitoring.NewHostCollector(),
+ }
+
+ // Export for UI
+ exporter := monitoring.NewUIExporter(collectors...)
+ groups := exporter.Export(ctx)
+
+ data := map[string]interface{}{
+ "Groups": groups,
+ }
+
+ if err := templates.ExecuteTemplate(w, "hx_monitoring", data); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
+// HXMonitoringGroupHandler returns HTMX partial for a specific metric group
+func (a *App) HXMonitoringGroupHandler(w http.ResponseWriter, r *http.Request) {
+ groupName := r.URL.Query().Get("group")
+ if groupName == "" {
+ http.Error(w, "group parameter required", http.StatusBadRequest)
+ return
+ }
+
+ ctx := r.Context()
+
+ // Create the specific collector (normalize group name)
+ var collector monitoring.Collector
+ groupLower := strings.ToLower(groupName)
+ switch groupLower {
+ case "zfs":
+ collector = monitoring.NewZFSCollector(a.ZFSSvc, a.Runner)
+ case "smart":
+ collector = monitoring.NewSMARTCollector(a.Runner)
+ case "services", "service":
+ collector = monitoring.NewServiceCollector(a.Runner)
+ case "host":
+ collector = monitoring.NewHostCollector()
+ default:
+ // Try to match by collector name
+ if strings.Contains(groupLower, "zfs") {
+ collector = monitoring.NewZFSCollector(a.ZFSSvc, a.Runner)
+ } else if strings.Contains(groupLower, "smart") {
+ collector = monitoring.NewSMARTCollector(a.Runner)
+ } else if strings.Contains(groupLower, "service") {
+ collector = monitoring.NewServiceCollector(a.Runner)
+ } else if strings.Contains(groupLower, "host") {
+ collector = monitoring.NewHostCollector()
+ } else {
+ http.Error(w, "unknown group", http.StatusBadRequest)
+ return
+ }
+ }
+
+ // Export for UI
+ exporter := monitoring.NewUIExporter(collector)
+ groups := exporter.Export(ctx)
+
+ if len(groups) == 0 {
+ http.Error(w, "no data", http.StatusNotFound)
+ return
+ }
+
+ data := map[string]interface{}{
+ "Group": groups[0],
+ }
+
+ if err := templates.ExecuteTemplate(w, "hx_monitoring_group", data); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+
diff --git a/internal/http/template_helpers.go b/internal/http/template_helpers.go
new file mode 100644
index 0000000..642f060
--- /dev/null
+++ b/internal/http/template_helpers.go
@@ -0,0 +1,20 @@
+package http
+
+import (
+ "net/http"
+)
+
+// templateData adds CSRF token and other common data to template context
+func templateData(r *http.Request, data map[string]interface{}) map[string]interface{} {
+ if data == nil {
+ data = make(map[string]interface{})
+ }
+
+ // Get CSRF token from cookie
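+ // If the cookie is absent (e.g. before login), the map is returned without a
+ // CSRFToken entry and templates simply render without it.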
+ if cookie, err := r.Cookie("csrf_token"); err == nil {
+ data["CSRFToken"] = cookie.Value
+ }
+
+ return data
+}
+
diff --git a/internal/monitoring/collectors.go b/internal/monitoring/collectors.go
new file mode 100644
index 0000000..679a175
--- /dev/null
+++ b/internal/monitoring/collectors.go
@@ -0,0 +1,438 @@
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/example/storage-appliance/internal/infra/osexec"
+ "github.com/example/storage-appliance/internal/service"
+)
+
+const (
+ DefaultTimeout = 5 * time.Second
+)
+
+// MetricValue represents a single metric value
+type MetricValue struct {
+ Name string
+ Labels map[string]string
+ Value float64
+ Type string // "gauge" or "counter"
+}
+
+// MetricCollection represents a collection of metrics
+type MetricCollection struct {
+ Metrics []MetricValue
+ Errors []string
+}
+
+// Collector interface for different metric collectors
+type Collector interface {
+ Collect(ctx context.Context) MetricCollection
+ Name() string
+}
+
+// ZFSCollector collects ZFS pool health and scrub status
+type ZFSCollector struct {
+ ZFSSvc service.ZFSService
+ Runner osexec.Runner
+}
+
+func NewZFSCollector(zfsSvc service.ZFSService, runner osexec.Runner) *ZFSCollector {
+ return &ZFSCollector{ZFSSvc: zfsSvc, Runner: runner}
+}
+
+func (c *ZFSCollector) Name() string {
+ return "zfs"
+}
+
+func (c *ZFSCollector) Collect(ctx context.Context) MetricCollection {
+ ctx, cancel := context.WithTimeout(ctx, DefaultTimeout)
+ defer cancel()
+
+ collection := MetricCollection{
+ Metrics: []MetricValue{},
+ Errors: []string{},
+ }
+
+ // Get pool list
+ pools, err := c.ZFSSvc.ListPools(ctx)
+ if err != nil {
+ collection.Errors = append(collection.Errors, fmt.Sprintf("failed to list pools: %v", err))
+ return collection
+ }
+
+ for _, pool := range pools {
+ // Pool health metric (1 = ONLINE, 0.5 = DEGRADED, 0 = FAULTED/OFFLINE)
+ healthValue := 0.0
+ switch strings.ToUpper(pool.Health) {
+ case "ONLINE":
+ healthValue = 1.0
+ case "DEGRADED":
+ healthValue = 0.5
+ case "FAULTED", "OFFLINE", "UNAVAIL":
+ healthValue = 0.0
+ }
+
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "zfs_pool_health",
+ Labels: map[string]string{"pool": pool.Name},
+ Value: healthValue,
+ Type: "gauge",
+ })
+
+ // Get scrub status
+ scrubStatus, err := c.getScrubStatus(ctx, pool.Name)
+ if err != nil {
+ collection.Errors = append(collection.Errors, fmt.Sprintf("failed to get scrub status for %s: %v", pool.Name, err))
+ continue
+ }
+
+ // Scrub in progress (1 = yes, 0 = no)
+ scrubInProgress := 0.0
+ if strings.Contains(scrubStatus, "scan: scrub in progress") {
+ scrubInProgress = 1.0
+ }
+
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "zfs_pool_scrub_in_progress",
+ Labels: map[string]string{"pool": pool.Name},
+ Value: scrubInProgress,
+ Type: "gauge",
+ })
+ }
+
+ return collection
+}
+
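+// getScrubStatus returns the "scan:" line from `zpool status <pool>` output, e.g.
+//   scan: scrub in progress since <date>
+//   scan: scrub repaired 0B in 00:05:33 with 0 errors on <date>
+// or "no-scan" when the output contains no scan line.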
+func (c *ZFSCollector) getScrubStatus(ctx context.Context, pool string) (string, error) {
+ out, _, _, err := osexec.ExecWithRunner(c.Runner, ctx, "zpool", "status", pool)
+ if err != nil {
+ return "", err
+ }
+ for _, line := range strings.Split(out, "\n") {
+ if strings.Contains(line, "scan:") {
+ return strings.TrimSpace(line), nil
+ }
+ }
+ return "no-scan", nil
+}
+
+// SMARTCollector collects SMART health status
+type SMARTCollector struct {
+ Runner osexec.Runner
+}
+
+func NewSMARTCollector(runner osexec.Runner) *SMARTCollector {
+ return &SMARTCollector{Runner: runner}
+}
+
+func (c *SMARTCollector) Name() string {
+ return "smart"
+}
+
+func (c *SMARTCollector) Collect(ctx context.Context) MetricCollection {
+ ctx, cancel := context.WithTimeout(ctx, DefaultTimeout)
+ defer cancel()
+
+ collection := MetricCollection{
+ Metrics: []MetricValue{},
+ Errors: []string{},
+ }
+
+ // List all disks (simplified - try common devices)
+ // In a real implementation, you'd scan /dev/ or use lsblk
+ commonDisks := []string{"sda", "sdb", "sdc", "nvme0n1", "nvme1n1"}
+ disks := []string{}
+ for _, d := range commonDisks {
+ disks = append(disks, fmt.Sprintf("/dev/%s", d))
+ }
+
+ // Check SMART health for each disk
+ for _, disk := range disks {
+ health, err := c.getSMARTHealth(ctx, disk)
+ if err != nil {
+ // Skip devices that don't exist or don't support SMART
+ continue
+ }
+
+ // SMART health: 1 = PASSED, 0 = FAILED
+ healthValue := 0.0
+ if strings.Contains(strings.ToUpper(health), "PASSED") {
+ healthValue = 1.0
+ }
+
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "smart_health",
+ Labels: map[string]string{"device": disk},
+ Value: healthValue,
+ Type: "gauge",
+ })
+ }
+
+ return collection
+}
+
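+// getSMARTHealth returns the raw `smartctl -H <device>` output; the caller scans it
+// for the overall-health verdict line, e.g.
+//   SMART overall-health self-assessment test result: PASSED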
+func (c *SMARTCollector) getSMARTHealth(ctx context.Context, device string) (string, error) {
+ // Use smartctl -H to get health status
+ out, _, code, err := osexec.ExecWithRunner(c.Runner, ctx, "smartctl", "-H", device)
+ if err != nil || code != 0 {
+ return "", fmt.Errorf("smartctl failed: %v", err)
+ }
+ return out, nil
+}
+
+// ServiceCollector collects service states
+type ServiceCollector struct {
+ Runner osexec.Runner
+}
+
+func NewServiceCollector(runner osexec.Runner) *ServiceCollector {
+ return &ServiceCollector{Runner: runner}
+}
+
+func (c *ServiceCollector) Name() string {
+ return "services"
+}
+
+func (c *ServiceCollector) Collect(ctx context.Context) MetricCollection {
+ ctx, cancel := context.WithTimeout(ctx, DefaultTimeout)
+ defer cancel()
+
+ collection := MetricCollection{
+ Metrics: []MetricValue{},
+ Errors: []string{},
+ }
+
+ services := []string{"nfs-server", "smbd", "iscsid", "iscsi", "minio"}
+
+ for _, svc := range services {
+ status, err := c.getServiceStatus(ctx, svc)
+ if err != nil {
+ collection.Errors = append(collection.Errors, fmt.Sprintf("failed to check %s: %v", svc, err))
+ continue
+ }
+
+ // Service state: 1 = active/running, 0 = inactive/stopped
+ stateValue := 0.0
+ if strings.Contains(strings.ToLower(status), "active") || strings.Contains(strings.ToLower(status), "running") {
+ stateValue = 1.0
+ }
+
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "service_state",
+ Labels: map[string]string{"service": svc},
+ Value: stateValue,
+ Type: "gauge",
+ })
+ }
+
+ return collection
+}
+
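+// getServiceStatus reports a unit's state. `systemctl is-active` prints a single word
+// ("active", "inactive", "failed", ...) and exits 0 only when the unit is active; when
+// systemd is unavailable, we fall back to a pgrep match on the service name.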
+func (c *ServiceCollector) getServiceStatus(ctx context.Context, service string) (string, error) {
+ // Try systemctl first
+ out, _, code, err := osexec.ExecWithRunner(c.Runner, ctx, "systemctl", "is-active", service)
+ if err == nil && code == 0 {
+ return out, nil
+ }
+
+ // Fallback to checking process
+ out, _, code, err = osexec.ExecWithRunner(c.Runner, ctx, "pgrep", "-f", service)
+ if err == nil && code == 0 && strings.TrimSpace(out) != "" {
+ return "running", nil
+ }
+
+ return "inactive", nil
+}
+
+// HostCollector collects host metrics from /proc
+type HostCollector struct{}
+
+func NewHostCollector() *HostCollector {
+ return &HostCollector{}
+}
+
+func (c *HostCollector) Name() string {
+ return "host"
+}
+
+func (c *HostCollector) Collect(ctx context.Context) MetricCollection {
+ ctx, cancel := context.WithTimeout(ctx, DefaultTimeout)
+ defer cancel()
+
+ collection := MetricCollection{
+ Metrics: []MetricValue{},
+ Errors: []string{},
+ }
+
+ // Load average
+ loadavg, err := c.readLoadAvg()
+ if err != nil {
+ collection.Errors = append(collection.Errors, fmt.Sprintf("failed to read loadavg: %v", err))
+ } else {
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "host_load1",
+ Labels: map[string]string{},
+ Value: loadavg.Load1,
+ Type: "gauge",
+ })
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "host_load5",
+ Labels: map[string]string{},
+ Value: loadavg.Load5,
+ Type: "gauge",
+ })
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "host_load15",
+ Labels: map[string]string{},
+ Value: loadavg.Load15,
+ Type: "gauge",
+ })
+ }
+
+ // Memory info
+ meminfo, err := c.readMemInfo()
+ if err != nil {
+ collection.Errors = append(collection.Errors, fmt.Sprintf("failed to read meminfo: %v", err))
+ } else {
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "host_memory_total_bytes",
+ Labels: map[string]string{},
+ Value: meminfo.MemTotal,
+ Type: "gauge",
+ })
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "host_memory_free_bytes",
+ Labels: map[string]string{},
+ Value: meminfo.MemFree,
+ Type: "gauge",
+ })
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "host_memory_available_bytes",
+ Labels: map[string]string{},
+ Value: meminfo.MemAvailable,
+ Type: "gauge",
+ })
+ }
+
+ // Disk IO (simplified - read from /proc/diskstats)
+ diskIO, err := c.readDiskIO()
+ if err != nil {
+ collection.Errors = append(collection.Errors, fmt.Sprintf("failed to read disk IO: %v", err))
+ } else {
+ for device, io := range diskIO {
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "host_disk_reads_completed",
+ Labels: map[string]string{"device": device},
+ Value: io.ReadsCompleted,
+ Type: "counter",
+ })
+ collection.Metrics = append(collection.Metrics, MetricValue{
+ Name: "host_disk_writes_completed",
+ Labels: map[string]string{"device": device},
+ Value: io.WritesCompleted,
+ Type: "counter",
+ })
+ }
+ }
+
+ return collection
+}
+
+type LoadAvg struct {
+ Load1 float64
+ Load5 float64
+ Load15 float64
+}
+
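+// readLoadAvg parses /proc/loadavg, whose first three fields are the 1-, 5- and
+// 15-minute load averages, e.g. "0.42 0.35 0.30 1/618 12345".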
+func (c *HostCollector) readLoadAvg() (LoadAvg, error) {
+ data, err := os.ReadFile("/proc/loadavg")
+ if err != nil {
+ return LoadAvg{}, err
+ }
+
+ fields := strings.Fields(string(data))
+ if len(fields) < 3 {
+ return LoadAvg{}, fmt.Errorf("invalid loadavg format")
+ }
+
+ load1, _ := strconv.ParseFloat(fields[0], 64)
+ load5, _ := strconv.ParseFloat(fields[1], 64)
+ load15, _ := strconv.ParseFloat(fields[2], 64)
+
+ return LoadAvg{Load1: load1, Load5: load5, Load15: load15}, nil
+}
+
+type MemInfo struct {
+ MemTotal float64
+ MemFree float64
+ MemAvailable float64
+}
+
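+// readMemInfo parses /proc/meminfo lines such as "MemTotal: 16315012 kB"; values are
+// reported in kB and converted to bytes below.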
+func (c *HostCollector) readMemInfo() (MemInfo, error) {
+ data, err := os.ReadFile("/proc/meminfo")
+ if err != nil {
+ return MemInfo{}, err
+ }
+
+ info := MemInfo{}
+ lines := strings.Split(string(data), "\n")
+ for _, line := range lines {
+ fields := strings.Fields(line)
+ if len(fields) < 2 {
+ continue
+ }
+ key := strings.TrimSuffix(fields[0], ":")
+ value, _ := strconv.ParseFloat(fields[1], 64)
+ // Values are in KB, convert to bytes
+ valueBytes := value * 1024
+
+ switch key {
+ case "MemTotal":
+ info.MemTotal = valueBytes
+ case "MemFree":
+ info.MemFree = valueBytes
+ case "MemAvailable":
+ info.MemAvailable = valueBytes
+ }
+ }
+
+ return info, nil
+}
+
+type DiskIO struct {
+ ReadsCompleted float64
+ WritesCompleted float64
+}
+
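+// readDiskIO parses /proc/diskstats. After the major/minor numbers and device name
+// (fields[0..2]), fields[3] is reads completed and fields[7] is writes completed,
+// both cumulative since boot.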
+func (c *HostCollector) readDiskIO() (map[string]DiskIO, error) {
+ data, err := os.ReadFile("/proc/diskstats")
+ if err != nil {
+ return nil, err
+ }
+
+ result := make(map[string]DiskIO)
+ lines := strings.Split(string(data), "\n")
+ for _, line := range lines {
+ fields := strings.Fields(line)
+ if len(fields) < 14 {
+ continue
+ }
+ device := fields[2]
+ reads, _ := strconv.ParseFloat(fields[3], 64)
+ writes, _ := strconv.ParseFloat(fields[7], 64)
+
+ result[device] = DiskIO{
+ ReadsCompleted: reads,
+ WritesCompleted: writes,
+ }
+ }
+
+ return result, nil
+}
+
diff --git a/internal/monitoring/prometheus.go b/internal/monitoring/prometheus.go
new file mode 100644
index 0000000..2738bc9
--- /dev/null
+++ b/internal/monitoring/prometheus.go
@@ -0,0 +1,60 @@
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "strings"
+)
+
+// PrometheusExporter exports metrics in Prometheus format
+type PrometheusExporter struct {
+ Collectors []Collector
+}
+
+func NewPrometheusExporter(collectors ...Collector) *PrometheusExporter {
+ return &PrometheusExporter{Collectors: collectors}
+}
+
+// Export collects all metrics and formats them as Prometheus text format
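+// Each sample becomes one line, for example (pool and service names illustrative):
+//   zfs_pool_health{pool="tank"} 1
+//   service_state{service="smbd"} 0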
+func (e *PrometheusExporter) Export(ctx context.Context) string {
+ var builder strings.Builder
+ allErrors := []string{}
+
+ for _, collector := range e.Collectors {
+ collection := collector.Collect(ctx)
+ allErrors = append(allErrors, collection.Errors...)
+
+ for _, metric := range collection.Metrics {
+ // Format: metric_name{label1="value1",label2="value2"} value
+ builder.WriteString(metric.Name)
+ if len(metric.Labels) > 0 {
+ builder.WriteString("{")
+ first := true
+ for k, v := range metric.Labels {
+ if !first {
+ builder.WriteString(",")
+ }
+ builder.WriteString(fmt.Sprintf(`%s="%s"`, k, escapeLabelValue(v)))
+ first = false
+ }
+ builder.WriteString("}")
+ }
+ builder.WriteString(fmt.Sprintf(" %v\n", metric.Value))
+ }
+ }
+
+ // Add error metrics if any
+ if len(allErrors) > 0 {
+ builder.WriteString(fmt.Sprintf("monitoring_collector_errors_total %d\n", len(allErrors)))
+ }
+
+ return builder.String()
+}
+
+func escapeLabelValue(v string) string {
+ v = strings.ReplaceAll(v, "\\", "\\\\")
+ v = strings.ReplaceAll(v, "\"", "\\\"")
+ v = strings.ReplaceAll(v, "\n", "\\n")
+ return v
+}
+
diff --git a/internal/monitoring/ui.go b/internal/monitoring/ui.go
new file mode 100644
index 0000000..eb30e97
--- /dev/null
+++ b/internal/monitoring/ui.go
@@ -0,0 +1,149 @@
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// UIMetric represents a metric for UI display
+type UIMetric struct {
+ Name string
+ Value string
+ Status string // "ok", "warning", "error"
+ Timestamp time.Time
+ Error string
+}
+
+// UIMetricGroup represents a group of metrics for UI display
+type UIMetricGroup struct {
+ Title string
+ Metrics []UIMetric
+ Errors []string
+}
+
+// UIExporter exports metrics in a format suitable for UI display
+type UIExporter struct {
+ Collectors []Collector
+}
+
+func NewUIExporter(collectors ...Collector) *UIExporter {
+ return &UIExporter{Collectors: collectors}
+}
+
+// Export collects all metrics and formats them for UI
+func (e *UIExporter) Export(ctx context.Context) []UIMetricGroup {
+ groups := []UIMetricGroup{}
+
+ for _, collector := range e.Collectors {
+ collection := collector.Collect(ctx)
+ // Capitalize first letter
+ name := collector.Name()
+ if len(name) > 0 {
+ name = strings.ToUpper(name[:1]) + name[1:]
+ }
+ group := UIMetricGroup{
+ Title: name,
+ Metrics: []UIMetric{},
+ Errors: collection.Errors,
+ }
+
+ for _, metric := range collection.Metrics {
+ status := "ok"
+ value := formatMetricValue(metric)
+
+ // Determine status based on metric type and value
+ if metric.Name == "zfs_pool_health" {
+ if metric.Value == 0.0 {
+ status = "error"
+ } else if metric.Value == 0.5 {
+ status = "warning"
+ }
+ } else if metric.Name == "smart_health" {
+ if metric.Value == 0.0 {
+ status = "error"
+ }
+ } else if metric.Name == "service_state" {
+ if metric.Value == 0.0 {
+ status = "error"
+ }
+ } else if strings.HasPrefix(metric.Name, "host_load") {
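+ // Fixed thresholds, not normalized by CPU count; tune for the target hardware.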
+ if metric.Value > 10.0 {
+ status = "warning"
+ }
+ if metric.Value > 20.0 {
+ status = "error"
+ }
+ }
+
+ group.Metrics = append(group.Metrics, UIMetric{
+ Name: formatMetricName(metric),
+ Value: value,
+ Status: status,
+ Timestamp: time.Now(),
+ })
+ }
+
+ groups = append(groups, group)
+ }
+
+ return groups
+}
+
+func formatMetricName(metric MetricValue) string {
+ name := metric.Name
+ if len(metric.Labels) > 0 {
+ labels := []string{}
+ for k, v := range metric.Labels {
+ labels = append(labels, fmt.Sprintf("%s=%s", k, v))
+ }
+ name = fmt.Sprintf("%s{%s}", name, strings.Join(labels, ", "))
+ }
+ return name
+}
+
+func formatMetricValue(metric MetricValue) string {
+ switch metric.Name {
+ case "zfs_pool_health":
+ if metric.Value == 1.0 {
+ return "ONLINE"
+ } else if metric.Value == 0.5 {
+ return "DEGRADED"
+ }
+ return "FAULTED"
+ case "zfs_pool_scrub_in_progress":
+ if metric.Value == 1.0 {
+ return "In Progress"
+ }
+ return "Idle"
+ case "smart_health":
+ if metric.Value == 1.0 {
+ return "PASSED"
+ }
+ return "FAILED"
+ case "service_state":
+ if metric.Value == 1.0 {
+ return "Running"
+ }
+ return "Stopped"
+ case "host_load1", "host_load5", "host_load15":
+ return fmt.Sprintf("%.2f", metric.Value)
+ case "host_memory_total_bytes", "host_memory_free_bytes", "host_memory_available_bytes":
+ return formatBytes(metric.Value)
+ default:
+ return fmt.Sprintf("%.2f", metric.Value)
+ }
+}
+
+func formatBytes(bytes float64) string {
+ units := []string{"B", "KB", "MB", "GB", "TB"}
+ value := bytes
+ unit := 0
+ for value >= 1024 && unit < len(units)-1 {
+ value /= 1024
+ unit++
+ }
+ return fmt.Sprintf("%.2f %s", value, units[unit])
+}
+
diff --git a/internal/templates/hx_monitoring.html b/internal/templates/hx_monitoring.html
new file mode 100644
index 0000000..1b26a27
--- /dev/null
+++ b/internal/templates/hx_monitoring.html
@@ -0,0 +1,68 @@
+{{define "hx_monitoring"}}
+
+ {{range .Groups}}
+
+
+
{{.Title}}
+
+
+
+ {{if .Errors}}
+
+
+
+
+ Warnings:
+
+ {{range .Errors}}
+ - {{.}}
+ {{end}}
+
+
+
+
+
+ {{end}}
+
+
+ {{range .Metrics}}
+
+
+
{{.Name}}
+
{{.Timestamp.Format "15:04:05"}}
+
+
+ {{.Value}}
+ {{if eq .Status "error"}}
+ ⚠️
+ {{else if eq .Status "warning"}}
+ ⚡
+ {{else}}
+ ✓
+ {{end}}
+
+
+ {{else}}
+
No metrics available
+ {{end}}
+
+
+ {{else}}
+
+
+
+
+ Warning: No monitoring data available. Some collectors may have failed.
+
+
+
+
+ {{end}}
+
+{{end}}
+
diff --git a/internal/templates/hx_monitoring_group.html b/internal/templates/hx_monitoring_group.html
new file mode 100644
index 0000000..f3a4819
--- /dev/null
+++ b/internal/templates/hx_monitoring_group.html
@@ -0,0 +1,54 @@
+{{define "hx_monitoring_group"}}
+
+
+
{{.Group.Title}}
+
+
+
+ {{if .Group.Errors}}
+
+
+
+
+ Warnings:
+
+ {{range .Group.Errors}}
+ - {{.}}
+ {{end}}
+
+
+
+
+
+ {{end}}
+
+
+ {{range .Group.Metrics}}
+
+
+
{{.Name}}
+
{{.Timestamp.Format "15:04:05"}}
+
+
+ {{.Value}}
+ {{if eq .Status "error"}}
+ ⚠️
+ {{else if eq .Status "warning"}}
+ ⚡
+ {{else}}
+ ✓
+ {{end}}
+
+
+ {{else}}
+
No metrics available
+ {{end}}
+
+
+{{end}}
+
diff --git a/internal/templates/hx_roles.html b/internal/templates/hx_roles.html
new file mode 100644
index 0000000..ea3ae40
--- /dev/null
+++ b/internal/templates/hx_roles.html
@@ -0,0 +1,41 @@
+{{define "hx_roles"}}
+
+
+
+
+ | Role Name |
+ Description |
+ Permissions |
+ Actions |
+
+
+
+ {{range .Roles}}
+
+ | {{.Name}} |
+ {{.Description}} |
+
+ {{range .Permissions}}
+ {{.Name}}
+ {{else}}
+ No permissions
+ {{end}}
+ |
+
+
+ |
+
+ {{else}}
+
+ | No roles found |
+
+ {{end}}
+
+
+
+{{end}}
+
diff --git a/internal/templates/hx_users.html b/internal/templates/hx_users.html
new file mode 100644
index 0000000..5082006
--- /dev/null
+++ b/internal/templates/hx_users.html
@@ -0,0 +1,41 @@
+{{define "hx_users"}}
+
+
+
+
+ | Username |
+ Roles |
+ Created |
+ Actions |
+
+
+
+ {{range .Users}}
+
+ | {{.Username}} |
+
+ {{range .Roles}}
+ {{.Name}}
+ {{else}}
+ No roles
+ {{end}}
+ |
+ {{.CreatedAt}} |
+
+
+ |
+
+ {{else}}
+
+ | No users found |
+
+ {{end}}
+
+
+
+{{end}}
+
diff --git a/internal/templates/login.html b/internal/templates/login.html
new file mode 100644
index 0000000..fbf40fb
--- /dev/null
+++ b/internal/templates/login.html
@@ -0,0 +1,34 @@
+{{define "login"}}
+
+
+
+
+
+
+ Login - Storage Appliance
+
+
+
+
+
+
+{{end}}
+
diff --git a/internal/templates/monitoring.html b/internal/templates/monitoring.html
new file mode 100644
index 0000000..58a6360
--- /dev/null
+++ b/internal/templates/monitoring.html
@@ -0,0 +1,16 @@
+{{define "monitoring"}}
+{{template "base" .}}
+{{define "content"}}
+
+{{end}}
+{{end}}
+
diff --git a/internal/templates/roles.html b/internal/templates/roles.html
new file mode 100644
index 0000000..d97ebb4
--- /dev/null
+++ b/internal/templates/roles.html
@@ -0,0 +1,37 @@
+{{define "roles"}}
+{{template "base" .}}
+{{define "content"}}
+
+{{end}}
+{{end}}
+
diff --git a/internal/templates/users.html b/internal/templates/users.html
new file mode 100644
index 0000000..544002c
--- /dev/null
+++ b/internal/templates/users.html
@@ -0,0 +1,37 @@
+{{define "users"}}
+{{template "base" .}}
+{{define "content"}}
+
+{{end}}
+{{end}}
+