package database

import (
	"context"
	"database/sql"
	"fmt"
	"strings"
	"time"
)

// QueryStats holds performance statistics for a single executed query.
type QueryStats struct {
	Query     string        // SQL text that was executed
	Duration  time.Duration // wall-clock time the query took
	Rows      int64         // rows affected or returned
	Error     error         // non-nil if the query failed
	Timestamp time.Time     // when the query was issued
}

// QueryOptimizer provides query optimization utilities on top of *DB.
type QueryOptimizer struct {
	db *DB
}

// NewQueryOptimizer creates a new query optimizer backed by db.
func NewQueryOptimizer(db *DB) *QueryOptimizer {
	return &QueryOptimizer{db: db}
}

// ExecuteWithTimeout executes a statement, aborting it if it runs longer
// than timeout. Deferring cancel here is safe because ExecContext completes
// fully before this function returns.
func (qo *QueryOptimizer) ExecuteWithTimeout(ctx context.Context, timeout time.Duration, query string, args ...interface{}) (sql.Result, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	return qo.db.ExecContext(ctx, query, args...)
}

// QueryWithTimeout executes a query with a timeout and returns the rows.
// The caller owns the returned *sql.Rows and must Close them.
func (qo *QueryOptimizer) QueryWithTimeout(ctx context.Context, timeout time.Duration, query string, args ...interface{}) (*sql.Rows, error) {
	// BUGFIX: cancel is intentionally NOT deferred. The caller iterates the
	// returned rows after this function returns; cancelling the context on
	// return would abort that iteration with "context canceled". The timer
	// created by WithTimeout releases its resources when the deadline fires.
	ctx, cancel := context.WithTimeout(ctx, timeout)
	_ = cancel // released automatically when the timeout elapses
	return qo.db.QueryContext(ctx, query, args...)
}

// QueryRowWithTimeout executes a query with a timeout and returns a single row.
func (qo *QueryOptimizer) QueryRowWithTimeout(ctx context.Context, timeout time.Duration, query string, args ...interface{}) *sql.Row {
	// BUGFIX: same as QueryWithTimeout — *sql.Row defers the actual read
	// until the caller invokes Scan, so the context must outlive this call.
	ctx, cancel := context.WithTimeout(ctx, timeout)
	_ = cancel // released automatically when the timeout elapses
	return qo.db.QueryRowContext(ctx, query, args...)
}

// BatchInsert performs a multi-row INSERT in a single statement, which is
// more efficient than issuing one INSERT per row. Placeholders use the
// PostgreSQL $N style. Returns an error if any row's length does not match
// the column list; inserting zero rows is a no-op.
//
// SECURITY NOTE: table and column names are interpolated directly into the
// SQL text and cannot be parameterized — they must come from trusted,
// application-controlled values, never from user input.
func (qo *QueryOptimizer) BatchInsert(ctx context.Context, table string, columns []string, values [][]interface{}) error {
	if len(values) == 0 {
		return nil
	}

	placeholders := make([]string, len(values))
	args := make([]interface{}, 0, len(values)*len(columns))
	argIndex := 1
	for i, row := range values {
		// Guard against a ragged input slice; the original code would
		// panic with an index-out-of-range on row[j].
		if len(row) != len(columns) {
			return fmt.Errorf("batch insert into %s: row %d has %d values, want %d", table, i, len(row), len(columns))
		}
		rowPlaceholders := make([]string, len(columns))
		for j := range columns {
			rowPlaceholders[j] = fmt.Sprintf("$%d", argIndex)
			args = append(args, row[j])
			argIndex++
		}
		placeholders[i] = fmt.Sprintf("(%s)", joinStrings(rowPlaceholders, ", "))
	}

	query := fmt.Sprintf("INSERT INTO %s (%s) VALUES %s",
		table, joinColumns(columns), joinStrings(placeholders, ", "))

	_, err := qo.db.ExecContext(ctx, query, args...)
	return err
}

// joinColumns joins column names with ", " for use in a column list.
func joinColumns(columns []string) string {
	return joinStrings(columns, ", ")
}

// joinStrings concatenates strs with sep between elements. It delegates to
// strings.Join, replacing the previous hand-rolled O(n²) += loop.
func joinStrings(strs []string, sep string) string {
	return strings.Join(strs, sep)
}

// OptimizeConnectionPool applies connection pool settings to db. It should
// be called after analyzing query patterns to choose sensible limits.
func OptimizeConnectionPool(db *sql.DB, maxConns, maxIdleConns int, maxLifetime time.Duration) {
	db.SetMaxOpenConns(maxConns)
	db.SetMaxIdleConns(maxIdleConns)
	db.SetConnMaxLifetime(maxLifetime)

	// Cap how long an idle connection may sit in the pool. The database/sql
	// default is 0 (no limit); a bound helps evict stale connections.
	db.SetConnMaxIdleTime(10 * time.Minute)
}

// GetConnectionStats returns a snapshot of the connection pool statistics
// as a map suitable for logging or serialization.
func GetConnectionStats(db *sql.DB) map[string]interface{} {
	stats := db.Stats()
	return map[string]interface{}{
		"max_open_connections": stats.MaxOpenConnections,
		"open_connections":     stats.OpenConnections,
		"in_use":               stats.InUse,
		"idle":                 stats.Idle,
		"wait_count":           stats.WaitCount,
		"wait_duration":        stats.WaitDuration.String(),
		"max_idle_closed":      stats.MaxIdleClosed,
		"max_idle_time_closed": stats.MaxIdleTimeClosed,
		"max_lifetime_closed":  stats.MaxLifetimeClosed,
	}
}