diff --git a/.vscode/launch.json b/.vscode/launch.json index 8f8b809..b5a0643 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -5,7 +5,15 @@ "version": "0.2.0", "configurations": [ { - "name": "Launch Server", + "name": "Launch Package MAC", + "type": "go", + "request": "launch", + "mode": "auto", + "program": "${workspaceFolder}/cmd/app/main.go", + "output": "${workspaceFolder}/stackyard" + }, + { + "name": "Launch Server WIN", "type": "go", "request": "launch", "mode": "auto", @@ -13,7 +21,7 @@ "output": "${workspaceFolder}\\debug-main.exe" }, { - "name": "Build & Launch (Dist)", + "name": "Build & Launch (Dist) WIN", "type": "go", "request": "launch", "mode": "exec", diff --git a/README.md b/README.md index d2efbe3..5859718 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@
- Stackyard + Stackyard
License @@ -57,22 +57,6 @@ go run cmd/app/main.go **[Full Documentation](docs_wiki/)** - Comprehensive guides and references -### Core Documentation -- **[Configuration Guide](docs_wiki/CONFIGURATION_GUIDE.md)** - Complete configuration reference -- **[API Response Structure](docs_wiki/API_RESPONSE_STRUCTURE.md)** - Standard response formats -- **[Architecture Diagrams](docs_wiki/ARCHITECTURE_DIAGRAMS.md)** - System design and flow diagrams -- **[Service Implementation](docs_wiki/SERVICE_IMPLEMENTATION.md)** - How to add new services - -### Infrastructure & Integration -- **[Integration Guide](docs_wiki/INTEGRATION_GUIDE.md)** - Redis, PostgreSQL, Kafka, MinIO setup -- **[Build Scripts](docs_wiki/BUILD_SCRIPTS.md)** - Production deployment automation -- **[Package Management](docs_wiki/CHANGE_PACKAGE_SCRIPTS.md)** - Module renaming tools - -### Security & Features -- **[API Encryption](docs_wiki/ENCRYPTION_API.md)** - End-to-end encryption -- **[API Obfuscation](docs_wiki/API_OBFUSCATION.md)** - Data obfuscation mechanisms -- **[TUI Implementation](docs_wiki/TUI_IMPLEMENTATION.md)** - Terminal interface details - ## Project Structure ``` @@ -111,4 +95,4 @@ Apache License Version 2.0: [LICENSE](LICENSE) --- -**Built with 🐸 using Go, Echo, Alpine.js, Tailwind CSS** +**Built using Go, Echo, Alpine.js, Tailwind CSS** diff --git a/cmd/app/main.go b/cmd/app/main.go index 393cf79..5cce144 100644 --- a/cmd/app/main.go +++ b/cmd/app/main.go @@ -7,13 +7,13 @@ import ( "net/url" "os" "os/signal" + "stackyard/config" + "stackyard/internal/monitoring" + "stackyard/internal/server" + "stackyard/pkg/logger" + "stackyard/pkg/tui" + "stackyard/pkg/utils" "syscall" - "test-go/config" - "test-go/internal/monitoring" - "test-go/internal/server" - "test-go/pkg/logger" - "test-go/pkg/tui" - "test-go/pkg/utils" "time" ) @@ -193,7 +193,7 @@ func runWithTUI(cfg *config.Config, bannerText string, broadcaster *monitoring.L liveTUI.AddLog("info", "Server starting on port "+cfg.Server.Port) 
liveTUI.AddLog("info", "Environment: "+cfg.App.Env) - // Start Server in background + // Start Server in background - infrastructure will be initialized by the server srv := server.New(cfg, l, broadcaster) go func() { liveTUI.AddLog("info", "HTTP server listening...") diff --git a/docs_wiki/API_OBFUSCATION.md b/docs_wiki/API_OBFUSCATION.md deleted file mode 100644 index 100de8c..0000000 --- a/docs_wiki/API_OBFUSCATION.md +++ /dev/null @@ -1,72 +0,0 @@ -# API Obfuscation Mechanism - -## Overview - -The API Obfuscation feature is designed to obscure JSON data in transit between the backend and the frontend. This adds a layer of stealth to the monitoring system, making traffic analysis more difficult for casual observers. The system uses Base64 encoding for the response body of specific API endpoints. - -## Configuration - -Obfuscation is controlled via the `config.yaml` file. - -```yaml -monitoring: - obfuscate_api: true # Set to true to enable, false to disable -``` - -If enabled, the backend will automatically encode eligible API responses. If disabled, the backend serves standard JSON, and the frontend transparently handles the standard response. - -## Backend Implementation - -The core logic resides in `internal/monitoring/middleware/obfuscator.go`. - -### Middleware Logic - -1. **Scope**: The middleware intercepts requests starting with `/api/`. -2. **Exclusions**: The following paths are explicitly excluded from obfuscation to support streaming or static content: - * `/api/logs` (SSE stream) - * `/api/cpu` (SSE stream) - * `/api/user/photos` (Binary/Static) -3. **Content Negotiation**: The middleware only processes responses where the `Content-Type` includes `application/json`. `text/event-stream` is skipped. -4. **Encoding**: The response body is read into a buffer, encoded using Standard Base64 (padding with `=`), and written back to the response. -5. **Headers**: - * `X-Obfuscated: true` is set to indicate the response is encoded. 
- * `Content-Length` is updated to reflect the size of the encoded body to prevent truncation or keep-alive issues. - -### Code Reference - -See `Obfuscator` function in `internal/monitoring/middleware/obfuscator.go`. - -## Frontend Implementation - -The frontend handles de-obfuscation transparently using a global `window.fetch` interceptor in `web/monitoring/assets/js/app.js`. - -### Interceptor Strategy - -The interceptor wraps the native `fetch` API and applies a "Parse First, Decode Second" strategy to ensure robustness. - -1. **Check Content Type**: Ignores non-JSON and `text/event-stream` responses. -2. **Strategy 1: Try Parse**: - * Attempts to `JSON.parse()` the response body directly. - * If successful, the response is standard JSON (not obfuscated). It returns the original response. -3. **Strategy 2: Try Decode (Fallback)**: - * If parsing fails (typical for Base64 strings), it attempts to decode the body. - * **Normalization**: Replaces URL-safe characters (`-` to `+`, `_` to `/`) and removes whitespace. - * **Padding**: Ensures the string length is a multiple of 4 by adding `=` padding. - * **Decoding**: Uses `atob()` and `TextDecoder` (UTF-8) to convert the Base64 string back to text. - * **Verification**: Attempts to `JSON.parse()` the decoded string. - * If valid JSON is found, a new `Response` object is created with the decoded content and returned. -4. **Final Fallback**: - * If both strategies fail, the original body is returned. - -This approach ensures the frontend continues to work seamlessly whether obfuscation is enabled or disabled, or if headers are stripped by proxies. - -## Troubleshooting - -### "SyntaxError: Unexpected token 'e', ..." -This error occurs when the frontend tries to parse the raw Base64 string as JSON. This usually indicates the interceptor failed to detect or decode the obfuscated response. The "Parse First, Decode Second" strategy resolves this by specifically catching parse errors and triggering the decode logic. 
- -### Truncated Data / Network Errors -If the `Content-Length` header does not match the actual body size (e.g., if the body size changed due to encoding but the header remained the original size), browsers may truncate the response. The middleware explicitly sets the correct `Content-Length` of the encoded body. - -### CORS and Headers -The `X-Obfuscated` header is exposed in the CORS configuration (`internal/monitoring/server.go`) to allow the frontend to detect encryption status explicitly, though the heuristic fallback logic ensures functionality even if this header is stripped. diff --git a/docs_wiki/API_RESPONSE_STRUCTURE.md b/docs_wiki/API_RESPONSE_STRUCTURE.md deleted file mode 100644 index 3c35313..0000000 --- a/docs_wiki/API_RESPONSE_STRUCTURE.md +++ /dev/null @@ -1,332 +0,0 @@ -# API Response Structure - -## Overview - -This document describes the standardized request/response structure for the Echo service. - -## Response Format - -All API responses follow a consistent structure using the `response.Response` type. 
- -### Success Response - -```json -{ - "success": true, - "message": "Optional success message", - "data": { - // Your response data here - }, - "meta": { - "page": 1, - "per_page": 10, - "total": 100, - "total_pages": 10 - }, - "timestamp": 1672531200 -} -``` - -### Error Response - -```json -{ - "success": false, - "error": { - "code": "ERROR_CODE", - "message": "Human-readable error message", - "details": { - "field": "Additional error details" - } - }, - "timestamp": 1672531200 -} -``` - -## Usage Examples - -### Basic Success Response - -```go -import ( - "test-go/pkg/response" - "github.com/labstack/echo/v4" -) - -func GetUser(c echo.Context) error { - user := map[string]string{ - "id": "123", - "name": "John Doe", - } - - return response.Success(c, user, "User retrieved successfully") -} -``` - -### Paginated Response - -```go -func GetUsers(c echo.Context) error { - // Parse pagination from query - var pagination response.PaginationRequest - if err := c.Bind(&pagination); err != nil { - return response.BadRequest(c, "Invalid pagination parameters") - } - - // Get data (example) - users := []User{} // Your users data - total := int64(100) - - // Calculate meta - meta := response.CalculateMeta( - pagination.GetPage(), - pagination.GetPerPage(), - total, - ) - - return response.SuccessWithMeta(c, users, meta, "Users retrieved") -} -``` - -### Error Responses - -```go -// Bad Request -if err := validate(data); err != nil { - return response.BadRequest(c, "Invalid data", map[string]interface{}{ - "validation_errors": err, - }) -} - -// Not Found -user := findUser(id) -if user == nil { - return response.NotFound(c, "User not found") -} - -// Unauthorized -if !isAuthenticated { - return response.Unauthorized(c, "Invalid credentials") -} - -// Internal Server Error -if err := processData(); err != nil { - return response.InternalServerError(c, "Failed to process data") -} -``` - -### Request Validation - -```go -import ( - "test-go/pkg/request" - 
"test-go/pkg/response" -) - -type CreateUserRequest struct { - Username string `json:"username" validate:"required,username"` - Email string `json:"email" validate:"required,email"` - Age int `json:"age" validate:"required,gte=18,lte=100"` - Phone string `json:"phone" validate:"required,phone"` -} - -func CreateUser(c echo.Context) error { - var req CreateUserRequest - - // Bind and validate in one call - if err := request.Bind(c, &req); err != nil { - if validationErr, ok := err.(*request.ValidationError); ok { - return response.ValidationError(c, "Validation failed", validationErr.GetFieldErrors()) - } - return response.BadRequest(c, err.Error()) - } - - // Process the valid request - user := createUser(req) - - return response.Created(c, user, "User created successfully") -} -``` - -## Available Response Helpers - -### Success Responses -- `response.Success(c, data, message)` - 200 OK -- `response.SuccessWithMeta(c, data, meta, message)` - 200 OK with metadata -- `response.Created(c, data, message)` - 201 Created -- `response.NoContent(c)` - 204 No Content - -### Error Responses -- `response.BadRequest(c, message, details)` - 400 Bad Request -- `response.Unauthorized(c, message)` - 401 Unauthorized -- `response.Forbidden(c, message)` - 403 Forbidden -- `response.NotFound(c, message)` - 404 Not Found -- `response.Conflict(c, message, details)` - 409 Conflict -- `response.ValidationError(c, message, details)` - 422 Unprocessable Entity -- `response.InternalServerError(c, message)` - 500 Internal Server Error -- `response.ServiceUnavailable(c, message)` - 503 Service Unavailable -- `response.Error(c, statusCode, errorCode, message, details)` - Custom error - -## Pagination - -The `PaginationRequest` struct provides convenient methods: - -```go -type PaginationRequest struct { - Page int `query:"page" json:"page"` - PerPage int `query:"per_page" json:"per_page"` - Sort string `query:"sort" json:"sort,omitempty"` - Order string `query:"order" json:"order,omitempty"` 
-} - -// Methods -pagination.GetPage() // Returns page (default: 1) -pagination.GetPerPage() // Returns per_page (default: 10, max: 100) -pagination.GetOffset() // Calculates offset for DB queries -pagination.GetOrder() // Returns order (default: "desc") -``` - -## Request Validation - -### Built-in Validators -- `required` - Field must not be empty -- `email` - Valid email format -- `min`, `max` - String length or numeric range -- `gte`, `lte` - Greater/less than or equal -- `oneof` - Value must be one of the specified options - -### Custom Validators -- `phone` - Valid phone number format -- `username` - Alphanumeric username (3-20 chars) - -### Common Request Structs - -```go -// ID Request -type IDRequest struct { - ID string `param:"id" validate:"required"` -} - -// Search Request -type SearchRequest struct { - Query string `query:"q" json:"query"` - Filter map[string]string `query:"filter" json:"filter,omitempty"` - Page int `query:"page" json:"page"` - Limit int `query:"limit" json:"limit"` -} - -// Date Range Request -type DateRangeRequest struct { - StartDate string `query:"start_date" json:"start_date"` - EndDate string `query:"end_date" json:"end_date"` -} - -// Sort Request -type SortRequest struct { - SortBy string `query:"sort_by" json:"sort_by"` - SortOrder string `query:"sort_order" json:"sort_order"` -} -``` - -## Best Practices - -1. **Always use the response helpers** - Don't manually construct response objects -2. **Include meaningful error codes** - Make errors machine-readable -3. **Provide context in error messages** - Help developers debug issues -4. **Use validation** - Validate all incoming requests -5. **Return appropriate status codes** - Follow HTTP standards -6. **Include timestamps** - All responses include Unix timestamps -7. **Use pagination** - For list endpoints, always support pagination -8. 
**Keep responses consistent** - All endpoints should follow the same structure - -## Exposed Endpoints (Service A) -- `GET /api/v1/users` - List users -- `GET /api/v1/users/:id` - Get user details -- `POST /api/v1/users` - Create user -- `PUT /api/v1/users/:id` - Update user -- `DELETE /api/v1/users/:id` - Delete user - -## Example Complete Handler - -```go -package modules - -import ( - "test-go/pkg/request" - "test-go/pkg/response" - "github.com/labstack/echo/v4" -) - -type CreateTaskRequest struct { - Title string `json:"title" validate:"required,min=3,max=100"` - Description string `json:"description" validate:"max=500"` - Priority string `json:"priority" validate:"required,oneof=low medium high"` - DueDate string `json:"due_date"` -} - -type TaskResponse struct { - ID string `json:"id"` - Title string `json:"title"` - Description string `json:"description"` - Priority string `json:"priority"` - Status string `json:"status"` - CreatedAt int64 `json:"created_at"` -} - -func CreateTask(c echo.Context) error { - // Bind and validate - var req CreateTaskRequest - if err := request.Bind(c, &req); err != nil { - if validationErr, ok := err.(*request.ValidationError); ok { - return response.ValidationError(c, "Validation failed", validationErr.GetFieldErrors()) - } - return response.BadRequest(c, err.Error()) - } - - // Business logic - task, err := saveTask(req) - if err != nil { - return response.InternalServerError(c, "Failed to create task") - } - - // Return success - return response.Created(c, task, "Task created successfully") -} - -func GetTasks(c echo.Context) error { - // Parse pagination - var pagination response.PaginationRequest - c.Bind(&pagination) - - // Get tasks from database - tasks, total, err := fetchTasks(pagination.GetOffset(), pagination.GetPerPage()) - if err != nil { - return response.InternalServerError(c, "Failed to fetch tasks") - } - - // Return with metadata - meta := response.CalculateMeta(pagination.GetPage(), pagination.GetPerPage(), 
total) - return response.SuccessWithMeta(c, tasks, meta) -} - -func GetTask(c echo.Context) error { - id := c.Param("id") - - task := findTask(id) - if task == nil { - return response.NotFound(c, "Task not found") - } - - return response.Success(c, task) -} - -func DeleteTask(c echo.Context) error { - id := c.Param("id") - - if err := deleteTask(id); err != nil { - return response.InternalServerError(c, "Failed to delete task") - } - - return response.NoContent(c) -} -``` diff --git a/docs_wiki/ARCHITECTURE.md b/docs_wiki/ARCHITECTURE.md new file mode 100644 index 0000000..d80a8e9 --- /dev/null +++ b/docs_wiki/ARCHITECTURE.md @@ -0,0 +1,522 @@ +# Architecture Overview + +This document provides a high-level overview of Stackyard's architecture, design decisions, and key concepts. Understanding this foundation will help you build effectively with the framework. + +## System Overview + +Stackyard is a modular, service-oriented Go application framework built on top of the Echo web framework. It emphasizes clean architecture, dependency injection, and production readiness with comprehensive monitoring and infrastructure integrations. + +## Core Architecture Principles + +### 1. Clean Architecture + +Stackyard follows clean architecture principles with clear separation of concerns: + +``` +┌─────────────────────────────────────┐ +│ Delivery Layer │ +│ (HTTP Handlers, Middleware) │ +├─────────────────────────────────────┤ +│ Use Case Layer │ +│ (Business Logic, Services) │ +├─────────────────────────────────────┤ +│ Infrastructure Layer │ +│ (Databases, External APIs, Utils) │ +└─────────────────────────────────────┘ +``` + +**Benefits:** +- **Testability**: Each layer can be tested independently +- **Maintainability**: Changes in one layer don't affect others +- **Flexibility**: Easy to swap implementations (e.g., different databases) + +### 2. 
Service-Oriented Design + +Applications are built as **composable services** that can be enabled/disabled via configuration: + +- **Modularity**: Services encapsulate related functionality +- **Independence**: Services can be developed and deployed separately +- **Configuration-Driven**: Runtime behavior controlled by `config.yaml` +- **Dependency Injection**: Services receive dependencies through constructors + +### 3. Infrastructure Abstraction + +All external dependencies are abstracted through **infrastructure managers**: + +```go +// Abstract interface +type DatabaseManager interface { + Connect() error + Query(query string, args ...interface{}) (interface{}, error) + Close() error +} + +// Concrete implementation +type PostgresManager struct { + db *sqlx.DB + config PostgresConfig +} +``` + +**Benefits:** +- **Testability**: Easy to mock infrastructure in tests +- **Flexibility**: Can swap implementations without changing business logic +- **Consistency**: All infrastructure follows the same patterns + +## Key Components + +### Application Structure + +``` +stackyard/ +├── cmd/app/ # Application entry point +├── config/ # Configuration management +├── internal/ # Private application code +│ ├── middleware/ # HTTP middleware +│ ├── monitoring/ # Monitoring system +│ ├── server/ # HTTP server setup +│ └── services/ # Business services +│ └── modules/ # Service implementations +├── pkg/ # Public reusable packages +│ ├── infrastructure/# External service integrations +│ ├── logger/ # Logging utilities +│ ├── request/ # Request handling & validation +│ ├── response/ # Standardized API responses +│ └── utils/ # General utilities +├── scripts/ # Build and deployment scripts +└── web/ # Static web assets +``` + +### Request Flow + +``` +1. HTTP Request → 2. Echo Router → 3. Middleware → 4. Handler → 5. 
Response + + ↓ ↓ ↓ ↓ ↓ + Client Routing Auth/Logging Business Logic JSON + (Browser/Mobile) (URL matching) (Validation) (Services) (Response) +``` + +### Service Registration + +Services are registered dynamically through a **service registry**: + +```go +// Service interface +type Service interface { + Name() string // Human-readable name + RegisterRoutes(*echo.Group) // Register HTTP routes + Enabled() bool // Service activation status + Endpoints() []string // API endpoints list +} + +// Registration +registry := services.NewServiceRegistry() +registry.Register(modules.NewUserService(config)) +registry.Register(modules.NewProductService(config)) +registry.Boot(echoInstance) // Wire up all services +``` + +## Infrastructure Managers + +### Database Managers + +Stackyard supports multiple database types through abstracted managers: + +#### PostgreSQL Manager +- **Multi-tenant support**: Dynamic database switching +- **GORM integration**: Full ORM capabilities with auto-migration +- **Connection pooling**: Efficient connection management +- **Async operations**: Non-blocking database operations + +#### MongoDB Manager +- **Document database**: NoSQL capabilities +- **Multi-tenant**: Database-level isolation +- **Aggregation pipelines**: Complex data processing +- **Async operations**: Worker pool-based execution + +#### Redis Manager +- **Caching**: High-performance key-value storage +- **Pub/Sub**: Message broadcasting capabilities +- **Batch operations**: Efficient bulk operations +- **Async execution**: Worker pool processing + +### Message Queue Managers + +#### Kafka Manager +- **Event streaming**: High-throughput message processing +- **Consumer groups**: Load balancing and fault tolerance +- **Topic management**: Dynamic topic creation and configuration +- **Async operations**: Non-blocking message publishing + +### Object Storage Managers + +#### MinIO Manager +- **S3-compatible**: AWS S3 API compatibility +- **File uploads**: Efficient multipart upload 
handling +- **Access control**: Bucket and object permissions +- **Async operations**: Background file processing + +### Monitoring & Analytics + +#### Grafana Manager +- **Dashboard creation**: Programmatic dashboard generation +- **Data source integration**: Connect various data sources +- **Annotation support**: Timeline event marking +- **Health monitoring**: Service status tracking + +## Async Architecture + +### Worker Pools + +All infrastructure operations use **worker pools** for concurrency control: + +```go +type WorkerPool struct { + workers int + jobQueue chan func() + stopChan chan struct{} + stopped chan struct{} +} + +// Usage +result := manager.AsyncOperation(ctx, data) +// Operation runs in worker pool, returns immediately +value, err := result.Wait() // Block until complete +``` + +**Benefits:** +- **Resource control**: Limit concurrent operations +- **Performance**: Prevent resource exhaustion +- **Reliability**: Graceful error handling and recovery + +### Async Results + +Operations return **AsyncResult** types for flexible execution: + +```go +type AsyncResult[T any] struct { + Value T + Error error + Done chan struct{} +} + +// Synchronous usage +result := manager.GetUserAsync(ctx, userID) +user, err := result.Wait() + +// Timeout support +user, err := result.WaitWithTimeout(5 * time.Second) + +// Non-blocking check +if result.IsDone() { + user, err := result.Wait() + // Process result +} +``` + +## Configuration System + +### Hierarchical Configuration + +Configuration is managed through a **hierarchical YAML structure**: + +```yaml +app: # Application-level settings + name: "MyApp" + debug: true + +server: # HTTP server configuration + port: "8080" + +services: # Service enable/disable flags + user_service: true + product_service: false + +postgres: # Infrastructure-specific config + enabled: true + connections: + - name: "primary" + host: "localhost" + database: "myapp" +``` + +### Environment Override + +Configuration can be overridden 
with **environment variables**: + +```bash +export APP_DEBUG=false +export SERVER_PORT=3000 +export POSTGRES_PASSWORD=prod-password +``` + +### Validation & Defaults + +Configuration is **validated at startup** with sensible defaults: + +```go +type Config struct { + App AppConfig `yaml:"app"` + Server ServerConfig `yaml:"server"` + Postgres PostgresConfig `yaml:"postgres" validate:"required_if=Enabled true"` +} + +func (c *Config) Validate() error { + // Custom validation logic + return validate.Struct(c) +} +``` + +## API Design Patterns + +### Standardized Responses + +All API responses follow a **consistent JSON structure**: + +```json +{ + "success": true, + "message": "Operation completed", + "data": { /* response data */ }, + "meta": { /* pagination metadata */ }, + "timestamp": 1642598400 +} +``` + +### Request Validation + +Requests are validated using **struct tags** with automatic error formatting: + +```go +type CreateUserRequest struct { + Name string `json:"name" validate:"required,min=2,max=50"` + Email string `json:"email" validate:"required,email"` + Age int `json:"age" validate:"gte=18,lte=120"` +} + +func (h *Handler) createUser(c echo.Context) error { + var req CreateUserRequest + if err := request.Bind(c, &req); err != nil { + return err // Automatic validation error response + } + // Process valid request... 
+} +``` + +### Error Handling + +Errors are handled consistently with **standardized error codes**: + +```go +// Automatic error responses +return response.NotFound(c, "User not found") +return response.BadRequest(c, "Invalid input") +return response.InternalServerError(c, "Database error") +``` + +## Security Architecture + +### Authentication & Authorization + +- **API Key authentication**: Simple key-based auth +- **Session management**: Secure session handling +- **Role-based access**: Permission-based authorization + +### Data Protection + +- **API Obfuscation**: Base64 encoding for data in transit +- **Encryption**: AES-256-GCM encryption for sensitive data +- **Input validation**: Comprehensive request validation + +### Infrastructure Security + +- **Connection encryption**: TLS for database connections +- **Secure defaults**: Conservative security settings +- **Audit logging**: Comprehensive operation logging + +## Monitoring & Observability + +### Web Dashboard + +The monitoring dashboard provides: +- **Real-time metrics**: System resource usage +- **API monitoring**: Endpoint performance and errors +- **Service health**: Individual service status +- **Log viewing**: Real-time application logs + +### Terminal UI (TUI) + +The TUI provides: +- **Boot sequence visualization**: Service initialization status +- **Live log monitoring**: Real-time log streaming with filtering +- **Interactive controls**: Keyboard shortcuts for navigation +- **System monitoring**: Resource usage and performance metrics + +### Health Checks + +Comprehensive health endpoints: +- **Application health**: `/health` +- **Infrastructure health**: `/health/infrastructure` +- **Service-specific health**: `/health/{service}` + +## Build & Deployment + +### Multi-Stage Docker Builds + +```dockerfile +FROM golang:1.21-alpine AS builder +# Build stage - compile application + +FROM alpine:latest AS runtime +# Runtime stage - minimal production image + +FROM gcr.io/distroless/static AS 
ultra-minimal +# Ultra-minimal production image +``` + +### Build Scripts + +Cross-platform build scripts provide: +- **Binary compilation**: Optimized Go builds +- **Asset bundling**: Include static assets in binary +- **Backup management**: Automated backup of previous builds +- **Code obfuscation**: Optional binary obfuscation + +### Deployment Options + +- **Binary deployment**: Direct server deployment +- **Docker containers**: Containerized deployment +- **Kubernetes**: Orchestrated deployment +- **Serverless**: Function-as-a-service deployment + +## Performance Characteristics + +### Concurrency Model + +- **Goroutines**: Lightweight thread management +- **Worker pools**: Controlled concurrency for I/O operations +- **Async processing**: Non-blocking request handling +- **Connection pooling**: Efficient resource utilization + +### Caching Strategy + +- **Multi-level caching**: Memory, Redis, CDN +- **Cache invalidation**: TTL-based and manual invalidation +- **Cache warming**: Pre-population of frequently accessed data + +### Database Optimization + +- **Connection pooling**: Efficient database connection management +- **Query optimization**: Index usage and query planning +- **Batch operations**: Bulk data operations +- **Read/write splitting**: Separate read and write databases + +## Scalability Features + +### Horizontal Scaling + +- **Stateless design**: Services can be scaled independently +- **Load balancing**: Distribute requests across instances +- **Database sharding**: Horizontal database scaling +- **Caching layers**: Reduce database load + +### Vertical Scaling + +- **Resource optimization**: Efficient memory and CPU usage +- **Async processing**: Handle high concurrency +- **Connection pooling**: Optimize external service connections + +## Development Workflow + +### Local Development + +```bash +# Quick start +go run cmd/app/main.go + +# With custom config +go run cmd/app/main.go -config=config.dev.yaml + +# Enable debug logging +export 
APP_DEBUG=true +go run cmd/app/main.go +``` + +### Testing Strategy + +- **Unit tests**: Individual component testing +- **Integration tests**: End-to-end API testing +- **Performance tests**: Load and stress testing +- **Security tests**: Penetration testing and vulnerability scanning + +### CI/CD Pipeline + +```yaml +# GitHub Actions example +- name: Test + run: go test ./... + +- name: Build + run: ./scripts/build.sh + +- name: Docker Build + run: ./scripts/docker_build.sh + +- name: Deploy + run: kubectl apply -f k8s/ +``` + +## Best Practices + +### Code Organization + +1. **Service boundaries**: Clear separation of business logic +2. **Dependency injection**: Constructor-based dependency injection +3. **Interface segregation**: Small, focused interfaces +4. **Error handling**: Consistent error handling patterns + +### Performance + +1. **Async operations**: Use async for I/O operations +2. **Caching**: Implement appropriate caching strategies +3. **Pagination**: Always paginate large datasets +4. **Monitoring**: Monitor performance metrics + +### Security + +1. **Input validation**: Validate all user inputs +2. **Secure defaults**: Conservative security settings +3. **Regular updates**: Keep dependencies updated +4. **Audit logging**: Log security-relevant events + +## Migration & Extensibility + +### Adding New Services + +1. **Implement Service interface** +2. **Register in service registry** +3. **Configure via config.yaml** +4. **Add tests and documentation** + +### Infrastructure Extensions + +1. **Create infrastructure manager** +2. **Implement async operations** +3. **Add configuration support** +4. **Update dependency injection** + +### API Extensions + +1. **Add new endpoints** +2. **Implement request/response types** +3. **Add validation rules** +4. 
**Update API documentation** + +## Conclusion + +Stackyard's architecture emphasizes **modularity**, **scalability**, and **maintainability** through clean architecture principles, service-oriented design, and comprehensive infrastructure abstractions. The framework provides a solid foundation for building production-ready applications while maintaining developer productivity and code quality. + +The combination of **async processing**, **dependency injection**, and **configuration-driven behavior** makes Stackyard suitable for applications ranging from simple APIs to complex, multi-tenant SaaS platforms. + +For detailed implementation guides, see the **[Development Guide](DEVELOPMENT.md)**. For complete API reference, see the **[API Reference](REFERENCE.md)**. diff --git a/docs_wiki/ARCHITECTURE_DIAGRAMS.md b/docs_wiki/ARCHITECTURE_DIAGRAMS.md deleted file mode 100644 index 4f52983..0000000 --- a/docs_wiki/ARCHITECTURE_DIAGRAMS.md +++ /dev/null @@ -1,245 +0,0 @@ -# Request/Response Flow Architecture - -```mermaid -flowchart TB - Client[Client Request] - Handler[Handler Function] - Bind[request.Bind] - Validate[request.Validate] - Logic[Business Logic] - Success[response.Success] - Error[response.Error] - Response[JSON Response] - - Client -->|HTTP Request| Handler - Handler --> Bind - Bind -->|Parse JSON| Validate - Validate -->|Invalid| Error - Validate -->|Valid| Logic - Logic -->|Success| Success - Logic -->|Error| Error - Success --> Response - Error --> Response - Response -->|JSON| Client - - style Client fill:#e1f5ff - style Response fill:#e1f5ff - style Success fill:#d4edda - style Error fill:#f8d7da - style Logic fill:#fff3cd -``` - -## Response Structure - -```mermaid -classDiagram - class Response { - +bool success - +string message - +interface{} data - +ErrorDetail error - +Meta meta - +int64 timestamp - } - - class ErrorDetail { - +string code - +string message - +map details - } - - class Meta { - +int page - +int per_page - +int64 total - +int 
total_pages - +map extra - } - - Response --> ErrorDetail - Response --> Meta -``` - -## Request Validation Flow - -```mermaid -sequenceDiagram - participant C as Client - participant H as Handler - participant B as request.Bind - participant V as Validator - participant R as response - - C->>H: POST /api/v1/users - H->>B: Bind(context, &req) - B->>B: Parse JSON - B->>V: Validate(req) - - alt Validation Failed - V-->>B: ValidationError - B-->>H: return error - H->>R: ValidationError(...) - R-->>C: 422 with error details - else Validation Success - V-->>B: nil - B-->>H: nil - H->>H: Process request - H->>R: Success(data) - R-->>C: 200 with data - end -``` - -## Async Infrastructure Flow - -```mermaid -graph TD - A[HTTP Request] --> B[Handler] - B --> C[Async Operation] - C --> D[Worker Pool] - D --> E[Infrastructure] - E --> F[Result Channel] - F --> G[Response] - - style A fill:#e1f5ff - style C fill:#fff3cd - style D fill:#ffeaa7 - style E fill:#fdcb6e - style G fill:#55a3ff -``` - -## Service Registration Architecture - -```mermaid -graph TD - A[ServiceRegistrar] --> B[Reflection Analysis] - B --> C{Dependencies?} - C -->|No| D[Immediate Registration] - C -->|Yes| E[Wait for Infrastructure] - E --> F[Infrastructure Ready] - F --> G[Register Service] - G --> H[Boot Routes] - - style A fill:#74b9ff - style B fill:#0984e3 - style D fill:#00b894 - style G fill:#00b894 -``` - -## Package Organization - -```mermaid -graph LR - A[pkg/request] -->|Validates| F[Handler] - B[pkg/response] -->|Formats| F - F -->|Uses| G[Service Logic] - G -->|Uses| H[Async Infrastructure] - H -->|Worker Pools| I[Infrastructure Managers] - G -->|Returns| F - - style A fill:#ffeb9c - style B fill:#9cf09c - style H fill:#ffeaa7 - style I fill:#fdcb6e - style F fill:#9cccff - style G fill:#ff9c9c - -## Complete CRUD Example Flow - -```mermaid -graph TD - subgraph "GET /api/v1/users (List)" - A1[Parse Pagination] --> A2[Fetch Data] - A2 --> A3[Calculate Meta] - A3 --> A4[SuccessWithMeta] - end - 
- subgraph "GET /api/v1/users/:id (Detail)" - B1[Get ID from URL] --> B2[Find in DB] - B2 -->|Found| B3[Success] - B2 -->|Not Found| B4[NotFound] - end - - subgraph "POST /api/v1/users (Create)" - C1[Bind & Validate] --> C2{Valid?} - C2 -->|No| C3[ValidationError] - C2 -->|Yes| C4[Create in DB] - C4 --> C5[Created 201] - end - - subgraph "PUT /api/v1/users/:id (Update)" - D1[Get ID + Bind] --> D2{Valid?} - D2 -->|No| D3[ValidationError] - D2 -->|Yes| D4[Update in DB] - D4 --> D5[Success] - end - - subgraph "DELETE /api/v1/users/:id (Delete)" - E1[Get ID] --> E2[Delete from DB] - E2 --> E3[NoContent 204] - end - - style A4 fill:#d4edda - style B3 fill:#d4edda - style B4 fill:#f8d7da - style C3 fill:#f8d7da - style C5 fill:#d4edda - style D3 fill:#f8d7da - style D5 fill:#d4edda - style E3 fill:#d4edda -``` - -## Response Helper Functions Map - -```mermaid -mindmap - root((Response Helpers)) - Success Responses - Success 200 - SuccessWithMeta 200 - Created 201 - NoContent 204 - Client Errors - BadRequest 400 - Unauthorized 401 - Forbidden 403 - NotFound 404 - Conflict 409 - ValidationError 422 - Server Errors - InternalServerError 500 - ServiceUnavailable 503 - Custom - Error custom code -``` - -## Validation Tags Reference - -```mermaid -graph TB - subgraph "String Validators" - S1[required] - S2[email] - S3[min max] - S4[len] - end - - subgraph "Number Validators" - N1[gte lte] - N2[gt lt] - N3[min max] - end - - subgraph "Custom Validators" - C1[phone] - C2[username] - end - - subgraph "Choice Validators" - O1[oneof] - end - - style S1 fill:#ffcccc - style N1 fill:#ccffcc - style C1 fill:#ccccff - style O1 fill:#ffffcc -``` diff --git a/docs_wiki/ASYNC_INFRASTRUCTURE.md b/docs_wiki/ASYNC_INFRASTRUCTURE.md deleted file mode 100644 index 4c614e5..0000000 --- a/docs_wiki/ASYNC_INFRASTRUCTURE.md +++ /dev/null @@ -1,1170 +0,0 @@ -# Async Infrastructure Implementation Guide - -## Overview - -This document describes the async infrastructure implementation that ensures all 
database operations, caching, message queuing, and file operations run asynchronously to avoid blocking the main application thread. The implementation uses Go's goroutines, channels, and worker pools to provide non-blocking operations while maintaining thread safety. - -The system also includes **async infrastructure initialization** that allows the HTTP server to start immediately without waiting for database connections and other infrastructure components to initialize. This provides instant responsiveness while infrastructure components initialize in the background. - -## Key Components - -### 1. AsyncResult Types - -The async infrastructure uses generic `AsyncResult[T]` types to handle asynchronous operations: - -```go -type AsyncResult[T any] struct { - Value T - Error error - Done chan struct{} -} -``` - -**Key Methods:** -- `Wait()` - Blocks until operation completes -- `WaitWithTimeout(timeout)` - Waits with timeout -- `IsDone()` - Non-blocking check if operation is complete - -### 2. Worker Pools - -Each infrastructure component includes a worker pool for managing concurrent operations: - -```go -type WorkerPool struct { - workers int - jobQueue chan func() - stopChan chan struct{} - stopped chan struct{} -} -``` - -**Benefits:** -- Controlled concurrency -- Resource management -- Panic recovery -- Graceful shutdown - -### 3. 
Batch Operations - -Support for batching multiple operations: - -```go -type BatchAsyncResult[T any] struct { - Results []AsyncResult[T] - Done chan struct{} -} -``` - -## Infrastructure Components - -### Redis Manager - -**Async Operations:** -```go -// Set value asynchronously -result := redisManager.SetAsync(ctx, "key", "value", time.Hour) - -// Get value asynchronously -result := redisManager.GetAsync(ctx, "key") - -// Wait for result -value, err := result.Wait() -``` - -**Batch Operations:** -```go -// Set multiple keys -kvPairs := map[string]interface{}{"key1": "value1", "key2": "value2"} -result := redisManager.SetBatchAsync(ctx, kvPairs, time.Hour) - -// Get multiple keys -keys := []string{"key1", "key2"} -result := redisManager.GetBatchAsync(ctx, keys) -``` - -**Worker Pool Integration:** -```go -// Submit background job -redisManager.SubmitAsyncJob(func() { - // Long-running Redis operation -}) -``` - -### Kafka Manager - -**Async Operations:** -```go -// Publish message asynchronously -result := kafkaManager.PublishAsync(ctx, "topic", []byte("message")) - -// Publish with key -result := kafkaManager.PublishWithKeyAsync(ctx, "topic", []byte("key"), []byte("message")) -``` - -**Batch Operations:** -```go -// Publish multiple messages -messages := [][]byte{[]byte("msg1"), []byte("msg2")} -result := kafkaManager.PublishBatchAsync(ctx, "topic", messages) -``` - -**Consumer Operations:** -```go -// Start consumer asynchronously (doesn't block) -kafkaManager.ConsumeAsync(ctx, "topic", func(key, value []byte) error { - // Handle message - return nil -}) -``` - -### MinIO Manager - -**Async Operations:** -```go -// Upload file asynchronously -file, _ := os.Open("file.txt") -defer file.Close() -result := minioManager.UploadFileAsync(ctx, "object.txt", file, size, "text/plain") -``` - -**Batch Operations:** -```go -// Upload multiple files -uploads := []struct{ - ObjectName, Reader, Size, ContentType -}{/* file data */} -result := minioManager.UploadBatchAsync(ctx, 
uploads) -``` - -### PostgreSQL Manager - -**Async Operations:** -```go -// Execute query asynchronously -result := postgresManager.QueryAsync(ctx, "SELECT * FROM users", args...) - -// Execute DML operations -insertResult := postgresManager.InsertAsync(ctx, "INSERT INTO users...", args...) -updateResult := postgresManager.UpdateAsync(ctx, "UPDATE users...", args...) -deleteResult := postgresManager.DeleteAsync(ctx, "DELETE FROM users...", args...) -``` - -**GORM Async Operations:** -```go -// Async GORM operations -createResult := postgresManager.GORMCreateAsync(ctx, &user) -findResult := postgresManager.GORMFindAsync(ctx, &users) -updateResult := postgresManager.GORMUpdateAsync(ctx, &user, updates, "id = ?", id) -deleteResult := postgresManager.GORMDeleteAsync(ctx, &user, "id = ?", id) -``` - -### MongoDB Manager - -**Async Operations:** -```go -// CRUD operations -insertResult := mongoManager.InsertOneAsync(ctx, "collection", document) -findResult := mongoManager.FindAsync(ctx, "collection", filter) -updateResult := mongoManager.UpdateOneAsync(ctx, "collection", filter, update) -deleteResult := mongoManager.DeleteOneAsync(ctx, "collection", filter) -``` - -**Batch Operations:** -```go -// Batch insert -inserts := []struct{Collection string; Document interface{}}{/* data */} -result := mongoManager.InsertBatchAsync(ctx, inserts) -``` - -### Cron Manager - -**Async Job Execution:** -```go -// Add job that executes asynchronously in worker pool -cronManager.AddAsyncJob("name", "schedule", func() { - // Job logic (runs in worker pool, doesn't block main thread) -})) - -// Run job immediately (asynchronously) -cronManager.RunJobNow(jobID) - -// Get job status and scheduling info -jobs := cronManager.GetJobs() // Returns scheduled jobs with next run times -``` - -**Configuration:** -```yaml -cron: - enabled: true - jobs: - log_cleanup: "0 0 * * *" # Daily at midnight - health_check: "*/10 * * * * *" # Every 10 seconds -``` - -**Features:** -- **Async Execution**: Jobs 
run in worker pools to avoid blocking -- **Schedule Management**: Add, remove, update job schedules dynamically -- **Status Monitoring**: View active jobs and execution history -- **Graceful Shutdown**: Clean termination of running jobs - -## Async Infrastructure Initialization - -The application implements **async infrastructure initialization** that allows the HTTP server to start immediately without waiting for database connections and other infrastructure components to initialize. This provides instant responsiveness while infrastructure components initialize in the background. - -### Infrastructure Initialization Manager - -The `InfraInitManager` manages asynchronous initialization of all infrastructure components: - -```go -type InfraInitManager struct { - status map[string]*InfraInitStatus - mu sync.RWMutex - logger *logger.Logger - doneChan chan struct{} -} -``` - -### Initialization Process - -1. **Immediate Server Start**: HTTP server starts immediately without waiting -2. **Background Initialization**: Infrastructure components initialize concurrently in goroutines -3. **Progress Tracking**: Real-time status monitoring of initialization progress -4. 
**Health Endpoints**: API endpoints provide initialization status - -### Usage Example - -```go -// In server startup -infraInitManager := infrastructure.NewInfraInitManager(logger) - -// Start async initialization (doesn't block) -redisMgr, kafkaMgr, _, postgresMgr, mongoMgr, cronMgr := - infraInitManager.StartAsyncInitialization(config, logger) - -// HTTP server starts immediately here -// Infrastructure initializes in background - -// Check initialization status -status := infraInitManager.GetStatus() -progress := infraInitManager.GetInitializationProgress() -``` - -### Health Check Endpoints - -#### GET /health -Enhanced health check with infrastructure status: -```json -{ - "status": "ok", - "server_ready": true, - "infrastructure": { - "redis": { - "name": "redis", - "initialized": true, - "start_time": "2025-12-19T13:13:00Z", - "duration": "2.5s", - "progress": 1.0 - }, - "postgres": { - "name": "postgres", - "initialized": true, - "start_time": "2025-12-19T13:13:00Z", - "duration": "3.2s", - "progress": 1.0 - } - }, - "initialization_progress": 0.85 -} -``` - -#### GET /health/infrastructure -Detailed infrastructure initialization status: -```json -{ - "redis": { - "name": "redis", - "initialized": true, - "error": "", - "start_time": "2025-12-19T13:13:00Z", - "duration": "2.5s", - "progress": 1.0 - }, - "postgres": { - "name": "postgres", - "initialized": false, - "error": "connection timeout", - "start_time": "2025-12-19T13:13:00Z", - "duration": "30s", - "progress": 0.0 - } -} -``` - -### Benefits of Async Initialization - -- **Instant Server Responsiveness**: HTTP server available immediately -- **Graceful Degradation**: Services work with partially initialized infrastructure -- **Better User Experience**: No waiting for database connections on startup -- **Fault Tolerance**: Failed components don't prevent server startup -- **Monitoring Integration**: Real-time visibility into initialization progress - -### Initialization Order - -Components initialize 
concurrently but with logical dependencies: - -1. **Redis** - Fast cache initialization -2. **PostgreSQL/MongoDB** - Database connections (may take longer) -3. **Kafka** - Message queue connections -4. **MinIO** - Object storage -5. **Cron** - Scheduled jobs - -### Clean Service Registration with Automatic Dependency Detection - -The application implements **clean service registration** with automatic dependency detection using a dedicated `ServiceRegistrar`. Services are registered **only once** in a central location, and the system automatically determines infrastructure dependencies through reflection analysis. - -#### Architecture Overview - -**Clean Separation of Concerns:** -- **`internal/services/register.go`**: Service registration logic and dependency analysis -- **`internal/server/server.go`**: Clean server startup without registration complexity - -**Key Components:** -- **`ServiceRegistrar`**: Handles all service registration and dependency management -- **`ServiceDefinition`**: Simple struct holding service name and constructor -- **Reflection Analysis**: Automatic dependency detection based on struct field types - -#### Service Definition (One Clean Location) - -```go -// internal/services/register.go -allServices := []ServiceDefinition{ - { - Name: "service_a", - Constructor: func() interface{ Service } { - return modules.NewServiceA(sr.config.Services.IsEnabled("service_a")) - }, - }, - { - Name: "service_d", - Constructor: func() interface{ Service } { - return modules.NewServiceD(sr.postgresManager, sr.config.Services.IsEnabled("service_d"), sr.logger) - }, - }, - { - Name: "service_f", - Constructor: func() interface{ Service } { - return modules.NewServiceF(sr.postgresConnMgr, sr.config.Services.IsEnabled("service_f"), sr.logger) - }, - }, - { - Name: "service_g", - Constructor: func() interface{ Service } { - return modules.NewServiceG(sr.mongoConnMgr, sr.config.Services.IsEnabled("service_g"), sr.logger) - }, - }, -} -``` - -#### Server 
Startup (Clean and Simple) - -```go -// internal/server/server.go - Clean server startup -serviceRegistrar := services.NewServiceRegistrar( - s.config, s.logger, s.infraInitManager, - s.redisManager, s.kafkaManager, s.postgresManager, - s.postgresConnectionManager, s.mongoManager, - s.mongoConnectionManager, s.cronManager, -) - -// Register ALL services with automatic dependency detection -dependentServiceCount := serviceRegistrar.RegisterAllServices(registry, s.echo) - -// Wait for dependent services, then start monitoring -if dependentServiceCount > 0 { - time.Sleep(time.Duration(dependentServiceCount) * 500 * time.Millisecond) -} -// Start monitoring with complete service list -``` - -#### Reflection-Based Dependency Detection - -The `analyzeConstructorDependencies()` method automatically determines dependencies: - -```go -func (sr *ServiceRegistrar) analyzeConstructorDependencies(constructor func() interface{ Service }) []string { - service := constructor() // Create service instance - - // Use reflection to examine struct fields for infrastructure types - serviceValue := reflect.ValueOf(service) - serviceType := serviceValue.Type() - - var dependencies []string - for i := 0; i < serviceType.NumField(); i++ { - fieldType := serviceType.Field(i).Type.String() - - switch fieldType { - case "infrastructure.PostgresConnectionManager": - if sr.config.Postgres.Enabled || sr.config.PostgresMultiConfig.Enabled { - dependencies = append(dependencies, "postgres") - } - case "infrastructure.MongoConnectionManager": - if sr.config.Mongo.Enabled || sr.config.MongoMultiConfig.Enabled { - dependencies = append(dependencies, "mongodb") - } - // ... 
other infrastructure types automatically detected - } - } - return dependencies -} -``` - -#### Synchronous Registration Process with Complete Synchronization - -**Phase 1: Infrastructure-Independent Services** -```go -// Start immediately - no dependencies -for _, svc := range independentServices { - registry.Register(svc.Constructor()) -} -registry.Boot(echo) -``` - -**Phase 2: Infrastructure-Dependent Services (Synchronous)** -```go -// Use channel to track completion of ALL dependent services -dependentDoneChan := make(chan struct{}, len(dependentServices)) - -for _, svc := range dependentServices { - go func(serviceDef DependentServiceDefinition) { - defer func() { dependentDoneChan <- struct{}{} }() - - // Wait for each required infrastructure component - for _, dep := range serviceDef.Dependencies { - for !infraInitManager.IsInitialized(dep) { - time.Sleep(100 * time.Millisecond) - } - } - // Register service when dependencies are ready - registry.Register(serviceDef.Constructor()) - registry.BootService(echo, serviceDef.Constructor()) - }(svc) -} - -// Wait for ALL dependent services to complete registration -for i := 0; i < len(dependentServices); i++ { - <-dependentDoneChan // Blocks until each service completes -} -``` - -**Phase 3: Monitoring Startup (After All Services)** -```go -// Now ALL services are registered - build complete service list -var servicesList []monitoring.ServiceInfo -for _, srv := range registry.GetServices() { - servicesList = append(servicesList, monitoring.ServiceInfo{ - Name: srv.Name(), - Active: srv.Enabled(), - Endpoints: srv.Endpoints(), - }) -} -go monitoring.Start(config, servicesList) // Complete service list -``` - -#### Benefits of Clean Architecture - -- **Separation of Concerns**: Registration logic separated from server logic -- **Single Definition**: Services defined only once, no duplication -- **Automatic Detection**: Dependencies determined by examining actual code -- **Maintainable**: Easy to add new services - 
just add to the list -- **Clean Server Code**: Server startup logic remains simple and focused -- **Type-Safe**: Uses Go's type system for reliable dependency detection - -#### Adding New Services - -To add a new service, simply add it to the `allServices` slice: - -```go -{ - Name: "service_new", - Constructor: func() interface{ Service } { - return modules.NewService(s.someManager, s.config.Services.IsEnabled("service_new")) - }, -}, -// System automatically detects dependencies based on constructor parameters -``` - -This approach provides a **clean, maintainable, and automatic** service registration system that scales effortlessly as new services and infrastructure components are added. - -### Error Handling - -Failed initializations are logged but don't prevent server startup: - -```go -// Initialization continues even if some components fail -if err != nil { - status.Error = err.Error() - logger.Error("Failed to initialize infrastructure component", err, "component", name) -} -``` - -### Configuration - -Infrastructure components initialize based on their configuration settings: - -```yaml -redis: - enabled: true # Will initialize if enabled - -postgres: - enabled: false # Will skip initialization - -mongo: - enabled: true - multi_config: - enabled: true # Will initialize multi-connection setup -``` - -## Graceful Shutdown - -The application implements **graceful shutdown** that properly disconnects all infrastructure components when receiving SIGTERM or SIGINT signals. This ensures clean resource cleanup and prevents data corruption. - -### Shutdown Process - -1. **Signal Handling**: Application catches SIGTERM/SIGINT signals -2. **Infrastructure Shutdown**: Components shut down in reverse order of initialization -3. **Resource Cleanup**: Connections closed, worker pools stopped -4. **Logging**: Detailed shutdown progress logged -5. 
**Error Reporting**: Shutdown errors reported but don't prevent completion - -### Shutdown Order - -Components are shut down in reverse order to ensure dependencies are handled correctly: - -1. **Cron Manager** - Stop scheduled jobs first -2. **MongoDB Connections** - Close document database connections -3. **PostgreSQL Connections** - Close relational database connections -4. **Kafka Manager** - Stop message producers/consumers -5. **Redis Manager** - Close cache connections last - -### Usage Example - -```go -// Signal handling in main.go -sigChan := make(chan os.Signal, 1) -signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - -<-sigChan - -// Graceful shutdown -err := server.Shutdown(context.Background(), logger) -if err != nil { - logger.Error("Shutdown completed with errors", err) -} else { - logger.Info("Shutdown completed successfully") -} -``` - -### Shutdown Method - -The `Server.Shutdown()` method performs orderly shutdown: - -```go -func (s *Server) Shutdown(ctx context.Context, logger *logger.Logger) error { - // 1. Cron Manager - if s.cronManager != nil { - logger.Info("Shutting down Cron Manager...") - if err := s.cronManager.Close(); err != nil { - return fmt.Errorf("cron shutdown error: %w", err) - } - } - - // 2. MongoDB connections - if s.mongoConnectionManager != nil { - logger.Info("Shutting down MongoDB connections...") - if err := s.mongoConnectionManager.CloseAll(); err != nil { - return fmt.Errorf("mongodb shutdown error: %w", err) - } - } - - // 3. PostgreSQL connections - if s.postgresConnectionManager != nil { - logger.Info("Shutting down PostgreSQL connections...") - if err := s.postgresConnectionManager.CloseAll(); err != nil { - return fmt.Errorf("postgres shutdown error: %w", err) - } - } - - // 4. Kafka Manager - if s.kafkaManager != nil { - logger.Info("Shutting down Kafka Manager...") - if err := s.kafkaManager.Close(); err != nil { - return fmt.Errorf("kafka shutdown error: %w", err) - } - } - - // 5. 
Redis Manager - if s.redisManager != nil { - logger.Info("Shutting down Redis Manager...") - if err := s.redisManager.Close(); err != nil { - return fmt.Errorf("redis shutdown error: %w", err) - } - } - - return nil -} -``` - -### Benefits of Graceful Shutdown - -- **Data Integrity**: Prevents partial writes and corruption -- **Resource Cleanup**: Ensures all connections are properly closed -- **Clean Termination**: No hanging processes or zombie goroutines -- **Monitoring**: Shutdown progress is logged and monitored -- **Kubernetes Compatibility**: Works with container orchestration systems - -### Error Handling During Shutdown - -Shutdown errors are logged but don't prevent the shutdown process: - -```go -if err := component.Close(); err != nil { - shutdownErrors = append(shutdownErrors, fmt.Errorf("component shutdown error: %w", err)) - logger.Error("Error shutting down component", err) -} else { - logger.Info("Component shut down successfully") -} -``` - -### Timeout Handling - -Shutdown operations can be given timeouts to prevent hanging: - -```go -shutdownCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) -defer cancel() - -err := server.Shutdown(shutdownCtx, logger) -``` - -### Integration with Orchestration - -Works seamlessly with container orchestration systems: - -```bash -# Docker graceful shutdown -docker stop --timeout 30 container_name - -# Kubernetes graceful termination -terminationGracePeriodSeconds: 30 -``` - -### Testing Shutdown - -Test graceful shutdown behavior: - -```bash -# Send SIGTERM signal -kill -TERM - -# Or use Ctrl+C in interactive mode -# Application will log shutdown progress and exit cleanly -``` - -## Usage Patterns - -### Synchronous Usage (Blocking) - -```go -// Execute async operation and wait for result -result := redisManager.GetAsync(ctx, "key") -value, err := result.Wait() - -if err != nil { - // Handle error -} -// Use value -``` - -### Asynchronous Usage (Non-blocking) - -```go -// Start operation 
without waiting -result := redisManager.GetAsync(ctx, "key") - -// Continue with other work -doOtherWork() - -// Check if done later -if result.IsDone() { - value, err := result.Wait() - // Handle result -} -``` - -### Timeout Handling - -```go -// Wait with timeout -result := redisManager.GetAsync(ctx, "key") -value, err := result.WaitWithTimeout(5 * time.Second) - -if err == context.DeadlineExceeded { - // Handle timeout -} -``` - -### Batch Processing - -```go -// Process multiple operations in parallel -keys := []string{"key1", "key2", "key3"} -result := redisManager.GetBatchAsync(ctx, keys) - -// Wait for all operations to complete -values, errors := result.WaitAll() - -for i, value := range values { - if errors[i] != nil { - // Handle error for this operation - } - // Process value -} -``` - -### Worker Pool Submission - -```go -// Submit long-running tasks to worker pool -redisManager.SubmitAsyncJob(func() { - // Long-running operation - heavyComputation() - redisManager.Set(ctx, "result", computedValue, time.Hour) -}) -``` - -## Service Implementation Examples - -### Database Service with Async Operations - -```go -type UserService struct { - db *infrastructure.PostgresManager -} - -func (s *UserService) CreateUser(c echo.Context) error { - var user User - if err := c.Bind(&user); err != nil { - return response.BadRequest(c, "Invalid user data") - } - - // Async database operation - result := s.db.GORMCreateAsync(context.Background(), &user) - - // Wait for completion - _, err := result.Wait() - if err != nil { - return response.InternalServerError(c, err.Error()) - } - - return response.Created(c, user) -} - -func (s *UserService) GetUsers(c echo.Context) error { - var users []User - - // Async query - result := s.db.GORMFindAsync(context.Background(), &users) - - // Wait for completion - _, err := result.Wait() - if err != nil { - return response.InternalServerError(c, err.Error()) - } - - return response.Success(c, users) -} -``` - -### Cache Service 
with Async Operations - -```go -type CacheService struct { - redis *infrastructure.RedisManager -} - -func (s *CacheService) SetMultiple(ctx context.Context, data map[string]interface{}) error { - // Async batch set - result := s.redis.SetBatchAsync(ctx, data, time.Hour) - - // Wait for all operations - _, errors := result.WaitAll() - - // Check for any errors - for _, err := range errors { - if err != nil { - return err - } - } - - return nil -} -``` - -### Message Queue Service - -```go -type QueueService struct { - kafka *infrastructure.KafkaManager -} - -func (s *QueueService) PublishEvents(ctx context.Context, events []Event) error { - // Convert events to messages - messages := make([][]byte, len(events)) - for i, event := range events { - data, _ := json.Marshal(event) - messages[i] = data - } - - // Async batch publish - result := s.kafka.PublishBatchAsync(ctx, "events", messages) - - // Wait for completion - _, errors := result.WaitAll() - - // Check for errors - for _, err := range errors { - if err != nil { - return err - } - } - - return nil -} -``` - -## Performance Benefits - -### Non-blocking Operations - -- **Main Thread Availability**: HTTP handlers return immediately while operations run in background -- **Concurrent Processing**: Multiple operations can run simultaneously -- **Resource Efficiency**: Better utilization of system resources - -### Example Performance Comparison - -**Synchronous Approach:** -``` -Request → DB Query (2s) → Response -Total: 2 seconds per request -Throughput: 0.5 requests/second -``` - -**Asynchronous Approach:** -``` -Request → Start Async DB Query → Return Response (immediate) -DB Query completes (2s) → Result stored/cached -Total: ~0ms per request (non-blocking) -Throughput: Limited by DB capacity, not request handling -``` - -### Resource Management - -- **Worker Pools**: Control maximum concurrent operations -- **Connection Pooling**: Reuse database connections efficiently -- **Timeout Handling**: Prevent hanging 
operations -- **Graceful Shutdown**: Clean up resources properly - -## Error Handling - -### Operation-specific Errors - -```go -result := redisManager.GetAsync(ctx, "key") -value, err := result.Wait() - -if err != nil { - switch err { - case redis.Nil: - // Key not found - case context.DeadlineExceeded: - // Operation timed out - default: - // Other error - } -} -``` - -### Batch Error Handling - -```go -result := redisManager.GetBatchAsync(ctx, keys) -values, errors := result.WaitAll() - -for i, err := range errors { - if err != nil { - log.Printf("Error getting key %s: %v", keys[i], err) - // Handle specific error - } else { - // Process values[i] - } -} -``` - -### Panic Recovery - -Async operations include panic recovery: - -```go -// Automatic panic handling in ExecuteAsync -go func() { - defer func() { - if r := recover(); r != nil { - result.Complete(zeroValue, fmt.Errorf("operation panicked: %v", r)) - } - }() - // Operation logic -}() -``` - -## Monitoring and Observability - -### Status Monitoring - -Each manager provides status information including async operation stats: - -```go -// Redis status includes pool information -redisStatus := redisManager.GetStatus() -// {"connected": true, "pool_workers": 10, "active_jobs": 3} - -// Cron status includes worker pool info -cronStatus := cronManager.GetStatus() -// {"active": true, "jobs": [...], "pool_workers": 5} -``` - -### Performance Metrics - -- **Operation Latency**: Time taken for async operations -- **Queue Depth**: Number of pending operations -- **Error Rates**: Failure rates for different operation types -- **Resource Usage**: Memory and CPU usage by worker pools - -## Configuration - -### Worker Pool Configuration - -```yaml -# Infrastructure worker pool sizes -infrastructure: - redis: - workers: 10 - kafka: - workers: 5 - minio: - workers: 8 - postgres: - workers: 15 - mongodb: - workers: 12 - cron: - workers: 5 -``` - -### Timeout Configuration - -```yaml -# Operation timeouts -infrastructure: 
- timeouts: - redis: 30s - kafka: 60s - minio: 300s - postgres: 30s - mongodb: 30s -``` - -## Best Practices - -### 1. Context Usage - -Always use context for cancellation: - -```go -ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) -defer cancel() - -result := dbManager.QueryAsync(ctx, "SELECT * FROM users") -``` - -### 2. Error Handling - -Handle errors appropriately: - -```go -result := redisManager.GetAsync(ctx, "key") -value, err := result.Wait() - -if err == context.Canceled { - // Request was canceled - return -} -if err == context.DeadlineExceeded { - // Operation timed out - return -} -// Handle other errors -``` - -### 3. Resource Cleanup - -Use defer for cleanup: - -```go -result := fileManager.UploadAsync(ctx, "file.txt", reader, size, contentType) -defer func() { - if reader != nil { - reader.Close() - } -}() - -// Wait for operation -_, err := result.Wait() -``` - -### 4. Batch Operations - -Use batch operations for multiple items: - -```go -// Instead of multiple individual operations -for _, key := range keys { - redisManager.SetAsync(ctx, key, value, ttl) -} - -// Use batch operation -kvPairs := make(map[string]interface{}) -for _, key := range keys { - kvPairs[key] = value -} -redisManager.SetBatchAsync(ctx, kvPairs, ttl) -``` - -### 5. Worker Pool Sizing - -Size worker pools based on load: - -```go -// High-throughput service -redisPool := NewWorkerPool(50) - -// Low-throughput service -cronPool := NewWorkerPool(5) -``` - -## Migration Guide - -### Converting Synchronous Code - -**Before (Synchronous):** -```go -func (s *Service) GetUser(id string) (*User, error) { - var user User - err := s.db.Where("id = ?", id).First(&user).Error - return &user, err -} -``` - -**After (Asynchronous):** -```go -func (s *Service) GetUser(id string) (*User, error) { - var user User - result := s.db.GORMFirstAsync(ctx, &user, id) - _, err := result.Wait() - return &user, err -} -``` - -### Gradual Migration - -1. 
**Identify blocking operations** -2. **Add async versions alongside sync versions** -3. **Update callers gradually** -4. **Remove sync versions after full migration** - -## Testing - -### Unit Testing Async Operations - -```go -func TestAsyncRedisGet(t *testing.T) { - // Setup - redis := setupTestRedis() - ctx := context.Background() - - // Set test data - redis.Set(ctx, "test_key", "test_value", time.Hour) - - // Test async operation - result := redis.GetAsync(ctx, "test_key") - - // Wait with timeout - value, err := result.WaitWithTimeout(5 * time.Second) - - assert.NoError(t, err) - assert.Equal(t, "test_value", value) -} -``` - -### Integration Testing - -```go -func TestAsyncDatabaseOperations(t *testing.T) { - // Setup database - db := setupTestDatabase() - - // Test batch operations - users := []User{{Name: "Alice"}, {Name: "Bob"}} - result := db.GORMCreateBatchAsync(ctx, users) - - _, errors := result.WaitAll() - for _, err := range errors { - assert.NoError(t, err) - } -} -``` - -## Troubleshooting - -### Common Issues - -**Operations taking too long:** -- Check worker pool sizing -- Monitor queue depths -- Add timeouts to operations - -**Memory leaks:** -- Ensure proper cleanup of resources -- Monitor goroutine counts -- Use context cancellation - -**Deadlocks:** -- Avoid blocking operations in async jobs -- Use timeouts for all operations -- Monitor for circular dependencies - -**Resource exhaustion:** -- Limit concurrent operations -- Implement backpressure -- Monitor system resources - -## Conclusion - -The async infrastructure implementation provides: - -- **Non-blocking operations** for better application responsiveness -- **Controlled concurrency** through worker pools -- **Resource efficiency** with proper connection pooling -- **Fault tolerance** with timeout and error handling -- **Scalability** through batch operations and connection management - -This approach ensures that your Go application can handle high loads while maintaining excellent 
user experience and system stability. diff --git a/docs_wiki/BUILD_SCRIPTS.md b/docs_wiki/BUILD_SCRIPTS.md deleted file mode 100644 index b0b4b1b..0000000 --- a/docs_wiki/BUILD_SCRIPTS.md +++ /dev/null @@ -1,482 +0,0 @@ -# Enhanced Build Scripts with Garble Obfuscation - -## What's New in This Version - -### Enhanced Build Scripts (Latest Update) -The build scripts have been significantly enhanced with the following new features: - -- **Automatic Tool Installation**: Scripts now automatically check for and install required Go tools (`goversioninfo`, `garble`) if not present -- **Interactive Obfuscation Choice**: Users are prompted to choose between standard Go build or garble obfuscation build -- **Timeout Handling**: 10-second timeout with sensible default (no obfuscation) for CI/CD compatibility -- **Updated Step Numbers**: Progress indicators now show [0/6] through [5/6] to account for tool checking phase -- **Enhanced Error Handling**: Better error messages and recovery options for tool installation failures - -### TUI Improvements -The Terminal User Interface has been significantly enhanced with advanced features: - -- **Scrollable Log Display**: Full scrolling support through application logs with arrow keys, page up/down, and home/end -- **Real-time Log Filtering**: Press "/" to open a modal filter dialog for searching logs by content or level -- **Auto-scroll Control**: Manual toggle (F1) to enable/disable automatic scrolling to bottom on new logs -- **Log Management**: Press F2 to clear all accumulated logs and reset the view state -- **Modal Filter Interface**: Clean, centered dialog with black background for focused log filtering -- **Sticky Header/Footer**: App information and controls remain visible while scrolling through logs -- **Thread-safe Operations**: All log operations are properly synchronized for concurrent access -- **Unlimited Log Storage**: Removed 1000 log limit to allow unlimited log storage -- **Default Auto-scroll**: Auto-scroll 
enabled by default on application startup -- **Reusable Dialog System**: Template-based dialog components in `pkg/tui/template/` for easy reuse -- **Dialog Footer Cleanup**: Removed scroll count from footer for cleaner interface - -### Customizable Parameter Parsing System -The application now features a highly customizable parameter parsing system that allows easy addition and configuration of command-line flags: - -- **Flag Definition System**: Command-line flags are defined in `cmd/app/main.go` using a structured `FlagDefinition` type -- **Dynamic Parsing**: The parsing logic in `pkg/utils/parameter.go` automatically handles flag validation and type conversion -- **Easy Extension**: Add new flags by simply adding entries to the flagDefinitions slice -- **Type Safety**: Support for string, int, and bool flag types with built-in validation -- **Custom Validators**: Each flag can have custom validation functions for business logic rules -- **Modular Architecture**: Parsing logic separated from configuration for maintainability - -**Example of Adding a New Flag:** -```go -// In cmd/app/main.go -var flagDefinitions = []utils.FlagDefinition{ - { - Name: "c", - DefaultValue: "", - Description: "URL to load configuration from (YAML format)", - Validator: func(value interface{}) error { - if str, ok := value.(string); ok && str != "" { - if _, err := url.ParseRequestURI(str); err != nil { - return fmt.Errorf("invalid config URL format: %w", err) - } - } - return nil - }, - }, - // Add new flags easily - { - Name: "port", - DefaultValue: 8080, - Description: "Server port to listen on", - Validator: func(value interface{}) error { - if port, ok := value.(int); ok && (port < 1 || port > 65535) { - return fmt.Errorf("port must be between 1 and 65535") - } - return nil - }, - }, -} -``` - -### Previous Features (Still Included) -- Cross-platform support (Unix/Linux/macOS and Windows) -- Automatic backup and archiving of previous builds -- Process management (stops running 
application instances) -- Asset copying (config files, web assets, databases) -- Comprehensive error handling and troubleshooting - -## Overview - -The enhanced build scripts (`scripts/build.sh` for Unix/Linux/macOS and `scripts/build.bat` for Windows) now include automatic tool installation and user choice for code obfuscation using `garble`. These scripts provide a complete build pipeline with backup management, cross-platform compatibility, and production-ready binary generation. - -## Key Features - -- **Automatic Tool Installation**: Checks and installs required tools (`goversioninfo`, `garble`) -- **User Choice for Obfuscation**: Interactive prompt to enable/disable code obfuscation -- **Timeout Handling**: 10-second timeout with default behavior (no obfuscation) -- **Cross-Platform**: Native implementations for Unix/Linux/macOS and Windows -- **Backup Management**: Automatic backup and archiving of previous builds -- **Process Management**: Stops running application instances before building -- **Asset Copying**: Automatically copies configuration and web assets - -## Enhanced Build Process - -### Tool Installation Phase - -1. **Check goversioninfo**: Verifies if `goversioninfo` is installed - - If not found: Automatically installs `github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest` - - If found: Continues to next step - -2. **Check garble**: Verifies if `garble` is installed - - If not found: Automatically installs `mvdan.cc/garble@latest` - - If found: Continues to next step - -### User Choice Phase - -3. **Obfuscation Prompt**: Interactive prompt with timeout - ``` - Use garble build for obfuscation? (y/N, timeout 10s): - ``` - - **Y/y**: Enables code obfuscation using `garble build` - - **N/n or timeout**: Uses standard `go build` - - **Default**: No obfuscation (safer for development) - -### Standard Build Phase - -4. **Version Info Generation**: Runs `goversioninfo -platform-specific` -5. 
**Binary Compilation**: Builds with appropriate command - - With obfuscation: `garble build -ldflags="-s -w"` - - Without obfuscation: `go build -ldflags="-s -w"` -6. **Asset Management**: Copies configuration files, web assets, and databases - -## Usage Examples - -### Unix/Linux/macOS - -```bash -# Interactive build with user choice -./scripts/build.sh - -# Example output: -# (\_/) -/# (o.o) stackyard Builder by diameter-tscd -# c(")(") -# ------------------------------------------------------------------------------ -# [0/6] Checking required tools... -# + goversioninfo found -# + garble found -# Use garble build for obfuscation? (y/N, timeout 10s): y -# + Using garble build -# [1/6] Checking for running process... -# + App is not running. -# [2/6] Backing up old files... -# + Backup created at: dist/backups/20251220_235500 -# [3/6] Archiving backup... -# + Backup archived: dist/backups/20251220_235500.zip -# [4/6] Building Go binary... -# + Build successful: dist/stackyard -# [5/6] Copying assets... -# + Copying web folder... -# + Copying config.yaml... -# SUCCESS! Build ready at: dist/ -``` - -### Windows - -```cmd -# Interactive build with user choice -scripts\build.bat - -# Example output: -# (\_/) -# (o.o) stackyard Builder by diameter-tscd -# c(")(") -# ------------------------------------------------------------------------------ -# [0/6] Checking required tools... -# + goversioninfo found -# + garble found -# Use garble build for obfuscation? (Y/N, default N, timeout 10s): Y -# + Using garble build -# [1/6] Checking for running process... -# + App is not running. -# [2/6] Backing up old files... -# + Backup created at: dist/backups/20251220_235500 -# [3/6] Archiving backup... -# + Backup archived: dist/backups/20251220_235500.zip -# [4/6] Building Go binary... -# + Build successful: dist/stackyard.exe -# [5/6] Copying assets... -# + Copying web folder... -# SUCCESS! Build ready at: dist\ -``` - -## Code Obfuscation with Garble - -### What is Garble? 
- -Garble is a Go code obfuscation tool that: -- **Obfuscates identifiers**: Renames functions, variables, and types -- **Removes debug info**: Strips file paths and source information -- **Maintains functionality**: Preserves program behavior -- **Increases binary size**: Obfuscated binaries are slightly larger - -### When to Use Obfuscation - -**Recommended for:** -- Production deployments -- Commercial applications -- Security-sensitive code -- Intellectual property protection - -**Not recommended for:** -- Development builds -- Debugging scenarios -- Open source projects -- Performance-critical applications (minor overhead) - -### Obfuscation Effects - -```go -// Original code -package main - -func calculateTotal(items []Item) int { - total := 0 - for _, item := range items { - total += item.price - } - return total -} - -// Obfuscated result (example) -package main - -func A(items []B) int { - C := 0 - for _, D := range items { - C += D.E - } - return C -} -``` - -## Configuration Options - -### Build Script Variables - -| Variable | Unix/Linux/macOS | Windows | Description | -|----------|------------------|---------|-------------| -| `DIST_DIR` | `dist` | `dist` | Output directory | -| `APP_NAME` | `stackyard` | `stackyard.exe` | Binary name | -| `MAIN_PATH` | `./cmd/app/main.go` | `./cmd/app/main.go` | Main Go file | - -### Timeout Behavior - -- **Timeout Duration**: 10 seconds -- **Default Choice**: N (no obfuscation) -- **Platform Differences**: - - Unix: Uses `read -t` with signal handling - - Windows: Uses `choice /T` command - -## Error Handling - -### Tool Installation Failures - -```bash -# If goversioninfo installation fails -[0/6] Checking required tools... -! goversioninfo not found. Installing... -x Failed to install goversioninfo -# Script exits with error code -``` - -### Build Failures - -```bash -# If Go compilation fails -[4/6] Building Go binary... -x Build FAILED! 
Exit code: 2 -# Script exits with build error code -``` - -### Recovery Options - -**Clean Rebuild:** -```bash -# Remove dist directory and rebuild -rm -rf dist/ -./scripts/build.sh -``` - -**Skip Obfuscation:** -```bash -# Choose 'N' when prompted or wait for timeout -# Script will use standard go build -``` - -## Performance Comparison - -### Build Times - -| Build Type | Average Time | Binary Size | Notes | -|------------|--------------|-------------|-------| -| Standard Go | ~15-30s | ~15-25MB | Normal compilation | -| Garble Build | ~45-90s | ~18-28MB | Slower, larger binary | -| UPX Compressed | +10-20s | ~6-10MB | Additional compression | - -### Runtime Performance - -- **Standard Build**: Baseline performance -- **Garble Build**: ~1-5% slower due to obfuscated symbols -- **Memory Usage**: No significant difference - -## Security Considerations - -### Code Protection - -**Obfuscation Benefits:** -- Makes reverse engineering more difficult -- Protects intellectual property -- Complicates debugging by attackers -- Reduces information leakage - -**Limitations:**: -- Not encryption (can be deobfuscated with effort) -- Source code recovery is difficult but not impossible -- Performance debugging becomes harder - -### Best Practices - -1. **Development**: Use standard builds for easier debugging -2. **Staging**: Test with obfuscated builds before production -3. **Production**: Always use obfuscated builds for security -4. 
**CI/CD**: Automate obfuscation for consistent deployments - -## Integration with CI/CD - -### GitHub Actions Example - -```yaml -name: Build and Deploy - -on: - push: - branches: [ main ] - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - - name: Install Go tools - run: | - go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest - go install mvdan.cc/garble@latest - - - name: Build without obfuscation - run: go build -ldflags="-s -w" -o app ./cmd/app/main.go - - - name: Build with obfuscation - run: garble build -ldflags="-s -w" -o app-obfuscated ./cmd/app/main.go -``` - -### Docker Integration - -```dockerfile -# Multi-stage build with obfuscation choice -FROM golang:1.21-alpine AS builder - -# Install tools -RUN go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@latest -RUN go install mvdan.cc/garble@latest - -# Copy source -COPY . . - -# Build with obfuscation (for production) -RUN goversioninfo -platform-specific -RUN garble build -ldflags="-s -w" -o main ./cmd/app - -FROM alpine:latest -COPY --from=builder /app/main . 
-CMD ["./main"] -``` - -## Troubleshooting - -### Common Issues - -**"garble: command not found"** -- Ensure garble was installed successfully -- Check Go environment: `go env` -- Verify installation: `go list mvdan.cc/garble` - -**"Build takes too long"** -- Garble builds are slower by design -- Consider using standard builds for development -- Use build caching in CI/CD - -**"Obfuscated binary crashes"** -- Test obfuscated builds thoroughly before production -- Some reflection-based code may need adjustments -- Check for hardcoded function names - -**"Timeout reached during prompt"** -- Script continues with default (no obfuscation) -- For automated builds, consider removing the prompt -- Use environment variables for CI/CD - -### Debug Options - -**Verbose Output:** -```bash -# Add to build script for debugging -set -x # Unix -@echo on # Windows -``` - -**Skip Prompt (Automated Builds):** -```bash -# Modify script to skip user interaction -USE_GARBLE=false # Always use standard build -# Or -USE_GARBLE=true # Always use obfuscation -``` - -## Migration Guide - -### From Previous Version - -1. **Backup existing scripts** (optional but recommended) -2. **Replace build scripts** with enhanced versions -3. **Test installation** of required tools -4. **Verify builds** work with both obfuscation options -5. 
**Update CI/CD** pipelines if needed - -### Backward Compatibility - -- **Existing builds**: Continue to work unchanged -- **Configuration**: Same environment variables and paths -- **Output format**: Identical directory structure -- **Dependencies**: Only adds optional tools - -## Advanced Usage - -### Custom Build Flags - -Modify the build commands for additional flags: - -```bash -# Unix/Linux/macOS -garble build -ldflags="-s -w -X main.version=1.2.3" -o "$DIST_DIR/$APP_NAME" "$MAIN_PATH" - -# Windows -garble build -ldflags="-s -w -X main.version=1.2.3" -o "%DIST_DIR%\%APP_NAME%" %MAIN_PATH% -``` - -### Environment-Based Choice - -For automated environments: - -```bash -# Set environment variable -export USE_GARBLE=true - -# Modify script to check environment -if [ "$USE_GARBLE" = "true" ]; then - # Skip prompt, use garble -else - # Show prompt -fi -``` - -### Multiple Build Targets - -```bash -# Build both versions -./scripts/build.sh # Interactive choice -USE_GARBLE=false ./scripts/build.sh # Standard only -USE_GARBLE=true ./scripts/build.sh # Obfuscated only -``` - -## Conclusion - -The enhanced build scripts provide a robust, user-friendly solution for Go application building with optional code obfuscation. The interactive prompts ensure developers can make informed choices about security vs. development convenience, while the automatic tool installation removes setup barriers. 
- -Key benefits: -- **Security**: Optional code obfuscation for production deployments -- **Usability**: Interactive prompts with sensible defaults -- **Automation**: CI/CD friendly with timeout handling -- **Reliability**: Comprehensive error handling and backup management -- **Cross-platform**: Native implementations for all major platforms diff --git a/docs_wiki/CHANGE_PACKAGE_SCRIPTS.md b/docs_wiki/CHANGE_PACKAGE_SCRIPTS.md deleted file mode 100644 index eb07761..0000000 --- a/docs_wiki/CHANGE_PACKAGE_SCRIPTS.md +++ /dev/null @@ -1,340 +0,0 @@ -# Package Name Change Scripts - -## Overview - -The Package Name Change Scripts provide automated tools for renaming the Go module package name across the entire codebase. These scripts are essential when refactoring, renaming, or migrating a Go project to a new module path. - -## Features - -- **Cross-Platform Support**: Separate implementations for Unix/Linux/macOS and Windows -- **Comprehensive Updates**: Updates both the module declaration and all import paths -- **Safety Mechanisms**: Backup creation and error validation -- **Pure Native Tools**: No external dependencies required -- **Recursive Processing**: Handles all `.go` files in the project directory tree - -## Scripts Overview - -### change_package.sh (Unix/Linux/macOS) - -A Bash script that uses standard Unix utilities for text processing and file manipulation. - -**Prerequisites:** -- Bash shell -- `sed` (stream editor) -- `find` (file search utility) -- `awk` (text processing) -- `grep` (pattern matching) - -### change_package.bat (Windows) - -A Windows batch script that uses pure CMD commands for maximum compatibility. 
- -**Prerequisites:** -- Windows Command Prompt -- No external tools required (uses built-in CMD features) - -## Usage - -### Basic Usage - -**Unix/Linux/macOS:** -```bash -chmod +x scripts/change_package.sh -./scripts/change_package.sh "github.com/new-org/new-project" -``` - -**Windows:** -```cmd -scripts\change_package.bat github.com/new-org/new-project -``` - -### Examples - -```bash -# Change to a GitHub repository -./scripts/change_package.sh "github.com/mycompany/myproject" - -# Change to a local/private module -./scripts/change_package.sh "mycompany.com/internal/project" - -# Change to a generic domain -scripts\change_package.bat "example.com/my-app" - -# Change to a subdirectory module -./scripts/change_package.sh "github.com/user/project/v2" -``` - -## How It Works - -The scripts perform the following operations in sequence: - -### 1. Validation Phase - -- **Input Validation**: Ensures a new module name is provided as argument -- **Module Discovery**: Reads the current module name from `go.mod` -- **Error Checking**: Validates that `go.mod` exists and contains a valid module declaration - -### 2. Update Phase - -- **Module Declaration**: Updates the `module` line in `go.mod` -- **Import Path Scanning**: Recursively finds all `.go` files in the project -- **Import Path Replacement**: Updates all import statements containing the old module name - -### 3. Completion Phase - -- **Success Reporting**: Displays completion message with statistics -- **Backup Notification**: Reminds user about backup files (Unix/Linux/macOS) - -## Technical Implementation - -### Unix/Linux/macOS Implementation - -The Bash script uses: - -```bash -# Read current module name -CURRENT_MODULE=$(grep '^module ' go.mod | awk '{print $2}') - -# Update go.mod with backup -sed -i.bak "s|^module $CURRENT_MODULE|module $NEW_MODULE|" go.mod - -# Update all .go files with backup -find . 
-name "*.go" -type f -exec sed -i.bak "s|$CURRENT_MODULE|$NEW_MODULE|g" {} + -``` - -### Windows Implementation - -The batch script uses CMD's built-in string replacement: - -```batch -REM Enable delayed expansion for variable manipulation -setlocal enabledelayedexpansion - -REM Extract current module name -for /f "tokens=2" %%i in ('findstr "^module " go.mod') do set CURRENT_MODULE=%%i - -REM Replace in go.mod using temporary file -(for /f "delims=" %%i in (go.mod) do ( - set "line=%%i" - set "line=!line:%search%=%replace%!" - echo !line! -)) > "%tempfile%" - -REM Replace in all .go files -for /r %%f in (*.go) do ( - REM Similar replacement logic for each file -) -``` - -## Safety Features - -### Backup Creation - -**Unix/Linux/macOS:** -- Creates `.bak` backup files for all modified files -- Preserves original content in case of errors -- Easy cleanup after verification - -**Windows:** -- No automatic backups (CMD limitations) -- Recommends committing changes before running -- Files are overwritten directly - -### Error Handling - -- **Argument Validation**: Stops if no new module name provided -- **File Existence**: Checks for `go.mod` presence -- **Module Detection**: Validates current module name extraction -- **Operation Verification**: Checks success of file operations - -### Recovery Options - -If something goes wrong: - -1. **Restore from backups** (Unix/Linux/macOS): - ```bash - find . -name "*.bak" -exec bash -c 'mv "$1" "${1%.bak}"' _ {} \; - ``` - -2. **Git revert** (if changes were committed): - ```bash - git checkout HEAD~1 go.mod - git checkout HEAD~1 -- "*.go" - ``` - -## Best Practices - -### Before Running - -1. **Commit Current Changes**: - ```bash - git add . - git commit -m "Before package name change" - ``` - -2. **Test Compilation**: - ```bash - go build ./... - go test ./... - ``` - -3. **Backup Important Files** (additional safety): - ```bash - cp go.mod go.mod.backup - ``` - -### After Running - -1. 
**Verify Changes**: - ```bash - grep -r "old-module-name" . --include="*.go" - ``` - -2. **Clean Up Backups** (Unix/Linux/macOS): - ```bash - find . -name "*.bak" -delete - ``` - -3. **Update Dependencies**: - ```bash - go mod tidy - ``` - -4. **Test Again**: - ```bash - go build ./... - go test ./... - ``` - -5. **Update IDE/Editor**: - - Restart your IDE - - Clear any cached module information - -## Common Use Cases - -### Repository Rename - -When moving a project to a new GitHub organization: - -```bash -# Old: github.com/old-org/project -# New: github.com/new-org/project -./scripts/change_package.sh "github.com/new-org/project" -``` - -### Versioning - -When creating a new major version: - -```bash -# Old: github.com/user/project -# New: github.com/user/project/v2 -./scripts/change_package.sh "github.com/user/project/v2" -``` - -### Internal Migration - -When moving from public to private repositories: - -```bash -# Old: github.com/user/project -# New: company.com/internal/project -./scripts/change_package.sh "company.com/internal/project" -``` - -## Troubleshooting - -### "Module declaration not found" - -**Cause**: `go.mod` file missing or malformed -**Solution**: Ensure you're in the correct project directory and `go.mod` exists - -### "Permission denied" - -**Cause**: File permissions or write access issues -**Solution**: Check file permissions and run with appropriate privileges - -### "sed: command not found" (Unix/Linux/macOS) - -**Cause**: `sed` not installed -**Solution**: Install sed utility (usually pre-installed on most systems) - -### "FINDSTR is not recognized" (Windows) - -**Cause**: Running in PowerShell instead of CMD -**Solution**: Use `cmd.exe` or modify script for PowerShell compatibility - -### Import Paths Not Updated - -**Cause**: Custom import aliases or complex import statements -**Solution**: Manually review and update any remaining imports - -### IDE Still Shows Old Imports - -**Cause**: IDE caching module information -**Solution**: 
Restart IDE and clear module cache - -## Performance Considerations - -- **Large Codebases**: The script processes all `.go` files recursively -- **File Count**: Performance scales with the number of Go files -- **Backup Overhead**: Unix script creates backups (extra I/O) -- **Memory Usage**: Minimal memory footprint for both implementations - -## Integration with CI/CD - -### GitHub Actions Example - -```yaml -- name: Change Package Name - run: | - chmod +x scripts/change_package.sh - ./scripts/change_package.sh "github.com/${{ github.repository }}" - -- name: Verify Changes - run: | - go mod tidy - go build ./... -``` - -### Jenkins Pipeline Example - -```groovy -stage('Package Rename') { - steps { - sh 'chmod +x scripts/change_package.sh' - sh "./scripts/change_package.sh ${NEW_MODULE_NAME}" - } -} - -stage('Verification') { - steps { - sh 'go mod tidy' - sh 'go build ./...' - } -} -``` - -## Security Considerations - -- **Input Validation**: Scripts validate module name format -- **Path Safety**: Operations confined to project directory -- **No Network Access**: Scripts work entirely offline -- **No External Dependencies**: Reduces supply chain risks - -## Future Enhancements - -Potential improvements for future versions: - -- **Dry Run Mode**: Preview changes without applying them -- **Selective Updates**: Update only specific directories -- **Import Alias Handling**: Better support for custom import aliases -- **Validation Checks**: Verify new module name format -- **Progress Indicators**: Show progress for large codebases -- **Undo Functionality**: Automated rollback capability - -## Conclusion - -The Package Name Change Scripts provide a reliable, cross-platform solution for Go module renaming. They handle the complexity of updating both module declarations and import paths while providing safety mechanisms to prevent data loss. 
- -The scripts follow the principle of "convention over configuration" - they work automatically with standard Go project structures without requiring additional setup or configuration files. diff --git a/docs_wiki/CONFIGURATION_GUIDE.md b/docs_wiki/CONFIGURATION_GUIDE.md deleted file mode 100644 index 01fc2a4..0000000 --- a/docs_wiki/CONFIGURATION_GUIDE.md +++ /dev/null @@ -1,460 +0,0 @@ -# Configuration Guide - -This document provides a comprehensive guide to configuring the application using the `config.yaml` file. It includes all available configuration options with explanations and examples. - -## Table of Contents - -- [Basic Configuration](#basic-configuration) -- [Server Configuration](#server-configuration) -- [Services Configuration](#services-configuration) -- [Authentication](#authentication) -- [Redis Configuration](#redis-configuration) -- [Kafka Configuration](#kafka-configuration) -- [PostgreSQL Configuration](#postgresql-configuration) - - [Single Connection (Legacy)](#single-connection-legacy) - - [Multiple Connections (Recommended)](#multiple-connections-recommended) -- [Monitoring Configuration](#monitoring-configuration) - - [MinIO Configuration](#minio-configuration) - - [External Services](#external-services) -- [Cron Jobs](#cron-jobs) -- [Encryption](#encryption) - -## Basic Configuration - -```yaml -app: - name: "My Fancy Go App" - version: "1.0.0" - debug: true - env: "development" - banner_path: "banner.txt" - startup_delay: 3 # seconds to display boot screen (0 to skip) - quiet_startup: true # suppress console logs (TUI only, logs still go to monitoring) - enable_tui: true # enable fancy TUI mode (false = traditional console logging) -``` - -| Option | Type | Description | Default | -|--------|------|-------------|---------| -| `name` | string | Application name | "My Fancy Go App" | -| `version` | string | Application version | "1.0.0" | -| `debug` | boolean | Enable debug mode | false | -| `env` | string | Environment (development, 
production, etc.) | "development" | -| `banner_path` | string | Path to banner text file | "banner.txt" | -| `startup_delay` | integer | Seconds to display boot screen (0 to skip) | 0 | -| `quiet_startup` | boolean | Suppress console logs during startup | false | -| `enable_tui` | boolean | Enable Terminal User Interface | false | - -## Server Configuration - -```yaml -server: - port: "8080" -``` - -| Option | Type | Description | Default | -|--------|------|-------------|---------| -| `port` | string | Server port | "8080" | - -## Services Configuration - -```yaml -services: - service_a: true - service_b: false - service_c: true - service_d: false - service_encryption: false -``` - -Each service can be enabled or disabled individually. Set to `true` to enable, `false` to disable. - -## Authentication - -```yaml -auth: - type: "apikey" - secret: "super-secret-key" -``` - -| Option | Type | Description | Default | -|--------|------|-------------|---------| -| `type` | string | Authentication type | "apikey" | -| `secret` | string | Secret key for authentication | "" | - -## Redis Configuration - -```yaml -redis: - enabled: false - address: "localhost:6379" - password: "" - db: 0 -``` - -| Option | Type | Description | Default | -|--------|------|-------------|---------| -| `enabled` | boolean | Enable Redis | false | -| `address` | string | Redis server address | "localhost:6379" | -| `password` | string | Redis password | "" | -| `db` | integer | Redis database number | 0 | - -## Kafka Configuration - -```yaml -kafka: - enabled: false - brokers: - - "localhost:9092" - topic: "my-topic" - group_id: "my-group" -``` - -| Option | Type | Description | Default | -|--------|------|-------------|---------| -| `enabled` | boolean | Enable Kafka | false | -| `brokers` | array | List of Kafka broker addresses | ["localhost:9092"] | -| `topic` | string | Default Kafka topic | "my-topic" | -| `group_id` | string | Consumer group ID | "my-group" | - -## PostgreSQL Configuration 
- -The application supports both single and multiple PostgreSQL connections. Multiple connections allow you to connect to different databases and switch between them dynamically through the web monitoring interface. - -### Single Connection (Legacy) - -```yaml -postgres: - enabled: true - host: "localhost" - port: 5432 - user: "postgres" - password: "Mypostgres01" - dbname: "primary_db" - sslmode: "disable" - max_open_conns: 10 - max_idle_conns: 5 -``` - -### Multiple Connections (Recommended) - -```yaml -postgres: - enabled: true - connections: - - name: "primary" - enabled: true - host: "localhost" - port: 5432 - user: "postgres" - password: "Mypostgres01" - dbname: "primary_db" - sslmode: "disable" - - - name: "secondary" - enabled: true - host: "localhost" - port: 5433 - user: "postgres" - password: "Mypostgres01" - dbname: "secondary_db" - sslmode: "disable" - - - name: "analytics" - enabled: false # Disabled by default - host: "analytics.example.com" - port: 5432 - user: "analytics_user" - password: "analytics_password" - dbname: "analytics_db" - sslmode: "require" -``` - -### Multiple Connections Features - -- **Dynamic Switching**: Switch between database connections through the web monitoring interface -- **Connection Health**: Monitor the status of each database connection individually -- **Selective Queries**: Run queries on specific databases by selecting the connection -- **Load Distribution**: Distribute read/write operations across multiple databases -- **Failover Support**: Automatic fallback when connections become unavailable - -### Web Monitoring Interface - -When multiple connections are configured, the PostgreSQL monitoring page (`/monitoring/postgres`) provides: - -- **Connection Selector**: Dropdown to choose which database to monitor/query -- **Status Indicators**: Green/red dots showing connection health for each database -- **Database Info**: Information about the currently selected database -- **Query Execution**: Run SQL queries on the 
selected database connection -- **Running Queries**: View active queries on the selected database - -### Usage Examples - -#### Monitoring Multiple Databases - -1. Configure multiple connections in `config.yaml` -2. Start the application with monitoring enabled -3. Access the monitoring dashboard at `http://localhost:9090` -4. Navigate to the "Postgres" tab -5. Use the connection dropdown to switch between databases -6. Monitor health and run queries on each database individually - -#### High Availability Setup - -```yaml -postgres: - enabled: true - connections: - - name: "primary" - enabled: true - host: "db-primary.example.com" - port: 5432 - user: "app_user" - password: "${PRIMARY_DB_PASSWORD}" - dbname: "app_db" - sslmode: "require" - - - name: "replica" - enabled: true - host: "db-replica.example.com" - port: 5432 - user: "app_user" - password: "${REPLICA_DB_PASSWORD}" - dbname: "app_db" - sslmode: "require" -``` - -| Option | Type | Description | Default | -|--------|------|-------------|---------| -| `enabled` | boolean | Enable PostgreSQL | false | -| `connections` | array | List of PostgreSQL connections | [] | -| `connections[].name` | string | Connection name identifier | "" | -| `connections[].enabled` | boolean | Enable this specific connection | false | -| `connections[].host` | string | PostgreSQL host | "localhost" | -| `connections[].port` | integer | PostgreSQL port | 5432 | -| `connections[].user` | string | PostgreSQL username | "postgres" | -| `connections[].password` | string | PostgreSQL password | "" | -| `connections[].dbname` | string | Database name | "" | -| `connections[].sslmode` | string | SSL mode (disable, require, etc.) 
| "disable" | - -## Monitoring Configuration - -```yaml -monitoring: - enabled: true - port: "9090" - password: "admin" - obfuscate_api: true - title: "Stackyard Admin" - subtitle: "Dashboard Monitor" - max_photo_size_mb: 2 - upload_dir: "web/monitoring/uploads" -``` - -| Option | Type | Description | Default | -|--------|------|-------------|---------| -| `enabled` | boolean | Enable monitoring | false | -| `port` | string | Monitoring port | "9090" | -| `password` | string | Monitoring password | "" | -| `obfuscate_api` | boolean | Enable API obfuscation | false | -| `title` | string | Monitoring dashboard title | "Stackyard Admin" | -| `subtitle` | string | Monitoring dashboard subtitle | "" | -| `max_photo_size_mb` | integer | Maximum photo upload size in MB | 2 | -| `upload_dir` | string | Upload directory path | "web/monitoring/uploads" | - -### MinIO Configuration - -```yaml -monitoring: - minio: - enabled: true - endpoint: "localhost:9003" - access_key: "minioadmin" - secret_key: "minioadmin" - use_ssl: false - bucket: "main" -``` - -| Option | Type | Description | Default | -|--------|------|-------------|---------| -| `enabled` | boolean | Enable MinIO integration | false | -| `endpoint` | string | MinIO endpoint | "localhost:9000" | -| `access_key` | string | MinIO access key | "minioadmin" | -| `secret_key` | string | MinIO secret key | "minioadmin" | -| `use_ssl` | boolean | Use SSL for MinIO connection | false | -| `bucket` | string | Default bucket name | "main" | - -### External Services - -```yaml -monitoring: - external: - services: - - name: "Google" - url: "https://google.com" - - name: "Soundcloud" - url: "https://soundcloud.com" - - name: "Local API" - url: "http://localhost:8080/health" -``` - -## Cron Jobs - -```yaml -cron: - enabled: true - jobs: - log_cleanup: "0 0 * * *" # Run at midnight - health_check: "*/10 * * * * *" # Every 10 seconds -``` - -| Option | Type | Description | Default | -|--------|------|-------------|---------| -| 
`enabled` | boolean | Enable cron jobs | false | -| `jobs` | object | Cron job definitions | {} | - -## Encryption - -```yaml -encryption: - enabled: false - algorithm: "aes-256-gcm" - key: "" - rotate_keys: false - key_rotation_interval: "24h" -``` - -| Option | Type | Description | Default | -|--------|------|-------------|---------| -| `enabled` | boolean | Enable encryption | false | -| `algorithm` | string | Encryption algorithm | "aes-256-gcm" | -| `key` | string | Encryption key (32 bytes for AES-256) | "" | -| `rotate_keys` | boolean | Enable automatic key rotation | false | -| `key_rotation_interval` | string | Key rotation interval | "24h" | - -## Complete Example Configuration - -```yaml -# Example configuration with multiple PostgreSQL connections -app: - name: "My Fancy Go App" - version: "1.0.0" - debug: true - env: "development" - banner_path: "banner.txt" - startup_delay: 3 # seconds to display boot screen (0 to skip) - quiet_startup: true # suppress console logs (TUI only, logs still go to monitoring) - enable_tui: true # enable fancy TUI mode (false = traditional console logging) - -server: - port: "8080" - -services: - service_a: true - service_b: false - service_c: true - service_d: false - service_encryption: false - -auth: - type: "apikey" - secret: "super-secret-key" - -redis: - enabled: false - address: "localhost:6379" - password: "" - db: 0 - -kafka: - enabled: false - brokers: - - "localhost:9092" - topic: "my-topic" - group_id: "my-group" - -# NEW: Multiple PostgreSQL connections configuration -postgres: - enabled: true - connections: - - name: "primary" - enabled: true - host: "localhost" - port: 5432 - user: "postgres" - password: "Mypostgres01" - dbname: "primary_db" - sslmode: "disable" - - - name: "secondary" - enabled: true - host: "localhost" - port: 5433 - user: "postgres" - password: "Mypostgres01" - dbname: "secondary_db" - sslmode: "disable" - - - name: "analytics" - enabled: false # Disabled by default - host: 
"analytics.example.com" - port: 5432 - user: "analytics_user" - password: "analytics_password" - dbname: "analytics_db" - sslmode: "require" - -monitoring: - enabled: true - port: "9090" - password: "admin" - obfuscate_api: true - title: "Stackyard Admin" - subtitle: "Dashboard Monitor" - max_photo_size_mb: 2 - upload_dir: "web/monitoring/uploads" - - minio: - enabled: true - endpoint: "localhost:9003" - access_key: "minioadmin" - secret_key: "minioadmin" - use_ssl: false - bucket: "main" - - external: - services: - - name: "Google" - url: "https://google.com" - - name: "Soundcloud" - url: "https://soundcloud.com" - - name: "Local API" - url: "http://localhost:8080/health" - -cron: - enabled: true - jobs: - log_cleanup: "0 0 * * *" - health_check: "*/10 * * * * *" - -encryption: - enabled: false - algorithm: "aes-256-gcm" - key: "" - rotate_keys: false - key_rotation_interval: "24h" -``` - -## Usage - -1. Copy the example configuration to `config.yaml` -2. Modify the values according to your environment -3. Ensure sensitive information (passwords, secrets) are properly secured -4. For production environments, consider using environment variables for sensitive data - -## Best Practices - -- Use environment variables for sensitive configuration (passwords, API keys) -- Disable unused services to reduce resource consumption -- Use meaningful names for PostgreSQL connections -- Monitor connection health through the monitoring dashboard -- Regularly rotate encryption keys if encryption is enabled diff --git a/docs_wiki/DEVELOPMENT.md b/docs_wiki/DEVELOPMENT.md new file mode 100644 index 0000000..9d77405 --- /dev/null +++ b/docs_wiki/DEVELOPMENT.md @@ -0,0 +1,661 @@ +# Development Guide + +This guide covers how to extend and customize Stackyard for your specific needs. Learn to add new services, integrate databases, handle API requests, and deploy your application. + +## Adding New Services + +Services are the core building blocks of Stackyard applications. 
Each service encapsulates business logic and exposes API endpoints. + +### Basic Service Structure + +Create a new service in `internal/services/modules/service_yourname.go`: + +```go +package modules + +import ( + "stackyard/pkg/response" + "github.com/labstack/echo/v4" +) + +type YourService struct { + enabled bool +} + +func NewYourService(enabled bool) *YourService { + return &YourService{enabled: enabled} +} + +func (s *YourService) Name() string { return "Your Service" } +func (s *YourService) Enabled() bool { return s.enabled } +func (s *YourService) Endpoints() []string { return []string{"/your-api"} } + +func (s *YourService) RegisterRoutes(g *echo.Group) { + // Register your API endpoints here + g.GET("/your-api", s.getData) + g.POST("/your-api", s.createData) +} + +func (s *YourService) getData(c echo.Context) error { + // Your business logic here + data := map[string]string{"message": "Hello from your service!"} + return response.Success(c, data, "Data retrieved") +} + +func (s *YourService) createData(c echo.Context) error { + // Handle POST request + return response.Created(c, nil, "Data created") +} +``` + +### Register Your Service + +Add to `internal/server/server.go`: + +```go +// Find the service registration section and add: +registry.Register(modules.NewYourService(s.config.Services.IsEnabled("your_service"))) +``` + +### Enable in Configuration + +Add to `config.yaml`: + +```yaml +services: + your_service: true +``` + +### Test Your Service + +```bash +# Restart the application +go run cmd/app/main.go + +# Test the endpoint +curl http://localhost:8080/api/v1/your-api +``` + +## API Development + +### Request Handling & Validation + +Stackyard provides built-in request validation and standardized responses. 
+ +#### Basic Request Handling + +```go +func (s *YourService) createUser(c echo.Context) error { + // Parse JSON request + var req CreateUserRequest + if err := c.Bind(&req); err != nil { + return response.BadRequest(c, "Invalid request format") + } + + // Validate request + if req.Name == "" { + return response.BadRequest(c, "Name is required") + } + + // Process request + user := User{Name: req.Name, Email: req.Email} + // Save to database... + + return response.Created(c, user, "User created successfully") +} +``` + +#### Advanced Validation with Tags + +```go +type CreateUserRequest struct { + Name string `json:"name" validate:"required,min=2,max=50"` + Email string `json:"email" validate:"required,email"` + Age int `json:"age" validate:"required,gte=18,lte=120"` + Phone string `json:"phone" validate:"required,phone"` + Password string `json:"password" validate:"required,min=8"` +} + +func (s *YourService) createUser(c echo.Context) error { + var req CreateUserRequest + + // Bind and validate in one step + if err := request.Bind(c, &req); err != nil { + if validationErr, ok := err.(*request.ValidationError); ok { + return response.ValidationError(c, "Validation failed", validationErr.GetFieldErrors()) + } + return response.BadRequest(c, err.Error()) + } + + // Request is valid, proceed... 
+ return response.Created(c, req, "User created") +} +``` + +#### Custom Validators + +Add custom validation rules in `pkg/request/request.go`: + +```go +// Add to the validator initialization +validate.RegisterValidation("phone", func(fl validator.FieldLevel) bool { + phone := fl.Field().String() + // Your phone validation logic + matched, _ := regexp.MatchString(`^\+?[1-9]\d{1,14}$`, phone) + return matched +}) + +validate.RegisterValidation("username", func(fl validator.FieldLevel) bool { + username := fl.Field().String() + // Alphanumeric, 3-20 characters + matched, _ := regexp.MatchString(`^[a-zA-Z0-9]{3,20}$`, username) + return matched +}) +``` + +### Response Types + +#### Success Responses + +```go +// Simple success +return response.Success(c, data, "Operation completed") + +// Success with metadata (pagination) +meta := response.CalculateMeta(page, perPage, total) +return response.SuccessWithMeta(c, data, meta, "Data retrieved") + +// Created (201) +return response.Created(c, newResource, "Resource created") + +// No content (204) +return response.NoContent(c) +``` + +#### Error Responses + +```go +// Bad request +return response.BadRequest(c, "Invalid input data") + +// Not found +return response.NotFound(c, "Resource not found") + +// Unauthorized +return response.Unauthorized(c, "Authentication required") + +// Forbidden +return response.Forbidden(c, "Access denied") + +// Validation error with field details +fieldErrors := map[string]string{ + "email": "Invalid email format", + "password": "Must be at least 8 characters", +} +return response.ValidationError(c, "Validation failed", fieldErrors) +``` + +#### Pagination + +```go +type PaginationRequest struct { + Page int `query:"page" json:"page"` + PerPage int `query:"per_page" json:"per_page"` +} + +func (s *YourService) listUsers(c echo.Context) error { + var pagination PaginationRequest + if err := c.Bind(&pagination); err != nil { + return response.BadRequest(c, "Invalid pagination parameters") + 
} + + page := pagination.GetPage() // Default: 1 + perPage := pagination.GetPerPage() // Default: 10, Max: 100 + offset := pagination.GetOffset() // Calculated offset + + // Query with pagination + users, total, err := s.getUsersWithPagination(offset, perPage) + if err != nil { + return response.InternalServerError(c, "Failed to fetch users") + } + + // Return with pagination metadata + meta := response.CalculateMeta(page, perPage, total) + return response.SuccessWithMeta(c, users, meta, "Users retrieved") +} +``` + +## Database Integration + +### PostgreSQL with GORM + +#### Basic Model & Operations + +```go +type User struct { + gorm.Model + Name string `json:"name" gorm:"not null"` + Email string `json:"email" gorm:"unique;not null"` + Password string `json:"-" gorm:"not null"` // Don't serialize +} + +func (s *UserService) createUser(c echo.Context) error { + var req CreateUserRequest + if err := request.Bind(c, &req); err != nil { + return response.BadRequest(c, err.Error()) + } + + user := User{ + Name: req.Name, + Email: req.Email, + Password: hashPassword(req.Password), + } + + if err := s.db.Create(&user).Error; err != nil { + return response.InternalServerError(c, "Failed to create user") + } + + return response.Created(c, user, "User created") +} + +func (s *UserService) getUser(c echo.Context) error { + id := c.Param("id") + var user User + + if err := s.db.First(&user, id).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return response.NotFound(c, "User not found") + } + return response.InternalServerError(c, "Database error") + } + + return response.Success(c, user, "User retrieved") +} +``` + +#### Dependency Injection + +Services receive database managers through constructor injection: + +```go +type UserService struct { + enabled bool + db *infrastructure.PostgresManager +} + +func NewUserService(db *infrastructure.PostgresManager, enabled bool) *UserService { + return &UserService{ + enabled: enabled, + db: db, + } +} +``` + 
+### Redis Caching + +#### Basic Caching Operations + +```go +type CacheService struct { + redis *infrastructure.RedisManager +} + +func (s *CacheService) cacheUser(userID string, user User) error { + ctx := context.Background() + data, err := json.Marshal(user) + if err != nil { + return err + } + + return s.redis.Set(ctx, fmt.Sprintf("user:%s", userID), string(data), time.Hour) +} + +func (s *CacheService) getCachedUser(userID string) (*User, error) { + ctx := context.Background() + data, err := s.redis.Get(ctx, fmt.Sprintf("user:%s", userID)) + if err != nil { + return nil, err + } + + var user User + if err := json.Unmarshal([]byte(data), &user); err != nil { + return nil, err + } + + return &user, nil +} +``` + +#### Cache-Aside Pattern + +```go +func (s *UserService) getUserWithCache(c echo.Context) error { + userID := c.Param("id") + + // Try cache first + if user, err := s.cache.getCachedUser(userID); err == nil { + return response.Success(c, user, "User retrieved from cache") + } + + // Cache miss - get from database + var user User + if err := s.db.First(&user, userID).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return response.NotFound(c, "User not found") + } + return response.InternalServerError(c, "Database error") + } + + // Cache for future requests + s.cache.cacheUser(userID, user) + + return response.Success(c, user, "User retrieved") +} +``` + +## Event Streaming + +### Server-Sent Events (SSE) + +Add real-time capabilities to your services: + +```go +type NotificationService struct { + enabled bool + broadcaster *utils.EventBroadcaster +} + +func NewNotificationService(enabled bool) *NotificationService { + return &NotificationService{ + enabled: enabled, + broadcaster: utils.NewEventBroadcaster(), + } +} + +func (s *NotificationService) RegisterRoutes(g *echo.Group) { + g.GET("/notifications/stream", s.streamNotifications) + g.POST("/notifications/send", s.sendNotification) +} + +func (s *NotificationService) 
streamNotifications(c echo.Context) error { + // Subscribe to notification stream + client := s.broadcaster.Subscribe("notifications") + defer s.broadcaster.Unsubscribe(client.ID) + + // Set SSE headers + c.Response().Header().Set(echo.HeaderContentType, "text/event-stream") + c.Response().Header().Set(echo.HeaderCacheControl, "no-cache") + c.Response().Header().Set(echo.HeaderConnection, "keep-alive") + + // Listen for events + for { + select { + case event := <-client.Channel: + // Send SSE event + c.Response().Write([]byte(fmt.Sprintf("data: %s\n\n", event.Data))) + c.Response().Flush() + case <-c.Request().Context().Done(): + return nil + } + } +} + +func (s *NotificationService) sendNotification(c echo.Context) error { + var notification map[string]interface{} + if err := c.Bind(&notification); err != nil { + return response.BadRequest(c, "Invalid notification data") + } + + // Broadcast to all subscribers + s.broadcaster.Broadcast("notifications", "notification", "New notification", notification) + + return response.Success(c, nil, "Notification sent") +} +``` + +## File Upload Handling + +### Basic File Upload + +```go +func (s *FileService) uploadFile(c echo.Context) error { + // Get file from form + file, err := c.FormFile("file") + if err != nil { + return response.BadRequest(c, "No file provided") + } + + // Open uploaded file + src, err := file.Open() + if err != nil { + return response.InternalServerError(c, "Failed to open file") + } + defer src.Close() + + // Upload to storage (MinIO, local, etc.) 
+ result, err := s.storage.UploadFile(context.Background(), + fmt.Sprintf("uploads/%s", file.Filename), + src, file.Size, file.Header.Get("Content-Type")) + + if err != nil { + return response.InternalServerError(c, "Upload failed") + } + + return response.Created(c, map[string]interface{}{ + "filename": file.Filename, + "size": file.Size, + "url": s.storage.GetFileUrl(result.Key), + }, "File uploaded successfully") +} +``` + +## Configuration Management + +### Adding New Configuration Options + +Add to `config/config.go`: + +```go +type YourServiceConfig struct { + APIKey string `yaml:"api_key"` + Timeout int `yaml:"timeout" default:"30"` + Endpoints []string `yaml:"endpoints"` +} + +type Config struct { + // ... existing fields ... + YourService YourServiceConfig `yaml:"your_service"` +} +``` + +Use in `config.yaml`: + +```yaml +your_service: + api_key: "your-api-key" + timeout: 60 + endpoints: + - "https://api.example.com" + - "https://backup.example.com" +``` + +## Testing + +### Unit Tests + +```go +func TestUserService_GetUser(t *testing.T) { + // Setup + mockDB := &mocks.PostgresManager{} + service := NewUserService(mockDB, true) + + // Mock expectations + expectedUser := User{ID: 1, Name: "John"} + mockDB.On("First", mock.AnythingOfType("*User"), "1").Return(nil).Run(func(args mock.Arguments) { + user := args.Get(0).(*User) + *user = expectedUser + }) + + // Test + c, rec := setupEchoContext() + c.SetParamNames("id") + c.SetParamValues("1") + + err := service.getUser(c) + assert.NoError(t, err) + + // Verify response + var response map[string]interface{} + json.Unmarshal(rec.Body.Bytes(), &response) + assert.True(t, response["success"].(bool)) + assert.Equal(t, "User retrieved", response["message"]) +} +``` + +### Integration Tests + +```go +func TestUserAPI(t *testing.T) { + // Start test server + e := echo.New() + // ... setup routes ... 
+ + // Test HTTP requests + req := httptest.NewRequest(http.MethodGet, "/api/v1/users/1", nil) + rec := httptest.NewRecorder() + c := e.NewContext(req, rec) + c.SetParamNames("id") + c.SetParamValues("1") + + // Execute request + err := userHandler(c) + assert.NoError(t, err) + + // Verify response + assert.Equal(t, http.StatusOK, rec.Code) + var response map[string]interface{} + json.Unmarshal(rec.Body.Bytes(), &response) + assert.True(t, response["success"].(bool)) +} +``` + +## Deployment + +### Building for Production + +```bash +# Build optimized binary +go build -ldflags="-w -s" -o app cmd/app/main.go + +# Or use the build script +./scripts/build.sh +``` + +### Docker Deployment + +Create `Dockerfile`: + +```dockerfile +FROM golang:1.21-alpine AS builder +WORKDIR /app +COPY go.mod go.sum ./ +RUN go mod download +COPY . . +RUN go build -ldflags="-w -s" -o main cmd/app/main.go + +FROM alpine:latest +RUN apk --no-cache add ca-certificates +WORKDIR /root/ +COPY --from=builder /app/main . +EXPOSE 8080 9090 +CMD ["./main"] +``` + +Build and run: + +```bash +docker build -t myapp . +docker run -p 8080:8080 -p 9090:9090 myapp +``` + +### Environment Variables + +Override configuration with environment variables: + +```bash +export APP_DEBUG=false +export SERVER_PORT=3000 +export MONITORING_PASSWORD=secure-password +export POSTGRES_PASSWORD=prod-password + +go run cmd/app/main.go +``` + +## Best Practices + +### Service Design + +1. **Single Responsibility**: Each service should do one thing well +2. **Dependency Injection**: Inject infrastructure dependencies +3. **Error Handling**: Use consistent error responses +4. **Validation**: Always validate input data +5. **Logging**: Log important operations and errors + +### API Design + +1. **RESTful URLs**: Use consistent URL patterns +2. **HTTP Status Codes**: Use appropriate status codes +3. **JSON Responses**: Stick to the standard response format +4. **Versioning**: Include API versioning in URLs +5. 
**Documentation**: Document all endpoints + +### Performance + +1. **Caching**: Cache frequently accessed data +2. **Pagination**: Always paginate large datasets +3. **Async Operations**: Use async operations for slow tasks +4. **Connection Pooling**: Database connections are automatically pooled +5. **Indexes**: Add database indexes for performance + +### Security + +1. **Input Validation**: Validate all user inputs +2. **Authentication**: Implement proper authentication +3. **Authorization**: Check permissions for operations +4. **HTTPS**: Use HTTPS in production +5. **Secrets**: Never commit secrets to version control + +## Troubleshooting + +### Common Development Issues + +**Service not registering:** +- Check that the service is added to `internal/server/server.go` +- Verify the config key matches in `config.yaml` +- Check for compilation errors + +**Database connection errors:** +- Verify database credentials +- Check network connectivity +- Ensure database server is running + +**API validation errors:** +- Check request JSON structure +- Verify validation tags on struct fields +- Test with valid/invalid data + +**Performance issues:** +- Add database indexes +- Implement caching +- Check for N+1 query problems +- Monitor memory usage + +## Next Steps + +Now that you understand how to develop with Stackyard, explore: + +- **[Architecture Overview](ARCHITECTURE.md)** - Deep dive into the technical design +- **[API Reference](REFERENCE.md)** - Complete technical documentation +- **Built-in Services** - Study `service_a.go`, `service_b.go` for examples + +Happy developing! 
🎯 diff --git a/docs_wiki/DOCKER_CONTAINERIZATION.md b/docs_wiki/DOCKER_CONTAINERIZATION.md deleted file mode 100644 index c541fc9..0000000 --- a/docs_wiki/DOCKER_CONTAINERIZATION.md +++ /dev/null @@ -1,579 +0,0 @@ -# Docker Containerization Guide - -## Overview - -This project includes comprehensive Docker containerization with multi-stage builds for development, testing, and production environments. The Docker setup provides consistent deployment across different environments while optimizing for security, performance, and maintainability. - -## Dockerfile Architecture - -The `Dockerfile` implements a multi-stage build strategy with optimized stages for minimal image sizes: - -### 1. Builder Stage (Optimized) - -```dockerfile -FROM golang:1.25.5-alpine3.23 AS builder - -WORKDIR /app - -# Install build dependencies and UPX for compression -RUN apk add --no-cache upx - -# Copy go mod files -COPY go.mod go.sum ./ - -# Download dependencies -RUN go mod download - -# Copy source code -COPY . . - -# Build the binary with optimizations -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \ - go build \ - -ldflags="-w -s" \ - -trimpath \ - -o main ./cmd/app - -# Compress binary with UPX (ultra-brute for maximum compression) -RUN upx --ultra-brute main -``` - -**Optimizations Applied:** -- **UPX Compression**: Reduces binary size by 50-70% using ultra-brute compression -- **Build Flags**: `-ldflags="-w -s"` removes debugging information and symbol table -- **Trimpath**: Removes file system paths from the compiled executable -- **Static Linking**: `CGO_ENABLED=0` ensures fully static binaries - -**Purpose**: Compiles the Go application into a static binary -- Uses Alpine Linux for smaller base image -- Downloads dependencies separately for better layer caching -- Produces a statically linked binary with `CGO_ENABLED=0` -- Targets Linux platform for container compatibility - -### 2. Test Stage - -```dockerfile -FROM builder AS test - -# Run tests -RUN go test ./... 
-``` - -**Purpose**: Executes the test suite in an isolated environment -- Inherits all source code and dependencies from builder stage -- Runs `go test ./...` to execute all test packages -- Can be targeted separately for CI/CD testing pipelines - -### 3. Development Stage - -```dockerfile -FROM golang:1.25.5-alpine3.23 AS dev - -WORKDIR /app - -# Copy go mod files -COPY go.mod go.sum ./ - -# Download dependencies -RUN go mod download - -# Copy source code -COPY . . - -# Build the binary -RUN go build -o main ./cmd/app - -# Configure for Docker environment -ENV APP_QUIET_STARTUP=false -ENV APP_ENABLE_TUI=false - -# Expose ports for main API server and monitoring server -EXPOSE 8080 9090 - -# Run the application -CMD ["./main"] -``` - -**Purpose**: Provides a development environment with hot-reload capabilities -- Includes full Go toolchain for development tools -- Mounts source code for live development -- Exposes both main API (8080) and monitoring (9090) ports -- Automatically disables TUI and quiet startup for containerized environment - -### 4. Production Stage - -```dockerfile -FROM alpine:latest AS prod - -# Install ca-certificates for HTTPS -RUN apk --no-cache add ca-certificates - -WORKDIR /root/ - -# Copy the binary from builder stage -COPY --from=builder /app/main . - -# Configure for Docker environment -ENV APP_QUIET_STARTUP=false -ENV APP_ENABLE_TUI=false - -# Expose ports for main API server and monitoring server -EXPOSE 8080 9090 - -# Run the application -CMD ["./main"] -``` - -**Purpose**: Creates a minimal production image -- Uses Alpine Linux for security and small size -- Includes only the compiled binary and runtime dependencies -- No source code or build tools included -- Optimized for production deployment -- Automatically disables TUI and quiet startup for containerized environment - -## Building Docker Images - -The project includes automated build scripts for building Docker images across different environments. 
- -### Using Build Scripts - -#### Unix/Linux/macOS - -```bash -# Make script executable (first time only) -chmod +x scripts/docker_build.sh - -# Build with default settings -./scripts/docker_build.sh - -# Build with custom app name and image name -./scripts/docker_build.sh "my-app" "myregistry/myapp" -``` - -#### Windows - -```cmd -# Build with default settings -scripts\docker_build.bat - -# Build with custom app name and image name -scripts\docker_build.bat "my-app" "myregistry/myapp" -``` - -**Script Parameters:** -- `APP_NAME`: Application name (default: "stackyard") -- `IMAGE_NAME`: Docker image name (default: "myapp") -- `TARGET`: Build target - "all", "test", "dev", or "prod" (default: "all") - -**What the scripts do:** -1. **Test Stage**: Builds and runs tests to ensure code quality (for "test" and "all" targets) -2. **Development Stage**: Builds development image with full Go toolchain (for "dev" and "all" targets) -3. **Production Stage**: Builds optimized production image (for "prod" and "all" targets) -4. 
**Cleanup**: Removes dangling Docker images to save space - -**Target Options:** -- `all`: Build all stages (test, dev, prod) - default behavior -- `test`: Build and run tests only (2 steps) -- `dev`: Build development image only (1 step) -- `prod`: Build production image only (1 step) - Alpine (~50MB) with full monitoring -- `prod-slim`: Build slim production image (1 step) - Ubuntu (~30-40MB) with full monitoring -- `prod-minimal`: Build minimal production image (1 step) - BusyBox (~10-20MB) with full monitoring -- `ultra-prod`: Build ultra-minimal production image only (1 step) - smallest size using Distroless (~15-30MB, no monitoring) -- `ultra-all`: Build all ultra-minimal stages (ultra-test, ultra-dev, ultra-prod) -- `ultra-dev`: Build ultra-minimal development image (Distroless) - runs pre-built binary only -- `ultra-test`: Build ultra-minimal test image (Distroless) - runs pre-built binary only - -**Usage Examples:** -```bash -# Build everything (default) -./scripts/docker_build.sh - -# Build only production image (fastest) -./scripts/docker_build.sh "myapp" "myregistry/myapp" "prod" - -# Build slim production image (~30-40MB, more secure) -./scripts/docker_build.sh "myapp" "myregistry/myapp" "prod-slim" - -# Build minimal production image (~10-20MB) -./scripts/docker_build.sh "myapp" "myregistry/myapp" "prod-minimal" - -# Build ultra-minimal production image (smallest, no monitoring) -./scripts/docker_build.sh "myapp" "myregistry/myapp" "ultra-prod" - -# Build everything with ultra-prod for production -./scripts/docker_build.sh "myapp" "myregistry/myapp" "ultra-all" -``` - -### Manual Docker Commands - -If you prefer to build manually: - -#### Development Build - -```bash -# Build development image -docker build --target dev -t myapp:dev . - -# Run development container -docker run -p 8080:8080 -p 9090:9090 myapp:dev -``` - -#### Testing Build - -```bash -# Build and run tests -docker build --target test -t myapp:test . 
- -# Run tests only (will exit after tests complete) -docker run myapp:test -``` - -#### Production Build - -```bash -# Build production image -docker build --target prod -t myapp:latest . - -# Run production container -docker run -p 8080:8080 -p 9090:9090 myapp:latest -``` - -## Configuration in Containers - -### Environment Variables - -The application supports configuration via environment variables that override `config.yaml`: - -```bash -# Run with custom configuration -docker run \ - -e SERVER_PORT=3000 \ - -e MONITORING_PORT=4000 \ - -e REDIS_ENABLED=true \ - -e REDIS_HOST=redis-server \ - -p 3000:3000 \ - -p 4000:4000 \ - myapp:latest -``` - -### Volume Mounts - -For development with live reloading: - -```bash -# Mount config file -docker run \ - -v $(pwd)/config.yaml:/app/config.yaml \ - -p 8080:8080 \ - -p 9090:9090 \ - myapp:dev -``` - -For production with external config: - -```bash -# Mount external config -docker run \ - -v /path/to/config.yaml:/root/config.yaml \ - -p 8080:8080 \ - -p 9090:9090 \ - myapp:latest -``` - -## Networking - -### Port Configuration - -The application exposes two main ports: - -- **8080**: Main API server port (configurable via `server.port`) -- **9090**: Monitoring web interface port (configurable via `monitoring.port`) - -### Port Mapping Examples - -```bash -# Default port mapping -docker run -p 8080:8080 -p 9090:9090 myapp:latest - -# Custom host ports -docker run -p 3000:8080 -p 4000:9090 myapp:latest - -# Bind to specific interface -docker run -p 127.0.0.1:8080:8080 myapp:latest -``` - -## Docker Compose Integration - -### Basic docker-compose.yml - -```yaml -version: '3.8' - -services: - app: - build: - context: . 
- target: prod - ports: - - "8080:8080" - - "9090:9090" - environment: - - SERVER_PORT=8080 - - MONITORING_PORT=9090 - depends_on: - - postgres - - redis - - postgres: - image: postgres:15-alpine - environment: - - POSTGRES_DB=myapp - - POSTGRES_USER=myapp - - POSTGRES_PASSWORD=password - volumes: - - postgres_data:/var/lib/postgresql/data - ports: - - "5432:5432" - - redis: - image: redis:7-alpine - ports: - - "6379:6379" - -volumes: - postgres_data: -``` - -### Development docker-compose.yml - -```yaml -version: '3.8' - -services: - app: - build: - context: . - target: dev - ports: - - "8080:8080" - - "9090:9090" - volumes: - - .:/app - - /app/main - environment: - - APP_ENV=development - - APP_DEBUG=true - depends_on: - - postgres - - redis - - postgres: - image: postgres:15-alpine - environment: - - POSTGRES_DB=myapp_dev - - POSTGRES_USER=myapp - - POSTGRES_PASSWORD=password - volumes: - - postgres_dev_data:/var/lib/postgresql/data - ports: - - "5432:5432" - - redis: - image: redis:7-alpine - ports: - - "6379:6379" - -volumes: - postgres_dev_data: -``` - -## Multi-Stage Build Benefits - -### Security -- **Minimal Attack Surface**: Production images contain only the runtime binary -- **No Source Code**: Source code is not included in production images -- **Alpine Linux**: Uses secure, minimal base images with regular security updates - -### Performance -- **Small Image Size**: Multi-stage builds reduce final image size significantly -- **Fast Startup**: Optimized binary compilation with static linking -- **Layer Caching**: Dependencies are cached in separate layers for faster rebuilds - -### Maintainability -- **Clear Separation**: Each stage has a specific purpose and can be built independently -- **Targeted Builds**: Can build specific stages for different environments -- **Easy Debugging**: Development stage includes full toolchain for troubleshooting - -## CI/CD Integration - -### GitHub Actions Example - -```yaml -name: Build and Deploy - -on: - push: - 
branches: [ main ] - -jobs: - test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Build test image - run: docker build --target test -t myapp:test . - - name: Run tests - run: docker run myapp:test - - build: - needs: test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - name: Build production image - run: docker build --target prod -t myapp:latest . - - name: Push to registry - run: | - echo ${{ secrets.DOCKER_PASSWORD }} | docker login -u ${{ secrets.DOCKER_USERNAME }} --password-stdin - docker tag myapp:latest myregistry/myapp:latest - docker push myregistry/myapp:latest -``` - -### Jenkins Pipeline Example - -```groovy -pipeline { - agent any - - stages { - stage('Test') { - steps { - sh 'docker build --target test -t myapp:test .' - sh 'docker run myapp:test' - } - } - - stage('Build') { - steps { - sh 'docker build --target prod -t myapp:latest .' - } - } - - stage('Deploy') { - steps { - sh 'docker-compose up -d' - } - } - } -} -``` - -## Troubleshooting - -### Common Issues - -**"exec ./main: no such file or directory"** -- Ensure `CGO_ENABLED=0` in builder stage for static linking -- Check that the binary was built correctly in the builder stage - -**"connection refused" to database** -- Ensure database containers are started before the app -- Use `depends_on` in docker-compose for proper startup order - -**Large image size** -- Verify that production stage only copies the binary from builder -- Use `.dockerignore` to exclude unnecessary files - -**Permission denied on volume mounts** -- Ensure proper file permissions on mounted directories -- Check user permissions in the container - -### Debug Commands - -```bash -# Check running containers -docker ps - -# View container logs -docker logs - -# Execute shell in running container (not available in Distroless) -docker exec -it /bin/sh - -# Inspect image layers -docker history myapp:latest - -# Check image size -docker images myapp - -# Verify which base image 
was used -docker inspect myapp:ultra | grep -A 5 "RepoTags" -``` - -### Ultra-Prod Issues - -**"Still shows Alpine image for ultra-prod"** -- **Cause**: Docker may be showing cached images or intermediate layers -- **Solution**: Check `docker images` for the specific tag, or run `docker build --no-cache --target ultra-prod -t myapp:ultra .` - -**"Ultra-prod image not much smaller"** -- **Cause**: Binary size may still be large despite UPX compression -- **Solution**: Check binary size with `ls -lh main` in builder stage, ensure UPX is working - -**"Cannot run ultra-prod container"** -- **Cause**: Distroless images have no shell, debugging is limited -- **Solution**: Use Alpine-based prod image for debugging, switch to ultra-prod for production - -## Best Practices - -### Security -1. **Use Official Images**: Base images from trusted sources only -2. **Regular Updates**: Keep base images and dependencies updated -3. **Minimal Images**: Use Alpine variants for smaller attack surface -4. **No Secrets in Images**: Use environment variables or secret management - -### Performance -1. **Multi-Stage Builds**: Separate build and runtime stages -2. **Layer Optimization**: Order commands to maximize layer caching -3. **Minimal Base Images**: Use Alpine Linux for production -4. **Static Binaries**: Compile with `CGO_ENABLED=0` for portability - -### Development Workflow -1. **Volume Mounts**: Mount source code for live development -2. **Hot Reload**: Use development stage with full Go toolchain -3. **Debug Tools**: Include debugging tools in development images -4. **Consistent Environments**: Use same base images across team - -## Migration Guide - -### From Single-Stage to Multi-Stage - -**Before (single-stage):** -```dockerfile -FROM golang:1.21-alpine - -WORKDIR /app -COPY . . -RUN go build -o main ./cmd/app - -CMD ["./main"] -``` - -**After (multi-stage):** -```dockerfile -FROM golang:1.25.5-alpine3.23 AS builder -# ... build stage - -FROM alpine:latest AS prod -# ... 
production stage -``` - -### Benefits of Migration -- **50-70% smaller images**: Remove build dependencies from final image -- **Better security**: No source code or build tools in production -- **Faster deployments**: Smaller images transfer and start faster -- **Flexible builds**: Can build different stages for different purposes - -## Conclusion - -The Docker containerization setup provides a robust, secure, and efficient way to deploy the Go application across different environments. The multi-stage build approach ensures optimal image sizes, security, and performance while maintaining flexibility for development and testing workflows. diff --git a/docs_wiki/ENCRYPTION_API.md b/docs_wiki/ENCRYPTION_API.md deleted file mode 100644 index 7e47a16..0000000 --- a/docs_wiki/ENCRYPTION_API.md +++ /dev/null @@ -1,435 +0,0 @@ -# API Request/Response Encryption - -## Overview - -The API Request/Response Encryption feature provides end-to-end encryption for all API communications between clients and the server. This feature enhances security by encrypting sensitive data in transit, protecting against man-in-the-middle attacks and ensuring data confidentiality. 
- -## Features - -- **AES-256-GCM Encryption**: Industry-standard authenticated encryption providing both confidentiality and integrity -- **Automatic Middleware**: Transparent encryption/decryption for all API endpoints -- **Configurable**: Enable/disable encryption via configuration -- **Key Management**: Support for key rotation and secure key storage -- **Selective Encryption**: Skip encryption for health checks and system endpoints - -## Configuration - -### Basic Configuration - -Add the following section to your `config.yaml` file: - -```yaml -encryption: - enabled: true # Enable encryption feature - algorithm: "aes-256-gcm" # Encryption algorithm - key: "your-32-byte-secret-key-here-12345678" # Encryption key (32 bytes for AES-256) - rotate_keys: false # Enable automatic key rotation - key_rotation_interval: "24h" # Key rotation interval (when enabled) -``` - -### Environment Variables - -You can also configure encryption using environment variables: - -```bash -export ENCRYPTION_ENABLED=true -export ENCRYPTION_ALGORITHM="aes-256-gcm" -export ENCRYPTION_KEY="your-32-byte-secret-key-here-12345678" -export ENCRYPTION_ROTATE_KEYS=false -export ENCRYPTION_KEY_ROTATION_INTERVAL="24h" -``` - -## Implementation Details - -### Middleware Architecture - -The encryption middleware operates at the HTTP layer, providing transparent encryption/decryption: - -1. **Request Processing**: - - Checks for `X-Encrypted-Request: true` header - - Decrypts request body if encrypted - - Validates content type (JSON only) - -2. **Response Processing**: - - Encrypts JSON responses when encryption is enabled - - Sets `X-Encrypted-Response: true` header - - Sets `X-Encryption-Algorithm` header - -3. 
**Endpoint Exclusions**: - - `/health` - Health check endpoint - - `/restart` - Server restart endpoint - - `/api/v1/encryption/*` - Encryption service endpoints - -### Encryption Service Endpoints - -The encryption service provides the following endpoints under `/api/v1/encryption`: - -#### POST `/encrypt` - Encrypt Data - -**Request:** -```json -{ - "data": "sensitive data to encrypt", - "content_type": "application/json" -} -``` - -**Response:** -```json -{ - "status": "success", - "message": "Data encrypted successfully", - "data": { - "encrypted_data": "base64-encoded-encrypted-data", - "algorithm": "aes-256-gcm", - "timestamp": 1234567890, - "content_type": "application/json" - } -} -``` - -#### POST `/decrypt` - Decrypt Data - -**Request:** -```json -{ - "encrypted_data": "base64-encoded-encrypted-data", - "content_type": "application/json" -} -``` - -**Response:** -```json -{ - "status": "success", - "message": "Data decrypted successfully", - "data": { - "decrypted_data": "original decrypted data", - "algorithm": "aes-256-gcm", - "timestamp": 1234567890, - "content_type": "application/json" - } -} -``` - -#### GET `/status` - Get Encryption Status - -**Response:** -```json -{ - "status": "success", - "message": "Encryption service status", - "data": { - "enabled": true, - "algorithm": "aes-256-gcm", - "current_key": "abcd...", - "key_length": 32, - "rotate_keys": false, - "last_rotation": 1234567890 - } -} -``` - -#### POST `/key-rotate` - Rotate Encryption Key - -**Request:** -```json -{ - "new_key": "new-32-byte-secret-key-here-12345678" -} -``` - -**Response:** -```json -{ - "status": "success", - "message": "Key rotation successful", - "data": { - "message": "Encryption key rotated successfully", - "new_key_preview": "abcd..." 
- } -} -``` - -## Client Implementation Guide - -### JavaScript Client Example - -```javascript -import axios from 'axios'; -import { encrypt, decrypt } from './encryption-utils'; - -const API_BASE_URL = 'http://localhost:8080/api/v1'; - -// Encryption utility functions -export async function encryptData(data) { - const response = await axios.post(`${API_BASE_URL}/encryption/encrypt`, { - data: JSON.stringify(data), - content_type: 'application/json' - }); - return response.data.data.encrypted_data; -} - -export async function decryptData(encryptedData) { - const response = await axios.post(`${API_BASE_URL}/encryption/decrypt`, { - encrypted_data: encryptedData, - content_type: 'application/json' - }); - return JSON.parse(response.data.data.decrypted_data); -} - -// Encrypted API request -export async function encryptedRequest(endpoint, method = 'GET', data = null) { - const config = { - headers: { - 'Content-Type': 'application/json' - } - }; - - if (data) { - // Encrypt the request data - const encryptedData = await encryptData(data); - config.data = encryptedData; - config.headers['X-Encrypted-Request'] = 'true'; - } - - const response = await axios({ - method, - url: `${API_BASE_URL}${endpoint}`, - ...config - }); - - // Check if response is encrypted - if (response.headers['x-encrypted-response'] === 'true') { - return decryptData(response.data); - } - - return response.data; -} - -// Usage example -async function getUsers() { - try { - const users = await encryptedRequest('/users', 'GET'); - console.log('Users:', users); - } catch (error) { - console.error('Request failed:', error); - } -} -``` - -### Python Client Example - -```python -import requests -import json -import base64 - -API_BASE_URL = "http://localhost:8080/api/v1" - -def encrypt_data(data): - response = requests.post( - f"{API_BASE_URL}/encryption/encrypt", - json={ - "data": json.dumps(data), - "content_type": "application/json" - } - ) - return response.json()["data"]["encrypted_data"] - -def 
decrypt_data(encrypted_data): - response = requests.post( - f"{API_BASE_URL}/encryption/decrypt", - json={ - "encrypted_data": encrypted_data, - "content_type": "application/json" - } - ) - return json.loads(response.json()["data"]["decrypted_data"]) - -def encrypted_request(endpoint, method="GET", data=None): - headers = { - "Content-Type": "application/json" - } - - if data: - encrypted_data = encrypt_data(data) - headers["X-Encrypted-Request"] = "true" - data = encrypted_data - - response = requests.request( - method, - f"{API_BASE_URL}{endpoint}", - headers=headers, - json=data if data else None - ) - - if response.headers.get("X-Encrypted-Response") == "true": - return decrypt_data(response.text) - - return response.json() - -# Usage example -users = encrypted_request("/users", "GET") -print("Users:", users) -``` - -## Security Best Practices - -### Key Management - -1. **Key Length**: Always use 32-byte keys for AES-256 -2. **Key Storage**: Store encryption keys in environment variables or secret management systems -3. **Key Rotation**: Regularly rotate encryption keys (recommended every 24-48 hours) -4. **Production Keys**: Never commit production keys to version control - -### Configuration Recommendations - -```yaml -# Production configuration example -encryption: - enabled: true - algorithm: "aes-256-gcm" - key: "${ENCRYPTION_KEY}" # Load from environment variable - rotate_keys: true - key_rotation_interval: "24h" -``` - -### Deployment Checklist - -1. ✅ Configure encryption in `config.yaml` -2. ✅ Set strong encryption key (32 bytes minimum) -3. ✅ Enable encryption middleware -4. ✅ Test encryption endpoints -5. ✅ Update client applications to use encrypted requests -6. ✅ Monitor encryption service status -7. 
✅ Implement key rotation schedule - -## Troubleshooting - -### Common Issues - -**Issue: "Failed to decrypt request body"** -- **Cause**: Invalid encryption key or corrupted data -- **Solution**: Verify encryption key matches between client and server - -**Issue: "X-Encrypted-Request header missing"** -- **Cause**: Client not setting encryption header -- **Solution**: Ensure client sets `X-Encrypted-Request: true` header - -**Issue: "Content type not supported"** -- **Cause**: Trying to encrypt non-JSON content -- **Solution**: Only encrypt JSON requests/responses - -**Issue: "Encrypted data too short"** -- **Cause**: Invalid or truncated encrypted data -- **Solution**: Check data integrity and encryption process - -### Debugging Tips - -1. **Check Headers**: Verify `X-Encrypted-Request` and `X-Encrypted-Response` headers -2. **Validate Keys**: Ensure encryption keys match between client and server -3. **Test Endpoints**: Use `/encryption/status` to verify service health -4. **Enable Debug Logging**: Set `app.debug: true` for detailed logs - -## Performance Considerations - -- **Overhead**: AES-256-GCM adds minimal processing overhead (~1-5ms per request) -- **Caching**: Consider caching frequently accessed encrypted responses -- **Batch Processing**: For bulk operations, encrypt/decrypt data in batches -- **Key Rotation**: Schedule key rotation during low-traffic periods - -## Migration Guide - -### From Unencrypted to Encrypted API - -1. **Phase 1: Prepare Infrastructure** - - Configure encryption in development environment - - Test encryption endpoints - - Update client libraries - -2. **Phase 2: Dual Mode Operation** - - Enable encryption middleware - - Support both encrypted and unencrypted requests - - Gradually migrate clients - -3. 
**Phase 3: Full Encryption** - - Enforce encryption for all requests - - Remove unencrypted fallback - - Monitor performance and errors - -### Backward Compatibility - -The encryption feature is designed to be backward compatible: - -- **Disabled by Default**: Encryption is opt-in via configuration -- **Graceful Degradation**: System continues to work if encryption fails -- **Selective Encryption**: Critical endpoints can be encrypted while others remain unencrypted - -## Advanced Configuration - -### Custom Encryption Algorithms - -While AES-256-GCM is recommended, you can implement custom algorithms: - -```go -// Custom encryption service implementation -type CustomEncryptionService struct { - // Implement custom encryption logic -} - -// Register custom service -registry.Register(modules.NewServiceEWithCustomAlgorithm( - s.config.Encryption.Enabled, - "custom-algorithm", - customEncryptionLogic -)) -``` - -### Performance Optimization - -For high-throughput applications: - -```yaml -# Performance-tuned configuration -encryption: - enabled: true - algorithm: "aes-256-gcm" - key: "${ENCRYPTION_KEY}" - # Consider hardware-accelerated encryption if available - use_hardware_acceleration: true -``` - -## Monitoring and Observability - -### Metrics - -The encryption service exposes the following metrics: - -- **Encryption Requests**: Count of encrypted requests -- **Decryption Requests**: Count of decrypted requests -- **Key Rotations**: Count of key rotation operations -- **Encryption Latency**: Time taken for encryption operations -- **Decryption Latency**: Time taken for decryption operations - -### Logging - -Encryption-related events are logged with the following structure: - -```json -{ - "level": "info", - "message": "Encrypted request processed", - "path": "/api/v1/users", - "method": "POST", - "algorithm": "aes-256-gcm", - "latency_ms": 2.4 -} -``` - -## Compliance and Standards - -- **GDPR**: Meets data protection requirements for personal data -- **HIPAA**: 
Suitable for healthcare data encryption -- **PCI DSS**: Compliant with payment card industry standards -- **NIST**: Follows NIST recommendations for cryptographic standards \ No newline at end of file diff --git a/docs_wiki/ERROR_HANDLING.md b/docs_wiki/ERROR_HANDLING.md deleted file mode 100644 index 1488bbf..0000000 --- a/docs_wiki/ERROR_HANDLING.md +++ /dev/null @@ -1,206 +0,0 @@ -# HTTP Error Handling - -## Overview - -This document describes the custom HTTP error handling implementation that ensures all error responses are returned in a consistent JSON format rather than the default Echo HTML responses. - -## Custom Error Handler - -The server implements a custom HTTP error handler located in `internal/server/server.go`. This handler intercepts all HTTP errors and converts them to standardized JSON responses. - -### Key Features - -- All 404 (Not Found) errors return a specific JSON response with incident tracking -- All HTTP errors return JSON instead of HTML -- Non-HTTP errors are caught and return a 500 Internal Server Error -- Error details include correlation ID for debugging and tracking - -## 404 Not Found Response - -When a request is made to an endpoint that does not exist, the server returns the following JSON response: - -### Response Format - -```json -{ - "success": false, - "status": 404, - "error": { - "code": "ENDPOINT_NOT_FOUND", - "message": "Endpoint not found. 
This incident will be reported.", - "details": { - "path": "/api/v1/unknown-path", - "method": "GET" - } - }, - "timestamp": 1734235788, - "datetime": "2024-12-15T10:09:48+07:00", - "correlation_id": "550e8400-e29b-41d4-a716-446655440000" -} -``` - -### Response Fields - -| Field | Type | Description | -|-------|------|-------------| -| `success` | boolean | Always `false` for error responses | -| `status` | integer | HTTP status code (404) | -| `error.code` | string | Error code identifier (`ENDPOINT_NOT_FOUND`) | -| `error.message` | string | Human-readable error message | -| `error.details.path` | string | The requested URL path that was not found | -| `error.details.method` | string | The HTTP method used (GET, POST, etc.) | -| `timestamp` | integer | Unix timestamp of the response | -| `datetime` | string | ISO8601 formatted datetime | -| `correlation_id` | string | Unique request ID for tracking and debugging | - -## Other HTTP Errors - -For other HTTP errors (400, 401, 403, 405, 500, etc.), the server returns a simplified JSON response: - -```json -{ - "success": false, - "status": 405, - "error": { - "code": "HTTP_ERROR", - "message": "Method Not Allowed" - }, - "timestamp": 1734235788, - "datetime": "2024-12-15T10:09:48+07:00", - "correlation_id": "550e8400-e29b-41d4-a716-446655440000" -} -``` - -## Internal Server Errors - -For unexpected non-HTTP errors, the server returns a 500 Internal Server Error: - -```json -{ - "success": false, - "status": 500, - "error": { - "code": "INTERNAL_ERROR", - "message": "An unexpected error occurred" - }, - "timestamp": 1734235788, - "datetime": "2024-12-15T10:09:48+07:00", - "correlation_id": "550e8400-e29b-41d4-a716-446655440000" -} -``` - -## Error Codes Reference - -| HTTP Status | Error Code | Description | -|-------------|------------|-------------| -| 400 | `BAD_REQUEST` | Invalid request parameters | -| 401 | `UNAUTHORIZED` | Authentication required | -| 403 | `FORBIDDEN` | Access denied | -| 404 | 
`ENDPOINT_NOT_FOUND` | Requested endpoint does not exist | -| 404 | `NOT_FOUND` | Requested resource not found (used in handlers) | -| 405 | `HTTP_ERROR` | Method not allowed | -| 409 | `CONFLICT` | Resource conflict | -| 422 | `VALIDATION_ERROR` | Request validation failed | -| 500 | `INTERNAL_ERROR` | Internal server error | -| 503 | `SERVICE_UNAVAILABLE` | Service temporarily unavailable | - -## Implementation Details - -### Location - -The error handler is implemented in the `New()` function in `internal/server/server.go`: - -```go -e.HTTPErrorHandler = func(err error, c echo.Context) { - l.Error("HTTP Error", err) - - // Handle HTTP errors with JSON response - if he, ok := err.(*echo.HTTPError); ok { - var message string - code := he.Code - - // Custom message for 404 Not Found - if code == 404 { - message = "Endpoint not found. This incident will be reported." - response.Error(c, code, "ENDPOINT_NOT_FOUND", message, map[string]interface{}{ - "path": c.Request().URL.Path, - "method": c.Request().Method, - }) - return - } - - // For other HTTP errors, use the original message if it's a string - if msg, ok := he.Message.(string); ok { - message = msg - } else { - message = "An unexpected error occurred" - } - response.Error(c, code, "HTTP_ERROR", message) - return - } - - // For non-HTTP errors, return internal server error - response.InternalServerError(c, "An unexpected error occurred") -} -``` - -### Logging - -All HTTP errors are logged with the following information: - -- Error message -- Stack trace (if available) -- Request context - -Logs can be viewed in the monitoring interface when monitoring is enabled. - -## Testing - -### Test 404 Response - -```bash -# Request to unknown endpoint -curl -X GET http://localhost:8080/api/v1/unknown-endpoint - -# Expected response -{ - "success": false, - "status": 404, - "error": { - "code": "ENDPOINT_NOT_FOUND", - "message": "Endpoint not found. 
This incident will be reported.", - "details": { - "path": "/api/v1/unknown-endpoint", - "method": "GET" - } - }, - "timestamp": 1734235788, - "datetime": "2024-12-15T10:09:48+07:00", - "correlation_id": "..." -} -``` - -### Test Method Not Allowed - -```bash -# POST request to GET-only endpoint -curl -X POST http://localhost:8080/health - -# Expected response -{ - "success": false, - "status": 405, - "error": { - "code": "HTTP_ERROR", - "message": "Method Not Allowed" - }, - "timestamp": 1734235788, - "datetime": "2024-12-15T10:09:48+07:00", - "correlation_id": "..." -} -``` - -## Related Documentation - -- [API Response Structure](./API_RESPONSE_STRUCTURE.md) - Detailed response format documentation -- [Integration Guide](./INTEGRATION_GUIDE.md) - How to handle errors in client applications diff --git a/docs_wiki/EVENT_STREAMING.md b/docs_wiki/EVENT_STREAMING.md deleted file mode 100644 index b5c343c..0000000 --- a/docs_wiki/EVENT_STREAMING.md +++ /dev/null @@ -1,828 +0,0 @@ -# Live Event Streaming Documentation - -## Overview - -The Event Streaming System provides comprehensive real-time event streaming capabilities through a dual-implementation demonstration showcasing different architectural approaches: - -### Service H - Event Streaming Showcase -**Dual-implementation demonstration** (`internal/services/modules/service_h.go`) showcasing both full implementation and utility approaches: -- **Full Implementation**: Complete event streaming with all broadcasting logic included -- **Utility Demo**: Clean implementation using `pkg/utils/broadcast.go` -- Two sets of API endpoints for comparison -- Educational example of different architectural patterns - -### Broadcast Utility (`pkg/utils/broadcast.go`) -**Reusable broadcasting component** extracted for maximum reusability: -- Clean, well-documented utility for any service to use -- Thread-safe operations with proper synchronization -- Enhanced methods for monitoring and management -- Easy to integrate: just 
`utils.NewEventBroadcaster()` - -## Service Comparison - -| Implementation | Approach | API Prefix | Lines of Code | Benefits | -|----------------|----------|------------|---------------|----------| -| **Service H (Utility)** | Uses `pkg/utils/broadcast.go` | `/events/` | ~150 lines | Clean, simple, easy to understand | - -Service H demonstrates how easy it is to implement event streaming using the broadcast utility. - -## Key Features - -- **Multiple Event Streams**: Support for concurrent event streams with independent client subscriptions -- **Server-Sent Events (SSE)**: Standards-compliant real-time push notifications -- **Event Broadcasting**: Send events to specific streams or broadcast to all streams -- **Automated Stream Generators**: Background processes generating sample events for demonstration -- **Stream Management**: Dynamic start, stop, pause, and resume operations for streams -- **Client Management**: Automatic subscription/unsubscription with buffered channels -- **Thread-Safe Operations**: Concurrent-safe event broadcasting and client management - -## Architecture - -### Core Components - -#### 1. EventBroadcaster Utility (`pkg/utils/broadcast.go`) -The broadcast functionality has been extracted into a reusable utility in `pkg/utils/broadcast.go`. This makes it easy for any service to implement event streaming without duplicating code. 
- -**Usage in Service H:** -```go -broadcaster := utils.NewEventBroadcaster() - -// Subscribe to a stream -client := broadcaster.Subscribe("my-stream") - -// Broadcast to a stream -broadcaster.Broadcast("my-stream", "event_type", "message", data) - -// Broadcast to all streams -broadcaster.BroadcastToAll("global_event", "message", data) -``` - -**Core Utility Types:** -```go -type EventBroadcaster struct { - streams map[string][]*StreamClient // streamID -> clients - clients map[string]*StreamClient // clientID -> client - mu sync.RWMutex - nextID int - clientTTL time.Duration -} - -type EventData struct { - ID string `json:"id"` - Type string `json:"type"` - Message string `json:"message"` - Data map[string]interface{} `json:"data,omitempty"` - Timestamp int64 `json:"timestamp"` - StreamID string `json:"stream_id,omitempty"` -} -``` - -#### 2. StreamClient -Represents a connected client for a specific event stream. - -```go -type StreamClient struct { - ID string - StreamID string - Channel chan EventData // Buffered channel (100 messages) -} -``` - -#### 3. EventData -Standardized event data structure sent to clients. - -```go -type EventData struct { - ID string `json:"id"` - Type string `json:"type"` - Message string `json:"message"` - Data map[string]interface{} `json:"data,omitempty"` - Timestamp int64 `json:"timestamp"` - StreamID string `json:"stream_id,omitempty"` -} -``` - -#### 4. StreamGenerator -Manages automated event generation for demonstration streams. - -```go -type StreamGenerator struct { - streamID string - broadcaster *EventBroadcaster - running bool - paused bool - stopChan chan struct{} - pauseChan chan struct{} - mu sync.RWMutex -} -``` - -## API Endpoints - -### Stream Subscription - -#### GET `/api/v1/events/stream/{stream_id}` -Subscribe to a real-time event stream using Server-Sent Events.
- -**Parameters:** -- `stream_id` (path): The ID of the stream to subscribe to - -**Response Headers:** -``` -Content-Type: text/event-stream -Cache-Control: no-cache -Connection: keep-alive -Access-Control-Allow-Origin: * -Access-Control-Allow-Headers: Cache-Control -``` - -**Event Format:** -```json -data: {"id":"evt_123456789","type":"user_action","message":"User logged in","data":{"user_id":"12345","action":"login"},"timestamp":1642598400,"stream_id":"default"} - -data: {"id":"evt_123456790","type":"system_alert","message":"High CPU usage detected","data":{"cpu_percent":85.5,"threshold":80},"timestamp":1642598401,"stream_id":"system"} - -``` - -**Example Usage (JavaScript):** -```javascript -const eventSource = new EventSource('/api/v1/events/stream/default'); - -eventSource.onmessage = function(event) { - const eventData = JSON.parse(event.data); - console.log('Received event:', eventData); - - switch(eventData.type) { - case 'user_action': - handleUserAction(eventData); - break; - case 'system_alert': - handleSystemAlert(eventData); - break; - case 'notification': - showNotification(eventData); - break; - } -}; - -eventSource.onerror = function(error) { - console.error('EventSource failed:', error); - eventSource.close(); -}; -``` - -**Example Usage (curl):** -```bash -curl -N -H "Accept: text/event-stream" \ - -H "Cache-Control: no-cache" \ - http://localhost:8080/api/v1/events/stream/default -``` - -### Event Broadcasting - -#### POST `/api/v1/events/broadcast` -Broadcast an event to a specific stream or all streams. 
- -**Request Body:** -```json -{ - "stream_id": "notifications", // Optional: empty string broadcasts to all streams - "type": "custom_event", - "message": "Custom notification message", - "data": { - "priority": "high", - "sender": "admin", - "recipients": ["user1", "user2"] - } -} -``` - -**Success Response:** -```json -{ - "success": true, - "message": "Event broadcasted to stream: notifications", - "timestamp": 1642598400 -} -``` - -**Broadcast to All Streams:** -```json -{ - "type": "system_maintenance", - "message": "System maintenance scheduled", - "data": { - "maintenance_window": "2024-01-15T02:00:00Z", - "duration_minutes": 30 - } -} -``` - -### Stream Information - -#### GET `/api/v1/events/streams` -Retrieve information about active streams and their connected clients. - -**Response:** -```json -{ - "success": true, - "data": { - "default": { - "clients": 5, - "active": true, - "generator": { - "running": true, - "paused": false - } - }, - "system": { - "clients": 2, - "active": true, - "generator": { - "running": true, - "paused": false - } - }, - "notifications": { - "clients": 0, - "active": true, - "generator": { - "running": false, - "paused": false - } - } - }, - "timestamp": 1642598400 -} -``` - -### Stream Management - -#### POST `/api/v1/events/stream/{stream_id}/start` -Start or restart a stream generator. - -**Response:** -```json -{ - "success": true, - "message": "Stream 'system' started", - "timestamp": 1642598400 -} -``` - -#### POST `/api/v1/events/stream/{stream_id}/stop` -Stop a stream generator and remove it. - -**Response:** -```json -{ - "success": true, - "message": "Stream 'system' stopped and removed", - "timestamp": 1642598400 -} -``` - -#### POST `/api/v1/events/stream/{stream_id}/pause` -Pause a running stream generator. 
- -**Response:** -```json -{ - "success": true, - "message": "Stream 'system' paused", - "timestamp": 1642598400 -} -``` - -#### POST `/api/v1/events/stream/{stream_id}/resume` -Resume a paused stream generator. - -**Response:** -```json -{ - "success": true, - "message": "Stream 'system' resumed", - "timestamp": 1642598400 -} -``` - -## Default Streams - -The service automatically starts four default streams with sample event generators: - -### 1. `default` Stream -**Purpose:** General events and notifications -**Sample Events:** -- User actions (login, logout, profile updates) -- General system notifications -- Application-specific events - -### 2. `system` Stream -**Purpose:** System-level alerts and metrics -**Sample Events:** -- High CPU/memory usage alerts -- Disk space warnings -- Service health status updates -- System performance metrics - -### 3. `user-activity` Stream -**Purpose:** User action events -**Sample Events:** -- User authentication events -- Profile modifications -- Permission changes -- User-generated content updates - -### 4. `notifications` Stream -**Purpose:** Application notifications -**Sample Events:** -- Push notifications -- Alert messages -- Scheduled reminders -- System announcements - -## Event Types - -### Predefined Event Types - -- **`user_action`**: User-initiated actions (login, update profile, etc.) 
-- **`system_alert`**: System warnings and alerts (high CPU, low disk space) -- **`data_update`**: Database or data changes -- **`notification`**: General notifications and messages -- **`metric_update`**: System or application metrics -- **`stream_started`**: Stream initialization events -- **`connection`**: Client connection events - -### Custom Event Types - -You can define and use any custom event types in your applications: - -```json -{ - "type": "order_created", - "message": "New order received", - "data": { - "order_id": "ORD-12345", - "customer_id": "CUST-67890", - "total_amount": 299.99, - "items": ["widget-a", "widget-b"] - } -} -``` - -## Usage Examples - -### Frontend Integration (React) - -```javascript -import { useEffect, useState } from 'react'; - -function EventStreamComponent({ streamId }) { - const [events, setEvents] = useState([]); - const [isConnected, setIsConnected] = useState(false); - - useEffect(() => { - const eventSource = new EventSource(`/api/v1/events/stream/${streamId}`); - - eventSource.onopen = () => { - setIsConnected(true); - }; - - eventSource.onmessage = (event) => { - const eventData = JSON.parse(event.data); - setEvents(prev => [...prev.slice(-9), eventData]); // Keep last 10 events - }; - - eventSource.onerror = (error) => { - console.error('EventSource error:', error); - setIsConnected(false); - }; - - return () => { - eventSource.close(); - }; - }, [streamId]); - - return ( -
-    <div>
-      <div>
-        Connection Status: {isConnected ? '🟢 Connected' : '🔴 Disconnected'}
-      </div>
-      <div>
-        {events.map((event, index) => (
-          <div key={index}>
-            <strong>{event.type}</strong>: {event.message}
-            {event.data && <pre>{JSON.stringify(event.data, null, 2)}</pre>}
-          </div>
-        ))}
-      </div>
-    </div>
- ); -} -``` - -### Backend Event Broadcasting - -```go -// Broadcast a custom event -func notifyOrderCreated(orderID string, customerID string, amount float64) { - eventData := map[string]interface{}{ - "order_id": orderID, - "customer_id": customerID, - "amount": amount, - "timestamp": time.Now().Unix(), - } - - // This would be called from your event streaming service - broadcastToStream("orders", "order_created", "New order received", eventData) -} - -// Broadcast system alert -func alertHighCPU(cpuPercent float64) { - eventData := map[string]interface{}{ - "cpu_percent": cpuPercent, - "threshold": 80.0, - "severity": "warning", - } - - broadcastToAllStreams("system_alert", fmt.Sprintf("High CPU usage: %.1f%%", cpuPercent), eventData) -} -``` - -### Python Client - -```python -import json -import requests -import sseclient - -def stream_events(stream_id): - """Stream events from a specific stream""" - url = f"http://localhost:8080/api/v1/events/stream/{stream_id}" - - response = requests.get(url, stream=True, headers={ - 'Accept': 'text/event-stream', - 'Cache-Control': 'no-cache' - }) - - client = sseclient.SSEClient(response) - - for event in client.events(): - event_data = json.loads(event.data) - print(f"Event: {event_data['type']} - {event_data['message']}") - - # Handle different event types - if event_data['type'] == 'user_action': - handle_user_action(event_data) - elif event_data['type'] == 'system_alert': - handle_system_alert(event_data) - -def broadcast_event(stream_id, event_type, message, data=None): - """Broadcast an event to a stream""" - url = "http://localhost:8080/api/v1/events/broadcast" - - payload = { - "stream_id": stream_id, - "type": event_type, - "message": message, - "data": data or {} - } - - response = requests.post(url, json=payload) - return response.json() -``` - -## Configuration - -The event streaming service is configured via `config.yaml`: - -```yaml -services: - service_g: true # Enable the event streaming service -``` - -No 
additional configuration is required. The service automatically starts with the default streams when enabled. - -## Performance Considerations - -### Client Connections -- Each client connection uses a buffered channel (100 messages) -- Connections are automatically cleaned up when clients disconnect -- No persistent storage of events (events are ephemeral) - -### Stream Scalability -- Multiple streams can run concurrently -- Each stream generator runs in its own goroutine -- Event broadcasting is thread-safe using RWMutex - -### Memory Usage -- Event channels are buffered to prevent blocking -- Automatic cleanup of disconnected clients -- Configurable client TTL (currently 24 hours) - -## Error Handling - -### Connection Errors -- SSE connections automatically reconnect on network failures -- Client disconnection is handled gracefully -- Channel buffering prevents message loss during temporary disconnections - -### Stream Errors -- Stream generators include panic recovery -- Failed broadcasts don't affect other streams -- Error events can be broadcast to notify clients of issues - -### Validation -- Event type and message validation -- Stream ID validation for subscriptions -- JSON serialization error handling - -## Security Considerations - -### Authentication -- SSE endpoints inherit authentication from the main application -- Consider implementing stream-specific authentication if needed - -### CORS -- CORS headers are configured for cross-origin requests -- Adjust `Access-Control-Allow-Origin` for production deployments - -### Rate Limiting -- Consider implementing rate limiting for broadcast endpoints -- Monitor for abuse of stream creation/management endpoints - -## Monitoring and Debugging - -### Active Streams Monitoring -```bash -# Check active streams and client counts -curl http://localhost:8080/api/v1/events/streams -``` - -### Stream Management -```bash -# Start a stream -curl -X POST http://localhost:8080/api/v1/events/stream/custom/start - -# Stop a 
stream -curl -X POST http://localhost:8080/api/v1/events/stream/custom/stop - -# Pause a stream -curl -X POST http://localhost:8080/api/v1/events/stream/custom/pause -``` - -### Testing Event Broadcasting -```bash -# Broadcast to specific stream -curl -X POST http://localhost:8080/api/v1/events/broadcast \ - -H "Content-Type: application/json" \ - -d '{ - "stream_id": "default", - "type": "test_event", - "message": "Test message", - "data": {"test": true} - }' - -# Broadcast to all streams -curl -X POST http://localhost:8080/api/v1/events/broadcast \ - -H "Content-Type: application/json" \ - -d '{ - "type": "global_test", - "message": "Global test message" - }' -``` - -## Troubleshooting - -### Common Issues - -**Events not received by clients:** -- Check if the stream exists and is running -- Verify client connection (CORS, network issues) -- Check server logs for SSE header issues - -**High memory usage:** -- Monitor client connections and disconnections -- Check for goroutine leaks in stream generators -- Verify channel buffer sizes are appropriate - -**Stream not starting:** -- Ensure the service is enabled in config.yaml -- Check for errors in stream generator initialization -- Verify there are no naming conflicts with existing streams - -### Debug Mode - -Enable debug logging to troubleshoot issues: - -```yaml -app: - debug: true -``` - -This will provide detailed logs about: -- Client connections/disconnections -- Event broadcasting -- Stream generator lifecycle -- Error conditions - -## Best Practices - -### Client Implementation -1. **Handle Reconnection**: Implement automatic reconnection logic -2. **Event Filtering**: Filter events on the client side when possible -3. **Connection Limits**: Limit concurrent SSE connections per client -4. **Error Handling**: Gracefully handle connection failures - -### Server Implementation -1. **Resource Management**: Monitor and limit concurrent connections -2. 
**Event Validation**: Validate event data before broadcasting -3. **Stream Lifecycle**: Properly manage stream creation and cleanup -4. **Performance Monitoring**: Track event throughput and latency - -### Event Design -1. **Consistent Schema**: Use consistent event structures -2. **Meaningful Types**: Choose descriptive event type names -3. **Data Enrichment**: Include relevant context in event data -4. **Versioning**: Consider event versioning for API evolution - -## Integration Examples - -### Real-time Dashboard Updates -```javascript -// Update dashboard metrics in real-time -const metricStream = new EventSource('/api/v1/events/stream/system'); - -metricStream.onmessage = function(event) { - const data = JSON.parse(event.data); - if (data.type === 'metric_update') { - updateDashboardMetrics(data.data); - } -}; -``` - -### Notification System -```javascript -// Handle real-time notifications -const notificationStream = new EventSource('/api/v1/events/stream/notifications'); - -notificationStream.onmessage = function(event) { - const notification = JSON.parse(event.data); - showNotificationToast(notification.message, notification.data); -}; -``` - -### Collaborative Editing -```javascript -// Real-time collaborative document editing -const collabStream = new EventSource('/api/v1/events/stream/document_123'); - -collabStream.onmessage = function(event) { - const change = JSON.parse(event.data); - if (change.type === 'document_change') { - applyRemoteChange(change.data); - } -}; -``` - -## Implementation Comparison - -### Full Implementation vs Utility Approach - -Service H demonstrates two different architectural approaches for the same event streaming functionality: - -#### Full Implementation (Service H Full) -- **Code**: ~400 lines with complete broadcasting logic -- **Approach**: Self-contained with all `EventBroadcaster`, `EventData`, and stream management -- **API Prefix**: `/events-full/` -- **Benefits**: Full control, no external dependencies -- 
**Trade-offs**: Code duplication, higher maintenance burden - -#### Utility Approach (Service H Utility) -- **Code**: ~200 lines (50% less than full implementation) -- **Approach**: Uses `pkg/utils/broadcast.go` for core functionality -- **API Prefix**: `/events-util/` -- **Benefits**: Clean code, centralized maintenance, reusable across services -- **Trade-offs**: Less control over implementation details - -### Using the Broadcast Utility - -Any service can easily implement event streaming using the broadcast utility: - -```go -package modules - -import ( - "test-go/pkg/utils" - "test-go/pkg/logger" - "test-go/pkg/response" - "github.com/labstack/echo/v4" -) - -type MyEventService struct { - enabled bool - broadcaster *utils.EventBroadcaster - logger *logger.Logger -} - -func NewMyEventService(enabled bool, logger *logger.Logger) *MyEventService { - return &MyEventService{ - enabled: enabled, - broadcaster: utils.NewEventBroadcaster(), - logger: logger, - } -} - -func (s *MyEventService) streamEvents(c echo.Context) error { - streamID := c.Param("stream_id") - - // Subscribe using utility - client := s.broadcaster.Subscribe(streamID) - defer s.broadcaster.Unsubscribe(client.ID) - - // Set up SSE headers - c.Response().Header().Set(echo.HeaderContentType, "text/event-stream") - c.Response().Header().Set(echo.HeaderCacheControl, "no-cache") - c.Response().Header().Set(echo.HeaderConnection, "keep-alive") - - // Listen for events - for { - select { - case event := <-client.Channel: - // Handle event using utils.EventData - // Send SSE response... 
- case <-c.Request().Context().Done(): - return nil - } - } -} - -func (s *MyEventService) broadcastEvent(c echo.Context) error { - // Broadcast using utility - s.broadcaster.Broadcast("my-stream", "custom_event", "My message", data) - return response.Success(c, nil, "Event broadcasted") -} -``` - -### Utility Methods Reference - -The broadcast utility provides these methods: - -```go -// Core functionality -broadcaster := utils.NewEventBroadcaster() -client := broadcaster.Subscribe("stream_id") -broadcaster.Unsubscribe(clientID) -broadcaster.Broadcast(streamID, eventType, message, data) -broadcaster.BroadcastToAll(eventType, message, data) - -// Monitoring and statistics -activeStreams := broadcaster.GetActiveStreams() // map[string]int -streamClients := broadcaster.GetStreamClients(id) // []*StreamClient -totalClients := broadcaster.GetTotalClients() // int -streamCount := broadcaster.GetStreamCount() // int -isActive := broadcaster.IsStreamActive(streamID) // bool -``` - -### Benefits of the Utility Approach - -1. **Code Reuse**: One implementation, multiple services -2. **Consistency**: All services use the same broadcasting logic -3. **Maintainability**: Bug fixes and improvements benefit all services -4. **Clean Services**: Services focus on business logic, not infrastructure -5. **Easy Testing**: Utility can be tested independently -6. **Performance**: Optimized, thread-safe implementation - -### Migration Guide - -To migrate an existing service to use the broadcast utility: - -1. **Remove** duplicate `EventBroadcaster`, `EventData`, `StreamClient` types -2. **Import** `"test-go/pkg/utils"` -3. **Replace** `NewEventBroadcaster()` with `utils.NewEventBroadcaster()` -4. **Update** method calls to use `utils.EventData` type -5. 
**Test** the service functionality remains intact - -### Testing Both Services - -```bash -# Test Service G (full implementation) -curl -N http://localhost:8080/api/v1/events/stream/default - -# Test Service H (utility demo) -curl -N http://localhost:8080/api/v1/broadcast-demo/stream/demo-notifications - -# Check active streams for both services -curl http://localhost:8080/api/v1/events/streams -curl http://localhost:8080/api/v1/broadcast-demo/streams - -# Broadcast events -curl -X POST http://localhost:8080/api/v1/events/broadcast \ - -H "Content-Type: application/json" \ - -d '{"type":"test","message":"Hello from Service G"}' - -curl -X POST http://localhost:8080/api/v1/broadcast-demo/broadcast \ - -H "Content-Type: application/json" \ - -d '{"type":"test","message":"Hello from Service H"}' -``` - -This event streaming system provides both comprehensive full implementations and clean utility-based approaches, giving developers flexibility in how they implement real-time features while maintaining consistency and reusability. diff --git a/docs_wiki/GETTING_STARTED.md b/docs_wiki/GETTING_STARTED.md new file mode 100644 index 0000000..8930c50 --- /dev/null +++ b/docs_wiki/GETTING_STARTED.md @@ -0,0 +1,251 @@ +# Getting Started with Stackyard + +This guide will get you up and running with Stackyard in minutes. Stackyard is a production-ready Go application framework with modular services, real-time monitoring, and extensive infrastructure integrations. + +## Quick Start + +### 1. Prerequisites + +- **Go 1.21+** - [Download here](https://golang.org/dl/) +- **Git** - For cloning the repository + +### 2. Installation + +```bash +# Clone the repository +git clone https://github.com/diameter-tscd/stackyard.git +cd stackyard + +# Install Go dependencies +go mod download + +# Run the application +go run cmd/app/main.go +``` + +### 3. 
First Access + +Open your browser and visit: +- **Monitoring Dashboard**: http://localhost:9090 +- **Login**: `admin` / `admin` (Change this immediately) + +## Basic Configuration + +### Essential Settings + +Edit `config.yaml` to configure your application: + +```yaml +app: + name: "My Application" # Your app name + debug: true # Enable for development + +server: + port: "8080" # API server port + +monitoring: + enabled: true # Web monitoring dashboard + port: "9090" # Dashboard port + password: "your-secure-password" # ⚠️ Change from default! +``` + +### Service Configuration + +Enable/disable built-in services: + +```yaml +services: + service_a: true # Basic CRUD API example + service_b: false # Disable if not needed +``` + +## Hello World Example + +Let's create a simple API endpoint: + +### 1. Create a Service + +Add to `internal/services/modules/service_hello.go`: + +```go +package modules + +import ( + "stackyard/pkg/response" + "github.com/labstack/echo/v4" +) + +type HelloService struct { + enabled bool +} + +func NewHelloService(enabled bool) *HelloService { + return &HelloService{enabled: enabled} +} + +func (s *HelloService) Name() string { return "Hello Service" } +func (s *HelloService) Enabled() bool { return s.enabled } +func (s *HelloService) Endpoints() []string { return []string{"/hello"} } + +func (s *HelloService) RegisterRoutes(g *echo.Group) { + g.GET("/hello", s.hello) +} + +func (s *HelloService) hello(c echo.Context) error { + return response.Success(c, map[string]string{ + "message": "Hello, World!", + "status": "running", + }, "Hello endpoint") +} +``` + +### 2. Register the Service + +Add to `internal/server/server.go`: + +```go +registry.Register(modules.NewHelloService(s.config.Services.IsEnabled("hello"))) +``` + +### 3. Enable in Config + +Add to `config.yaml`: + +```yaml +services: + hello: true +``` + +### 4. 
Test Your API + +```bash +# Test the endpoint +curl http://localhost:8080/api/v1/hello + +# Response: +{ + "success": true, + "message": "Hello endpoint", + "data": { + "message": "Hello, World!", + "status": "running" + }, + "timestamp": 1642598400 +} +``` + +## Database Setup (Optional) + +### PostgreSQL Quick Setup + +Using Docker for development: + +```bash +# Start PostgreSQL +docker run -d \ + --name postgres \ + -e POSTGRES_PASSWORD=mypassword \ + -p 5432:5432 \ + postgres:15 + +# Configure in config.yaml +postgres: + enabled: true + host: "localhost" + user: "postgres" + password: "mypassword" + dbname: "postgres" +``` + +### Redis Quick Setup + +```bash +# Start Redis +docker run -d --name redis -p 6379:6379 redis:7 + +# Configure in config.yaml +redis: + enabled: true + address: "localhost:6379" +``` + +## Monitoring & Debugging + +### Web Dashboard + +Access the monitoring dashboard at http://localhost:9090 to: +- View real-time system metrics +- Monitor API endpoints +- Check service health +- View application logs + +### Terminal UI + +For interactive monitoring, enable the TUI: + +```yaml +app: + enable_tui: true +``` + +The TUI provides: +- Real-time log viewing +- Service initialization status +- System resource monitoring +- Interactive controls + +### Common Issues + +**"Port already in use"** +```bash +# Find what's using the port +lsof -i :8080 + +# Kill the process +kill -9 +``` + +**"Connection refused to database"** +- Check if Docker containers are running: `docker ps` +- Verify database credentials in `config.yaml` +- Wait for database to fully start up + +**"Module not found"** +```bash +# Clean and reinstall dependencies +go clean -modcache +go mod download +``` + +## Next Steps + +🎉 **Congratulations!** You have a running Stackyard application. + +### What to Explore Next: + +1. **[Development Guide](DEVELOPMENT.md)** - Learn to add features and extend the app +2. 
**[Architecture Overview](ARCHITECTURE.md)** - Understand the technical design +3. **[API Reference](REFERENCE.md)** - Complete technical documentation + +### Useful Commands: + +```bash +# Development +go run cmd/app/main.go # Run in development mode +go build -o app cmd/app/main.go # Build binary + +# Docker +./scripts/docker_build.sh # Build Docker images +docker-compose up # Run with full stack + +# Testing +go test ./... # Run all tests +``` + +## Getting Help + +- **Documentation**: Check the [docs_wiki](.) folder +- **Issues**: Report bugs on GitHub +- **Community**: Join discussions for questions + +Happy coding! 🚀 diff --git a/docs_wiki/GRAFANA_INTEGRATION.md b/docs_wiki/GRAFANA_INTEGRATION.md deleted file mode 100644 index a7560f1..0000000 --- a/docs_wiki/GRAFANA_INTEGRATION.md +++ /dev/null @@ -1,844 +0,0 @@ -# Grafana Integration Guide - -This document provides comprehensive information about Grafana integration in the project, allowing users to easily integrate their applications with Grafana for monitoring and visualization. - -## Overview - -Grafana integration enables seamless dashboard creation, data source management, and real-time monitoring through a comprehensive API. 
The integration provides: - -- **Dashboard Management**: Create, update, retrieve, and delete Grafana dashboards programmatically -- **Data Source Integration**: Configure and manage data sources for metrics collection -- **Annotation Support**: Add annotations to dashboards for event tracking -- **Health Monitoring**: Real-time health checks and status monitoring -- **Async Operations**: Non-blocking operations with worker pools for performance - -## Features - -- **Complete Dashboard API**: Full CRUD operations for Grafana dashboards -- **Data Source Management**: Create and configure data sources programmatically -- **Annotation System**: Add timeline annotations for events and incidents -- **Health Monitoring**: Real-time Grafana instance health checks -- **Async Operations**: Non-blocking API calls with proper error handling -- **Retry Logic**: Built-in retry mechanisms for reliable API communication -- **Type Safety**: Strongly typed structures for all Grafana entities - -## Configuration - -### Basic Configuration - -Add Grafana configuration to your `config.yaml`: - -```yaml -grafana: - enabled: true - url: "http://localhost:3000" - api_key: "your-grafana-api-key" - username: "admin" # Optional, for basic auth - password: "admin" # Optional, for basic auth -``` - -### Configuration Parameters - -| Parameter | Type | Required | Description | -|-----------|------|----------|-------------| -| `enabled` | boolean | Yes | Enable/disable Grafana integration | -| `url` | string | Yes | Grafana server URL (e.g., `http://localhost:3000`) | -| `api_key` | string | No* | Grafana API key for authentication | -| `username` | string | No | Username for basic authentication | -| `password` | string | No | Password for basic authentication | - -*Either `api_key` OR `username`/`password` must be provided for authentication. 
- -### Service Configuration - -Enable the Grafana service in your configuration: - -```yaml -services: - service_i: true -``` - -## API Endpoints - -The Grafana integration service provides RESTful endpoints under `/api/v1/grafana`: - -### Dashboard Management - -#### Create Dashboard -```http -POST /api/v1/grafana/dashboards -Content-Type: application/json - -{ - "title": "System Metrics", - "tags": ["system", "metrics"], - "timezone": "UTC", - "panels": [ - { - "id": 1, - "title": "CPU Usage", - "type": "graph", - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "targets": [ - { - "expr": "100 - (avg by(instance) (irate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100)", - "legendFormat": "{{instance}}" - } - ] - } - ], - "time": { - "from": "now-1h", - "to": "now" - }, - "refresh": "5s" -} -``` - -**Response:** -```json -{ - "success": true, - "message": "Dashboard created successfully", - "data": { - "id": 123, - "uid": "abc123def", - "title": "System Metrics", - "version": 1 - }, - "timestamp": 1642598400 -} -``` - -#### Update Dashboard -```http -PUT /api/v1/grafana/dashboards/{uid} -Content-Type: application/json - -{ - "title": "Updated System Metrics", - "panels": [ - { - "id": 1, - "title": "Memory Usage", - "type": "graph", - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "targets": [ - { - "expr": "100 - ((node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) * 100)", - "legendFormat": "Memory Usage %" - } - ] - } - ] -} -``` - -#### Get Dashboard -```http -GET /api/v1/grafana/dashboards/{uid} -``` - -**Response:** -```json -{ - "success": true, - "message": "Dashboard retrieved successfully", - "data": { - "id": 123, - "uid": "abc123def", - "title": "System Metrics", - "panels": [...], - "time": {...}, - "version": 1 - }, - "timestamp": 1642598400 -} -``` - -#### Delete Dashboard -```http -DELETE /api/v1/grafana/dashboards/{uid} -``` - -**Response:** -```json -{ - "success": true, - "message": "Dashboard deleted 
successfully", - "timestamp": 1642598400 -} -``` - -#### List Dashboards -```http -GET /api/v1/grafana/dashboards?page=1&per_page=50 -``` - -**Response:** -```json -{ - "success": true, - "message": "Dashboards retrieved successfully", - "data": [ - { - "id": 123, - "uid": "abc123def", - "title": "System Metrics", - "tags": ["system", "metrics"] - }, - { - "id": 124, - "uid": "def456ghi", - "title": "Application Metrics", - "tags": ["app", "performance"] - } - ], - "meta": { - "page": 1, - "per_page": 50, - "total": 2, - "total_pages": 1 - }, - "timestamp": 1642598400 -} -``` - -### Data Source Management - -#### Create Data Source -```http -POST /api/v1/grafana/datasources -Content-Type: application/json - -{ - "name": "Prometheus", - "type": "prometheus", - "url": "http://prometheus:9090", - "access": "proxy", - "basicAuth": false, - "jsonData": { - "timeInterval": "15s", - "queryTimeout": "60s" - } -} -``` - -**Response:** -```json -{ - "success": true, - "message": "Data source created successfully", - "data": { - "id": 1, - "uid": "prometheus-uid", - "name": "Prometheus", - "type": "prometheus", - "url": "http://prometheus:9090" - }, - "timestamp": 1642598400 -} -``` - -### Annotation Management - -#### Create Annotation -```http -POST /api/v1/grafana/annotations -Content-Type: application/json - -{ - "dashboardId": 123, - "panelId": 1, - "time": 1642598400000, - "timeEnd": 1642598460000, - "tags": ["deployment", "v1.2.0"], - "text": "Application deployed to production" -} -``` - -### Health Monitoring - -#### Get Grafana Health -```http -GET /api/v1/grafana/health -``` - -**Response:** -```json -{ - "success": true, - "message": "Grafana health check successful", - "data": { - "version": "9.3.0", - "database": "ok", - "commit": "abc123def" - }, - "timestamp": 1642598400 -} -``` - -## Usage Examples - -### Creating a System Monitoring Dashboard - -```bash -# Create a comprehensive system monitoring dashboard -curl -X POST 
http://localhost:8080/api/v1/grafana/dashboards \ - -H "Content-Type: application/json" \ - -d '{ - "title": "System Overview", - "tags": ["system", "overview"], - "panels": [ - { - "id": 1, - "title": "CPU Usage", - "type": "graph", - "gridPos": {"h": 8, "w": 12, "x": 0, "y": 0}, - "targets": [{ - "expr": "100 - (avg by(instance) (irate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100)", - "legendFormat": "CPU Usage %" - }] - }, - { - "id": 2, - "title": "Memory Usage", - "type": "graph", - "gridPos": {"h": 8, "w": 12, "x": 12, "y": 0}, - "targets": [{ - "expr": "100 - ((node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) * 100)", - "legendFormat": "Memory Usage %" - }] - } - ], - "time": {"from": "now-1h", "to": "now"}, - "refresh": "30s" - }' -``` - -### Setting Up Prometheus Data Source - -```bash -# Configure Prometheus as a data source -curl -X POST http://localhost:8080/api/v1/grafana/datasources \ - -H "Content-Type: application/json" \ - -d '{ - "name": "Prometheus", - "type": "prometheus", - "url": "http://prometheus:9090", - "access": "proxy", - "jsonData": { - "timeInterval": "15s", - "queryTimeout": "60s", - "httpMethod": "POST" - } - }' -``` - -### Adding Deployment Annotations - -```bash -# Add an annotation for a deployment event -curl -X POST http://localhost:8080/api/v1/grafana/annotations \ - -H "Content-Type: application/json" \ - -d '{ - "time": 1642598400000, - "tags": ["deployment", "api", "v2.1.0"], - "text": "API service deployed to production environment" - }' -``` - -## Programmatic Usage - -### Go Client Example - -```go -package main - -import ( - "context" - "fmt" - "log" - "test-go/pkg/infrastructure" - "test-go/config" -) - -func main() { - // Load configuration - cfg, err := config.LoadConfig() - if err != nil { - log.Fatal(err) - } - - // Create Grafana manager - grafanaMgr, err := infrastructure.NewGrafanaManager(cfg.Grafana) - if err != nil { - log.Fatal(err) - } - - ctx := context.Background() - - // Create a simple 
dashboard - dashboard := infrastructure.GrafanaDashboard{ - Title: "Application Metrics", - Tags: []string{"app", "metrics"}, - Panels: []infrastructure.GrafanaPanel{ - { - ID: 1, - Title: "Request Rate", - Type: "graph", - GridPos: infrastructure.GrafanaGridPos{ - H: 8, - W: 12, - X: 0, - Y: 0, - }, - Targets: []infrastructure.GrafanaTarget{ - { - Expr: "rate(http_requests_total[5m])", - LegendFormat: "Request Rate", - }, - }, - }, - }, - Time: infrastructure.GrafanaTimeRange{ - From: "now-1h", - To: "now", - }, - Refresh: "30s", - } - - // Create dashboard asynchronously - result := grafanaMgr.CreateDashboardAsync(ctx, dashboard) - createdDashboard, err := result.Wait() - if err != nil { - log.Fatal(err) - } - - fmt.Printf("Dashboard created: %s (UID: %s)\n", createdDashboard.Title, createdDashboard.UID) -} -``` - -### Creating Data Sources Programmatically - -```go -// Create Prometheus data source -prometheusDS := infrastructure.GrafanaDataSource{ - Name: "Prometheus", - Type: "prometheus", - URL: "http://prometheus:9090", - Access: "proxy", - JSONData: map[string]interface{}{ - "timeInterval": "15s", - "queryTimeout": "60s", - }, -} - -result := grafanaMgr.CreateDataSourceAsync(ctx, prometheusDS) -createdDS, err := result.Wait() -if err != nil { - log.Fatal(err) -} - -fmt.Printf("Data source created: %s (ID: %d)\n", createdDS.Name, createdDS.ID) -``` - -## Integration Patterns - -### Application Monitoring Dashboard - -Create a comprehensive monitoring dashboard for your application: - -```go -func createApplicationDashboard(grafanaMgr *infrastructure.GrafanaManager) error { - dashboard := infrastructure.GrafanaDashboard{ - Title: "Application Monitoring", - Tags: []string{"app", "monitoring", "production"}, - Panels: []infrastructure.GrafanaPanel{ - // HTTP Request Rate - { - ID: 1, - Title: "HTTP Request Rate", - Type: "graph", - GridPos: infrastructure.GrafanaGridPos{H: 8, W: 12, X: 0, Y: 0}, - Targets: []infrastructure.GrafanaTarget{ - { - Expr: 
"rate(http_requests_total[5m])", - LegendFormat: "Requests/sec", - }, - }, - }, - // Error Rate - { - ID: 2, - Title: "Error Rate", - Type: "graph", - GridPos: infrastructure.GrafanaGridPos{H: 8, W: 12, X: 12, Y: 0}, - Targets: []infrastructure.GrafanaTarget{ - { - Expr: "rate(http_requests_total{status=~\"5..\"}[5m])", - LegendFormat: "Errors/sec", - }, - }, - }, - // Response Time - { - ID: 3, - Title: "Response Time", - Type: "graph", - GridPos: infrastructure.GrafanaGridPos{H: 8, W: 12, X: 0, Y: 8}, - Targets: []infrastructure.GrafanaTarget{ - { - Expr: "histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))", - LegendFormat: "95th percentile", - }, - }, - }, - }, - Time: infrastructure.GrafanaTimeRange{ - From: "now-6h", - To: "now", - }, - Refresh: "30s", - } - - _, err := grafanaMgr.CreateDashboard(context.Background(), dashboard) - return err -} -``` - -### Automated Alert Annotations - -Automatically add annotations when alerts are triggered: - -```go -func addAlertAnnotation(grafanaMgr *infrastructure.GrafanaManager, alert Alert) error { - annotation := infrastructure.GrafanaAnnotation{ - Time: alert.Timestamp, - TimeEnd: alert.Timestamp + 300000, // 5 minutes - Tags: []string{"alert", alert.Severity, alert.Service}, - Text: fmt.Sprintf("Alert: %s - %s", alert.Title, alert.Description), - Data: map[string]interface{}{ - "severity": alert.Severity, - "service": alert.Service, - "value": alert.Value, - "threshold": alert.Threshold, - }, - } - - _, err := grafanaMgr.CreateAnnotation(context.Background(), annotation) - return err -} -``` - -## Dashboard Templates - -### System Metrics Dashboard - -```json -{ - "title": "System Metrics", - "tags": ["system", "infrastructure"], - "panels": [ - { - "id": 1, - "title": "CPU Usage", - "type": "graph", - "gridPos": {"h": 8, "w": 12, "x": 0, "y": 0}, - "targets": [{ - "expr": "100 - (avg by(instance) (irate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100)", - "legendFormat": "{{instance}} CPU 
Usage %" - }] - }, - { - "id": 2, - "title": "Memory Usage", - "type": "graph", - "gridPos": {"h": 8, "w": 12, "x": 12, "y": 0}, - "targets": [{ - "expr": "100 - ((node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) * 100)", - "legendFormat": "{{instance}} Memory Usage %" - }] - }, - { - "id": 3, - "title": "Disk Usage", - "type": "graph", - "gridPos": {"h": 8, "w": 12, "x": 0, "y": 8}, - "targets": [{ - "expr": "100 - ((node_filesystem_avail_bytes / node_filesystem_size_bytes) * 100)", - "legendFormat": "{{instance}} {{mountpoint}}" - }] - } - ], - "time": {"from": "now-1h", "to": "now"}, - "refresh": "30s" -} -``` - -### Application Performance Dashboard - -```json -{ - "title": "Application Performance", - "tags": ["application", "performance"], - "panels": [ - { - "id": 1, - "title": "Request Rate", - "type": "graph", - "gridPos": {"h": 8, "w": 12, "x": 0, "y": 0}, - "targets": [{ - "expr": "rate(http_requests_total[5m])", - "legendFormat": "Requests/sec" - }] - }, - { - "id": 2, - "title": "Response Time (95th percentile)", - "type": "graph", - "gridPos": {"h": 8, "w": 12, "x": 12, "y": 0}, - "targets": [{ - "expr": "histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m]))", - "legendFormat": "95th percentile" - }] - }, - { - "id": 3, - "title": "Error Rate", - "type": "graph", - "gridPos": {"h": 8, "w": 12, "x": 0, "y": 8}, - "targets": [{ - "expr": "rate(http_requests_total{status=~\"5..\"}[5m]) / rate(http_requests_total[5m]) * 100", - "legendFormat": "Error Rate %" - }] - } - ], - "time": {"from": "now-6h", "to": "now"}, - "refresh": "30s" -} -``` - -## Security Considerations - -### Authentication -- **API Keys**: Use Grafana API keys with minimal required permissions -- **Basic Auth**: Avoid basic authentication in production environments -- **Network Security**: Ensure Grafana API is accessible only from trusted networks - -### Permissions -- **Service Accounts**: Create dedicated service accounts for API access -- **Least 
Privilege**: Grant only necessary permissions for dashboard and data source management -- **Token Rotation**: Regularly rotate API keys and tokens - -### Data Protection -- **Sensitive Data**: Avoid storing sensitive information in dashboard configurations -- **Access Control**: Implement proper access controls for dashboard visibility -- **Audit Logging**: Enable audit logging for dashboard changes - -## Performance Optimization - -### Async Operations -All Grafana operations support async execution: - -```go -// Fire-and-forget dashboard creation -result := grafanaMgr.CreateDashboardAsync(ctx, dashboard) -// Continue with other operations while dashboard is being created -``` - -### Batch Operations -For multiple operations, consider batching: - -```go -// Create multiple dashboards concurrently -dashboards := []infrastructure.GrafanaDashboard{dash1, dash2, dash3} -for _, dash := range dashboards { - go func(d infrastructure.GrafanaDashboard) { - _, err := grafanaMgr.CreateDashboard(ctx, d) - if err != nil { - log.Printf("Failed to create dashboard %s: %v", d.Title, err) - } - }(dash) -} -``` - -### Connection Pooling -The Grafana manager uses HTTP client with connection pooling for optimal performance. - -## Error Handling - -### Common Error Scenarios - -1. **Connection Failed** - ``` - Error: failed to connect to Grafana: Get "http://localhost:3000/api/health": dial tcp [::1]:3000: connect: connection refused - Solution: Ensure Grafana is running and accessible - ``` - -2. **Authentication Failed** - ``` - Error: failed to create dashboard: 401 Unauthorized - Solution: Verify API key or credentials are correct - ``` - -3. **Invalid Dashboard JSON** - ``` - Error: failed to create dashboard: 400 Bad Request - Solution: Validate dashboard JSON structure - ``` - -4. 
**Dashboard Not Found** - ``` - Error: dashboard not found: abc123def - Solution: Verify dashboard UID exists - ``` - -### Retry Logic - -The integration includes automatic retry logic for transient failures: - -- **Max Retries**: 3 attempts -- **Backoff**: Exponential backoff (1s, 2s, 4s) -- **Timeout**: 30-second request timeout - -## Monitoring and Observability - -### Health Checks - -Monitor Grafana integration health: - -```bash -# Check Grafana service health -curl http://localhost:8080/api/v1/grafana/health - -# Check overall infrastructure health -curl http://localhost:8080/health/infrastructure -``` - -### Metrics Integration - -The Grafana integration can be monitored through the existing monitoring dashboard at `http://localhost:9090`. - -### Logging - -All Grafana operations are logged with structured logging: - -``` -INFO: Grafana dashboard created: System Metrics (UID: abc123def) -ERROR: Failed to create Grafana dashboard: 401 Unauthorized -``` - -## Troubleshooting - -### Debug Mode - -Enable debug logging for detailed operation information: - -```yaml -app: - debug: true -``` - -### Connection Issues - -1. **Verify Grafana URL**: Ensure the configured URL is correct and accessible -2. **Check Authentication**: Validate API key or credentials -3. **Network Connectivity**: Test network connectivity to Grafana server -4. **Firewall Rules**: Ensure required ports are open - -### API Issues - -1. **Validate JSON**: Ensure dashboard and data source JSON is valid -2. **Check Permissions**: Verify API key has necessary permissions -3. **Rate Limits**: Check for API rate limiting issues -4. **Version Compatibility**: Ensure Grafana version supports requested features - -### Performance Issues - -1. **Async Operations**: Use async methods for non-blocking operations -2. **Batch Operations**: Group multiple operations to reduce API calls -3. **Connection Reuse**: HTTP client reuses connections automatically -4. 
**Timeout Configuration**: Adjust timeouts based on network conditions - -## Integration Examples - -### Docker Compose Setup - -```yaml -version: '3.8' -services: - app: - # Your application configuration - environment: - - GRAFANA_ENABLED=true - - GRAFANA_URL=http://grafana:3000 - - GRAFANA_API_KEY=your-api-key - - grafana: - image: grafana/grafana:latest - ports: - - "3000:3000" - environment: - - GF_SECURITY_ADMIN_PASSWORD=admin - - GF_USERS_ALLOW_SIGN_UP=false - volumes: - - grafana_data:/var/lib/grafana - - prometheus: - image: prom/prometheus:latest - ports: - - "9090:9090" - volumes: - - ./prometheus.yml:/etc/prometheus/prometheus.yml - -volumes: - grafana_data: -``` - -### Kubernetes Deployment - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: app-config -data: - config.yaml: | - grafana: - enabled: true - url: "http://grafana.grafana.svc.cluster.local:3000" - api_key: "${GRAFANA_API_KEY}" ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: app -spec: - template: - spec: - containers: - - name: app - env: - - name: GRAFANA_API_KEY - valueFrom: - secretKeyRef: - name: grafana-secrets - key: api-key -``` - -## Conclusion - -The Grafana integration provides a comprehensive, production-ready solution for programmatic dashboard and data source management. With support for async operations, retry logic, and comprehensive error handling, it enables seamless integration between your application and Grafana for monitoring and visualization needs. - -The integration follows the same patterns as other infrastructure components in the project, ensuring consistency and maintainability. Whether you need to create dashboards automatically, manage data sources, or add annotations for events, the Grafana integration provides the tools and APIs to accomplish these tasks efficiently and reliably. 
diff --git a/docs_wiki/HOME.md b/docs_wiki/HOME.md deleted file mode 100644 index 47ad530..0000000 --- a/docs_wiki/HOME.md +++ /dev/null @@ -1,140 +0,0 @@ -# Welcome to the Project Documentation - -This wiki serves as the central knowledge base for the project, covering architecture, feature implementations, and integration guides. - -## Core Documentation - -### Architecture & Design - -* **[Async Infrastructure Implementation](ASYNC_INFRASTRUCTURE.md)** - * Complete async infrastructure system ensuring non-blocking operations. - * Worker pools, goroutines, and channels for concurrent processing. - * Generic AsyncResult types with timeout and error handling. - * Performance benefits and best practices for async operations. - -* **[Configuration Guide](CONFIGURATION_GUIDE.md)** - * Complete reference for `config.yaml` configuration. - * All available options with explanations and examples. - * Multiple PostgreSQL connections setup guide with web monitoring interface. - * Dynamic connection switching and health monitoring. - -* **[Architecture Diagrams](ARCHITECTURE_DIAGRAMS.md)** - * Visual guide to the system's request/response flow. - * Package organization and dependency graphs. - * Sequence diagrams for validation and error handling. - -* **[API Response Structure](API_RESPONSE_STRUCTURE.md)** - * Standard format for all API responses (`success`, `data`, `meta`). - * Built-in helper functions for success and error responses. - * Pagination and validation standards. - -* **[Request Response Structure](REQUEST_RESPONSE_STRUCTURE.md)** - * Detailed overview of the Echo service structure. - * Request validation patterns and custom validators. - * Dependencies and best practices for creating new endpoints. - -### Security & Privacy - -* **[API Obfuscation](API_OBFUSCATION.md)** - * Mechanism for obscuring JSON data in transit using Base64. - * Configuration guide for enabling/disabling obfuscation. - * Frontend and Backend implementation details. 
- -* **[API Request/Response Encryption](ENCRYPTION_API.md)** - * End-to-end encryption for all API communications using AES-256-GCM. - * Automatic middleware for transparent encryption/decryption. - * Key management, rotation, and secure storage. - * Client implementation guides for JavaScript and Python. - * Configuration, security best practices, and troubleshooting. - -### User Interface - -* **[TUI Implementation](TUI_IMPLEMENTATION.md)** - * Documentation for the Terminal User Interface (Bubble Tea). - * **Boot Sequence**: Visual feedback during service initialization. - * **Live Logs**: Real-time log display with scrolling, filtering, and management controls. - * **Enhanced Controls**: Keyboard shortcuts for scrolling, filtering, auto-scroll toggle, and log clearing. - * **Reusable Dialog System**: Template-based dialog components for easy reuse. - * **Unlimited Log Storage**: Removed 1000 log limit for unlimited storage. - * **Default Auto-scroll**: Auto-scroll enabled by default on application startup. - -### Build & Deployment - -* **[Build Scripts](BUILD_SCRIPTS.md)** - * Enhanced build scripts with code obfuscation and cross-platform support. - * **Customizable Parameter Parsing System**: Dynamic flag system for command-line configuration. - * **Tool Installation**: Automatic installation of required Go tools (`goversioninfo`, `garble`). - * **Code Obfuscation**: Optional `garble` build for production security. - * **Cross-Platform Support**: Native implementations for Unix/Linux/macOS and Windows. - -### Real-time Features - -* **[Live Event Streaming](EVENT_STREAMING.md)** - * Server-Sent Events (SSE) implementation with multiple concurrent streams. - * Real-time push notifications to connected clients without polling. - * Event broadcasting to specific streams or all streams simultaneously. - * Stream management with start, stop, pause, and resume operations. - * Client management with automatic subscription/unsubscription. 
- -### Integration & Infrastructure - -* **[MongoDB Integration](MONGODB_INTEGRATION.md)** - * Complete MongoDB integration guide with multiple database support. - * Web monitoring interface for MongoDB databases and collections. - * Manual query execution and real-time database statistics. - * Multi-tenant MongoDB operations with connection switching. - * CRUD operations, aggregation pipelines, and best practices. - -* **[Grafana Integration](GRAFANA_INTEGRATION.md)** - * Complete Grafana API integration for dashboard and data source management. - * Programmatic dashboard creation, updates, and deletion. - * Data source configuration and annotation support. - * Async operations with retry logic and health monitoring. - -* **[Docker Containerization](DOCKER_CONTAINERIZATION.md)** - * Multi-stage Dockerfile for development, testing, and production. - * Docker Compose integration with infrastructure services. - * CI/CD integration, security best practices, and troubleshooting. - -* **[Build Scripts](BUILD_SCRIPTS.md)** - * Automated build process with backup and archiving. - * Cross-platform scripts for Unix/Linux/macOS and Windows. - * Backup management, troubleshooting, and CI/CD integration. - -* **[Package Name Change Scripts](CHANGE_PACKAGE_SCRIPTS.md)** - * Automated tools for renaming Go module package names. - * Cross-platform support for comprehensive codebase refactoring. - * Safety mechanisms, backup creation, and error handling. - -* **[Onboarding Scripts](ONBOARDING_SCRIPTS.md)** - * Interactive setup wizard for first-time configuration. - * Cross-platform scripts for Unix/Linux/macOS and Windows. - * Guided configuration of app settings, services, and infrastructure. - * Security warnings and best practices for production setup. - -* **[Service Implementation Guide](SERVICE_IMPLEMENTATION.md)** - * Creating and implementing new services with use case documentation. - * Service interface and registration with comprehensive descriptions. 
- * Dynamic configuration setup and best practices. - -* **[Integration Guide](INTEGRATION_GUIDE.md)** - * **Redis**: Configuration and usage of the Redis manager. - * **Postgres**: Database connection and Helper methods. - * Multiple PostgreSQL connections with dynamic switching in monitoring UI. - * **MongoDB**: NoSQL database integration with multi-connection support. - * **Kafka**: Message producing and configuration. - * **MinIO**: Object storage integration for file uploads. - * **Cron Jobs**: Dynamic job scheduling and management. - -### Examples & Samples - -* **[Service F (Multi-Tenant Orders)](../internal/services/modules/service_f.go)** - * Complete example of using multiple PostgreSQL connections. - * Demonstrates tenant-based database isolation. - * Shows dynamic connection selection in API endpoints. - ---- - -## Getting Started - -If you are new to the project, we recommend starting with the **[Integration Guide](INTEGRATION_GUIDE.md)** to understand the available infrastructure components, followed by the **[API Response Structure](API_RESPONSE_STRUCTURE.md)** to learn how to build consistent API endpoints. diff --git a/docs_wiki/INTEGRATION_GUIDE.md b/docs_wiki/INTEGRATION_GUIDE.md deleted file mode 100644 index c5c6580..0000000 --- a/docs_wiki/INTEGRATION_GUIDE.md +++ /dev/null @@ -1,306 +0,0 @@ -# Infrastructure Integration Guide - -This guide documents how to use the integrated infrastructure components in this boilerplate. All components are designed to be modular and can be enabled/disabled via `config.yaml`. - -## 1. Redis - -### Configuration (`config.yaml`) -```yaml -redis: - enabled: true - host: "localhost" - port: "6379" - password: "" - db: 0 -``` - -### Usage (Code) -The `RedisManager` provides async operations with worker pools. 
- -```go -// Inject RedisManager into your service -type MyService struct { - redis *infrastructure.RedisManager -} - -func (s *MyService) Example() { - ctx := context.Background() - - // Async SET - returns immediately - result := s.redis.SetAsync(ctx, "my-key", "my-value", time.Minute*10) - - // Wait for completion - err := result.Wait() - - // Async GET - getResult := s.redis.GetAsync(ctx, "my-key") - value, err := getResult.Wait() - - // Async DELETE - delResult := s.redis.DeleteAsync(ctx, "my-key") - err := delResult.Wait() -} - -// Batch operations for efficiency -func (s *MyService) BatchExample() { - ctx := context.Background() - keys := []string{"key1", "key2", "key3"} - - // Get multiple keys concurrently - result := s.redis.GetBatchAsync(ctx, keys) - values, errors := result.WaitAll() - - // Process results - for i, val := range values { - if errors[i] != nil { - // Handle error - } else { - // Process value - } - } -} -``` - ---- - -## 2. Postgres - -### Configuration (`config.yaml`) - -The application now supports both single and multiple PostgreSQL connections for enhanced flexibility and scalability. Multiple connections allow you to monitor and query different databases through the web monitoring interface. 
- -#### Single Connection (Original Format - Still Supported) -```yaml -postgres: - enabled: true - host: "localhost" - port: "5432" - user: "postgres" - password: "password" - dbname: "mydb" - sslmode: "disable" - max_open_conns: 10 - max_idle_conns: 5 -``` - -#### Multiple Connections (New Format) -```yaml -postgres: - enabled: true - connections: - - name: "primary" - enabled: true - host: "localhost" - port: "5432" - user: "postgres" - password: "password" - dbname: "primary_db" - sslmode: "disable" - - - name: "secondary" - enabled: true - host: "localhost" - port: "5433" - user: "postgres" - password: "password" - dbname: "secondary_db" - sslmode: "disable" - - - name: "analytics" - enabled: false # Disabled by default - host: "analytics.example.com" - port: "5432" - user: "analytics_user" - password: "analytics_password" - dbname: "analytics_db" - sslmode: "require" -``` - -### Usage (Code) - -The application provides both single connection and multi-connection managers for PostgreSQL. - -#### Using Single Connection (Backward Compatible) -```go -// Inject PostgresManager -type MyService struct { - db *infrastructure.PostgresManager -} - -func (s *MyService) Example() { - // Access underlying sqlx.DB - var users []User - err := s.db.DB.Select(&users, "SELECT * FROM users WHERE active = $1", true) - - // Using transaction helper (if implemented in your manager extensions) or usage of standard sqlx patterns - tx, err := s.db.DB.Beginx() - // ... 
-} -``` - -#### Using Multiple Connections -```go -// Inject PostgresConnectionManager -type MyService struct { - postgresManager *infrastructure.PostgresConnectionManager -} - -func (s *MyService) Example() { - // Get a specific named connection - if conn, exists := s.postgresManager.GetConnection("primary"); exists { - var users []User - err := conn.DB.Select(&users, "SELECT * FROM users WHERE active = $1", true) - } - - // Get the default connection (first enabled connection) - if defaultConn, exists := s.postgresManager.GetDefaultConnection(); exists { - // Use the default connection - } - - // Get all connections - allConnections := s.postgresManager.GetAllConnections() - - // Get status for all connections - status := s.postgresManager.GetStatus() -} -``` - -### Connection Management Methods - -The `PostgresConnectionManager` provides several useful methods: - -```go -// Get a specific named connection -conn, exists := postgresManager.GetConnection("primary") - -// Get the default connection (first enabled connection) -defaultConn, exists := postgresManager.GetDefaultConnection() - -// Get all connections as a map -allConnections := postgresManager.GetAllConnections() - -// Get status for all connections (useful for monitoring) -statusMap := postgresManager.GetStatus() - -// Close all connections (for graceful shutdown) -err := postgresManager.CloseAll() -``` - -### Best Practices - -1. **Connection Naming**: Use descriptive names like "primary", "secondary", "analytics", "read_replica" -2. **Error Handling**: Always check if a connection exists before using it -3. **Resource Management**: Close connections properly during application shutdown -4. **Configuration**: Disable unused connections to avoid unnecessary resource consumption -5. **Monitoring**: Use the status methods to monitor connection health in your monitoring system - -### Migration Guide - -To migrate from single to multiple connections: - -1. 
**Update Configuration**: Convert your existing PostgreSQL config to the new format -2. **Update Services**: Modify services to use the connection manager when needed -3. **Test**: Verify all database operations work with the new connection manager -4. **Monitor**: Check the monitoring dashboard to see all PostgreSQL connections - -The system automatically handles backward compatibility, so existing single-connection configurations will continue to work without modification. - ---- - -## 3. Kafka - -### Configuration (`config.yaml`) -```yaml -kafka: - enabled: true - brokers: ["localhost:9092"] - topic: "my-topic" - group_id: "my-group" -``` - -### Usage (Code) -The `KafkaManager` handles producing messages. - -```go -// Inject KafkaManager -type MyService struct { - kafka *infrastructure.KafkaManager -} - -func (s *MyService) SendNotification() { - // Publish a message - err := s.kafka.Publish("notification-topic", []byte("Hello Kafka")) - - // Publish with Key (if supported by your specific implementation extension, default Publish typically sends value) -} -``` - ---- - -## 4. MinIO (Object Storage) - -### Configuration (`config.yaml`) -```yaml -monitoring: - minio: - enabled: true - endpoint: "localhost:9003" - access_key_id: "minioadmin" - secret_access_key: "minioadmin" - use_ssl: false - bucket_name: "main" -``` - -### Usage (Code) -The `MinIOManager` simplifies file uploads and URL retrieval. - -```go -// Inject MinIOManager -type MyService struct { - storage *infrastructure.MinIOManager -} - -func (s *MyService) UploadAvatar(fileHeader *multipart.FileHeader) { - file, _ := fileHeader.Open() - defer file.Close() - - // Upload - info, err := s.storage.UploadFile(context.Background(), "avatars/user-1.jpg", file, fileHeader.Size, "image/jpeg") - - // Get Presigned URL (for private buckets) or direct URL - url := s.storage.GetFileUrl("avatars/user-1.jpg") -} -``` - ---- - -## 5. 
Cron Jobs - -### Configuration (`config.yaml`) -Cron jobs can be defined in config for simple logging/testing, or registered in code for logic. - -```yaml -cron: - enabled: true - jobs: - "cleanup_logs": "0 0 * * *" # Run at midnight - "health_check": "*/5 * * * *" # Run every 5 minutes -``` - -### Usage (Code) -The `CronManager` allows dynamic job registration. - -```go -// Inject CronManager -type MyService struct { - cron *infrastructure.CronManager -} - -func (s *MyService) InitJobs() { - // Register a new job - id, err := s.cron.AddJob("database_backup", "0 3 * * *", func() { - fmt.Println("Performing database backup...") - // Call service logic here - }) -} -``` diff --git a/docs_wiki/MONGODB_INTEGRATION.md b/docs_wiki/MONGODB_INTEGRATION.md deleted file mode 100644 index d743b67..0000000 --- a/docs_wiki/MONGODB_INTEGRATION.md +++ /dev/null @@ -1,582 +0,0 @@ -# MongoDB Integration Guide - -This document provides comprehensive information about MongoDB integration in the project, including configuration, usage patterns, and implementation details. - -## Overview - -MongoDB has been integrated into the project as a full-featured NoSQL database option alongside PostgreSQL. The implementation supports multiple database connections with dynamic switching, similar to the PostgreSQL multi-tenant architecture. 
- -## Features - -- **Multiple Database Connections**: Support for multiple MongoDB databases with connection switching -- **Web Monitoring Integration**: Full integration with the web monitoring dashboard -- **CRUD Operations**: Complete Create, Read, Update, Delete operations -- **Aggregation Support**: MongoDB aggregation pipeline operations -- **Connection Health Monitoring**: Real-time connection status and database statistics -- **Manual Query Execution**: Execute raw MongoDB queries through the web interface - -## Configuration - -### Basic Configuration - -Add MongoDB connections to your `config.yaml`: - -```yaml -mongo: - enabled: true - connections: - - name: "primary" - enabled: true - uri: "mongodb://localhost:27017" - database: "primary_db" - - - name: "secondary" - enabled: true - uri: "mongodb://localhost:27018" - database: "secondary_db" - - - name: "analytics" - enabled: false # Disabled by default - uri: "mongodb://analytics.example.com:27017" - database: "analytics_db" -``` - -### Connection Parameters - -| Parameter | Type | Description | Required | -|-----------|------|-------------|----------| -| `name` | string | Connection identifier | Yes | -| `enabled` | boolean | Enable/disable this connection | Yes | -| `uri` | string | MongoDB connection URI | Yes | -| `database` | string | Default database name | Yes | - -### URI Format - -MongoDB URIs follow the standard format: -``` -mongodb://[username:password@]host[:port][/database][?options] -``` - -Examples: -- `mongodb://localhost:27017` - Local single instance -- `mongodb://user:pass@host:27017/db?replicaSet=rs0` - With authentication and replica set - -## Infrastructure Implementation - -### MongoDB Manager - -The `MongoManager` provides a high-level interface to MongoDB: - -```go -type MongoManager struct { - Client *mongo.Client - Database *mongo.Database -} -``` - -### Connection Manager - -The `MongoConnectionManager` handles multiple connections: - -```go -type MongoConnectionManager 
struct { - connections map[string]*MongoManager - mu sync.RWMutex -} -``` - -### Key Methods - -#### Connection Management -```go -// Get a specific connection -conn, exists := mongoManager.GetConnection("primary") - -// Get default connection -defaultConn, exists := mongoManager.GetDefaultConnection() - -// Get all connections -allConnections := mongoManager.GetAllConnections() - -// Get connection status -status := mongoManager.GetStatus() -``` - -#### Database Operations -```go -// Insert operations -result, err := mongoManager.InsertOne(ctx, "collection", document) -result, err := mongoManager.InsertMany(ctx, "collection", documents) - -// Query operations -cursor, err := mongoManager.Find(ctx, "collection", filter) -singleResult := mongoManager.FindOne(ctx, "collection", filter) - -// Update operations -result, err := mongoManager.UpdateOne(ctx, "collection", filter, update) -result, err := mongoManager.UpdateMany(ctx, "collection", filter, update) - -// Delete operations -result, err := mongoManager.DeleteOne(ctx, "collection", filter) -result, err := mongoManager.DeleteMany(ctx, "collection", filter) - -// Aggregation -cursor, err := mongoManager.Aggregate(ctx, "collection", pipeline) - -// Utility operations -count, err := mongoManager.CountDocuments(ctx, "collection", filter) -collections, err := mongoManager.ListCollections(ctx) -``` - -## Web Monitoring Integration - -### MongoDB Tab - -The MongoDB tab provides a complete interface for database management: - -#### Connection Selector -- Dropdown to switch between configured MongoDB connections -- Real-time connection status indicators -- Automatic refresh of database information - -#### Database Statistics Cards -- **Database**: Current database name -- **Collections**: Number of collections and list -- **Documents**: Total document count across collections -- **Storage Size**: Database storage usage in MB - -#### Manual Query Interface -- **Collection Field**: Specify target collection -- **Query 
Field**: JSON query/filter (e.g., `{"status": "active"}`) -- **Run Query Button**: Execute the query with loading indicator -- **Results Table**: Dynamic table showing query results - -### API Endpoints - -#### Get Database Info -```http -GET /api/mongo/info?connection=primary -``` - -Response: -```json -{ - "success": true, - "data": { - "database": "primary_db", - "collections": ["users", "orders", "products"], - "stats": { - "collections": 3, - "objects": 1250, - "dataSize": 5242880, - "storageSize": 8388608, - "indexes": 5, - "indexSize": 204800 - } - } -} -``` - -#### Execute Query -```http -POST /api/mongo/query?connection=primary -Content-Type: application/json - -{ - "collection": "users", - "query": {"status": "active"} -} -``` - -Response: -```json -{ - "success": true, - "data": [ - { - "_id": "507f1f77bcf86cd799439011", - "username": "john_doe", - "email": "john@example.com", - "status": "active" - } - ] -} -``` - -## Usage Examples - -### Basic CRUD Operations - -```go -// Inject MongoDB manager -type UserService struct { - mongoManager *infrastructure.MongoManager -} - -func (s *UserService) CreateUser(ctx context.Context, user User) error { - _, err := s.mongoManager.InsertOne(ctx, "users", user) - return err -} - -func (s *UserService) GetUser(ctx context.Context, userID string) (*User, error) { - filter := bson.M{"_id": userID} - var user User - err := s.mongoManager.FindOne(ctx, "users", filter).Decode(&user) - return &user, err -} - -func (s *UserService) UpdateUser(ctx context.Context, userID string, updates bson.M) error { - filter := bson.M{"_id": userID} - _, err := s.mongoManager.UpdateOne(ctx, "users", filter, bson.M{"$set": updates}) - return err -} - -func (s *UserService) DeleteUser(ctx context.Context, userID string) error { - filter := bson.M{"_id": userID} - _, err := s.mongoManager.DeleteOne(ctx, "users", filter) - return err -} -``` - -### Multi-Database Operations - -```go -// Using connection manager for multi-tenant operations 
-type MultiTenantService struct { - mongoConnectionManager *infrastructure.MongoConnectionManager -} - -func (s *MultiTenantService) GetTenantData(ctx context.Context, tenantID string, collection string) ([]bson.M, error) { - // Get tenant-specific database connection - conn, exists := s.mongoConnectionManager.GetConnection(tenantID) - if !exists { - return nil, fmt.Errorf("tenant database not found: %s", tenantID) - } - - // Query tenant's database - cursor, err := conn.Find(ctx, collection, bson.M{}) - if err != nil { - return nil, err - } - defer cursor.Close(ctx) - - var results []bson.M - if err := cursor.All(ctx, &results); err != nil { - return nil, err - } - - return results, nil -} -``` - -### Aggregation Pipeline - -```go -func (s *UserService) GetUserStats(ctx context.Context) ([]bson.M, error) { - pipeline := []bson.M{ - { - "$group": bson.M{ - "_id": "$status", - "count": bson.M{"$sum": 1}, - }, - }, - { - "$sort": bson.M{"count": -1}, - }, - } - - cursor, err := s.mongoManager.Aggregate(ctx, "users", pipeline) - if err != nil { - return nil, err - } - defer cursor.Close(ctx) - - var results []bson.M - if err := cursor.All(ctx, &results); err != nil { - return nil, err - } - - return results, nil -} -``` - -## Service Implementation - -### Creating a MongoDB Service - -Create a new service file `internal/services/modules/service_g.go`: - -```go -package modules - -import ( - "context" - "fmt" - "test-go/pkg/infrastructure" - "test-go/pkg/logger" - "test-go/pkg/response" - - "github.com/labstack/echo/v4" - "go.mongodb.org/mongo-driver/bson" -) - -type ServiceG struct { - enabled bool - mongoConnectionManager *infrastructure.MongoConnectionManager - logger *logger.Logger -} - -func NewServiceG( - mongoConnectionManager *infrastructure.MongoConnectionManager, - enabled bool, - logger *logger.Logger, -) *ServiceG { - return &ServiceG{ - enabled: enabled, - mongoConnectionManager: mongoConnectionManager, - logger: logger, - } -} - -func (s *ServiceG) Name() 
string { return "Service G (MongoDB Products)" } -func (s *ServiceG) Enabled() bool { return s.enabled && s.mongoConnectionManager != nil } -func (s *ServiceG) Endpoints() []string { return []string{"/products/{tenant}"} } - -func (s *ServiceG) RegisterRoutes(g *echo.Group) { - sub := g.Group("/products") - - // Routes with tenant parameter for database selection - sub.GET("/:tenant", s.listProductsByTenant) - sub.POST("/:tenant", s.createProduct) - sub.GET("/:tenant/:id", s.getProductByTenant) - sub.PUT("/:tenant/:id", s.updateProduct) - sub.DELETE("/:tenant/:id", s.deleteProduct) -} - -func (s *ServiceG) listProductsByTenant(c echo.Context) error { - tenant := c.Param("tenant") - - // Get the database connection for this tenant - dbConn, exists := s.mongoConnectionManager.GetConnection(tenant) - if !exists { - return response.NotFound(c, fmt.Sprintf("Tenant database '%s' not found or not connected", tenant)) - } - - // Query products from the tenant's database - cursor, err := dbConn.Find(context.Background(), "products", bson.M{}) - if err != nil { - return response.InternalServerError(c, fmt.Sprintf("Failed to query tenant '%s' database: %v", tenant, err)) - } - defer cursor.Close(context.Background()) - - var products []bson.M - if err := cursor.All(context.Background(), &products); err != nil { - return response.InternalServerError(c, fmt.Sprintf("Failed to decode products: %v", err)) - } - - return response.Success(c, products, fmt.Sprintf("Products retrieved from tenant '%s' database", tenant)) -} - -func (s *ServiceG) createProduct(c echo.Context) error { - tenant := c.Param("tenant") - - // Get the database connection for this tenant - dbConn, exists := s.mongoConnectionManager.GetConnection(tenant) - if !exists { - return response.NotFound(c, fmt.Sprintf("Tenant database '%s' not found or not connected", tenant)) - } - - var product bson.M - if err := c.Bind(&product); err != nil { - return response.BadRequest(c, "Invalid product data") - } - - // Insert 
into tenant's database - result, err := dbConn.InsertOne(context.Background(), "products", product) - if err != nil { - return response.InternalServerError(c, fmt.Sprintf("Failed to create product in tenant '%s' database: %v", tenant, err)) - } - - // Add the generated ID to the response - product["_id"] = result.InsertedID - - return response.Created(c, product, fmt.Sprintf("Product created in tenant '%s' database", tenant)) -} -``` - -### Registering the Service - -Add to `internal/server/server.go`: - -```go -// Add MongoDB service -registry.Register(modules.NewServiceG(s.mongoConnectionManager, s.config.Services.IsEnabled("service_g"), s.logger)) -``` - -Add to `config.yaml`: - -```yaml -services: - service_g: true # Enable MongoDB products service -``` - -## Manual Query Examples - -### Find Documents -```json -{ - "collection": "users", - "query": {"status": "active"} -} -``` - -### Aggregation Query -```json -{ - "collection": "orders", - "query": { - "$group": { - "_id": "$status", - "count": {"$sum": 1} - } - } -} -``` - -### Complex Filter -```json -{ - "collection": "products", - "query": { - "price": {"$gte": 100}, - "category": "electronics" - } -} -``` - -## Best Practices - -### Connection Management -1. **Use Connection Manager**: Always use `MongoConnectionManager` for multi-database scenarios -2. **Connection Validation**: Check connection existence before operations -3. **Error Handling**: Implement proper error handling for connection failures -4. **Resource Cleanup**: Close cursors and handle context cancellation - -### Query Optimization -1. **Indexes**: Create appropriate indexes for frequently queried fields -2. **Projection**: Use projection to limit returned fields -3. **Pagination**: Implement pagination for large result sets -4. **Timeouts**: Set appropriate timeouts for long-running operations - -### Security -1. **Authentication**: Use MongoDB authentication in production -2. **SSL/TLS**: Enable SSL for production deployments -3. 
**Access Control**: Implement proper database user permissions -4. **Input Validation**: Validate all user inputs to prevent injection - -### Performance -1. **Connection Pooling**: MongoDB driver handles connection pooling automatically -2. **Batch Operations**: Use `InsertMany` and `UpdateMany` for bulk operations -3. **Aggregation Pipeline**: Use aggregation for complex data processing -4. **Indexing Strategy**: Design indexes based on query patterns - -## Monitoring and Troubleshooting - -### Connection Status -- Check `/api/mongo/info` endpoint for database information -- Monitor connection status in the web dashboard -- View real-time statistics and collection counts - -### Query Debugging -- Use manual query interface for testing queries -- Check MongoDB logs for slow queries -- Monitor query execution times - -### Common Issues - -**Connection Refused** -- Verify MongoDB is running and accessible -- Check connection URI format -- Ensure network connectivity - -**Authentication Failed** -- Verify username/password in URI -- Check user permissions in MongoDB -- Ensure database user exists - -**Query Timeout** -- Increase timeout values for long-running queries -- Optimize query performance with indexes -- Consider query restructuring - -## Migration Guide - -### From Single to Multi-Database - -1. **Update Configuration**: Convert single MongoDB config to multi-connection format -2. **Update Services**: Modify services to use `MongoConnectionManager` -3. **Test Connections**: Verify all tenant databases are accessible -4. **Migrate Data**: Move existing data to appropriate tenant databases - -### From Other Databases - -1. **Export Data**: Export data from source database -2. **Transform Schema**: Adapt schema for MongoDB document structure -3. **Import Data**: Use `mongoimport` or custom scripts -4. **Update Application**: Modify application code to use MongoDB operations -5. 
**Test Functionality**: Verify all features work with new database - -## Integration with Other Components - -### Redis Caching -```go -// Cache MongoDB query results in Redis -func (s *Service) GetCachedProducts(ctx context.Context, category string) ([]Product, error) { - cacheKey := fmt.Sprintf("products:category:%s", category) - - // Try Redis cache first - if cached, err := s.redis.Get(ctx, cacheKey); err == nil { - var products []Product - if err := json.Unmarshal([]byte(cached), &products); err == nil { - return products, nil - } - } - - // Query MongoDB - products, err := s.getProductsFromMongo(ctx, category) - if err != nil { - return nil, err - } - - // Cache results - if data, err := json.Marshal(products); err == nil { - s.redis.Set(ctx, cacheKey, string(data), time.Hour) - } - - return products, nil -} -``` - -### Kafka Integration -```go -// Publish MongoDB change events to Kafka -func (s *Service) PublishProductUpdate(ctx context.Context, productID string, update bson.M) error { - // Update MongoDB - if err := s.updateProductInMongo(ctx, productID, update); err != nil { - return err - } - - // Publish event to Kafka - event := map[string]interface{}{ - "event_type": "product_updated", - "product_id": productID, - "update": update, - "timestamp": time.Now(), - } - - eventData, _ := json.Marshal(event) - return s.kafka.Publish("product-events", eventData) -} -``` - -This MongoDB integration provides a robust, scalable, and feature-rich NoSQL database solution that seamlessly integrates with the existing project architecture. diff --git a/docs_wiki/ONBOARDING_SCRIPTS.md b/docs_wiki/ONBOARDING_SCRIPTS.md deleted file mode 100644 index c4050df..0000000 --- a/docs_wiki/ONBOARDING_SCRIPTS.md +++ /dev/null @@ -1,381 +0,0 @@ -# Onboarding Scripts Documentation - -## Overview - -The Onboarding Scripts provide an interactive setup wizard for quickly configuring and initializing the stackyard application. 
These cross-platform scripts guide new users through the essential configuration steps, ensuring a smooth first-time setup experience. - -## Features - -- **Interactive Configuration**: Step-by-step guided setup process -- **Cross-Platform Support**: Separate scripts for Unix/Linux/macOS and Windows -- **Comprehensive Setup**: Configures app settings, ports, services, and infrastructure -- **Security Awareness**: Highlights default credentials that must be changed -- **Automatic Validation**: Tests build and dependency setup -- **Backup Safety**: Creates backups before making configuration changes -- **User-Friendly**: Color-coded output with clear instructions and defaults - -## Prerequisites - -### Unix/Linux/macOS -- Bash shell -- `config.yaml` file in the project root -- Go compiler (optional, for build testing) - -### Windows -- Windows Command Prompt or PowerShell -- `config.yaml` file in the project root -- PowerShell (recommended for YAML manipulation) - -## Quick Start - -### Unix/Linux/macOS -```bash -# Make executable (first time only) -chmod +x scripts/onboarding.sh - -# Run the onboarding script -./scripts/onboarding.sh -``` - -### Windows -```cmd -# Run the onboarding script -scripts\onboarding.bat -``` - -## Configuration Workflow - -The onboarding script guides you through these configuration categories: - -### 1. Basic Application Configuration -- **Application Name**: Display name for your application -- **Version**: Application version number -- **Server Port**: HTTP API server port (default: 8080) -- **Monitoring Port**: Web monitoring interface port (default: 9090) - -### 2. Environment Settings -- **Debug Mode**: Enable detailed logging (default: Yes) -- **TUI Mode**: Enable Terminal User Interface (default: Yes) -- **Quiet Startup**: Suppress console logs during boot (default: No) - -### 3. 
Service Configuration -- **Monitoring Dashboard**: Enable web-based monitoring interface (default: Yes) -- **API Encryption**: Enable end-to-end API encryption (default: No) - -### 4. Infrastructure Configuration -- **PostgreSQL**: Database setup (single/multi/none, default: single) -- **Redis**: Caching layer (default: No) -- **Kafka**: Message queue (default: No) -- **MinIO**: Object storage (default: No) - -## Configuration Examples - -### Development Setup -``` -Application Name: My Dev App -Version: 1.0.0 -Server Port: 8080 -Monitoring Port: 9090 -Debug Mode: Yes -TUI Mode: Yes -Quiet Startup: No -Monitoring: Yes -Encryption: No -PostgreSQL: single -Redis: No -Kafka: No -MinIO: No -``` - -### Production Setup -``` -Application Name: My Production App -Version: 1.0.0 -Server Port: 8080 -Monitoring Port: 9090 -Debug Mode: No -TUI Mode: No -Quiet Startup: Yes -Monitoring: Yes -Encryption: Yes -PostgreSQL: multi -Redis: Yes -Kafka: Yes -MinIO: Yes -``` - -## Security Warnings - -The onboarding script prominently displays security warnings about: - -### Default Credentials (MUST Change) -- **PostgreSQL Password**: `Mypostgres01` -- **Monitoring Password**: `admin` -- **MinIO Credentials**: `minioadmin/minioadmin` -- **API Secret Key**: `super-secret-key` - -### Security Features Information -- **API Obfuscation**: Enabled by default (security through obscurity) -- **Encryption**: Optional AES-256-GCM encryption for API communications - -### Production Readiness Checklist -- [ ] Change all default passwords -- [ ] Configure strong encryption keys (if encryption enabled) -- [ ] Set up proper SSL/TLS certificates -- [ ] Configure firewall rules -- [ ] Enable audit logging -- [ ] Set up monitoring alerts - -## Generated Configuration - -The script updates `config.yaml` with your selections. 
Example output: - -```yaml -app: - name: "My Fancy Go App" - version: "1.0.0" - debug: true - enable_tui: true - quiet_startup: false - -server: - port: "8080" - -monitoring: - port: "9090" - enabled: true - -services: - service_a: true - service_b: false - service_encryption: false - -postgres: - enabled: true - connections: - - name: "primary" - enabled: true - host: "localhost" - port: 5432 - user: "postgres" - password: "Mypostgres01" - dbname: "postgres" - sslmode: "disable" - -redis: - enabled: false - -kafka: - enabled: false - -monitoring: - minio: - enabled: false - -encryption: - enabled: false -``` - -## Next Steps After Onboarding - -The script provides clear guidance for post-setup tasks: - -### 1. Security Configuration -```bash -# Update passwords in config.yaml -nano config.yaml - -# Set environment variables for sensitive data -export POSTGRES_PASSWORD="your-secure-password" -export MONITORING_PASSWORD="your-admin-password" -``` - -### 2. Infrastructure Setup -```bash -# Start PostgreSQL -docker run -d --name postgres \ - -e POSTGRES_PASSWORD=your-secure-password \ - -p 5432:5432 postgres:15 - -# Start Redis (if enabled) -docker run -d --name redis -p 6379:6379 redis:7 - -# Configure MinIO (if enabled) -docker run -d --name minio \ - -p 9000:9000 \ - -e MINIO_ACCESS_KEY=your-access-key \ - -e MINIO_SECRET_KEY=your-secret-key \ - minio/minio server /data -``` - -### 3. Build and Test -```bash -# Update dependencies -go mod tidy - -# Build the application -./scripts/build.sh - -# Test the application -go run cmd/app/main.go -``` - -### 4. 
Production Deployment -```bash -# Build Docker images -./scripts/docker_build.sh "myapp" "myregistry/myapp" - -# Deploy with Docker Compose -docker-compose up -d - -# Access monitoring dashboard -open http://localhost:9090 -``` - -## Troubleshooting - -### Common Issues - -**"config.yaml not found"** -- Ensure you're running the script from the project root directory -- Check that `config.yaml` exists: `ls -la config.yaml` - -**"Permission denied" (Unix/Linux/macOS)** -- Make the script executable: `chmod +x scripts/onboarding.sh` -- Check file permissions on the scripts directory - -**"PowerShell not available" (Windows)** -- The script will fall back to manual configuration instructions -- Install PowerShell or manually edit `config.yaml` - -**Configuration not applied** -- Check for syntax errors in `config.yaml` -- Restore from backup if needed: `cp config.yaml.backup config.yaml` - -**Build test fails** -- Ensure Go is installed: `go version` -- Check dependencies: `go mod tidy` -- Verify configuration syntax - -### Recovery Options - -**Restore from Backup** -```bash -# Unix/Linux/macOS -cp config.yaml.backup config.yaml - -# Windows -copy config.yaml.backup config.yaml -``` - -**Reset to Defaults** -```bash -# Remove current config and use template -rm config.yaml -cp config.template.yaml config.yaml -``` - -**Manual Configuration** -If the script fails, manually edit `config.yaml` with a text editor following the examples in this documentation. 
- -## Advanced Usage - -### Automated Setup (CI/CD) -```bash -# Non-interactive mode (future enhancement) -./scripts/onboarding.sh --non-interactive --config production.yaml - -# Custom configuration file -./scripts/onboarding.sh --config my-config.yaml -``` - -### Custom Infrastructure -For complex setups, modify `config.yaml` after running the onboarding script: - -```yaml -# Multi-tenant PostgreSQL setup -postgres: - enabled: true - connections: - - name: "tenant_a" - host: "db-tenant-a.company.com" - sslmode: "require" - - name: "tenant_b" - host: "db-tenant-b.company.com" - sslmode: "require" - -# High availability Redis -redis: - enabled: true - cluster: true - addresses: - - "redis-1.company.com:6379" - - "redis-2.company.com:6379" - - "redis-3.company.com:6379" -``` - -## Script Architecture - -### Unix/Linux/macOS Implementation (`onboarding.sh`) -- **Bash scripting** with color support -- **YAML manipulation** using `sed` for simple key-value updates -- **Error handling** with backup restoration on failure -- **Interactive prompts** with validation and defaults - -### Windows Implementation (`onboarding.bat`) -- **Batch scripting** with ANSI color support -- **PowerShell integration** for complex YAML manipulation -- **Fallback handling** for systems without PowerShell -- **Cross-compatibility** with Windows CMD limitations - -## Integration with Other Scripts - -The onboarding script works seamlessly with other project scripts: - -```bash -# Run onboarding first -./scripts/onboarding.sh - -# Then build -./scripts/build.sh - -# Or build Docker images -./scripts/docker_build.sh - -# Change package name if needed -./scripts/change_package.sh "github.com/mycompany/myproject" -``` - -## Best Practices - -### Development Environment -1. Run onboarding with development defaults -2. Keep debug mode enabled -3. Use TUI for better development experience -4. Enable monitoring for development insights - -### Production Environment -1. 
Run onboarding with production settings -2. Disable debug mode and TUI -3. Enable quiet startup for cleaner logs -4. Configure all security settings -5. Set up proper infrastructure (SSL, firewalls, monitoring) - -### Team Onboarding -1. Commit the configured `config.yaml` (without secrets) -2. Document environment variable requirements -3. Create setup documentation for new team members -4. Use version control for configuration templates - -## Support and Resources - -### Documentation Links -- [Configuration Guide](CONFIGURATION_GUIDE.md) - Complete config reference -- [Build Scripts](BUILD_SCRIPTS.md) - Application building -- [Docker Containerization](DOCKER_CONTAINERIZATION.md) - Container deployment -- [Integration Guide](INTEGRATION_GUIDE.md) - Infrastructure setup - -### Community Resources -- GitHub Issues: Report bugs and request features -- Discussions: Share setup experiences and tips -- Wiki: Extended documentation and examples diff --git a/docs_wiki/REFERENCE.md b/docs_wiki/REFERENCE.md new file mode 100644 index 0000000..0c0be09 --- /dev/null +++ b/docs_wiki/REFERENCE.md @@ -0,0 +1,905 @@ +# Technical Reference + +This comprehensive reference covers all configuration options, API specifications, advanced features, and technical details for Stackyard. Use this as your complete technical resource. 
+ +## Configuration Reference + +### Complete Configuration Schema + +```yaml +# Application Configuration +app: + name: "Stackyard App" # Application display name + version: "1.0.0" # Application version + debug: true # Enable debug logging + env: "development" # Environment (development, staging, production) + banner_path: "banner.txt" # Path to startup banner file + startup_delay: 3 # Seconds to display boot screen (0 to skip) + quiet_startup: false # Suppress console logs during startup + enable_tui: true # Enable Terminal User Interface + +# Server Configuration +server: + port: "8080" # HTTP server port + +# Service Configuration +services: + service_a: true # Basic CRUD service example + service_b: false # Additional service + service_c: true # Another service + service_d: false # Disabled service + service_e: false # Event streaming service + service_f: false # Multi-tenant service + service_g: false # MongoDB service + service_h: false # Broadcast utility demo + service_i: false # Grafana integration + +# Authentication +auth: + type: "apikey" # Authentication type (apikey, basic, none) + secret: "your-secret-key" # API key for authentication + +# Redis Configuration +redis: + enabled: false # Enable Redis + address: "localhost:6379" # Redis server address + password: "" # Redis password (optional) + db: 0 # Redis database number + +# Kafka Configuration +kafka: + enabled: false # Enable Kafka + brokers: # List of Kafka brokers + - "localhost:9092" + topic: "stackyard-events" # Default topic + group_id: "stackyard" # Consumer group ID + +# PostgreSQL Configuration (Single Connection) +postgres: + enabled: true # Enable PostgreSQL + host: "localhost" # Database host + port: 5432 # Database port + user: "postgres" # Database user + password: "password" # Database password + dbname: "stackyard" # Database name + sslmode: "disable" # SSL mode (disable, require, verify-ca, verify-full) + max_open_conns: 10 # Maximum open connections + max_idle_conns: 5 # 
Maximum idle connections + +# PostgreSQL Multi-Connection Configuration +postgres: + enabled: true + connections: + - name: "primary" # Connection identifier + enabled: true # Enable this connection + host: "localhost" + port: 5432 + user: "postgres" + password: "password" + dbname: "primary_db" + sslmode: "disable" + - name: "secondary" + enabled: true + host: "localhost" + port: 5433 + user: "postgres" + password: "password" + dbname: "secondary_db" + sslmode: "disable" + +# MongoDB Configuration +mongo: + enabled: false + connections: + - name: "primary" + enabled: true + uri: "mongodb://localhost:27017" + database: "primary_db" + - name: "analytics" + enabled: false + uri: "mongodb://analytics.example.com:27017" + database: "analytics_db" + +# Monitoring Configuration +monitoring: + enabled: true # Enable web monitoring dashboard + port: "9090" # Monitoring dashboard port + password: "admin" # Dashboard login password + obfuscate_api: false # Enable API response obfuscation + title: "Stackyard Admin" # Dashboard title + subtitle: "Monitoring Dashboard" # Dashboard subtitle + +# MinIO Configuration +monitoring: + minio: + enabled: false # Enable MinIO integration + endpoint: "localhost:9000" # MinIO server endpoint + access_key: "minioadmin" # MinIO access key + secret_key: "minioadmin" # MinIO secret key + use_ssl: false # Use SSL for MinIO connection + bucket: "stackyard" # Default bucket name + +# Cron Jobs Configuration +cron: + enabled: false # Enable scheduled jobs + jobs: + cleanup: "0 0 * * *" # Daily cleanup at midnight + health_check: "*/5 * * * *" # Health check every 5 minutes + +# Encryption Configuration +encryption: + enabled: false # Enable API encryption + algorithm: "aes-256-gcm" # Encryption algorithm + key: "" # 32-byte encryption key (base64 encoded) + rotate_keys: false # Enable automatic key rotation + key_rotation_interval: "24h" # Key rotation interval + +# Grafana Integration +grafana: + enabled: false # Enable Grafana integration + 
url: "http://localhost:3000" # Grafana server URL + api_key: "" # Grafana API key + username: "admin" # Grafana username (alternative to API key) + password: "admin" # Grafana password (alternative to API key) +``` + +## API Specifications + +### Response Format + +All API responses follow this standardized structure: + +```json +{ + "success": true, // Boolean: operation success status + "message": "Operation completed", // String: human-readable message + "data": { // Object: response payload (varies by endpoint) + "key": "value" + }, + "meta": { // Object: pagination metadata (optional) + "page": 1, + "per_page": 10, + "total": 100, + "total_pages": 10 + }, + "timestamp": 1642598400 // Number: Unix timestamp +} +``` + +### Error Response Format + +```json +{ + "success": false, + "error": { + "code": "ERROR_CODE", // String: machine-readable error code + "message": "Human readable error", // String: human-readable error message + "details": { // Object: additional error details (optional) + "field": "validation error" + } + }, + "timestamp": 1642598400 +} +``` + +### Standard Error Codes + +| Code | HTTP Status | Description | +|------|-------------|-------------| +| `BAD_REQUEST` | 400 | Invalid request parameters | +| `UNAUTHORIZED` | 401 | Authentication required | +| `FORBIDDEN` | 403 | Access denied | +| `NOT_FOUND` | 404 | Resource not found | +| `ENDPOINT_NOT_FOUND` | 404 | API endpoint doesn't exist | +| `CONFLICT` | 409 | Resource conflict | +| `VALIDATION_ERROR` | 422 | Request validation failed | +| `INTERNAL_ERROR` | 500 | Internal server error | +| `SERVICE_UNAVAILABLE` | 503 | Service temporarily unavailable | + +## API Endpoints Reference + +### Service A (CRUD Example) - `/api/v1/users` + +#### GET `/api/v1/users` +List users with pagination. 
+ +**Query Parameters:** +- `page` (integer, optional): Page number (default: 1) +- `per_page` (integer, optional): Items per page (default: 10, max: 100) + +**Response:** +```json +{ + "success": true, + "data": [ + { + "id": 1, + "name": "John Doe", + "email": "john@example.com", + "created_at": "2024-01-01T00:00:00Z" + } + ], + "meta": { + "page": 1, + "per_page": 10, + "total": 1, + "total_pages": 1 + } +} +``` + +#### POST `/api/v1/users` +Create a new user. + +**Request Body:** +```json +{ + "name": "Jane Doe", + "email": "jane@example.com", + "age": 25 +} +``` + +**Response:** +```json +{ + "success": true, + "data": { + "id": 2, + "name": "Jane Doe", + "email": "jane@example.com", + "age": 25, + "created_at": "2024-01-01T00:00:00Z" + } +} +``` + +#### GET `/api/v1/users/:id` +Get a specific user. + +**Response:** +```json +{ + "success": true, + "data": { + "id": 1, + "name": "John Doe", + "email": "john@example.com" + } +} +``` + +#### PUT `/api/v1/users/:id` +Update a user. + +**Request Body:** +```json +{ + "name": "John Smith", + "email": "johnsmith@example.com" +} +``` + +#### DELETE `/api/v1/users/:id` +Delete a user. + +**Response:** +```json +{ + "success": true, + "message": "User deleted successfully" +} +``` + +### Service G (MongoDB) - `/api/v1/products` + +#### GET `/api/v1/products/{tenant}` +List products for a tenant. + +**Path Parameters:** +- `tenant` (string): Tenant identifier + +#### POST `/api/v1/products/{tenant}` +Create a product. + +**Request Body:** +```json +{ + "name": "Laptop", + "price": 999.99, + "category": "electronics" +} +``` + +### Service H (Event Streaming) - `/api/v1/events` + +#### GET `/api/v1/events/stream/{stream_id}` +Subscribe to event stream (Server-Sent Events). + +#### POST `/api/v1/events/broadcast` +Broadcast an event. + +**Request Body:** +```json +{ + "type": "user_action", + "message": "User logged in", + "data": { + "user_id": 123 + } +} +``` + +#### GET `/api/v1/events/streams` +Get stream information. 
+ +### Service I (Grafana) - `/api/v1/grafana` + +#### POST `/api/v1/grafana/dashboards` +Create a Grafana dashboard. + +#### GET `/api/v1/grafana/dashboards` +List dashboards. + +#### GET `/api/v1/grafana/health` +Get Grafana health status. + +### Health Endpoints + +#### GET `/health` +Application health check. + +**Response:** +```json +{ + "status": "ok", + "server_ready": true, + "infrastructure": { + "postgres": {"initialized": true}, + "redis": {"initialized": false} + }, + "initialization_progress": 0.8 +} +``` + +#### GET `/health/infrastructure` +Detailed infrastructure health. + +## Request Validation + +### Built-in Validators + +| Tag | Description | Example | +|-----|-------------|---------| +| `required` | Field must not be empty | `validate:"required"` | +| `email` | Valid email format | `validate:"required,email"` | +| `min=X` | Minimum string length | `validate:"min=3"` | +| `max=X` | Maximum string length | `validate:"max=100"` | +| `gte=X` | Greater than or equal (numeric) | `validate:"gte=18"` | +| `lte=X` | Less than or equal (numeric) | `validate:"lte=120"` | +| `oneof=X Y Z` | Value must be one of listed | `validate:"oneof=low medium high"` | + +### Custom Validators + +#### Phone Number +```go +validate.RegisterValidation("phone", func(fl validator.FieldLevel) bool { + phone := fl.Field().String() + matched, _ := regexp.MatchString(`^\+?[1-9]\d{1,14}$`, phone) + return matched +}) +``` +Usage: `validate:"phone"` + +#### Username +```go +validate.RegisterValidation("username", func(fl validator.FieldLevel) bool { + username := fl.Field().String() + matched, _ := regexp.MatchString(`^[a-zA-Z0-9]{3,20}$`, username) + return matched +}) +``` +Usage: `validate:"username"` + +## Database Schemas + +### PostgreSQL Tables + +#### Users Table (GORM Auto-Migration) +```sql +CREATE TABLE users ( + id SERIAL PRIMARY KEY, + name VARCHAR(255) NOT NULL, + email VARCHAR(255) UNIQUE NOT NULL, + age INTEGER, + created_at TIMESTAMP WITH TIME ZONE DEFAULT 
NOW(), + updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(), + deleted_at TIMESTAMP WITH TIME ZONE +); +``` + +### MongoDB Collections + +#### Products Collection +```javascript +{ + "_id": ObjectId("..."), + "tenant_id": "tenant_a", + "name": "Laptop", + "price": 999.99, + "category": "electronics", + "created_at": ISODate("2024-01-01T00:00:00Z") +} +``` + +## Infrastructure Managers + +### AsyncResult Types + +All async operations return `AsyncResult[T]` types: + +```go +type AsyncResult[T any] struct { + Value T + Error error + Done chan struct{} +} + +// Methods +func (ar *AsyncResult[T]) Wait() (T, error) +func (ar *AsyncResult[T]) WaitWithTimeout(timeout time.Duration) (T, error) +func (ar *AsyncResult[T]) IsDone() bool +``` + +### Worker Pool Configuration + +| Infrastructure | Default Workers | Purpose | +|----------------|-----------------|---------| +| Redis | 10 | Cache operations | +| Kafka | 5 | Message publishing | +| PostgreSQL | 15 | Database queries | +| MongoDB | 12 | Document operations | +| MinIO | 8 | File uploads | +| Cron | 5 | Scheduled jobs | + +### Connection Pool Settings + +#### PostgreSQL +```yaml +postgres: + max_open_conns: 10 # Maximum open connections + max_idle_conns: 5 # Maximum idle connections + conn_max_lifetime: "1h" # Connection max lifetime +``` + +#### Redis +```yaml +redis: + pool_size: 10 # Connection pool size + min_idle_conns: 2 # Minimum idle connections + conn_max_lifetime: "1h" # Connection max lifetime +``` + +## Security Features + +### API Obfuscation + +**Configuration:** +```yaml +monitoring: + obfuscate_api: true +``` + +**How it works:** +- Intercepts `/api/*` requests +- Base64 encodes JSON responses +- Sets `X-Obfuscated: true` header +- Excludes streaming endpoints + +### API Encryption + +**Configuration:** +```yaml +encryption: + enabled: true + algorithm: "aes-256-gcm" + key: "32-byte-base64-encoded-key" +``` + +**Features:** +- AES-256-GCM authenticated encryption +- Automatic request/response encryption 
+- Key rotation support +- Client-side decryption utilities + +### Authentication + +**API Key Authentication:** +```yaml +auth: + type: "apikey" + secret: "your-secret-key" +``` + +**Usage:** +```bash +curl -H "X-API-Key: your-secret-key" http://localhost:8080/api/v1/users +``` + +## Monitoring & Observability + +### Web Dashboard Endpoints + +| Endpoint | Description | +|----------|-------------| +| `/` | Main dashboard | +| `/logs` | Real-time logs | +| `/postgres` | Database management | +| `/infrastructure` | Infrastructure status | +| `/config` | Configuration viewer | + +### Terminal UI Controls + +**Boot Sequence:** +- `q` - Skip countdown and continue +- `Ctrl+C` - Quit application + +**Live Logs:** +- `↑/↓` - Scroll up/down +- `Page Up/Down` - Page navigation +- `Home/End` - Jump to top/bottom +- `/` - Open filter dialog +- `F1` - Toggle auto-scroll +- `F2` - Clear all logs +- `q/Esc` - Exit TUI + +### Log Levels + +- `DEBUG` - Detailed debugging information +- `INFO` - General information messages +- `WARN` - Warning messages +- `ERROR` - Error conditions +- `FATAL` - Critical errors that cause termination + +## Build & Deployment + +### Build Scripts + +#### Unix/Linux/macOS (`scripts/build.sh`) +```bash +# Interactive build +./scripts/build.sh + +# With obfuscation +echo "y" | ./scripts/build.sh + +# Automated (no obfuscation) +echo "n" | ./scripts/build.sh +``` + +#### Windows (`scripts/build.bat`) +```batch +scripts\build.bat +``` + +### Docker Build Scripts + +#### Build Options +```bash +# Build all stages +./scripts/docker_build.sh + +# Build specific target +./scripts/docker_build.sh "myapp" "registry.com/myapp" "prod" + +# Available targets: test, dev, prod, prod-slim, prod-minimal, ultra-prod +``` + +### Environment Variables + +Override configuration at runtime: + +```bash +# Application settings +export APP_DEBUG=true +export APP_ENABLE_TUI=false + +# Server settings +export SERVER_PORT=3000 + +# Database settings +export 
POSTGRES_HOST=prod-db.example.com +export POSTGRES_PASSWORD=secure-password + +# Monitoring +export MONITORING_PASSWORD=admin-password +``` + +## Advanced Features + +### Multi-Tenant Architecture + +**Database Switching:** +```go +// Get tenant-specific connection +conn, exists := postgresManager.GetConnection("tenant_a") +if exists { + // Use tenant_a database + result := conn.ORM.Where("tenant_id = ?", "tenant_a").Find(&data) +} +``` + +**Tenant Isolation:** +- Separate database connections per tenant +- Automatic tenant ID injection +- Isolated data access patterns + +### Event Streaming + +**Server-Sent Events:** +```javascript +const eventSource = new EventSource('/api/v1/events/stream/notifications'); + +eventSource.onmessage = function(event) { + const data = JSON.parse(event.data); + console.log('Event:', data.type, data.message); +}; +``` + +**Broadcasting:** +```go +// Broadcast to specific stream +broadcaster.Broadcast("notifications", "alert", "System alert", alertData) + +// Broadcast to all streams +broadcaster.BroadcastToAll("global", "Global announcement", globalData) +``` + +### Cron Job Scheduling + +**Job Definition:** +```yaml +cron: + enabled: true + jobs: + cleanup: "0 0 * * *" # Daily at midnight + health_check: "*/5 * * * *" # Every 5 minutes + backup: "0 3 * * 1" # Weekly backup (Monday 3 AM) +``` + +**Programmatic Jobs:** +```go +cronManager.AddJob("custom-job", "0 */2 * * *", func() { + // Run every 2 hours + performMaintenance() +}) +``` + +## Performance Tuning + +### Worker Pool Sizing + +Adjust based on load: + +```yaml +infrastructure: + redis: + workers: 20 # High cache load + postgres: + workers: 25 # High database load + kafka: + workers: 10 # Moderate message load +``` + +### Connection Pool Optimization + +```yaml +postgres: + max_open_conns: 20 + max_idle_conns: 10 + conn_max_lifetime: "30m" + +redis: + pool_size: 15 + min_idle_conns: 5 + conn_max_lifetime: "1h" +``` + +### Memory Management + +- **Log rotation**: Automatic 
cleanup prevents memory leaks +- **Connection pooling**: Reuses connections efficiently +- **Async operations**: Prevents blocking and resource exhaustion + +## Troubleshooting + +### Common Issues + +**"Port already in use"** +```bash +# Find process using port +lsof -i :8080 +# Kill process +kill -9 +``` + +**"Database connection refused"** +```bash +# Check if database is running +docker ps | grep postgres + +# Test connection +psql -h localhost -U postgres -d stackyard +``` + +**"Service not registering"** +- Verify service is enabled in `config.yaml` +- Check for compilation errors in service code +- Ensure service implements the `Service` interface correctly + +**"Async operation timeout"** +- Increase timeout values in configuration +- Check worker pool sizing +- Monitor system resources + +### Debug Mode + +Enable detailed logging: + +```yaml +app: + debug: true +``` + +### Health Checks + +Monitor system health: + +```bash +# Application health +curl http://localhost:8080/health + +# Infrastructure health +curl http://localhost:8080/health/infrastructure + +# Service-specific health +curl http://localhost:8080/health/services +``` + +## Migration Guide + +### From Single DB to Multi-Tenant + +1. **Update Configuration:** + ```yaml + postgres: + enabled: true + connections: + - name: "tenant_a" + host: "db-tenant-a" + - name: "tenant_b" + host: "db-tenant-b" + ``` + +2. **Update Services:** + ```go + // Before + s.db.Find(&users) + + // After + conn, _ := s.postgresManager.GetConnection(tenantID) + conn.ORM.Find(&users) + ``` + +3. **Add Tenant Context:** + - Inject tenant ID into requests + - Validate tenant access permissions + - Update data models for tenant isolation + +### From Monolithic to Microservices + +1. **Extract Services:** + - Identify service boundaries + - Create separate service repositories + - Implement inter-service communication + +2. 
**Shared Infrastructure:** + - Use shared databases with tenant isolation + - Implement service discovery + - Configure centralized logging + +3. **Deployment Updates:** + - Create separate Docker images + - Update orchestration (Kubernetes, Docker Compose) + - Configure load balancing + +## API Versioning + +### URL-based Versioning + +``` +GET /api/v1/users # Version 1 +GET /api/v2/users # Version 2 (future) +``` + +### Header-based Versioning + +``` +Accept: application/vnd.stackyard.v1+json +Accept: application/vnd.stackyard.v2+json +``` + +### Deprecation Strategy + +1. **Announce deprecation** in response headers +2. **Maintain backward compatibility** for N versions +3. **Provide migration guides** for breaking changes +4. **Sunset old versions** with clear timelines + +## Integration Patterns + +### External API Integration + +```go +type ExternalAPIClient struct { + baseURL string + apiKey string + httpClient *http.Client +} + +func (c *ExternalAPIClient) MakeRequest(endpoint string, data interface{}) error { + // Implement retry logic + // Handle rate limiting + // Parse responses +} +``` + +### Webhook Handling + +```go +func (s *WebhookService) HandleWebhook(c echo.Context) error { + // Verify webhook signature + signature := c.Request().Header.Get("X-Signature") + if !s.verifySignature(c.Request().Body, signature) { + return response.Forbidden(c, "Invalid signature") + } + + // Process webhook payload + var payload WebhookPayload + if err := c.Bind(&payload); err != nil { + return response.BadRequest(c, "Invalid payload") + } + + // Queue for processing + s.queue.ProcessAsync(payload) + return response.Success(c, nil, "Webhook received") +} +``` + +### File Upload Handling + +```go +func (s *UploadService) HandleUpload(c echo.Context) error { + file, err := c.FormFile("file") + if err != nil { + return response.BadRequest(c, "No file provided") + } + + // Validate file size/type + if file.Size > s.maxFileSize { + return response.BadRequest(c, "File too 
large") + } + + // Upload to storage + result, err := s.storage.UploadFile(context.Background(), + fmt.Sprintf("uploads/%s", file.Filename), + file, file.Size, file.Header.Get("Content-Type")) + + return response.Created(c, map[string]interface{}{ + "filename": file.Filename, + "url": s.storage.GetFileUrl(result.Key), + }, "File uploaded") +} +``` + +This technical reference provides comprehensive coverage of Stackyard's capabilities, configuration options, and implementation details. Use this document as your authoritative source for all technical aspects of the framework. diff --git a/docs_wiki/REQUEST_RESPONSE_STRUCTURE.md b/docs_wiki/REQUEST_RESPONSE_STRUCTURE.md deleted file mode 100644 index c4f4a1c..0000000 --- a/docs_wiki/REQUEST_RESPONSE_STRUCTURE.md +++ /dev/null @@ -1,263 +0,0 @@ -# Default Request Response Structure for Echo Service - -## Summary - -This project now has a structured, clean, and dynamic request/response structure for the Echo service. This system provides: - -**Standardized Response** - Consistent format for all API endpoints -**Automatic Request Validation** - Input validation with clear error messages -**Built-in Pagination** - Pagination support with complete metadata -**Comprehensive Error Handling** - Various helper functions for error responses -**Type-safe** - Uses structs for request/response - ---- - -## Created Files - -### 1. **pkg/response/response.go** -Package for standardizing API responses: - -**Structs:** -- `Response` - Main response structure -- `ErrorDetail` - Detailed error information -- `Meta` - Pagination metadata -- `PaginationRequest` - Standard pagination parameters - -**Helper Functions:** -- Success responses: `Success()`, `SuccessWithMeta()`, `Created()`, `NoContent()` -- Error responses: `BadRequest()`, `Unauthorized()`, `Forbidden()`, `NotFound()`, `Conflict()`, `ValidationError()`, `InternalServerError()`, `ServiceUnavailable()` -- Utilities: `CalculateMeta()` for pagination metadata - -### 2. 
**pkg/request/request.go** -Package for request validation and binding: - -**Functions:** -- `Bind()` - Bind and validate request simultaneously -- `Validate()` - Validate struct using validator tags -- `FormatValidationErrors()` - Format error messages in user-friendly way - -**Custom Validators:** -- `phone` - Phone number format validation -- `username` - Username validation (alphanumeric, 3-20 chars) - -**Common Request Structs:** -- `IDRequest` - For requests with single ID -- `IDsRequest` - For requests with multiple IDs -- `SearchRequest` - For search with filter and pagination -- `DateRangeRequest` - For date-based filtering -- `SortRequest` - For sorting parameters - -### 3. **docs/API_RESPONSE_STRUCTURE.md** -Complete documentation with: -- Response structure format -- Examples of all helper functions -- Best practices -- Complete handler examples - -### 4. **docs/examples/response_examples.go** -Example implementation file: -- 7 different use cases -- Success, error, pagination, validation -- Search, custom errors, delete operations - -### 5. **internal/services/modules/service_a.go** (Updated) -Updated as reference implementation with: -- Complete CRUD operations (GET, POST, PUT, DELETE) -- Pagination support -- Request validation -- Error handling - -**Exposed Endpoints:** -- `GET /api/v1/users` -- `GET /api/v1/users/:id` -- `POST /api/v1/users` -- `PUT /api/v1/users/:id` -- `DELETE /api/v1/users/:id` - ---- - -## Dependencies - -Added new dependency: -```bash -go get github.com/go-playground/validator/v10 -``` - ---- - -## Usage - -### 1. Success Response -```go -func GetUser(c echo.Context) error { - user := getUserFromDB() - return response.Success(c, user, "User retrieved") -} -``` - -**Output:** -```json -{ - "success": true, - "message": "User retrieved", - "data": { "id": "123", "name": "John" }, - "timestamp": 1672531200 -} -``` - -### 2. 
Pagination Response -```go -func GetUsers(c echo.Context) error { - var pagination response.PaginationRequest - c.Bind(&pagination) - - users := fetchUsers(pagination.GetOffset(), pagination.GetPerPage()) - meta := response.CalculateMeta(pagination.GetPage(), pagination.GetPerPage(), 100) - - return response.SuccessWithMeta(c, users, meta) -} -``` - -Query: `GET /users?page=2&per_page=20` - -**Output:** -```json -{ - "success": true, - "data": [...], - "meta": { - "page": 2, - "per_page": 20, - "total": 100, - "total_pages": 5 - }, - "timestamp": 1672531200 -} -``` - -### 3. Request Validation -```go -type CreateUserRequest struct { - Username string `json:"username" validate:"required,username"` - Email string `json:"email" validate:"required,email"` - Age int `json:"age" validate:"required,gte=18"` -} - -func CreateUser(c echo.Context) error { - var req CreateUserRequest - - if err := request.Bind(c, &req); err != nil { - if validationErr, ok := err.(*request.ValidationError); ok { - return response.ValidationError(c, "Validation failed", - validationErr.GetFieldErrors()) - } - return response.BadRequest(c, err.Error()) - } - - user := createUser(req) - return response.Created(c, user, "User created") -} -``` - -**Error Output (if validation fails):** -```json -{ - "success": false, - "error": { - "code": "VALIDATION_ERROR", - "message": "Validation failed", - "details": { - "username": "Username must be alphanumeric and 3-20 characters", - "age": "age must be greater than or equal to 18" - } - }, - "timestamp": 1672531200 -} -``` - -### 4. Error Responses -```go -// Not Found -return response.NotFound(c, "User not found") - -// Unauthorized -return response.Unauthorized(c, "Invalid credentials") - -// Bad Request -return response.BadRequest(c, "Invalid input") - -// Internal Server Error -return response.InternalServerError(c, "Database error") -``` - ---- - -## Key Features - -### 1. 
**Consistent Response Format** -All responses follow the same structure with fields `success`, `data`, `error`, `meta`, and `timestamp`. - -### 2. **Built-in Validation** -Support for various validation rules: -- `required`, `email`, `min`, `max`, `len` -- `gte`, `lte`, `oneof` -- Custom: `phone`, `username` - -### 3. **Pagination Helper** -```go -pagination.GetPage() // Default: 1 -pagination.GetPerPage() // Default: 10, Max: 100 -pagination.GetOffset() // Calculate offset for DB query -``` - -### 4. **Comprehensive Error Handling** -Helper functions for all common HTTP status codes with customizable error details. - -### 5. **Type Safe** -Uses Go structs for request and response, avoiding manual `map[string]interface{}`. - ---- - -## Example Service Implementation - -See [`service_a.go`](../internal/services/modules/service_a.go) for complete implementation example with: -- List with pagination -- Get single resource -- Create with validation -- Update with validation -- Delete with proper response - ---- - -## Complete Documentation - -See [`API_RESPONSE_STRUCTURE.md`](API_RESPONSE_STRUCTURE.md) for: -- Complete documentation of all functions -- Best practices -- Advanced examples -- Complete use cases - ---- - -## Next Steps - -1. **Use response helpers** in all service modules -2. **Implement validation** for all request structs -3. **Standardize error messages** across services -4. **Add custom validators** according to business needs -5. **Update existing endpoints** to use the new structure - ---- - -## Build Status - -```bash -- Dependencies installed -- go mod tidy completed -- Build successful -- Ready to use! -``` - ---- - -**Note:** This structure is production-ready and can be used immediately for all Echo services. All files are provided with complete documentation and examples. 
diff --git a/docs_wiki/SERVICE_IMPLEMENTATION.md b/docs_wiki/SERVICE_IMPLEMENTATION.md deleted file mode 100644 index 5ecdd3d..0000000 --- a/docs_wiki/SERVICE_IMPLEMENTATION.md +++ /dev/null @@ -1,555 +0,0 @@ -# Service Implementation Guide - -This guide documents how to create, implement, and register services in the boilerplate. Services are modular components that encapsulate business logic and expose HTTP endpoints. - ---- - -## Table of Contents - -1. [Overview](#overview) -2. [Service Interface](#service-interface) -3. [Creating a Basic Service](#creating-a-basic-service) -4. [Creating a Service with Dependencies](#creating-a-service-with-dependencies) -5. [Registering the Service](#registering-the-service) -6. [Configuration](#configuration) -7. [Complete Example](#complete-example) - ---- - -## Overview - -The service architecture follows these principles: - -- **Modularity**: Each service is self-contained and can be enabled/disabled via configuration -- **Interface-based**: All services implement the `Service` interface -- **Dynamic Configuration**: Services are registered in a map, so adding new services requires minimal code changes -- **Dependency Injection**: Services can receive infrastructure dependencies (Redis, Postgres, etc.) - -### Directory Structure - -``` -internal/ - services/ - services.go # Service interface and registry - modules/ - service_a.go # Individual service implementations - service_b.go - ... 
-``` - ---- - -## Service Interface - -All services must implement the `Service` interface defined in `internal/services/services.go`: - -```go -type Service interface { - Name() string // Human-readable name for logging/monitoring - RegisterRoutes(g *echo.Group) // Register HTTP routes - Enabled() bool // Whether the service is active - Endpoints() []string // List of endpoints (for monitoring UI) -} -``` - -| Method | Purpose | -|--------|---------| -| `Name()` | Returns a display name shown in logs and monitoring dashboard | -| `RegisterRoutes()` | Registers all HTTP endpoints under the provided Echo group | -| `Enabled()` | Returns whether the service should be active (based on config) | -| `Endpoints()` | Returns a list of endpoint paths for the monitoring UI | - ---- - -## Creating a Basic Service - -### Step 1: Create the Service File - -Create a new file in `internal/services/modules/`. For example, `service_orders.go`: - -```go -package modules - -import ( - "your-module/pkg/response" - - "github.com/labstack/echo/v4" -) - -type OrdersService struct { - enabled bool -} - -func NewOrdersService(enabled bool) *OrdersService { - return &OrdersService{enabled: enabled} -} - -func (s *OrdersService) Name() string { return "Orders Service" } -func (s *OrdersService) Enabled() bool { return s.enabled } -func (s *OrdersService) Endpoints() []string { return []string{"/orders", "/orders/:id"} } - -func (s *OrdersService) RegisterRoutes(g *echo.Group) { - sub := g.Group("/orders") - - sub.GET("", s.listOrders) - sub.GET("/:id", s.getOrder) - sub.POST("", s.createOrder) -} - -// Handler implementations -func (s *OrdersService) listOrders(c echo.Context) error { - // Your business logic here - return response.Success(c, []string{"order1", "order2"}) -} - -func (s *OrdersService) getOrder(c echo.Context) error { - id := c.Param("id") - return response.Success(c, map[string]string{"id": id, "status": "pending"}) -} - -func (s *OrdersService) createOrder(c echo.Context) 
error { - // Bind request, validate, create order - return response.Created(c, map[string]string{"id": "new-order-123"}) -} -``` - -### Key Points - -1. The struct stores the `enabled` flag passed from configuration -2. `Name()` returns a human-readable name for logs and monitoring -3. `Endpoints()` lists the base paths for monitoring UI display -4. `RegisterRoutes()` sets up all HTTP handlers under a sub-group - ---- - -## Creating a Service with Multiple Database Connections - -### Multi-Tenant Services with GORM - -For services that need to work with multiple PostgreSQL databases (multi-tenancy), inject the `PostgresConnectionManager`: - -```go -package modules - -import ( - "fmt" - "strconv" - "test-go/pkg/infrastructure" - "test-go/pkg/response" - - "github.com/labstack/echo/v4" - "gorm.io/gorm" -) - -type MultiTenantOrder struct { - gorm.Model - TenantID string `json:"tenant_id" gorm:"not null;index"` - CustomerID uint `json:"customer_id" gorm:"not null"` - ProductName string `json:"product_name" gorm:"not null"` - Quantity int `json:"quantity" gorm:"not null;check:quantity > 0"` - TotalPrice float64 `json:"total_price" gorm:"not null;type:decimal(10,2)"` - Status string `json:"status" gorm:"not null;default:'pending'"` -} - -type OrdersService struct { - enabled bool - postgresConnectionManager *infrastructure.PostgresConnectionManager -} - -func NewOrdersService( - enabled bool, - postgresConnectionManager *infrastructure.PostgresConnectionManager, -) *OrdersService { - // Auto-migrate schema for all connected databases - if enabled && postgresConnectionManager != nil { - allConnections := postgresConnectionManager.GetAllConnections() - for tenant, db := range allConnections { - if db.ORM != nil { - if err := db.ORM.AutoMigrate(&MultiTenantOrder{}); err != nil { - fmt.Printf("Error migrating MultiTenantOrder for tenant '%s': %v\n", tenant, err) - } - } - } - } - - return &OrdersService{ - enabled: enabled, - postgresConnectionManager: 
postgresConnectionManager, - } -} - -func (s *OrdersService) Name() string { return "Multi-Tenant Orders Service" } -func (s *OrdersService) Enabled() bool { return s.enabled && s.postgresConnectionManager != nil } -func (s *OrdersService) Endpoints() []string { return []string{"/orders/{tenant}", "/orders/{tenant}/{id}"} } - -func (s *OrdersService) RegisterRoutes(g *echo.Group) { - sub := g.Group("/orders") - - // Routes with tenant parameter for database selection - sub.GET("/:tenant", s.listOrdersByTenant) - sub.POST("/:tenant", s.createOrder) - sub.GET("/:tenant/:id", s.getOrderByTenant) - sub.PUT("/:tenant/:id", s.updateOrder) - sub.DELETE("/:tenant/:id", s.deleteOrder) -} - -func (s *OrdersService) listOrdersByTenant(c echo.Context) error { - tenant := c.Param("tenant") - - // Get the database connection for this tenant - dbConn, exists := s.postgresConnectionManager.GetConnection(tenant) - if !exists { - return response.NotFound(c, fmt.Sprintf("Tenant database '%s' not found or not connected", tenant)) - } - - // Query using GORM - var orders []MultiTenantOrder - result := dbConn.ORM.Where("tenant_id = ?", tenant).Order("created_at DESC").Find(&orders) - if result.Error != nil { - return response.InternalServerError(c, fmt.Sprintf("Failed to query tenant '%s' database: %v", tenant, result.Error)) - } - - return response.Success(c, orders, fmt.Sprintf("Orders retrieved from tenant '%s' database", tenant)) -} - -func (s *OrdersService) createOrder(c echo.Context) error { - tenant := c.Param("tenant") - - // Get the database connection for this tenant - dbConn, exists := s.postgresConnectionManager.GetConnection(tenant) - if !exists { - return response.NotFound(c, fmt.Sprintf("Tenant database '%s' not found or not connected", tenant)) - } - - var order MultiTenantOrder - if err := c.Bind(&order); err != nil { - return response.BadRequest(c, "Invalid order data") - } - - // Set tenant ID and create using GORM - order.TenantID = tenant - order.Status = "pending" 
- - result := dbConn.ORM.Create(&order) - if result.Error != nil { - return response.InternalServerError(c, fmt.Sprintf("Failed to create order in tenant '%s' database: %v", tenant, result.Error)) - } - - return response.Created(c, order, fmt.Sprintf("Order created in tenant '%s' database", tenant)) -} -``` - -### Benefits of Multi-Tenant Architecture - -- **Data Isolation**: Each tenant's data is completely separated -- **Scalability**: Different tenants can use different database instances -- **Performance**: Queries are isolated to specific tenant databases -- **Security**: Tenant data cannot accidentally mix -- **Flexibility**: Tenants can have different database configurations - -### Configuration for Multi-Tenant Services - -```yaml -postgres: - enabled: true - connections: - - name: "tenant_a" - enabled: true - host: "localhost" - port: 5432 - user: "postgres" - password: "password" - dbname: "tenant_a_db" - sslmode: "disable" - - - name: "tenant_b" - enabled: true - host: "localhost" - port: 5433 - user: "postgres" - password: "password" - dbname: "tenant_b_db" - sslmode: "disable" -``` - -## Creating a Service with Dependencies - -For services that require infrastructure (database, cache, etc.), inject dependencies via the constructor: - -```go -package modules - -import ( - "your-module/pkg/infrastructure" - "your-module/pkg/response" - - "github.com/labstack/echo/v4" -) - -type InventoryService struct { - db *infrastructure.PostgresManager - redis *infrastructure.RedisManager - enabled bool -} - -func NewInventoryService( - db *infrastructure.PostgresManager, - redis *infrastructure.RedisManager, - enabled bool, -) *InventoryService { - return &InventoryService{ - db: db, - redis: redis, - enabled: enabled, - } -} - -func (s *InventoryService) Name() string { return "Inventory Service" } - -func (s *InventoryService) Enabled() bool { - // Can add additional checks for required dependencies - return s.enabled && s.db != nil -} - -func (s 
*InventoryService) Endpoints() []string { - return []string{"/inventory"} -} - -func (s *InventoryService) RegisterRoutes(g *echo.Group) { - sub := g.Group("/inventory") - sub.GET("", s.getInventory) - sub.PUT("/:sku", s.updateStock) -} - -func (s *InventoryService) getInventory(c echo.Context) error { - // Use s.db or s.redis for data operations - return response.Success(c, nil) -} - -func (s *InventoryService) updateStock(c echo.Context) error { - return response.Success(c, nil) -} -``` - -### Conditional Enabling - -The `Enabled()` method can include dependency checks: - -```go -func (s *InventoryService) Enabled() bool { - // Only enable if config says enabled AND database is available - return s.enabled && s.db != nil && s.db.ORM != nil -} -``` - ---- - -## Registering the Service - -### Step 2: Add to Service List (That's It!) - -Adding a new service is now incredibly simple. Just add your service to the list in `internal/services/register.go`: - -```go -{ - Name: "orders", - Constructor: func() interface{ Service } { - return modules.NewOrdersService(sr.config.Services.IsEnabled("orders")) - }, -}, -``` - -### How the Simplified System Works - -The registration system has been completely redesigned for simplicity: - -#### 1. **Clean Service List** -All services are defined in a simple array - no complex dependency detection or goroutines: - -```go -services := []ServiceDefinition{ - { - Name: "service_a", - Constructor: func() interface{ Service } { - return modules.NewServiceA(sr.config.Services.IsEnabled("service_a")) - }, - }, - // Add your new service here - that's it! - { - Name: "your_service", - Constructor: func() interface{ Service } { - return modules.NewYourService(sr.config.Services.IsEnabled("your_service")) - }, - }, -} -``` - -#### 2. 
**Synchronous Registration** -Services are registered and booted immediately: - -```go -// Register and boot all services -for _, svc := range services { - service := svc.Constructor() - registry.Register(service) - sr.logger.Info("Registered service", "service", svc.Name) -} - -registry.Boot(echo) -sr.logger.Info("All services registered and booted successfully") -``` - -### Server Integration (Clean and Simple) - -The server integration is now straightforward: - -```go -// internal/server/server.go - Simple service registration -serviceRegistrar := services.NewServiceRegistrar( - s.config, s.logger, - s.redisManager, s.kafkaManager, s.postgresManager, - s.postgresConnectionManager, s.mongoManager, - s.mongoConnectionManager, s.cronManager, -) - -// Register all services (simple and straightforward) -serviceRegistrar.RegisterAllServices(registry, s.echo) -s.logger.Info("All services registered successfully, ready to start monitoring") -``` - -### Service Key Convention - -The string passed to `IsEnabled()` is the key used in `config.yaml`: - -| Code | Config Key | -|------|------------| -| `IsEnabled("orders")` | `services.orders` | -| `IsEnabled("inventory")` | `services.inventory` | -| `IsEnabled("service_a")` | `services.service_a` | - ---- - -## Configuration - -### Step 3: Add to config.yaml - -Add your service key to the `services` section: - -```yaml -services: - service_a: true - service_b: false - service_c: true - service_d: false - orders: true # Your new service - inventory: true # Another new service -``` - -### Configuration Behavior - -| Value | Behavior | -|-------|----------| -| `true` | Service is enabled | -| `false` | Service is disabled (skipped at startup) | -| Not specified | Defaults to `true` (enabled) | - -The default-to-enabled behavior is defined in `config/config.go`: - -```go -func (s ServicesConfig) IsEnabled(serviceName string) bool { - if enabled, exists := s[serviceName]; exists { - return enabled - } - return true // Default to 
enabled if not specified -} -``` - ---- - -## Complete Example - -Here is a complete walkthrough for adding a new "Notifications" service: - -### 1. Create the Service File - -`internal/services/modules/notifications.go`: - -```go -package modules - -import ( - "your-module/pkg/response" - - "github.com/labstack/echo/v4" -) - -type NotificationsService struct { - enabled bool -} - -func NewNotificationsService(enabled bool) *NotificationsService { - return &NotificationsService{enabled: enabled} -} - -func (s *NotificationsService) Name() string { return "Notifications Service" } -func (s *NotificationsService) Enabled() bool { return s.enabled } -func (s *NotificationsService) Endpoints() []string { return []string{"/notifications"} } - -func (s *NotificationsService) RegisterRoutes(g *echo.Group) { - sub := g.Group("/notifications") - - sub.GET("", func(c echo.Context) error { - return response.Success(c, []map[string]string{ - {"id": "1", "message": "Welcome!", "read": "false"}, - {"id": "2", "message": "New update available", "read": "true"}, - }) - }) - - sub.POST("/:id/read", func(c echo.Context) error { - id := c.Param("id") - return response.Success(c, nil, "Notification "+id+" marked as read") - }) -} -``` - -### 2. Register the Service - -`internal/server/server.go`: - -```go -// In the Start() method, add after existing services: -registry.Register(modules.NewNotificationsService(s.config.Services.IsEnabled("notifications"))) -``` - -### 3. Configure - -`config.yaml`: - -```yaml -services: - service_a: true - service_b: false - notifications: true # Enable the new service -``` - -### 4. Test - -Start the application and verify: - -- Check the startup logs for "Starting Service... Notifications Service" -- Access `GET /api/v1/notifications` -- Check the monitoring dashboard at `http://localhost:9090` to see the service listed - ---- - -## Summary Checklist - -When adding a new service: - -1. Create a new file in `internal/services/modules/` -2. 
Implement the `Service` interface (Name, Enabled, Endpoints, RegisterRoutes) -3. Create a constructor that accepts `enabled bool` (and any dependencies) -4. Register the service in `internal/server/server.go` using `IsEnabled("key")` -5. Add the service key to `config.yaml` under `services:` - -No changes to `config/config.go` are required since the services configuration is dynamic. diff --git a/docs_wiki/TUI_IMPLEMENTATION.md b/docs_wiki/TUI_IMPLEMENTATION.md deleted file mode 100644 index e19e4ba..0000000 --- a/docs_wiki/TUI_IMPLEMENTATION.md +++ /dev/null @@ -1,123 +0,0 @@ -# TUI Implementation Documentation - -This document outlines the implementation of the Terminal User Interface (TUI) for the application, constructed using the Bubble Tea framework. - -## Overview - -The application features a sophisticated TUI that provides visual feedback during the boot sequence and a live dashboard for monitoring system resources and service status. The implementation utilizes the Model-View-Update (MVU) architecture provided by Bubble Tea. - -## Key Technologies - -- **Bubble Tea**: The primary framework for the TUI loop. -- **Lipgloss**: Used for styling, layouts, and color management. -- **Bubbles**: Provides pre-built components like spinners and progress bars. - -## Components - -The TUI is divided into two main components: the Boot Sequence and the Live Dashboard. - -### 1. Boot Sequence (`pkg/tui/boot.go`) - -The boot sequence handles the visualization of service initialization. It guides the user through the startup process with real-time feedback. - -#### Model Structure - -The `BootModel` struct manages the state of the boot process: -- **State Tracking**: Monitors the current phase (starting, initializing, complete, countdown, error). -- **Service Queue**: Maintains a list of services to be initialized and their current status (pending, loading, success, error, skipped). -- **Animation**: manages frame counters for visual effects like the wave animation. 
- -#### Features - -- **Phased Execution**: The logic transitions through distinct phases: - 1. **Starting**: Brief intro animation. - 2. **Initializing**: Iterates through the service queue, executing initialization functions. - 3. **Complete/Countdown**: Displays a success message and an optional countdown before proceeding. -- **Visual Feedback**: - - Uses a spinner for active tasks. - - Displays a progress bar showing overall completion. - - Lists individual services with color-coded status indicators. -- **Interactivity**: The user can skip the countdown or quit the application using keyboard commands (q, esc, ctrl+c). - -### 2. Live Dashboard (`pkg/tui/live.go`) - -Once the application is running, the live dashboard provides a real-time view of application logs with enhanced scrolling and filtering capabilities. - -#### Model Structure - -The `LiveModel` struct holds the log data and UI state: -- **Log Entries**: Time-stamped log messages with level and content information. -- **UI Components**: Viewport for scrolling, text input for filtering, spinner for status indication. -- **Filtering**: Separate filtered logs list based on user input (searches level and message content). -- **Scrolling**: Viewport component manages log content scrolling within the terminal window. -- **Thread Safety**: Mutex-protected log storage for concurrent access. - -#### Features - -- **Real-time Log Streaming**: Continuously displays new log entries as they arrive. -- **Scrollable Content**: Uses Bubble Tea viewport component for smooth scrolling through log history. -- **Interactive Filtering**: Press "/" to enter filter mode and search through logs by level or content. -- **Log Level Coloring**: Different colors for DEBUG, INFO, WARN, ERROR, and FATAL level logs. 
-- **Keyboard Navigation**: - - **↑/↓ or j/k**: Scroll up/down line by line - - **Page Up/Down or Space**: Scroll page by page - - **Home/g**: Jump to top of logs - - **End/G**: Jump to bottom of logs - - **"/"**: Enter filter mode - - **Enter**: Apply filter (when in filter mode) - - **Esc**: Clear filter or exit filter mode - - **q/Esc/Ctrl+C**: Quit the live dashboard -- **Layout**: - - **Header**: Displays app name, version, and running status with uptime. - - **Filter Input**: Appears at top when filter mode is active. - - **Scrollable Log Area**: Shows formatted log entries with timestamps and levels. - - **Footer**: Shows current filter, scroll position, last update time, and available controls. -- **Responsive Design**: Adapts to terminal window size changes, updating viewport dimensions accordingly. -- **Filter Functionality**: Real-time filtering of logs based on user input (case-insensitive search in level and message). -- **Log Rotation**: Automatically manages log buffer size to prevent memory issues. - -## Styling System (`pkg/tui/styles.go`) - -The application uses `lipgloss` to define a consistent design language. The color palette appears to be inspired by the Dracula theme. - -- **Colors**: - - Primary/Accents: Pink (#FF79C6), Purple (#BD93F9), Cyan (#8BE9FD). - - Status: Green (#50FA7B) for success, Yellow (#F1FA8C) for warning, Red (#FF5555) for error. - - UI Elements: Dark Grey (#6272A4) for borders and muted text. -- **Typography**: Uses bold text for headers and distinct colors for labels vs values. -- **Animations**: - - **Wave**: A string-array based animation frame loop in the boot screen. - - **Pulse**: Color cycling on headers to indicate activity. - -## Architecture & Data Flow - -1. **Initialization (`Init`)**: - - Starts the spinner tick. - - Starts the custom tick loop (boot tick or dashboard tick) to drive animations and updates. - -2. **Update Loop (`Update`)**: - - **KeyMsg**: Handles user input for quitting. 
- - **WindowSizeMsg**: Recalculates layout dimensions when the terminal is resized. - - **TickMsg**: - - Updates animation frames. - - Advanced boot logic (transitions phases, starts services). - - Refreshes system statistics (Dashboard only). - -3. **Rendering (`View`)**: - - Constructs the string representation of the UI using `lipgloss` styles. - - Uses `strings.Builder` for efficient string concatenation. - - Renders sub-components (boxes, progress bars) and joins them spatially. - -## Usage - -To use these components, the application entry point calls the respective Run functions: - -```go -// Run Boot Sequence -results, err := tui.RunBootSequence(config, initQueue) - -// Run Dashboard -err := tui.RunDashboardTUI(config, infraStatus, serviceStatus) -``` - -Both functions encapsulate the `tea.NewProgram` creation and execution, handling the alternative screen buffer automatically. diff --git a/docs_wiki/blueprint/blueprint.txt b/docs_wiki/blueprint/blueprint.txt index d84074a..ac9e058 100644 --- a/docs_wiki/blueprint/blueprint.txt +++ b/docs_wiki/blueprint/blueprint.txt @@ -249,14 +249,65 @@ func NewInventoryService( ### 5.3 Service Registration -Services are registered in `internal/server/server.go`: +Services are registered using an automatic discovery system with factory functions: + +#### Service Factory Pattern + +Each service module uses an `init()` function to register itself with the global registry: ```go -registry.Register(modules.NewServiceA(s.config.Services.IsEnabled("service_a"))) -registry.Register(modules.NewOrdersService(s.config.Services.IsEnabled("orders"))) -registry.Register(modules.NewServiceE(s.config.Services.IsEnabled("service_e"))) +func init() { + registry.RegisterService("service_name", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + if !config.Services.IsEnabled("service_name") { + return nil + } + if deps == nil || deps.RequiredDependency == nil { + logger.Warn("Required 
dependency not available, skipping service") + return nil + } + return NewService(deps.RequiredDependency, true, logger) + }) +} ``` +#### Service Registration Flow + +1. **Import Trigger**: `cmd/app/services.go` imports the services package with blank import +2. **Module Import**: `internal/services/register.go` imports all service modules +3. **Service Registration**: Each module's `init()` function registers a factory +4. **Auto-Discovery**: `registry.AutoDiscoverServices()` creates enabled services +5. **Dependency Injection**: Services check for required dependencies before creation + +#### Service Registration Files + +- **`cmd/app/services.go`**: Triggers service registration in main application +- **`internal/services/register.go`**: Imports all service modules +- **`pkg/registry/registry.go`**: Service factory registration and auto-discovery + +#### Service Configuration + +```yaml +services: + service_a: true + service_b: false + orders: true + inventory: true +``` + +#### Service Dependencies + +Services can require infrastructure dependencies: + +- **Service A**: No dependencies (always available) +- **Service C**: No dependencies (cache demo) +- **Service D**: PostgreSQL database connection +- **Service F**: PostgreSQL connection manager (multi-tenant) +- **Service G**: MongoDB connection manager (multi-tenant) +- **Service H**: No dependencies (broadcast utility) +- **Service I**: Grafana manager + +Services with missing dependencies are gracefully skipped during auto-discovery. + ### 5.4 Service Configuration ```yaml @@ -1072,7 +1123,19 @@ id, err := cron.AddJob("database_backup", "0 3 * * *", func() { 3. **Environment variables** - Can override config values 4. **Validation** - Config values are validated at startup -## 9. PROJECT STRUCTURE SUMMARY +## 9. 
SIMPLIFIED DOCUMENTATION STRUCTURE + +``` +docs_wiki/ +├── GETTING_STARTED.md # 🚀 Quick start guide for new users +├── DEVELOPMENT.md # 🔧 Development guide for extending the app +├── ARCHITECTURE.md # 🏗️ Technical overview and design decisions +├── REFERENCE.md # 📖 Complete technical reference (config, APIs, advanced) +└── blueprint/ + └── blueprint.txt # 🔍 Internal technical blueprint (this file) +``` + +## 10. PROJECT STRUCTURE SUMMARY ``` stackyard/ @@ -1083,19 +1146,13 @@ stackyard/ ├── config/ │ ├── config.go # Configuration management │ └── config.yaml # Application configuration -├── docs_wiki/ # Documentation -│ ├── API_OBFUSCATION.md -│ ├── API_RESPONSE_STRUCTURE.md -│ ├── ARCHITECTURE_DIAGRAMS.md -│ ├── DOCKER_CONTAINERIZATION.md # Docker setup guide -│ ├── ERROR_HANDLING.md -│ ├── HOME.md -│ ├── INTEGRATION_GUIDE.md -│ ├── REQUEST_RESPONSE_STRUCTURE.md -│ ├── SERVICE_IMPLEMENTATION.md -│ ├── TUI_IMPLEMENTATION.md -│ └── blueprint/ # Blueprint files -│ └── blueprint.txt # This file +├── docs_wiki/ # Simplified project documentation +│ ├── GETTING_STARTED.md # Quick start guide +│ ├── DEVELOPMENT.md # Development guide +│ ├── ARCHITECTURE.md # Technical overview +│ ├── REFERENCE.md # Complete technical reference +│ └── blueprint/ # Internal blueprint files +│ └── blueprint.txt # This comprehensive blueprint ├── internal/ │ ├── middleware/ # HTTP middleware │ ├── monitoring/ # Monitoring system @@ -1105,7 +1162,7 @@ stackyard/ │ └── modules/ # Service implementations ├── pkg/ │ ├── request/ # Request handling -│ ├── response/ # Response helpers +│ ├── response/ # Standardized API responses │ ├── tui/ # Terminal UI │ ├── infrastructure/ # External integrations │ ├── logger/ # Logging @@ -1118,7 +1175,7 @@ stackyard/ │ ├── docker_build.sh # Docker image build (Unix/Linux/macOS) │ └── docker_build.bat # Docker image build (Windows) └── web/ # Web assets - └── monitoring/ # Monitoring UI + └── monitoring/ # Monitoring UI ``` ## 10. 
KEY FEATURES SUMMARY @@ -1290,6 +1347,69 @@ When contributing to this project: 2. **Original Work**: Contributions must be original work or properly licensed 3. **No Additional Restrictions**: Contributions cannot add additional licensing restrictions -### 13.8 Commercial Support +### 13.8 Service Registration System + +The application uses an automatic service discovery and registration system that allows services to be dynamically enabled/disabled through configuration. + +#### Service Registration Flow + +1. **Import Trigger**: `cmd/app/services.go` imports the services package with blank import +2. **Module Import**: `internal/services/register.go` imports all service modules +3. **Service Registration**: Each module's `init()` function registers a factory +4. **Auto-Discovery**: `registry.AutoDiscoverServices()` creates enabled services +5. **Dependency Injection**: Services check for required dependencies before creation + +#### Service Factory Pattern + +Each service module uses an `init()` function to register itself: + +```go +func init() { + registry.RegisterService("service_name", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + if !config.Services.IsEnabled("service_name") { + return nil + } + if deps == nil || deps.RequiredDependency == nil { + logger.Warn("Required dependency not available, skipping service") + return nil + } + return NewService(deps.RequiredDependency, true, logger) + }) +} +``` + +#### Service Dependencies + +Services can require infrastructure dependencies: + +- **Service A**: No dependencies (always available) +- **Service C**: No dependencies (cache demo) +- **Service D**: PostgreSQL database connection +- **Service F**: PostgreSQL connection manager (multi-tenant) +- **Service G**: MongoDB connection manager (multi-tenant) +- **Service H**: No dependencies (broadcast utility) +- **Service I**: Grafana manager + +Services with missing dependencies are gracefully skipped during 
auto-discovery. + +#### Troubleshooting Service Registration + +**Common Issues:** + +1. **Services not registering**: Ensure `cmd/app/services.go` imports the services package +2. **Nil pointer dereferences**: Services should check if `deps` is nil before accessing dependencies +3. **Missing dependencies**: Services requiring infrastructure components will be skipped if dependencies aren't available +4. **Configuration issues**: Check that services are enabled in `config.yaml` + +**Debug Commands:** +```bash +# Check registered service factories +go run -tags debug cmd/app/main.go --debug-services + +# Verify service configuration +cat config.yaml | grep -A 10 "services:" +``` + +### 13.9 Commercial Support While the software is provided under Apache License 2.0, commercial support, consulting, and custom development services may be available through the project maintainer. diff --git a/docs_wiki/examples/response_examples.go b/docs_wiki/examples/response_examples.go index ce02788..df9498d 100644 --- a/docs_wiki/examples/response_examples.go +++ b/docs_wiki/examples/response_examples.go @@ -1,8 +1,8 @@ package examples import ( - "test-go/pkg/request" - "test-go/pkg/response" + "stackyard/pkg/request" + "stackyard/pkg/response" "github.com/labstack/echo/v4" ) diff --git a/go.mod b/go.mod index 1d112f8..53aca3a 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module test-go +module stackyard go 1.25.3 diff --git a/internal/middleware/encryption.go b/internal/middleware/encryption.go index 2f45758..4644da9 100644 --- a/internal/middleware/encryption.go +++ b/internal/middleware/encryption.go @@ -9,8 +9,8 @@ import ( "strings" "time" - "test-go/config" - "test-go/pkg/logger" + "stackyard/config" + "stackyard/pkg/logger" "github.com/labstack/echo/v4" ) diff --git a/internal/middleware/middleware.go b/internal/middleware/middleware.go index bfe11a1..d4c5421 100644 --- a/internal/middleware/middleware.go +++ b/internal/middleware/middleware.go @@ -5,7 +5,7 @@ import ( 
"net/http" "time" - "test-go/pkg/logger" + "stackyard/pkg/logger" "github.com/labstack/echo/v4" ) diff --git a/internal/monitoring/auth_handlers.go b/internal/monitoring/auth_handlers.go index 56a8af1..951053e 100644 --- a/internal/monitoring/auth_handlers.go +++ b/internal/monitoring/auth_handlers.go @@ -1,10 +1,10 @@ package monitoring import ( + "stackyard/internal/monitoring/database" + "stackyard/internal/monitoring/session" + "stackyard/pkg/response" "strings" - "test-go/internal/monitoring/database" - "test-go/internal/monitoring/session" - "test-go/pkg/response" "time" "github.com/labstack/echo/v4" diff --git a/internal/monitoring/handlers.go b/internal/monitoring/handlers.go index 20347bf..cd11783 100644 --- a/internal/monitoring/handlers.go +++ b/internal/monitoring/handlers.go @@ -3,11 +3,11 @@ package monitoring import ( "fmt" "os" + "stackyard/config" + "stackyard/pkg/infrastructure" + "stackyard/pkg/response" + "stackyard/pkg/utils" "sync" - "test-go/config" - "test-go/pkg/infrastructure" - "test-go/pkg/response" - "test-go/pkg/utils" "time" "github.com/labstack/echo/v4" diff --git a/internal/monitoring/server.go b/internal/monitoring/server.go index 8845ec4..e9d569b 100644 --- a/internal/monitoring/server.go +++ b/internal/monitoring/server.go @@ -2,14 +2,14 @@ package monitoring import ( "net/http" - "test-go/config" - "test-go/internal/monitoring/database" - "test-go/internal/monitoring/session" - "test-go/pkg/infrastructure" - "test-go/pkg/logger" + "stackyard/config" + "stackyard/internal/monitoring/database" + "stackyard/internal/monitoring/session" + "stackyard/pkg/infrastructure" + "stackyard/pkg/logger" "time" - monMiddleware "test-go/internal/monitoring/middleware" + monMiddleware "stackyard/internal/monitoring/middleware" "github.com/labstack/echo/v4" "github.com/labstack/echo/v4/middleware" diff --git a/internal/monitoring/user_handlers.go b/internal/monitoring/user_handlers.go index ae44b71..5f4ea38 100644 --- 
a/internal/monitoring/user_handlers.go +++ b/internal/monitoring/user_handlers.go @@ -7,9 +7,9 @@ import ( "io" "os" "path/filepath" + "stackyard/internal/monitoring/database" + "stackyard/pkg/response" "strings" - "test-go/internal/monitoring/database" - "test-go/pkg/response" "time" "github.com/labstack/echo/v4" diff --git a/internal/server/server.go b/internal/server/server.go index 6b0413b..f1ee5a9 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -7,33 +7,28 @@ import ( "reflect" "time" - "test-go/config" - "test-go/internal/middleware" - "test-go/internal/monitoring" - "test-go/internal/services" - "test-go/pkg/infrastructure" - "test-go/pkg/logger" - "test-go/pkg/response" - "test-go/pkg/utils" + _ "stackyard/internal/services/modules" + + "stackyard/config" + "stackyard/internal/middleware" + "stackyard/internal/monitoring" + "stackyard/pkg/infrastructure" + "stackyard/pkg/logger" + "stackyard/pkg/registry" + "stackyard/pkg/response" + "stackyard/pkg/utils" "github.com/labstack/echo/v4" echoMiddleware "github.com/labstack/echo/v4/middleware" ) type Server struct { - echo *echo.Echo - config *config.Config - logger *logger.Logger - redisManager *infrastructure.RedisManager - kafkaManager *infrastructure.KafkaManager - postgresManager *infrastructure.PostgresManager - postgresConnectionManager *infrastructure.PostgresConnectionManager - mongoManager *infrastructure.MongoManager - mongoConnectionManager *infrastructure.MongoConnectionManager - grafanaManager *infrastructure.GrafanaManager - cronManager *infrastructure.CronManager - broadcaster *monitoring.LogBroadcaster - infraInitManager *infrastructure.InfraInitManager + echo *echo.Echo + config *config.Config + logger *logger.Logger + dependencies *registry.Dependencies + broadcaster *monitoring.LogBroadcaster + infraInitManager *infrastructure.InfraInitManager } func New(cfg *config.Config, l *logger.Logger, b *monitoring.LogBroadcaster) *Server { @@ -91,18 +86,30 @@ func (s 
*Server) Start() error { // 1. Start Async Infrastructure Initialization (doesn't block) s.logger.Info("Starting async infrastructure initialization...") - s.redisManager, s.kafkaManager, _, s.postgresConnectionManager, s.mongoConnectionManager, s.grafanaManager, s.cronManager = + redisManager, kafkaManager, _, postgresConnectionManager, mongoConnectionManager, grafanaManager, cronManager := s.infraInitManager.StartAsyncInitialization(s.config, s.logger) + // Create dependencies container + s.dependencies = registry.NewDependencies( + redisManager, + kafkaManager, + nil, // Will be set from connection manager + postgresConnectionManager, + nil, // Will be set from connection manager + mongoConnectionManager, + grafanaManager, + cronManager, + ) + // Set default connections for backward compatibility - if s.postgresConnectionManager != nil { - if defaultConn, exists := s.postgresConnectionManager.GetDefaultConnection(); exists { - s.postgresManager = defaultConn + if postgresConnectionManager != nil { + if defaultConn, exists := postgresConnectionManager.GetDefaultConnection(); exists { + s.dependencies.PostgresManager = defaultConn } } - if s.mongoConnectionManager != nil { - if defaultConn, exists := s.mongoConnectionManager.GetDefaultConnection(); exists { - s.mongoManager = defaultConn + if mongoConnectionManager != nil { + if defaultConn, exists := mongoConnectionManager.GetDefaultConnection(); exists { + s.dependencies.MongoManager = defaultConn } } @@ -121,7 +128,7 @@ func (s *Server) Start() error { // 3. 
Init Services (phased: independent first, then infrastructure-dependent) s.logger.Info("Booting Services...") - registry := services.NewRegistry(s.logger) + serviceRegistry := registry.NewServiceRegistry(s.logger) // Health Check Endpoint with infrastructure status s.echo.GET("/health", func(c echo.Context) error { @@ -149,29 +156,28 @@ func (s *Server) Start() error { return response.Success(c, map[string]string{"status": "restarting", "message": "Service is restarting..."}) }) - // Create service registrar and register all services - serviceRegistrar := services.NewServiceRegistrar( - s.config, - s.logger, - s.redisManager, - s.kafkaManager, - s.postgresManager, - s.postgresConnectionManager, - s.mongoManager, - s.mongoConnectionManager, - s.grafanaManager, - s.cronManager, - ) + // Auto-discover and register all services + s.logger.Info("Auto-discovering services...") + services := registry.AutoDiscoverServices(s.config, s.logger, s.dependencies) + + // Register services with the registry + for _, service := range services { + serviceRegistry.Register(service) + } + + if len(services) <= 0 { + s.logger.Warn("No services registered!") + } - // Register all services (simple and straightforward) - serviceRegistrar.RegisterAllServices(registry, s.echo) - s.logger.Info("All services registered successfully, ready to start monitoring") + // Boot all services + serviceRegistry.Boot(s.echo) + s.logger.Info("All services boot successfully, ready to start monitoring") // 4. 
Start Monitoring (if enabled) - after all services are registered if s.config.Monitoring.Enabled { // Dynamic Service List Generation var servicesList []monitoring.ServiceInfo - for _, srv := range registry.GetServices() { + for _, srv := range serviceRegistry.GetServices() { // Prepend /api/v1 to endpoints var fullEndpoints []string for _, endp := range srv.Endpoints() { @@ -185,7 +191,7 @@ func (s *Server) Start() error { Endpoints: fullEndpoints, }) } - go monitoring.Start(s.config.Monitoring, s.config, s, s.broadcaster, s.redisManager, s.postgresManager, s.postgresConnectionManager, s.mongoManager, s.mongoConnectionManager, s.kafkaManager, s.cronManager, servicesList, s.logger) + go monitoring.Start(s.config.Monitoring, s.config, s, s.broadcaster, redisManager, s.dependencies.PostgresManager, postgresConnectionManager, s.dependencies.MongoManager, mongoConnectionManager, kafkaManager, cronManager, servicesList, s.logger) s.logger.Info("Monitoring interface started", "port", s.config.Monitoring.Port, "services_count", len(servicesList)) } @@ -203,12 +209,12 @@ func (s *Server) GetStatus() map[string]interface{} { netStats, _ := utils.GetNetworkInfo() infra := map[string]bool{ - "redis": s.config.Redis.Enabled && s.redisManager != nil, - "kafka": s.config.Kafka.Enabled && s.kafkaManager != nil, - "postgres": (s.config.Postgres.Enabled || s.config.PostgresMultiConfig.Enabled) && (s.postgresManager != nil || s.postgresConnectionManager != nil), - "mongo": (s.config.Mongo.Enabled || s.config.MongoMultiConfig.Enabled) && (s.mongoManager != nil || s.mongoConnectionManager != nil), - "grafana": s.config.Grafana.Enabled && s.grafanaManager != nil, - "cron": s.config.Cron.Enabled && s.cronManager != nil, + "redis": s.config.Redis.Enabled && s.dependencies != nil && s.dependencies.RedisManager != nil, + "kafka": s.config.Kafka.Enabled && s.dependencies != nil && s.dependencies.KafkaManager != nil, + "postgres": (s.config.Postgres.Enabled || 
s.config.PostgresMultiConfig.Enabled) && (s.dependencies != nil && s.dependencies.PostgresManager != nil), + "mongo": (s.config.Mongo.Enabled || s.config.MongoMultiConfig.Enabled) && (s.dependencies != nil && s.dependencies.MongoManager != nil), + "grafana": s.config.Grafana.Enabled && s.dependencies != nil && s.dependencies.GrafanaManager != nil, + "cron": s.config.Cron.Enabled && s.dependencies != nil && s.dependencies.CronManager != nil, } return map[string]interface{}{ @@ -254,9 +260,9 @@ func (s *Server) Shutdown(ctx context.Context, logger *logger.Logger) error { var shutdownErrors []error // 1. Cron Manager - if s.cronManager != nil { + if s.dependencies != nil && s.dependencies.CronManager != nil { logger.Info("Shutting down Cron Manager...") - if err := s.cronManager.Close(); err != nil { + if err := s.dependencies.CronManager.Close(); err != nil { shutdownErrors = append(shutdownErrors, fmt.Errorf("cron manager shutdown error: %w", err)) logger.Error("Error shutting down Cron Manager", err) } else { @@ -264,33 +270,21 @@ func (s *Server) Shutdown(ctx context.Context, logger *logger.Logger) error { } } - // 2. MongoDB connections - if s.mongoConnectionManager != nil { - logger.Info("Shutting down MongoDB connections...") - if err := s.mongoConnectionManager.CloseAll(); err != nil { - shutdownErrors = append(shutdownErrors, fmt.Errorf("mongodb shutdown error: %w", err)) - logger.Error("Error shutting down MongoDB connections", err) - } else { - logger.Info("MongoDB connections shut down successfully") - } - } + // 2. MongoDB connections - need to get from connection manager + // Note: We don't have direct access to connection managers anymore, + // but they should be closed by the infra init manager + logger.Info("MongoDB connections will be closed by infrastructure manager") - // 3. 
PostgreSQL connections - if s.postgresConnectionManager != nil { - logger.Info("Shutting down PostgreSQL connections...") - if err := s.postgresConnectionManager.CloseAll(); err != nil { - shutdownErrors = append(shutdownErrors, fmt.Errorf("postgres shutdown error: %w", err)) - logger.Error("Error shutting down PostgreSQL connections", err) - } else { - logger.Info("PostgreSQL connections shut down successfully") - } - } + // 3. PostgreSQL connections - need to get from connection manager + // Note: We don't have direct access to connection managers anymore, + // but they should be closed by the infra init manager + logger.Info("PostgreSQL connections will be closed by infrastructure manager") // 4. Kafka Manager - if s.kafkaManager != nil { + if s.dependencies != nil && s.dependencies.KafkaManager != nil { logger.Info("Shutting down Kafka Manager...") - if err := s.kafkaManager.Close(); err != nil { - shutdownErrors = append(shutdownErrors, fmt.Errorf("kafka shutdown error: %w", err)) + if err := s.dependencies.KafkaManager.Close(); err != nil { + shutdownErrors = append(shutdownErrors, fmt.Errorf("kafka manager shutdown error: %w", err)) logger.Error("Error shutting down Kafka Manager", err) } else { logger.Info("Kafka Manager shut down successfully") @@ -298,10 +292,10 @@ func (s *Server) Shutdown(ctx context.Context, logger *logger.Logger) error { } // 5. 
Redis Manager - if s.redisManager != nil { + if s.dependencies != nil && s.dependencies.RedisManager != nil { logger.Info("Shutting down Redis Manager...") - if err := s.redisManager.Close(); err != nil { - shutdownErrors = append(shutdownErrors, fmt.Errorf("redis shutdown error: %w", err)) + if err := s.dependencies.RedisManager.Close(); err != nil { + shutdownErrors = append(shutdownErrors, fmt.Errorf("redis manager shutdown error: %w", err)) logger.Error("Error shutting down Redis Manager", err) } else { logger.Info("Redis Manager shut down successfully") diff --git a/internal/services/modules/service_a.go b/internal/services/modules/service_a.go index 4abfa82..2ea471a 100644 --- a/internal/services/modules/service_a.go +++ b/internal/services/modules/service_a.go @@ -1,8 +1,12 @@ package modules import ( - "test-go/pkg/request" - "test-go/pkg/response" + "stackyard/config" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/registry" + "stackyard/pkg/request" + "stackyard/pkg/response" "time" "github.com/labstack/echo/v4" @@ -169,3 +173,10 @@ func (s *ServiceA) DeleteUser(c echo.Context) error { // No content response return response.NoContent(c) } + +// Auto-registration function - called when package is imported +func init() { + registry.RegisterService("service_a", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewServiceA(config.Services.IsEnabled("service_a")) + }) +} diff --git a/internal/services/modules/service_b.go b/internal/services/modules/service_b.go index f76b306..950707d 100644 --- a/internal/services/modules/service_b.go +++ b/internal/services/modules/service_b.go @@ -1,7 +1,11 @@ package modules import ( - "test-go/pkg/response" + "stackyard/config" + "stackyard/pkg/registry" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/response" "github.com/labstack/echo/v4" ) @@ -24,3 +28,10 @@ func (s *ServiceB) RegisterRoutes(g *echo.Group) { return 
response.Success(c, map[string]string{"message": "Hello from Service B - Products"}) }) } + +// Auto-registration function - called when package is imported +func init() { + registry.RegisterService("service_b", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewServiceB(config.Services.IsEnabled("service_b")) + }) +} diff --git a/internal/services/modules/service_c.go b/internal/services/modules/service_c.go index e33934e..62b32bf 100644 --- a/internal/services/modules/service_c.go +++ b/internal/services/modules/service_c.go @@ -3,8 +3,12 @@ package modules import ( "time" - "test-go/pkg/cache" - "test-go/pkg/response" + "stackyard/config" + "stackyard/pkg/registry" + "stackyard/pkg/cache" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/response" "github.com/labstack/echo/v4" ) @@ -61,3 +65,10 @@ func (s *ServiceC) RegisterRoutes(g *echo.Group) { }) }) } + +// Auto-registration function - called when package is imported +func init() { + registry.RegisterService("service_c", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewServiceC(config.Services.IsEnabled("service_c")) + }) +} diff --git a/internal/services/modules/service_d.go b/internal/services/modules/service_d.go index 89dba52..7be441c 100644 --- a/internal/services/modules/service_d.go +++ b/internal/services/modules/service_d.go @@ -4,9 +4,12 @@ import ( "context" "strconv" - "test-go/pkg/infrastructure" - "test-go/pkg/logger" - "test-go/pkg/response" + "stackyard/config" + "stackyard/pkg/registry" + "stackyard/pkg/infrastructure" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/response" "github.com/labstack/echo/v4" "gorm.io/gorm" @@ -129,3 +132,11 @@ func (s *ServiceD) deleteTask(c echo.Context) error { return response.Success(c, nil, "Task deleted") } + +// Auto-registration function - called when package is imported +func init() { + 
registry.RegisterService("service_d", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + logger.Debug("Service INIT LOADED") + return NewServiceD(deps.PostgresManager, config.Services.IsEnabled("service_d"), logger) + }) +} diff --git a/internal/services/modules/service_e.go b/internal/services/modules/service_e.go index 872d604..811b655 100644 --- a/internal/services/modules/service_e.go +++ b/internal/services/modules/service_e.go @@ -13,7 +13,11 @@ import ( "strings" "time" - "test-go/pkg/response" + "stackyard/config" + "stackyard/pkg/registry" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/response" "github.com/labstack/echo/v4" ) @@ -333,3 +337,14 @@ func (s *ServiceE) DecryptJSON(encryptedData string, target interface{}) error { return json.Unmarshal(decrypted, target) } + +// Auto-registration function - called when package is imported +func init() { + registry.RegisterService("service_e", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + encryptionConfig := map[string]interface{}{ + "algorithm": config.Encryption.Algorithm, + "key": config.Encryption.Key, + } + return NewServiceE(config.Encryption.Enabled, encryptionConfig) + }) +} diff --git a/internal/services/modules/service_f.go b/internal/services/modules/service_f.go index da61f45..0059838 100644 --- a/internal/services/modules/service_f.go +++ b/internal/services/modules/service_f.go @@ -2,10 +2,13 @@ package modules import ( "fmt" + "stackyard/config" + "stackyard/pkg/infrastructure" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/registry" + "stackyard/pkg/response" "strconv" - "test-go/pkg/infrastructure" - "test-go/pkg/logger" - "test-go/pkg/response" "github.com/labstack/echo/v4" "gorm.io/gorm" @@ -232,3 +235,17 @@ func (s *ServiceF) deleteOrder(c echo.Context) error { return response.Success(c, nil, fmt.Sprintf("Order deleted from tenant '%s' 
database", tenant)) } + +// Auto-registration function - called when package is imported +func init() { + registry.RegisterService("service_f", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + if !config.Services.IsEnabled("service_f") { + return nil + } + if deps == nil || deps.PostgresConnectionManager == nil { + logger.Warn("PostgreSQL connections not available, skipping Service F") + return nil + } + return NewServiceF(deps.PostgresConnectionManager, true, logger) + }) +} diff --git a/internal/services/modules/service_g.go b/internal/services/modules/service_g.go index 937797d..7e0e85a 100644 --- a/internal/services/modules/service_g.go +++ b/internal/services/modules/service_g.go @@ -3,9 +3,12 @@ package modules import ( "context" "fmt" - "test-go/pkg/infrastructure" - "test-go/pkg/logger" - "test-go/pkg/response" + "stackyard/config" + "stackyard/pkg/infrastructure" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/registry" + "stackyard/pkg/response" "github.com/labstack/echo/v4" "go.mongodb.org/mongo-driver/bson" @@ -375,3 +378,17 @@ func (s *ServiceG) getProductAnalytics(c echo.Context) error { return response.Success(c, result, fmt.Sprintf("Product analytics for tenant '%s' database", tenant)) } + +// Auto-registration function - called when package is imported +func init() { + registry.RegisterService("service_g", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + if !config.Services.IsEnabled("service_g") { + return nil + } + if deps == nil || deps.MongoConnectionManager == nil { + logger.Warn("MongoDB connections not available, skipping Service G") + return nil + } + return NewServiceG(deps.MongoConnectionManager, true, logger) + }) +} diff --git a/internal/services/modules/service_h.go b/internal/services/modules/service_h.go index fd6ac83..fe40aec 100644 --- a/internal/services/modules/service_h.go +++ 
b/internal/services/modules/service_h.go @@ -5,9 +5,12 @@ import ( "fmt" "time" - "test-go/pkg/logger" - "test-go/pkg/response" - "test-go/pkg/utils" + "stackyard/config" + "stackyard/pkg/registry" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/response" + "stackyard/pkg/utils" "github.com/labstack/echo/v4" ) @@ -277,3 +280,10 @@ func (s *ServiceH) startDemoStreams() { generator.Start() } } + +// Auto-registration function - called when package is imported +func init() { + registry.RegisterService("service_h", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + return NewServiceH(config.Services.IsEnabled("service_h"), logger) + }) +} diff --git a/internal/services/modules/service_i.go b/internal/services/modules/service_i.go index d278cdb..717733a 100644 --- a/internal/services/modules/service_i.go +++ b/internal/services/modules/service_i.go @@ -1,10 +1,13 @@ package modules import ( + "stackyard/config" + "stackyard/pkg/infrastructure" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + "stackyard/pkg/registry" + "stackyard/pkg/response" "strconv" - "test-go/pkg/infrastructure" - "test-go/pkg/logger" - "test-go/pkg/response" "github.com/labstack/echo/v4" ) @@ -202,3 +205,17 @@ func (s *ServiceI) getHealth(c echo.Context) error { return response.Success(c, health, "Grafana health check successful") } + +// Auto-registration function - called when package is imported +func init() { + registry.RegisterService("service_i", func(config *config.Config, logger *logger.Logger, deps *registry.Dependencies) interfaces.Service { + if !config.Services.IsEnabled("service_i") { + return nil + } + if deps == nil || deps.GrafanaManager == nil { + logger.Warn("Grafana manager not available, skipping Service I") + return nil + } + return NewServiceI(deps.GrafanaManager, true, logger) + }) +} diff --git a/internal/services/register.go b/internal/services/register.go deleted file mode 100644 index 
c67630b..0000000 --- a/internal/services/register.go +++ /dev/null @@ -1,180 +0,0 @@ -package services - -import ( - "test-go/config" - "test-go/internal/services/modules" - "test-go/pkg/infrastructure" - "test-go/pkg/logger" - - "github.com/labstack/echo/v4" -) - -// ServiceDefinition holds service registration information -type ServiceDefinition struct { - Name string - Constructor func() interface{ Service } -} - -// ServiceRegistrar handles service registration -type ServiceRegistrar struct { - config *config.Config - logger *logger.Logger - redisManager *infrastructure.RedisManager - kafkaManager *infrastructure.KafkaManager - postgresManager *infrastructure.PostgresManager - postgresConnMgr *infrastructure.PostgresConnectionManager - mongoManager *infrastructure.MongoManager - mongoConnMgr *infrastructure.MongoConnectionManager - grafanaManager *infrastructure.GrafanaManager - cronManager *infrastructure.CronManager -} - -// NewServiceRegistrar creates a new service registrar -func NewServiceRegistrar( - cfg *config.Config, - logger *logger.Logger, - redisMgr *infrastructure.RedisManager, - kafkaMgr *infrastructure.KafkaManager, - postgresMgr *infrastructure.PostgresManager, - postgresConnMgr *infrastructure.PostgresConnectionManager, - mongoMgr *infrastructure.MongoManager, - mongoConnMgr *infrastructure.MongoConnectionManager, - grafanaMgr *infrastructure.GrafanaManager, - cronMgr *infrastructure.CronManager, -) *ServiceRegistrar { - return &ServiceRegistrar{ - config: cfg, - logger: logger, - redisManager: redisMgr, - kafkaManager: kafkaMgr, - postgresManager: postgresMgr, - postgresConnMgr: postgresConnMgr, - mongoManager: mongoMgr, - mongoConnMgr: mongoConnMgr, - grafanaManager: grafanaMgr, - cronManager: cronMgr, - } -} - -/* -HOW TO ADD A NEW SERVICE: - -1. Create your service file in internal/services/modules/ (e.g., service_orders.go) -2. Implement the Service interface (Name, Enabled, Endpoints, RegisterRoutes) -3. 
Add your service to the list below - that's it! - -EXAMPLE: - -// In internal/services/modules/service_orders.go -type OrdersService struct { - enabled bool -} - -func NewOrdersService(enabled bool) *OrdersService { - return &OrdersService{enabled: enabled} -} - -func (s *OrdersService) Name() string { return "Orders Service" } -func (s *OrdersService) Enabled() bool { return s.enabled } -func (s *OrdersService) Endpoints() []string { return []string{"/orders"} } - -func (s *OrdersService) RegisterRoutes(g *echo.Group) { - sub := g.Group("/orders") - sub.GET("", s.listOrders) - sub.POST("", s.createOrder) -} - -// Add to config.yaml under services: -// services: -// orders: true - -// Then add to the list below: -// { -// Name: "orders", -// Constructor: func() interface{ Service } { -// return modules.NewOrdersService(sr.config.Services.IsEnabled("orders")) -// }, -// }, -*/ - -// RegisterAllServices registers all services -// Just add your new service below - that's it! -func (sr *ServiceRegistrar) RegisterAllServices(registry *Registry, echo *echo.Echo) { - services := []ServiceDefinition{ - // =============================== - // ADD YOUR NEW SERVICE HERE - // =============================== - { - Name: "service_a", - Constructor: func() interface{ Service } { - return modules.NewServiceA(sr.config.Services.IsEnabled("service_a")) - }, - }, - { - Name: "service_b", - Constructor: func() interface{ Service } { - return modules.NewServiceB(sr.config.Services.IsEnabled("service_b")) - }, - }, - { - Name: "service_c", - Constructor: func() interface{ Service } { - return modules.NewServiceC(sr.config.Services.IsEnabled("service_c")) - }, - }, - { - Name: "service_d", - Constructor: func() interface{ Service } { - return modules.NewServiceD(sr.postgresManager, sr.config.Services.IsEnabled("service_d"), sr.logger) - }, - }, - { - Name: "service_e", - Constructor: func() interface{ Service } { - encryptionConfig := map[string]interface{}{ - "algorithm": 
sr.config.Encryption.Algorithm, - "key": sr.config.Encryption.Key, - } - return modules.NewServiceE(sr.config.Encryption.Enabled, encryptionConfig) - }, - }, - { - Name: "service_f", - Constructor: func() interface{ Service } { - return modules.NewServiceF(sr.postgresConnMgr, sr.config.Services.IsEnabled("service_f"), sr.logger) - }, - }, - { - Name: "service_g", - Constructor: func() interface{ Service } { - return modules.NewServiceG(sr.mongoConnMgr, sr.config.Services.IsEnabled("service_g"), sr.logger) - }, - }, - { - Name: "service_h", - Constructor: func() interface{ Service } { - return modules.NewServiceH(sr.config.Services.IsEnabled("service_h"), sr.logger) - }, - }, - { - Name: "service_i", - Constructor: func() interface{ Service } { - return modules.NewServiceI(sr.grafanaManager, sr.config.Services.IsEnabled("service_i"), sr.logger) - }, - }, - - // =============================== - // ADD YOUR NEW SERVICE ABOVE THIS LINE - // =============================== - } - - // Register and boot all services - for _, svc := range services { - service := svc.Constructor() - registry.Register(service) - sr.logger.Info("Registered service", "service", svc.Name) - } - - registry.Boot(echo) - sr.logger.Info("All services registered and booted successfully") -} diff --git a/internal/services/services.go b/internal/services/services.go deleted file mode 100644 index b13172e..0000000 --- a/internal/services/services.go +++ /dev/null @@ -1,66 +0,0 @@ -package services - -import ( - "test-go/pkg/logger" - - "github.com/labstack/echo/v4" -) - -// Service defines a module that can register routes -type Service interface { - Name() string - RegisterRoutes(g *echo.Group) - Enabled() bool - Endpoints() []string -} - -// Registry holds available services -type Registry struct { - services []Service - logger *logger.Logger -} - -// NewRegistry creates a new service registry -func NewRegistry(l *logger.Logger) *Registry { - return &Registry{ - services: make([]Service, 0), - 
logger: l, - } -} - -// Register adds a service to the registry -func (r *Registry) Register(s Service) { - r.services = append(r.services, s) -} - -// GetServices returns the list of registered services -func (r *Registry) GetServices() []Service { - return r.services -} - -// Boot initializes enabled services and registers their routes -func (r *Registry) Boot(e *echo.Echo) { - api := e.Group("/api/v1") - - for _, s := range r.services { - if s.Enabled() { - r.logger.Info("Starting Service...", "service", s.Name()) - s.RegisterRoutes(api) - r.logger.Info("Service Started", "service", s.Name()) - } else { - r.logger.Warn("Service Skipped (Disabled via config)", "service", s.Name()) - } - } -} - -// BootService boots a single service (for dynamic registration) -func (r *Registry) BootService(e *echo.Echo, s Service) { - if s.Enabled() { - api := e.Group("/api/v1") - r.logger.Info("Starting Service...", "service", s.Name()) - s.RegisterRoutes(api) - r.logger.Info("Service Started", "service", s.Name()) - } else { - r.logger.Warn("Service Skipped (Disabled via config)", "service", s.Name()) - } -} diff --git a/pkg/infrastructure/async_init.go b/pkg/infrastructure/async_init.go index c73216f..fdbfd83 100644 --- a/pkg/infrastructure/async_init.go +++ b/pkg/infrastructure/async_init.go @@ -2,9 +2,9 @@ package infrastructure import ( "context" + "stackyard/config" + "stackyard/pkg/logger" "sync" - "test-go/config" - "test-go/pkg/logger" "time" ) diff --git a/pkg/infrastructure/grafana.go b/pkg/infrastructure/grafana.go index 7da8e1f..822e2e5 100644 --- a/pkg/infrastructure/grafana.go +++ b/pkg/infrastructure/grafana.go @@ -7,9 +7,9 @@ import ( "fmt" "io" "net/http" + "stackyard/config" + "stackyard/pkg/logger" "strings" - "test-go/config" - "test-go/pkg/logger" "time" "github.com/hashicorp/go-retryablehttp" diff --git a/pkg/infrastructure/http_monitor.go b/pkg/infrastructure/http_monitor.go index 7ec0e35..6efe130 100644 --- a/pkg/infrastructure/http_monitor.go +++ 
b/pkg/infrastructure/http_monitor.go @@ -2,7 +2,7 @@ package infrastructure import ( "net/http" - "test-go/config" + "stackyard/config" "time" ) diff --git a/pkg/infrastructure/kafka.go b/pkg/infrastructure/kafka.go index fb0b75b..8a78a98 100644 --- a/pkg/infrastructure/kafka.go +++ b/pkg/infrastructure/kafka.go @@ -3,8 +3,8 @@ package infrastructure import ( "context" "fmt" - "test-go/config" - "test-go/pkg/logger" + "stackyard/config" + "stackyard/pkg/logger" "github.com/IBM/sarama" ) diff --git a/pkg/infrastructure/minio.go b/pkg/infrastructure/minio.go index 41454df..36862a0 100644 --- a/pkg/infrastructure/minio.go +++ b/pkg/infrastructure/minio.go @@ -3,7 +3,7 @@ package infrastructure import ( "context" "io" - "test-go/config" + "stackyard/config" "time" "github.com/minio/minio-go/v7" diff --git a/pkg/infrastructure/mongo.go b/pkg/infrastructure/mongo.go index 72eb11e..92dd0b2 100644 --- a/pkg/infrastructure/mongo.go +++ b/pkg/infrastructure/mongo.go @@ -3,11 +3,11 @@ package infrastructure import ( "context" "fmt" + "stackyard/config" + "stackyard/pkg/logger" "strconv" "strings" "sync" - "test-go/config" - "test-go/pkg/logger" "time" "go.mongodb.org/mongo-driver/bson/primitive" diff --git a/pkg/infrastructure/postgres.go b/pkg/infrastructure/postgres.go index b297f9a..c723efe 100644 --- a/pkg/infrastructure/postgres.go +++ b/pkg/infrastructure/postgres.go @@ -4,8 +4,8 @@ import ( "context" "database/sql" "fmt" + "stackyard/config" "sync" - "test-go/config" _ "github.com/jackc/pgx/v5/stdlib" "gorm.io/driver/postgres" diff --git a/pkg/infrastructure/redis.go b/pkg/infrastructure/redis.go index e678f81..809a882 100644 --- a/pkg/infrastructure/redis.go +++ b/pkg/infrastructure/redis.go @@ -3,7 +3,7 @@ package infrastructure import ( "context" "fmt" - "test-go/config" + "stackyard/config" "time" "github.com/redis/go-redis/v9" diff --git a/pkg/interfaces/service.go b/pkg/interfaces/service.go new file mode 100644 index 0000000..0349e00 --- /dev/null +++ 
b/pkg/interfaces/service.go @@ -0,0 +1,18 @@ +package interfaces + +import "github.com/labstack/echo/v4" + +// Service defines the interface that all services must implement +type Service interface { + // Name returns the human-readable name of the service + Name() string + + // Enabled returns whether the service is enabled + Enabled() bool + + // Endpoints returns a list of endpoint patterns this service handles + Endpoints() []string + + // RegisterRoutes registers the service's routes with the Echo router + RegisterRoutes(g *echo.Group) +} diff --git a/pkg/registry/dependencies.go b/pkg/registry/dependencies.go new file mode 100644 index 0000000..19df0dc --- /dev/null +++ b/pkg/registry/dependencies.go @@ -0,0 +1,40 @@ +package registry + +import ( + "stackyard/pkg/infrastructure" +) + +// Dependencies holds all infrastructure dependencies that services might need +type Dependencies struct { + RedisManager *infrastructure.RedisManager + KafkaManager *infrastructure.KafkaManager + PostgresManager *infrastructure.PostgresManager + PostgresConnectionManager *infrastructure.PostgresConnectionManager + MongoManager *infrastructure.MongoManager + MongoConnectionManager *infrastructure.MongoConnectionManager + GrafanaManager *infrastructure.GrafanaManager + CronManager *infrastructure.CronManager +} + +// NewDependencies creates a new dependencies container +func NewDependencies( + redisManager *infrastructure.RedisManager, + kafkaManager *infrastructure.KafkaManager, + postgresManager *infrastructure.PostgresManager, + postgresConnectionManager *infrastructure.PostgresConnectionManager, + mongoManager *infrastructure.MongoManager, + mongoConnectionManager *infrastructure.MongoConnectionManager, + grafanaManager *infrastructure.GrafanaManager, + cronManager *infrastructure.CronManager, +) *Dependencies { + return &Dependencies{ + RedisManager: redisManager, + KafkaManager: kafkaManager, + PostgresManager: postgresManager, + PostgresConnectionManager: 
postgresConnectionManager, + MongoManager: mongoManager, + MongoConnectionManager: mongoConnectionManager, + GrafanaManager: grafanaManager, + CronManager: cronManager, + } +} diff --git a/pkg/registry/registry.go b/pkg/registry/registry.go new file mode 100644 index 0000000..d9f65b1 --- /dev/null +++ b/pkg/registry/registry.go @@ -0,0 +1,126 @@ +package registry + +import ( + "fmt" + "stackyard/config" + "stackyard/pkg/interfaces" + "stackyard/pkg/logger" + + "github.com/labstack/echo/v4" +) + +// ServiceFactory creates a service instance with dependencies +type ServiceFactory func(config *config.Config, logger *logger.Logger, deps *Dependencies) interfaces.Service + +// Global registry of service factories +var serviceFactories = make(map[string]ServiceFactory) + +// RegisterService registers a service factory for automatic discovery +func RegisterService(name string, factory ServiceFactory) { + serviceFactories[name] = factory +} + +// AutoDiscoverServices automatically discovers and creates all enabled services +func AutoDiscoverServices( + config *config.Config, + logger *logger.Logger, + deps *Dependencies, +) []interfaces.Service { + var services []interfaces.Service + + for name, factory := range serviceFactories { + logger.Debug("Creating service", "name", name) + if config.Services.IsEnabled(name) { + if service := factory(config, logger, deps); service != nil { + services = append(services, service) + logger.Info("Auto-registered service", "service", name) + } else { + logger.Warn("Service factory returned nil", "service", name) + } + } else { + logger.Debug("Service disabled via config", "service", name) + } + } + + return services +} + +// ServiceRegistry holds discovered services and manages their lifecycle +type ServiceRegistry struct { + services []interfaces.Service + logger *logger.Logger +} + +// NewServiceRegistry creates a new service registry +func NewServiceRegistry(logger *logger.Logger) *ServiceRegistry { + return &ServiceRegistry{ + 
services: make([]interfaces.Service, 0), + logger: logger, + } +} + +// GetServiceFactories returns the global service factories map for testing/debugging +func GetServiceFactories() map[string]ServiceFactory { + return serviceFactories +} + +// Register adds a service to the registry +func (r *ServiceRegistry) Register(s interfaces.Service) { + r.services = append(r.services, s) +} + +// RegisterServiceWithDependencies creates and registers a service with dependencies +func (r *ServiceRegistry) RegisterServiceWithDependencies( + config *config.Config, + logger *logger.Logger, + deps *Dependencies, + serviceName string, +) error { + if factory, exists := serviceFactories[serviceName]; exists { + if config.Services.IsEnabled(serviceName) { + service := factory(config, logger, deps) + if service != nil { + r.Register(service) + r.logger.Info("Service registered with dependencies", "service", serviceName) + return nil + } + return fmt.Errorf("failed to create service: %s", serviceName) + } else { + r.logger.Debug("Service disabled via config", "service", serviceName) + return nil + } + } + return fmt.Errorf("service factory not found: %s", serviceName) +} + +// GetServices returns the list of registered services +func (r *ServiceRegistry) GetServices() []interfaces.Service { + return r.services +} + +// Boot initializes enabled services and registers their routes +func (r *ServiceRegistry) Boot(e *echo.Echo) { + api := e.Group("/api/v1") + + for _, s := range r.services { + if s.Enabled() { + r.logger.Info("Starting Service...", "service", s.Name()) + s.RegisterRoutes(api) + r.logger.Info("Service Started", "service", s.Name()) + } else { + r.logger.Warn("Service Skipped (Disabled via config)", "service", s.Name()) + } + } +} + +// BootService boots a single service (for dynamic registration) +func (r *ServiceRegistry) BootService(e *echo.Echo, s interfaces.Service) { + if s.Enabled() { + api := e.Group("/api/v1") + r.logger.Info("Starting Service...", "service", 
s.Name()) + s.RegisterRoutes(api) + r.logger.Info("Service Started", "service", s.Name()) + } else { + r.logger.Warn("Service Skipped (Disabled via config)", "service", s.Name()) + } +} diff --git a/pkg/tui/live.go b/pkg/tui/live.go index 72f296d..146c2b5 100644 --- a/pkg/tui/live.go +++ b/pkg/tui/live.go @@ -3,9 +3,9 @@ package tui import ( "fmt" "os" + "stackyard/pkg/tui/template" "strings" "sync" - "test-go/pkg/tui/template" "time" "github.com/charmbracelet/bubbles/spinner"