add database schema migration for bug reports and users tables
This commit is contained in:
@@ -9,6 +9,7 @@ import (
|
||||
type Config struct {
|
||||
Port string
|
||||
DSN string
|
||||
Database string
|
||||
APIKey string
|
||||
AdminKey string
|
||||
MaxOpenConns int
|
||||
@@ -55,9 +56,19 @@ func Load() *Config {
|
||||
connMaxLifetime = 5
|
||||
}
|
||||
|
||||
dbName := os.Getenv("DATABASE_NAME")
|
||||
if dbName == "" {
|
||||
panic("DATABASE_NAME environment variable is required")
|
||||
}
|
||||
|
||||
if os.Getenv("DB_DSN") == "" {
|
||||
panic("DB_DSN environment variable is required")
|
||||
}
|
||||
|
||||
return &Config{
|
||||
Port: port,
|
||||
DSN: os.Getenv("DB_DSN"),
|
||||
Database: dbName,
|
||||
APIKey: apiKey,
|
||||
AdminKey: adminKey,
|
||||
MaxOpenConns: maxOpenConns,
|
||||
|
||||
55
internal/database/schema/init.sql
Normal file
55
internal/database/schema/init.sql
Normal file
@@ -0,0 +1,55 @@
|
||||
-- Core table: one row per bug report submitted from the client app.
-- hwid/hostname/os_user identify the submitting machine; status tracks
-- the triage workflow.
CREATE TABLE IF NOT EXISTS `bug_reports` (
    `id` INT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
    `name` VARCHAR(255) NOT NULL,
    `email` VARCHAR(255) NOT NULL,
    `description` TEXT NOT NULL,
    -- Machine identification; empty string when the client did not send it.
    `hwid` VARCHAR(255) NOT NULL DEFAULT '',
    `hostname` VARCHAR(255) NOT NULL DEFAULT '',
    `os_user` VARCHAR(255) NOT NULL DEFAULT '',
    -- 45 chars is enough for a full IPv6 (incl. IPv4-mapped) address.
    `submitter_ip` VARCHAR(45) NOT NULL DEFAULT '',
    `system_info` JSON NULL,
    `status` ENUM('new', 'in_review', 'resolved', 'closed') NOT NULL DEFAULT 'new',
    `created_at` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    `updated_at` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    -- Secondary indexes for filtering/sorting reports.
    INDEX `idx_status` (`status`),
    INDEX `idx_hwid` (`hwid`),
    INDEX `idx_hostname` (`hostname`),
    INDEX `idx_os_user` (`os_user`),
    INDEX `idx_created_at` (`created_at`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
-- File attachments belonging to a bug report. Contents are stored
-- inline (LONGBLOB); rows are removed automatically when the parent
-- report is deleted (ON DELETE CASCADE).
CREATE TABLE IF NOT EXISTS `bug_report_files` (
    `id` INT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
    `report_id` INT UNSIGNED NOT NULL,
    -- Role of the attachment within the report.
    `file_role` ENUM('screenshot', 'mail_file', 'localstorage', 'config', 'system_info') NOT NULL,
    `filename` VARCHAR(255) NOT NULL,
    `mime_type` VARCHAR(127) NOT NULL DEFAULT 'application/octet-stream',
    `file_size` INT UNSIGNED NOT NULL DEFAULT 0,
    `data` LONGBLOB NOT NULL,
    `created_at` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    CONSTRAINT `fk_report` FOREIGN KEY (`report_id`) REFERENCES `bug_reports`(`id`) ON DELETE CASCADE,
    INDEX `idx_report_id` (`report_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
-- Per-HWID submission counter with a window start timestamp.
-- NOTE(review): layout suggests fixed-window rate limiting — confirm
-- against the limiter code (not visible in this file).
CREATE TABLE IF NOT EXISTS `rate_limit_hwid` (
    `hwid` VARCHAR(255) PRIMARY KEY,
    `window_start` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    `count` INT UNSIGNED NOT NULL DEFAULT 0
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
-- Application accounts. `id` is an application-generated string key
-- (not auto-increment); only password hashes are stored.
CREATE TABLE IF NOT EXISTS `user` (
    `id` VARCHAR(255) PRIMARY KEY,
    `username` VARCHAR(255) NOT NULL UNIQUE,
    `password_hash` VARCHAR(255) NOT NULL,
    `role` ENUM('admin', 'user') NOT NULL DEFAULT 'user',
    -- Added by migration 2_users; enforcement happens in application code.
    `enabled` BOOLEAN NOT NULL DEFAULT TRUE,
    `created_at` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    `displayname` VARCHAR(255) NOT NULL DEFAULT ''
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
|
||||
-- Server-side login sessions; a user's sessions are deleted together
-- with the user row via ON DELETE CASCADE.
CREATE TABLE IF NOT EXISTS `session` (
    `id` VARCHAR(255) PRIMARY KEY,
    `user_id` VARCHAR(255) NOT NULL,
    `expires_at` DATETIME NOT NULL,
    CONSTRAINT `fk_session_user` FOREIGN KEY (`user_id`) REFERENCES `user`(`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
|
||||
4
internal/database/schema/migrations/1_bug_reports.sql
Normal file
4
internal/database/schema/migrations/1_bug_reports.sql
Normal file
@@ -0,0 +1,4 @@
|
||||
-- Migration 1: add machine-identification columns and their indexes to
-- bug_reports. Guarded by column_not_exists/index_not_exists conditions
-- in tasks.json, so each statement runs at most once.
ALTER TABLE bug_reports ADD COLUMN hostname VARCHAR(255) NOT NULL DEFAULT '' AFTER hwid;
ALTER TABLE bug_reports ADD COLUMN os_user VARCHAR(255) NOT NULL DEFAULT '' AFTER hostname;
ALTER TABLE bug_reports ADD INDEX idx_hostname (hostname);
ALTER TABLE bug_reports ADD INDEX idx_os_user (os_user);
|
||||
1
internal/database/schema/migrations/2_users.sql
Normal file
1
internal/database/schema/migrations/2_users.sql
Normal file
@@ -0,0 +1 @@
|
||||
-- Migration 2: add the enabled flag to user (guarded by a
-- column_not_exists condition in tasks.json).
ALTER TABLE user ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT TRUE AFTER role;
|
||||
23
internal/database/schema/migrations/tasks.json
Normal file
23
internal/database/schema/migrations/tasks.json
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"tasks": [
|
||||
{
|
||||
"id": "1_bug_reports",
|
||||
"sql_file": "1_bug_reports.sql",
|
||||
"description": "Add hostname, os_user columns and their indexes to bug_reports.",
|
||||
"conditions": [
|
||||
{ "type": "column_not_exists", "table": "bug_reports", "column": "hostname" },
|
||||
{ "type": "column_not_exists", "table": "bug_reports", "column": "os_user" },
|
||||
{ "type": "index_not_exists", "table": "bug_reports", "index": "idx_hostname" },
|
||||
{ "type": "index_not_exists", "table": "bug_reports", "index": "idx_os_user" }
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": "2_users",
|
||||
"sql_file": "2_users.sql",
|
||||
"description": "Add enabled column to user table.",
|
||||
"conditions": [
|
||||
{ "type": "column_not_exists", "table": "user", "column": "enabled" }
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
219
internal/database/schema/migrator.go
Normal file
219
internal/database/schema/migrator.go
Normal file
@@ -0,0 +1,219 @@
|
||||
package schema
|
||||
|
||||
import (
|
||||
"embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/jmoiron/sqlx"
|
||||
)
|
||||
|
||||
// migrationsFS embeds the base schema (init.sql) plus the guarded
// migration scripts and their task manifest (migrations/tasks.json).
//go:embed init.sql migrations/*.json migrations/*.sql
var migrationsFS embed.FS

// taskFile mirrors the top-level structure of migrations/tasks.json.
type taskFile struct {
	Tasks []task `json:"tasks"`
}

// task is a single guarded migration: SQLFile is executed only when
// its Conditions allow it (evaluated by shouldRun/evaluate).
type task struct {
	ID          string      `json:"id"`          // unique task identifier, used in logs
	SQLFile     string      `json:"sql_file"`    // file name relative to migrations/
	Description string      `json:"description"` // human-readable summary, logged on apply
	Conditions  []condition `json:"conditions"`
}

// condition is one introspection check against information_schema.
// Column/Index are only meaningful for the corresponding Type values.
type condition struct {
	Type   string `json:"type"` // "column_not_exists" | "index_not_exists" | "column_exists" | "index_exists" | "table_not_exists" | "table_exists"
	Table  string `json:"table"`
	Column string `json:"column,omitempty"`
	Index  string `json:"index,omitempty"`
}
|
||||
|
||||
// Migrate reads migrations/tasks.json and executes every task whose
|
||||
// conditions are ALL satisfied (i.e. logical AND).
|
||||
func Migrate(db *sqlx.DB, dbName string) error {
|
||||
// If the database has no tables at all, bootstrap with init.sql.
|
||||
empty, err := schemaIsEmpty(db, dbName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("schema: check empty: %w", err)
|
||||
}
|
||||
if empty {
|
||||
log.Println("[migrate] empty schema detected – running init.sql")
|
||||
initSQL, err := migrationsFS.ReadFile("init.sql")
|
||||
if err != nil {
|
||||
return fmt.Errorf("schema: read init.sql: %w", err)
|
||||
}
|
||||
for _, stmt := range splitStatements(string(initSQL)) {
|
||||
if _, err := db.Exec(stmt); err != nil {
|
||||
return fmt.Errorf("schema: exec init.sql: %w\nSQL: %s", err, stmt)
|
||||
}
|
||||
}
|
||||
log.Println("[migrate] init.sql applied – base schema created")
|
||||
} else {
|
||||
log.Println("[migrate] checking if tables exist")
|
||||
// Check if the tables are there or not
|
||||
var tableNames []string
|
||||
var foundTables []string
|
||||
tableNames = append(tableNames, "bug_reports", "bug_report_files", "rate_limit_hwid", "user", "session")
|
||||
for _, tableName := range tableNames {
|
||||
found, err := tableExists(db, dbName, tableName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("schema: check table %s: %w", tableName, err)
|
||||
}
|
||||
if !found {
|
||||
log.Printf("[migrate] warning: expected table %s not found – schema may be in an inconsistent state", tableName)
|
||||
continue
|
||||
}
|
||||
foundTables = append(foundTables, tableName)
|
||||
}
|
||||
if len(foundTables) != len(tableNames) {
|
||||
log.Printf("[migrate] warning: expected %d tables, found %d", len(tableNames), len(foundTables))
|
||||
log.Printf("[migrate] info: running init.sql")
|
||||
initSQL, err := migrationsFS.ReadFile("init.sql")
|
||||
if err != nil {
|
||||
return fmt.Errorf("schema: read init.sql: %w", err)
|
||||
}
|
||||
for _, stmt := range splitStatements(string(initSQL)) {
|
||||
if _, err := db.Exec(stmt); err != nil {
|
||||
return fmt.Errorf("schema: exec init.sql: %w\nSQL: %s", err, stmt)
|
||||
}
|
||||
}
|
||||
log.Println("[migrate] init.sql applied – base schema created")
|
||||
} else {
|
||||
log.Println("[migrate] all expected tables found – skipping init.sql")
|
||||
}
|
||||
}
|
||||
|
||||
raw, err := migrationsFS.ReadFile("migrations/tasks.json")
|
||||
if err != nil {
|
||||
return fmt.Errorf("schema: read tasks.json: %w", err)
|
||||
}
|
||||
|
||||
var tf taskFile
|
||||
if err := json.Unmarshal(raw, &tf); err != nil {
|
||||
return fmt.Errorf("schema: parse tasks.json: %w", err)
|
||||
}
|
||||
|
||||
for _, t := range tf.Tasks {
|
||||
needed, err := shouldRun(db, dbName, t.Conditions)
|
||||
if err != nil {
|
||||
return fmt.Errorf("schema: evaluate conditions for %s: %w", t.ID, err)
|
||||
}
|
||||
if !needed {
|
||||
log.Printf("[migrate] skip %s – conditions already met", t.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
sqlBytes, err := migrationsFS.ReadFile("migrations/" + t.SQLFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("schema: read %s: %w", t.SQLFile, err)
|
||||
}
|
||||
|
||||
stmts := splitStatements(string(sqlBytes))
|
||||
for _, stmt := range stmts {
|
||||
if _, err := db.Exec(stmt); err != nil {
|
||||
return fmt.Errorf("schema: exec %s: %w\nSQL: %s", t.ID, err, stmt)
|
||||
}
|
||||
}
|
||||
log.Printf("[migrate] applied %s – %s", t.ID, t.Description)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ---------- Condition evaluator ----------
|
||||
|
||||
func shouldRun(db *sqlx.DB, dbName string, conds []condition) (bool, error) {
|
||||
for _, c := range conds {
|
||||
met, err := evaluate(db, dbName, c)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if met {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func evaluate(db *sqlx.DB, dbName string, c condition) (bool, error) {
|
||||
switch c.Type {
|
||||
case "column_not_exists":
|
||||
exists, err := columnExists(db, dbName, c.Table, c.Column)
|
||||
return !exists, err
|
||||
|
||||
case "column_exists":
|
||||
return columnExists(db, dbName, c.Table, c.Column)
|
||||
|
||||
case "index_not_exists":
|
||||
exists, err := indexExists(db, dbName, c.Table, c.Index)
|
||||
return !exists, err
|
||||
|
||||
case "index_exists":
|
||||
return indexExists(db, dbName, c.Table, c.Index)
|
||||
|
||||
case "table_not_exists":
|
||||
exists, err := tableExists(db, dbName, c.Table)
|
||||
return !exists, err
|
||||
|
||||
case "table_exists":
|
||||
return tableExists(db, dbName, c.Table)
|
||||
|
||||
default:
|
||||
return false, fmt.Errorf("unknown condition type: %s", c.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------- MySQL introspection helpers ----------
|
||||
|
||||
func columnExists(db *sqlx.DB, dbName, table, column string) (bool, error) {
|
||||
var count int
|
||||
err := db.Get(&count,
|
||||
`SELECT COUNT(*) FROM information_schema.COLUMNS
|
||||
WHERE TABLE_SCHEMA = ?
|
||||
AND TABLE_NAME = ?
|
||||
AND COLUMN_NAME = ?`, dbName, table, column)
|
||||
return count > 0, err
|
||||
}
|
||||
|
||||
func indexExists(db *sqlx.DB, dbName, table, index string) (bool, error) {
|
||||
var count int
|
||||
err := db.Get(&count,
|
||||
`SELECT COUNT(*) FROM information_schema.STATISTICS
|
||||
WHERE TABLE_SCHEMA = ?
|
||||
AND TABLE_NAME = ?
|
||||
AND INDEX_NAME = ?`, dbName, table, index)
|
||||
return count > 0, err
|
||||
}
|
||||
|
||||
func tableExists(db *sqlx.DB, dbName, table string) (bool, error) {
|
||||
var count int
|
||||
err := db.Get(&count,
|
||||
`SELECT COUNT(*) FROM information_schema.TABLES
|
||||
WHERE TABLE_SCHEMA = ?
|
||||
AND TABLE_NAME = ?`, dbName, table)
|
||||
return count > 0, err
|
||||
}
|
||||
|
||||
func schemaIsEmpty(db *sqlx.DB, dbName string) (bool, error) {
|
||||
var count int
|
||||
err := db.Get(&count,
|
||||
`SELECT COUNT(*) FROM information_schema.TABLES
|
||||
WHERE TABLE_SCHEMA = ?`, dbName)
|
||||
return count == 0, err
|
||||
}
|
||||
|
||||
// splitStatements splits a SQL blob into individual statements on ";"
// boundaries, skipping semicolons that appear inside single-quoted
// string literals. SQL's doubled-quote escape ('') is handled naturally
// by the quote toggle; backslash escapes (non-standard MySQL mode) are
// NOT handled. Blank/whitespace-only fragments are dropped.
//
// BUG FIX: the previous version split on every ";" unconditionally,
// which would corrupt any statement containing a semicolon inside a
// string literal (e.g. an INSERT of 'a;b').
func splitStatements(sql string) []string {
	var out []string
	var cur strings.Builder
	inQuote := false
	flush := func() {
		if s := strings.TrimSpace(cur.String()); s != "" {
			out = append(out, s)
		}
		cur.Reset()
	}
	for i := 0; i < len(sql); i++ {
		ch := sql[i]
		switch {
		case ch == '\'':
			inQuote = !inQuote
			cur.WriteByte(ch)
		case ch == ';' && !inQuote:
			flush()
		default:
			cur.WriteByte(ch)
		}
	}
	flush() // trailing statement without a final ";"
	return out
}
|
||||
Reference in New Issue
Block a user