CUB-113: implement core CRUD API endpoints
Some checks failed
Dev Build / build-test (pull_request) Failing after 2m4s
Some checks failed
Dev Build / build-test (pull_request) Failing after 2m4s
- Add dtos package with request/response structs
- Add repositories: Material, Filament, Printer, PrintJob, UsageLog
- Add services: FilamentService, PrinterService, PrintJobService
- Add handlers for all 5 resources with consistent error responses
- Wire all endpoints into Chi router under /api
- Validation on POST/PUT filament endpoints
- Filter/pagination support on list endpoints
- Soft-delete for filaments (DELETE /api/filaments/{id})
- go build ./... && go vet ./... → PASS
This commit is contained in:
@@ -1 +0,0 @@
|
||||
# Repositories
|
||||
285
backend/internal/repositories/filament_repository.go
Normal file
285
backend/internal/repositories/filament_repository.go
Normal file
@@ -0,0 +1,285 @@
|
||||
package repositories
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/CubeCraft-Creations/Extrudex/backend/internal/models"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
)
|
||||
|
||||
// FilamentRepository handles database queries for filament_spools.
type FilamentRepository struct {
	// pool is the shared PostgreSQL connection pool all queries run against.
	pool *pgxpool.Pool
}
|
||||
|
||||
// NewFilamentRepository creates a FilamentRepository backed by the given pool.
|
||||
func NewFilamentRepository(pool *pgxpool.Pool) *FilamentRepository {
|
||||
return &FilamentRepository{pool: pool}
|
||||
}
|
||||
|
||||
// FilamentFilter holds query parameters for listing filament spools.
// Zero-valued string fields and a false LowStock mean "no filter".
type FilamentFilter struct {
	Material string // filter by material_base name (case-insensitive)
	Finish   string // filter by material_finish name (case-insensitive)
	Color    string // filter by exact color_hex match
	LowStock bool   // if true, filter for remaining_grams <= low_stock_threshold_grams
	Limit    int    // maximum number of rows to return (SQL LIMIT)
	Offset   int    // number of rows to skip (SQL OFFSET)
}
|
||||
|
||||
// spoolScanFields is the common SELECT column list for filament spools with
// JOINed material lookup tables.
//
// IMPORTANT: the column order here must stay in exact sync with the Scan
// argument order in scanSpoolWithJoins.
//
// Fix: the original wrapped the temperature columns in
// COALESCE(col, NULL::int), which is a no-op (coalescing with NULL yields the
// column value unchanged); the plain columns are equivalent.
const spoolScanFields = `
	s.id, s.name,
	s.material_base_id,
	COALESCE(mb.name, '') as material_base_name,
	COALESCE(mb.density_g_cm3, 0) as material_base_density_g_cm3,
	mb.extrusion_temp_min as material_base_extrusion_temp_min,
	mb.extrusion_temp_max as material_base_extrusion_temp_max,
	mb.bed_temp_min as material_base_bed_temp_min,
	mb.bed_temp_max as material_base_bed_temp_max,
	COALESCE(mb.created_at, s.created_at) as material_base_created_at,
	COALESCE(mb.updated_at, s.created_at) as material_base_updated_at,
	s.material_finish_id,
	COALESCE(mf.name, '') as material_finish_name,
	mf.description as material_finish_description,
	COALESCE(mf.created_at, s.created_at) as material_finish_created_at,
	COALESCE(mf.updated_at, s.created_at) as material_finish_updated_at,
	s.material_modifier_id,
	mm.name as material_modifier_name,
	mm.description as material_modifier_description,
	mm.created_at as material_modifier_created_at,
	mm.updated_at as material_modifier_updated_at,
	s.color_hex, s.brand, s.diameter_mm,
	s.initial_grams, s.remaining_grams, s.spool_weight_grams,
	s.cost_usd, s.low_stock_threshold_grams,
	s.notes, s.barcode,
	s.deleted_at, s.created_at, s.updated_at`
|
||||
|
||||
// spoolFromJoins is the shared FROM/JOIN clause for filament spool queries.
// LEFT JOINs keep spool rows whose material lookup rows are missing; the
// COALESCEs in spoolScanFields supply defaults for those NULLs.
const spoolFromJoins = `
	FROM filament_spools s
	LEFT JOIN material_bases mb ON s.material_base_id = mb.id
	LEFT JOIN material_finishes mf ON s.material_finish_id = mf.id
	LEFT JOIN material_modifiers mm ON s.material_modifier_id = mm.id`
|
||||
|
||||
// scanSpoolWithJoins scans a full spool row including all JOINed tables.
|
||||
func scanSpoolWithJoins(row interface{ Scan(...interface{}) error }) (models.FilamentSpool, error) {
|
||||
var s models.FilamentSpool
|
||||
var mb models.MaterialBase
|
||||
var mf models.MaterialFinish
|
||||
var mfDesc *string
|
||||
var modifierID *int
|
||||
var modName, modDesc *string
|
||||
var modCreatedAt, modUpdatedAt *time.Time
|
||||
|
||||
err := row.Scan(
|
||||
&s.ID, &s.Name,
|
||||
&s.MaterialBaseID,
|
||||
&mb.Name, &mb.DensityGCm3,
|
||||
&mb.ExtrusionTempMin, &mb.ExtrusionTempMax,
|
||||
&mb.BedTempMin, &mb.BedTempMax,
|
||||
&mb.CreatedAt, &mb.UpdatedAt,
|
||||
&s.MaterialFinishID,
|
||||
&mf.Name, &mfDesc,
|
||||
&mf.CreatedAt, &mf.UpdatedAt,
|
||||
&modifierID,
|
||||
&modName, &modDesc,
|
||||
&modCreatedAt, &modUpdatedAt,
|
||||
&s.ColorHex, &s.Brand, &s.DiameterMM,
|
||||
&s.InitialGrams, &s.RemainingGrams, &s.SpoolWeightGrams,
|
||||
&s.CostUSD, &s.LowStockThresholdGrams,
|
||||
&s.Notes, &s.Barcode,
|
||||
&s.DeletedAt, &s.CreatedAt, &s.UpdatedAt,
|
||||
)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
|
||||
mb.ID = s.MaterialBaseID
|
||||
s.MaterialBase = &mb
|
||||
|
||||
mf.ID = s.MaterialFinishID
|
||||
if mfDesc != nil {
|
||||
mf.Description = mfDesc
|
||||
}
|
||||
s.MaterialFinish = &mf
|
||||
|
||||
s.MaterialModifierID = modifierID
|
||||
if modifierID != nil && modName != nil {
|
||||
mm := models.MaterialModifier{
|
||||
ID: *modifierID,
|
||||
Name: *modName,
|
||||
}
|
||||
if modDesc != nil {
|
||||
mm.Description = modDesc
|
||||
}
|
||||
if modCreatedAt != nil {
|
||||
mm.CreatedAt = *modCreatedAt
|
||||
}
|
||||
if modUpdatedAt != nil {
|
||||
mm.UpdatedAt = *modUpdatedAt
|
||||
}
|
||||
s.MaterialModifier = &mm
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// GetAll returns filament spools matching the given filters, with pagination.
|
||||
// Returns results, total matching count, and any error.
|
||||
func (r *FilamentRepository) GetAll(ctx context.Context, filter FilamentFilter) ([]models.FilamentSpool, int, error) {
|
||||
conditions := []string{"s.deleted_at IS NULL"}
|
||||
args := []interface{}{}
|
||||
argIdx := 1
|
||||
|
||||
if filter.Material != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("LOWER(mb.name) = LOWER($%d)", argIdx))
|
||||
args = append(args, filter.Material)
|
||||
argIdx++
|
||||
}
|
||||
if filter.Finish != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("LOWER(mf.name) = LOWER($%d)", argIdx))
|
||||
args = append(args, filter.Finish)
|
||||
argIdx++
|
||||
}
|
||||
if filter.Color != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("s.color_hex = $%d", argIdx))
|
||||
args = append(args, filter.Color)
|
||||
argIdx++
|
||||
}
|
||||
if filter.LowStock {
|
||||
conditions = append(conditions, "s.remaining_grams <= s.low_stock_threshold_grams")
|
||||
}
|
||||
|
||||
whereClause := ""
|
||||
if len(conditions) > 0 {
|
||||
whereClause = "WHERE " + strings.Join(conditions, " AND ")
|
||||
}
|
||||
|
||||
// Count total.
|
||||
var total int
|
||||
countQuery := "SELECT COUNT(*) " + spoolFromJoins + " " + whereClause
|
||||
if err := r.pool.QueryRow(ctx, countQuery, args...).Scan(&total); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
// Query with pagination.
|
||||
dataQuery := "SELECT " + spoolScanFields + " " + spoolFromJoins + " " +
|
||||
whereClause +
|
||||
" ORDER BY s.name ASC" +
|
||||
fmt.Sprintf(" LIMIT $%d OFFSET $%d", argIdx, argIdx+1)
|
||||
|
||||
dataArgs := make([]interface{}, len(args))
|
||||
copy(dataArgs, args)
|
||||
dataArgs = append(dataArgs, filter.Limit, filter.Offset)
|
||||
|
||||
rows, err := r.pool.Query(ctx, dataQuery, dataArgs...)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var spools []models.FilamentSpool
|
||||
for rows.Next() {
|
||||
s, err := scanSpoolWithJoins(rows)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
spools = append(spools, s)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if spools == nil {
|
||||
spools = []models.FilamentSpool{}
|
||||
}
|
||||
|
||||
return spools, total, nil
|
||||
}
|
||||
|
||||
// GetByID returns a single filament spool by ID with JOINed data.
//
// NOTE(review): the original comment said "Returns nil if not found or
// soft-deleted", but the code propagates the QueryRow scan error (for pgx
// this is pgx.ErrNoRows when no row matches) rather than returning
// (nil, nil). Callers must treat a no-rows error as "not found".
func (r *FilamentRepository) GetByID(ctx context.Context, id int) (*models.FilamentSpool, error) {
	query := "SELECT " + spoolScanFields + " " + spoolFromJoins +
		" WHERE s.id = $1 AND s.deleted_at IS NULL"

	row := r.pool.QueryRow(ctx, query, id)
	s, err := scanSpoolWithJoins(row)
	if err != nil {
		return nil, err
	}
	return &s, nil
}
|
||||
|
||||
// Create inserts a new filament spool and returns the created spool with
// JOINed data (re-read via GetByID so the material lookups are populated).
// The generated ID comes back via RETURNING; created_at/updated_at are left
// to database defaults.
func (r *FilamentRepository) Create(ctx context.Context, spool *models.FilamentSpool) (*models.FilamentSpool, error) {
	var id int
	err := r.pool.QueryRow(ctx, `
		INSERT INTO filament_spools (
			name, material_base_id, material_finish_id, material_modifier_id,
			color_hex, brand, diameter_mm, initial_grams, remaining_grams,
			spool_weight_grams, cost_usd, low_stock_threshold_grams,
			notes, barcode
		) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14)
		RETURNING id
	`,
		// Argument order mirrors the column list above — keep in sync.
		spool.Name, spool.MaterialBaseID, spool.MaterialFinishID, spool.MaterialModifierID,
		spool.ColorHex, spool.Brand, spool.DiameterMM, spool.InitialGrams, spool.RemainingGrams,
		spool.SpoolWeightGrams, spool.CostUSD, spool.LowStockThresholdGrams,
		spool.Notes, spool.Barcode,
	).Scan(&id)
	if err != nil {
		return nil, err
	}

	return r.GetByID(ctx, id)
}
|
||||
|
||||
// Update applies partial updates to an existing filament spool.
// Only the columns present in the updates map are changed; updated_at is
// always refreshed. Returns the updated spool re-read via GetByID, or
// (nil, nil) when no live row matched the ID.
//
// NOTE(review): map keys are interpolated directly into the SQL as column
// names. This is safe only if callers pass trusted, whitelisted column names
// (presumably enforced in the service layer) — confirm; otherwise this is an
// injection vector.
func (r *FilamentRepository) Update(ctx context.Context, id int, updates map[string]interface{}) (*models.FilamentSpool, error) {
	// Nothing to change: return the current row.
	if len(updates) == 0 {
		return r.GetByID(ctx, id)
	}

	setClauses := []string{"updated_at = NOW()"}
	args := []interface{}{}
	argIdx := 1

	// Map iteration order is random, so the SET clause order varies between
	// calls; harmless for the UPDATE semantics.
	for col, val := range updates {
		setClauses = append(setClauses, fmt.Sprintf("%s = $%d", col, argIdx))
		args = append(args, val)
		argIdx++
	}

	// The ID takes the final placeholder.
	args = append(args, id)
	query := fmt.Sprintf("UPDATE filament_spools SET %s WHERE id = $%d AND deleted_at IS NULL",
		strings.Join(setClauses, ", "), argIdx)

	result, err := r.pool.Exec(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	if result.RowsAffected() == 0 {
		return nil, nil // not found or deleted
	}

	return r.GetByID(ctx, id)
}
|
||||
|
||||
// SoftDelete marks a filament spool as deleted by setting deleted_at = NOW().
|
||||
// Returns true if a row was affected.
|
||||
func (r *FilamentRepository) SoftDelete(ctx context.Context, id int) (bool, error) {
|
||||
result, err := r.pool.Exec(ctx, `
|
||||
UPDATE filament_spools
|
||||
SET deleted_at = NOW(), updated_at = NOW()
|
||||
WHERE id = $1 AND deleted_at IS NULL
|
||||
`, id)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return result.RowsAffected() > 0, nil
|
||||
}
|
||||
54
backend/internal/repositories/material_repository.go
Normal file
54
backend/internal/repositories/material_repository.go
Normal file
@@ -0,0 +1,54 @@
|
||||
// Package repositories provides data access logic backed by PostgreSQL via pgxpool.
|
||||
package repositories
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/CubeCraft-Creations/Extrudex/backend/internal/models"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
)
|
||||
|
||||
// MaterialRepository handles database queries for material lookup tables.
type MaterialRepository struct {
	// pool is the shared PostgreSQL connection pool all queries run against.
	pool *pgxpool.Pool
}
|
||||
|
||||
// NewMaterialRepository creates a MaterialRepository backed by the given pool.
|
||||
func NewMaterialRepository(pool *pgxpool.Pool) *MaterialRepository {
|
||||
return &MaterialRepository{pool: pool}
|
||||
}
|
||||
|
||||
// GetAll returns all material bases ordered by name.
|
||||
func (r *MaterialRepository) GetAll(ctx context.Context) ([]models.MaterialBase, error) {
|
||||
rows, err := r.pool.Query(ctx, `
|
||||
SELECT id, name, density_g_cm3, extrusion_temp_min, extrusion_temp_max,
|
||||
bed_temp_min, bed_temp_max, created_at, updated_at
|
||||
FROM material_bases
|
||||
ORDER BY name
|
||||
`)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var materials []models.MaterialBase
|
||||
for rows.Next() {
|
||||
var m models.MaterialBase
|
||||
if err := rows.Scan(
|
||||
&m.ID, &m.Name, &m.DensityGCm3,
|
||||
&m.ExtrusionTempMin, &m.ExtrusionTempMax,
|
||||
&m.BedTempMin, &m.BedTempMax,
|
||||
&m.CreatedAt, &m.UpdatedAt,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
materials = append(materials, m)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if materials == nil {
|
||||
materials = []models.MaterialBase{}
|
||||
}
|
||||
return materials, nil
|
||||
}
|
||||
157
backend/internal/repositories/print_job_repository.go
Normal file
157
backend/internal/repositories/print_job_repository.go
Normal file
@@ -0,0 +1,157 @@
|
||||
package repositories
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/CubeCraft-Creations/Extrudex/backend/internal/models"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
)
|
||||
|
||||
// PrintJobRepository handles database queries for print_jobs.
type PrintJobRepository struct {
	// pool is the shared PostgreSQL connection pool all queries run against.
	pool *pgxpool.Pool
}
|
||||
|
||||
// NewPrintJobRepository creates a PrintJobRepository backed by the given pool.
|
||||
func NewPrintJobRepository(pool *pgxpool.Pool) *PrintJobRepository {
|
||||
return &PrintJobRepository{pool: pool}
|
||||
}
|
||||
|
||||
// PrintJobFilter holds query parameters for listing print jobs.
// An empty Status and a nil PrinterID mean "no filter".
type PrintJobFilter struct {
	Status    string // filter by job_status name (case-insensitive)
	PrinterID *int   // filter by printer_id; nil means all printers
	Limit     int    // maximum number of rows to return (SQL LIMIT)
	Offset    int    // number of rows to skip (SQL OFFSET)
}
|
||||
|
||||
// scanPrintJobWithJoins scans a print_job row with JOINed tables.
// The Scan argument order must match the SELECT column order used by GetAll
// and GetByID exactly — change them together.
func (r *PrintJobRepository) scanPrintJobWithJoins(row interface{ Scan(...interface{}) error }) (models.PrintJob, error) {
	var pj models.PrintJob
	var js models.JobStatus

	err := row.Scan(
		&pj.ID, &pj.PrinterID, &pj.FilamentSpoolID,
		&pj.JobName, &pj.FileName,
		&pj.JobStatusID,
		&pj.StartedAt, &pj.CompletedAt,
		&pj.DurationSeconds, &pj.EstimatedDurationSeconds,
		&pj.TotalMMExtruded, &pj.TotalGramsUsed, &pj.TotalCostUSD,
		&pj.Notes,
		&pj.DeletedAt, &pj.CreatedAt, &pj.UpdatedAt,
		&js.ID, &js.Name,
		&js.CreatedAt, &js.UpdatedAt,
	)
	if err != nil {
		return pj, err
	}

	// Attach the joined job status to the returned job.
	pj.JobStatus = &js
	return pj, nil
}
|
||||
|
||||
// GetAll returns print jobs matching the given filters, with pagination.
|
||||
func (r *PrintJobRepository) GetAll(ctx context.Context, filter PrintJobFilter) ([]models.PrintJob, int, error) {
|
||||
conditions := []string{"pj.deleted_at IS NULL"}
|
||||
args := []interface{}{}
|
||||
argIdx := 1
|
||||
|
||||
if filter.Status != "" {
|
||||
conditions = append(conditions, fmt.Sprintf("LOWER(js.name) = LOWER($%d)", argIdx))
|
||||
args = append(args, filter.Status)
|
||||
argIdx++
|
||||
}
|
||||
if filter.PrinterID != nil {
|
||||
conditions = append(conditions, fmt.Sprintf("pj.printer_id = $%d", argIdx))
|
||||
args = append(args, *filter.PrinterID)
|
||||
argIdx++
|
||||
}
|
||||
|
||||
whereClause := ""
|
||||
if len(conditions) > 0 {
|
||||
whereClause = "WHERE " + strings.Join(conditions, " AND ")
|
||||
}
|
||||
|
||||
// Count.
|
||||
var total int
|
||||
countQuery := `SELECT COUNT(*)
|
||||
FROM print_jobs pj
|
||||
LEFT JOIN job_statuses js ON pj.job_status_id = js.id
|
||||
` + " " + whereClause
|
||||
if err := r.pool.QueryRow(ctx, countQuery, args...).Scan(&total); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
// Query with pagination.
|
||||
dataQuery := `SELECT
|
||||
pj.id, pj.printer_id, pj.filament_spool_id,
|
||||
pj.job_name, pj.file_name,
|
||||
pj.job_status_id,
|
||||
pj.started_at, pj.completed_at,
|
||||
pj.duration_seconds, pj.estimated_duration_seconds,
|
||||
pj.total_mm_extruded, pj.total_grams_used, pj.total_cost_usd,
|
||||
pj.notes,
|
||||
pj.deleted_at, pj.created_at, pj.updated_at,
|
||||
js.id, js.name,
|
||||
js.created_at, js.updated_at
|
||||
FROM print_jobs pj
|
||||
LEFT JOIN job_statuses js ON pj.job_status_id = js.id
|
||||
` + whereClause +
|
||||
" ORDER BY pj.created_at DESC" +
|
||||
fmt.Sprintf(" LIMIT $%d OFFSET $%d", argIdx, argIdx+1)
|
||||
|
||||
dataArgs := make([]interface{}, len(args))
|
||||
copy(dataArgs, args)
|
||||
dataArgs = append(dataArgs, filter.Limit, filter.Offset)
|
||||
|
||||
rows, err := r.pool.Query(ctx, dataQuery, dataArgs...)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var jobs []models.PrintJob
|
||||
for rows.Next() {
|
||||
pj, err := r.scanPrintJobWithJoins(rows)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
jobs = append(jobs, pj)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if jobs == nil {
|
||||
jobs = []models.PrintJob{}
|
||||
}
|
||||
|
||||
return jobs, total, nil
|
||||
}
|
||||
|
||||
// GetByID returns a single print job by ID with JOINed job_status.
// When no live (non-soft-deleted) row matches, the QueryRow scan error is
// propagated to the caller; the column order must match
// scanPrintJobWithJoins exactly.
func (r *PrintJobRepository) GetByID(ctx context.Context, id int) (*models.PrintJob, error) {
	row := r.pool.QueryRow(ctx, `
		SELECT
		pj.id, pj.printer_id, pj.filament_spool_id,
		pj.job_name, pj.file_name,
		pj.job_status_id,
		pj.started_at, pj.completed_at,
		pj.duration_seconds, pj.estimated_duration_seconds,
		pj.total_mm_extruded, pj.total_grams_used, pj.total_cost_usd,
		pj.notes,
		pj.deleted_at, pj.created_at, pj.updated_at,
		js.id, js.name,
		js.created_at, js.updated_at
		FROM print_jobs pj
		LEFT JOIN job_statuses js ON pj.job_status_id = js.id
		WHERE pj.id = $1 AND pj.deleted_at IS NULL
	`, id)

	pj, err := r.scanPrintJobWithJoins(row)
	if err != nil {
		return nil, err
	}
	return &pj, nil
}
|
||||
78
backend/internal/repositories/printer_repository.go
Normal file
78
backend/internal/repositories/printer_repository.go
Normal file
@@ -0,0 +1,78 @@
|
||||
package repositories
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/CubeCraft-Creations/Extrudex/backend/internal/models"
|
||||
"github.com/jackc/pgx/v5/pgxpool"
|
||||
)
|
||||
|
||||
// PrinterRepository handles database queries for printers.
type PrinterRepository struct {
	// pool is the shared PostgreSQL connection pool all queries run against.
	pool *pgxpool.Pool
}
|
||||
|
||||
// NewPrinterRepository creates a PrinterRepository backed by the given pool.
|
||||
func NewPrinterRepository(pool *pgxpool.Pool) *PrinterRepository {
|
||||
return &PrinterRepository{pool: pool}
|
||||
}
|
||||
|
||||
// scanPrinterWithType scans a printer row with JOINed printer_type.
// The Scan argument order must match the SELECT column order in GetAll
// exactly — change them together.
func (r *PrinterRepository) scanPrinterWithType(row interface{ Scan(...interface{}) error }) (models.Printer, error) {
	var p models.Printer
	var pt models.PrinterType

	err := row.Scan(
		&p.ID, &p.Name, &p.PrinterTypeID,
		&p.Manufacturer, &p.Model,
		&p.MoonrakerURL, &p.MoonrakerAPIKey,
		&p.MQTTBrokerHost, &p.MQTTTopicPrefix,
		&p.MQTTTLSEnabled, &p.IsActive,
		&p.CreatedAt, &p.UpdatedAt,
		&pt.ID, &pt.Name,
		&pt.CreatedAt, &pt.UpdatedAt,
	)
	if err != nil {
		return p, err
	}

	// Attach the joined printer type (inner JOIN in GetAll, so always present).
	p.PrinterType = &pt
	return p, nil
}
|
||||
|
||||
// GetAll returns all printers joined with their printer_type, ordered by name.
|
||||
func (r *PrinterRepository) GetAll(ctx context.Context) ([]models.Printer, error) {
|
||||
rows, err := r.pool.Query(ctx, `
|
||||
SELECT p.id, p.name, p.printer_type_id,
|
||||
p.manufacturer, p.model,
|
||||
p.moonraker_url, p.moonraker_api_key,
|
||||
p.mqtt_broker_host, p.mqtt_topic_prefix,
|
||||
p.mqtt_tls_enabled, p.is_active,
|
||||
p.created_at, p.updated_at,
|
||||
pt.id, pt.name,
|
||||
pt.created_at, pt.updated_at
|
||||
FROM printers p
|
||||
JOIN printer_types pt ON p.printer_type_id = pt.id
|
||||
ORDER BY p.name
|
||||
`)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var printers []models.Printer
|
||||
for rows.Next() {
|
||||
p, err := r.scanPrinterWithType(rows)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
printers = append(printers, p)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if printers == nil {
|
||||
printers = []models.Printer{}
|
||||
}
|
||||
return printers, nil
|
||||
}
|
||||
96
backend/internal/repositories/usage_log_repository.go
Normal file
96
backend/internal/repositories/usage_log_repository.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package repositories
|
||||
|
||||
import (
	"context"
	"fmt"
	"strings"

	"github.com/CubeCraft-Creations/Extrudex/backend/internal/models"
	"github.com/jackc/pgx/v5/pgxpool"
)
|
||||
|
||||
// UsageLogRepository handles database queries for usage_logs.
type UsageLogRepository struct {
	// pool is the shared PostgreSQL connection pool all queries run against.
	pool *pgxpool.Pool
}
|
||||
|
||||
// NewUsageLogRepository creates a UsageLogRepository backed by the given pool.
|
||||
func NewUsageLogRepository(pool *pgxpool.Pool) *UsageLogRepository {
|
||||
return &UsageLogRepository{pool: pool}
|
||||
}
|
||||
|
||||
// UsageLogFilter holds query parameters for listing usage logs.
// Nil pointer fields mean "no filter".
type UsageLogFilter struct {
	SpoolID *int // filter by filament_spool_id; nil means all spools
	JobID   *int // filter by print_job_id; nil means all jobs
	Limit   int  // maximum number of rows to return (SQL LIMIT)
	Offset  int  // number of rows to skip (SQL OFFSET)
}
|
||||
|
||||
// GetAll returns usage logs matching the given filters, with pagination.
|
||||
func (r *UsageLogRepository) GetAll(ctx context.Context, filter UsageLogFilter) ([]models.UsageLog, int, error) {
|
||||
conditions := []string{"1=1"}
|
||||
args := []interface{}{}
|
||||
argIdx := 1
|
||||
|
||||
if filter.SpoolID != nil {
|
||||
conditions = append(conditions, fmt.Sprintf("ul.filament_spool_id = $%d", argIdx))
|
||||
args = append(args, *filter.SpoolID)
|
||||
argIdx++
|
||||
}
|
||||
if filter.JobID != nil {
|
||||
conditions = append(conditions, fmt.Sprintf("ul.print_job_id = $%d", argIdx))
|
||||
args = append(args, *filter.JobID)
|
||||
argIdx++
|
||||
}
|
||||
|
||||
whereClause := "WHERE " + fmt.Sprintf("%s", conditions[0])
|
||||
for _, c := range conditions[1:] {
|
||||
whereClause += " AND " + c
|
||||
}
|
||||
|
||||
// Count.
|
||||
var total int
|
||||
countQuery := "SELECT COUNT(*) FROM usage_logs ul " + whereClause
|
||||
if err := r.pool.QueryRow(ctx, countQuery, args...).Scan(&total); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
// Query with pagination.
|
||||
dataQuery := `SELECT id, print_job_id, filament_spool_id, mm_extruded,
|
||||
grams_used, cost_usd, logged_at, created_at
|
||||
FROM usage_logs ul
|
||||
` + whereClause +
|
||||
" ORDER BY ul.logged_at DESC" +
|
||||
fmt.Sprintf(" LIMIT $%d OFFSET $%d", argIdx, argIdx+1)
|
||||
|
||||
dataArgs := make([]interface{}, len(args))
|
||||
copy(dataArgs, args)
|
||||
dataArgs = append(dataArgs, filter.Limit, filter.Offset)
|
||||
|
||||
rows, err := r.pool.Query(ctx, dataQuery, dataArgs...)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
var logs []models.UsageLog
|
||||
for rows.Next() {
|
||||
var l models.UsageLog
|
||||
if err := rows.Scan(
|
||||
&l.ID, &l.PrintJobID, &l.FilamentSpoolID,
|
||||
&l.MMExtruded, &l.GramsUsed, &l.CostUSD,
|
||||
&l.LoggedAt, &l.CreatedAt,
|
||||
); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
logs = append(logs, l)
|
||||
}
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if logs == nil {
|
||||
logs = []models.UsageLog{}
|
||||
}
|
||||
|
||||
return logs, total, nil
|
||||
}
|
||||
Reference in New Issue
Block a user