feat: design db, configure Docker compose to spin up Postgres

Anton Shubin 2024-01-22 23:18:45 +08:00
commit 598fef10b7
11 changed files with 403 additions and 0 deletions

4
.env.example Normal file

@@ -0,0 +1,4 @@
DB_HOST=localhost
DB_USER=gb-light
DB_NAME=gb-light
DB_PASS=localPassword

25
.gitignore vendored Normal file

@@ -0,0 +1,25 @@
# OS
.DS_Store
# IDE
.idea
.vscode
# Configs
.env
.env.*
!.env.example
# Packages
node_modules
# Artifacts
/build
/.svelte-kit
/package
vite.config.js.timestamp-*
vite.config.ts.timestamp-*
/.volumes
# Logs
*.log

34
README.md Normal file

@@ -0,0 +1,34 @@
# GB-Lux
This is a prototype/draft of a light control system.
At the moment it is at the database design stage.
## Prerequisites
- [Docker](https://docs.docker.com/get-docker/)
- [Bun](https://bun.sh/)
## Installation
1. Clone the repository
2. Install dependencies
```bash
bun install
```
3. Copy `.env.example` to `.env` and fill in the values or leave them as is:
```bash
cp .env.example .env
```
4. Spin up the database:
```bash
docker compose -f dev.compose.yml up
```
5. Apply database migrations:
```bash
bun db:migrate
```
6. Now you can connect to the database with your favorite client (e.g. [DBeaver](https://dbeaver.io/)) using the credentials from the `.env` file.
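Alternatively, you can check the connection straight from the terminal (a sketch assuming the default values from `.env.example` and the `postgres` container name from `dev.compose.yml`):

```bash
# run a one-off query inside the running container to confirm the server responds
docker exec -it postgres psql -U gb-light -d gb-light -c 'SELECT version();'
```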

BIN
bun.lockb Executable file

Binary file not shown.

28
dev.compose.yml Normal file

@@ -0,0 +1,28 @@
version: '3.8'

services:
  postgres:
    container_name: postgres
    image: postgres:15-alpine
    restart: unless-stopped
    volumes:
      - ./.volumes/postgres:/var/lib/postgresql/data
    environment:
      - PGUSER=${DB_USER}
      - POSTGRES_USER=${DB_USER}
      - POSTGRES_PASSWORD=${DB_PASS}
      - POSTGRES_DB=${DB_NAME}
    ports:
      - 5432:5432
    deploy:
      resources:
        limits:
          cpus: '0.5'
          memory: 512M
    healthcheck:
      test: ["CMD", "pg_isready", "-U", "${DB_USER}"]
      interval: 1m
      timeout: 10s
      retries: 3
      start_period: 40s
    security_opt:
      - no-new-privileges:true
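With `PGUSER` set in the container, the `pg_isready` healthcheck above feeds Docker's health state. One quick way to verify it from the host (a sketch using standard Docker CLI commands, not part of this commit):

```bash
# start the database in the background
docker compose -f dev.compose.yml up -d

# after the 40s start period, the healthcheck should report "healthy"
docker inspect --format '{{.State.Health.Status}}' postgres
```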

21
package.json Normal file

@@ -0,0 +1,21 @@
{
  "name": "gb-lux",
  "version": "0.0.1",
  "private": true,
  "type": "module",
  "scripts": {
    "dev": "docker compose -f dev.compose.yml up",
    "dev:docker:build": "docker compose -f dev.compose.yml build",
    "dev:docker:clean": "docker rmi $(docker images -f \"dangling=true\" -q)",
    "db:migrate": "bun ./sql/migrate.js",
    "db:purge": "bun ./sql/purge.js",
    "db:seed": "bun ./sql/seed.js"
  },
  "dependencies": {
    "postgres": "3.4.3"
  },
  "devDependencies": {
    "dotenv": "16.3.2",
    "@types/bun": "latest"
  }
}

7
sql/lib/db.js Normal file

@@ -0,0 +1,7 @@
import postgres from 'postgres';
import { env } from './env.js';

export const sql = postgres({
  ...env.db,
  transform: postgres.camel,
});

14
sql/lib/env.js Normal file

@@ -0,0 +1,14 @@
import { config } from 'dotenv';

const isProd = process.env.NODE_ENV === 'prod' || process.env.NODE_ENV === 'production';

config({ path: isProd ? '.env.prod' : '.env' });

export const env = {
  isProd,
  db: {
    host: process.env.DB_HOST,
    user: process.env.DB_USER,
    pass: process.env.DB_PASS,
    db: process.env.DB_NAME,
  },
};

77
sql/migrate.js Normal file

@@ -0,0 +1,77 @@
import { readdirSync, readFileSync } from 'fs';
import { extname, join } from 'path';
import { sql } from './lib/db.js';

const migrationsFolder = './sql/migrations';
const fileExtension = '.sql';

async function applyMigrations() {
  try {
    // check if migrations table exists
    const tableExists =
      await sql`SELECT exists (SELECT FROM information_schema.tables WHERE table_name = 'migrations')`;

    // create migrations table if it does not exist
    if (!tableExists[0].exists) {
      await sql`
        CREATE TABLE migrations (
          id SERIAL PRIMARY KEY,
          name VARCHAR(100) NOT NULL,
          created_at TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP
        )
      `;
      console.log('Migrations table was created.\n');
    }
  } catch (err) {
    console.error('❌ Failed to create migrations table', err);
    process.exit(1);
  }

  let migrations = [];
  try {
    // get list of migrations from database
    const migrationsResult = await sql`SELECT name FROM migrations`;
    migrations = migrationsResult.map(({ name }) => name);
  } catch (err) {
    console.error('❌ Failed to get migrations from database', err);
    process.exit(1);
  }

  try {
    // read directory and get .sql files
    const files = readdirSync(migrationsFolder);
    const migrationsToApply = files
      .filter(file => extname(file) === fileExtension)
      .map(file => file.replace(fileExtension, ''))
      .filter(file => !migrations.includes(file))
      .sort();

    if (!migrationsToApply.length) {
      console.log('✅ No new migrations to apply.');
      process.exit(0);
    }

    console.log(`Applying ${migrationsToApply.length} migration(s):`);
    for (let migration of migrationsToApply) {
      console.log('- ' + migration);
      // read SQL file
      const sqlScript = readFileSync(join(migrationsFolder, migration + fileExtension), 'utf8');
      // execute the SQL script
      await sql.begin(async tx => {
        await tx.unsafe(sqlScript);
        // record that this migration has been run
        await tx`INSERT INTO migrations (name) VALUES (${migration})`;
      });
    }

    console.log('\n✅ Migrations successfully applied.');
    process.exit(0);
  } catch (err) {
    console.error('\n❌ Failed to apply migrations\n', err);
    process.exit(1);
  }
}

void applyMigrations();
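Because `migrate.js` discovers `.sql` files in `sql/migrations` and applies the new ones in sorted order, each inside a transaction, adding a migration is just creating a file whose name sorts after the existing ones. A sketch (the filename and its contents are hypothetical examples):

```bash
# create a new migration; a sortable prefix keeps the application order stable
cat > sql/migrations/0002_add_devices_uuid_index.sql <<'SQL'
-- hypothetical example: unique index on devices.uuid
CREATE UNIQUE INDEX devices_uuid_idx ON devices (uuid);
SQL

# applies every .sql file not yet recorded in the migrations table
bun db:migrate
```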


@@ -0,0 +1,148 @@
CREATE TABLE organisations
(
    id         SERIAL PRIMARY KEY,
    name       VARCHAR(50) NOT NULL,
    role       INT2 NOT NULL DEFAULT 0, -- 0 - client, 1 - Yume, maybe more roles in the future
    created_at TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP
);

CREATE TABLE users
(
    id              SERIAL PRIMARY KEY,
    organisation_id INT4 NOT NULL,
    permission      INT2,
    email           VARCHAR(50),
    first_name      VARCHAR(50),
    last_name       VARCHAR(50),
    photo_url       VARCHAR(200),
    created_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (organisation_id) REFERENCES organisations (id) ON DELETE CASCADE ON UPDATE CASCADE
);

CREATE TABLE keys
(
    id             SERIAL PRIMARY KEY,
    user_id        INT4 NOT NULL,
    kind           INT2 NOT NULL,
    identification VARCHAR(50) NOT NULL,
    secret         VARCHAR(60),
    created_at     TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at     TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (user_id) REFERENCES users (id) ON DELETE CASCADE ON UPDATE CASCADE
);

CREATE TABLE sessions
(
    id         SERIAL PRIMARY KEY,
    token      VARCHAR(32) NOT NULL,
    user_id    INT4 NOT NULL,
    key_id     INT4 NOT NULL,
    expires_at TIMESTAMP(3),
    created_at TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (user_id) REFERENCES users (id) ON DELETE CASCADE ON UPDATE CASCADE,
    FOREIGN KEY (key_id) REFERENCES keys (id) ON DELETE CASCADE ON UPDATE CASCADE
);

CREATE TABLE projects
(
    id              SERIAL PRIMARY KEY,
    organisation_id INT4 NOT NULL,
    name            VARCHAR(50) NOT NULL,
    created_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (organisation_id) REFERENCES organisations (id) ON DELETE CASCADE ON UPDATE CASCADE
);

CREATE TABLE device_groups
(
    id            SERIAL PRIMARY KEY,
    project_id    INT4 NOT NULL,
    name          VARCHAR(50) NOT NULL,
    force_status  INT2 NOT NULL DEFAULT 0, -- 0 - no force, 1 - force on, 2 - force off
    lux_threshold INT2, -- Triggers the light if the lux level is below this threshold
    created_at    TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at    TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (project_id) REFERENCES projects (id) ON DELETE CASCADE ON UPDATE CASCADE
);

CREATE TABLE device_group_daily_schedule
(
    id              SERIAL PRIMARY KEY,
    device_group_id INT4 NOT NULL,
    day_of_week     INT2 NOT NULL, -- 0 - Sunday, 1 - Monday, 2 - Tuesday, 3 - Wednesday, 4 - Thursday, 5 - Friday, 6 - Saturday
    start_time      TIME NOT NULL,
    end_time        TIME NOT NULL,
    created_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (device_group_id) REFERENCES device_groups (id) ON DELETE CASCADE ON UPDATE CASCADE
);

-- We might need to separate lights, gateways and sensors into different tables. For now, let's discuss it.
CREATE TABLE devices
(
    id              SERIAL PRIMARY KEY,
    uuid            VARCHAR(36) NOT NULL,
    gbtb_id         VARCHAR(50) NOT NULL, -- QUESTION: What type is gbtb_id? Is it a string or an integer?
    device_group_id INT4 NOT NULL,
    name            VARCHAR(50) NOT NULL,
    lamppost_number VARCHAR(50), -- QUESTION: What type is lamppost_number? Is it a string or an integer?
    started_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, -- for calculating the uptime/running hours
    lux_level       INT2,
    role            INT2 NOT NULL DEFAULT 0, -- 0 - light, 1 - motion sensor, 2 - gateway, maybe more roles in the future
    -- geolocation. We could use the PostGIS extension for this, but it's not necessary for now
    latitude        FLOAT4,
    longitude       FLOAT4,
    altitude        FLOAT4,
    status          INT2 NOT NULL DEFAULT 0, -- 0 - off, 1 - on, maybe more statuses in the future
    created_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (device_group_id) REFERENCES device_groups (id) ON DELETE CASCADE ON UPDATE CASCADE
);

CREATE TABLE measures
(
    id           SERIAL PRIMARY KEY,
    device_id    INT4 NOT NULL,
    measure_type INT2 NOT NULL DEFAULT 0, -- 0 - temperature, 1 - humidity, 2 - light, 3 - motion, 4 - voltage, 5 - power consumption, maybe more in the future
    value        INT4 NOT NULL,
    created_at   TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at   TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (device_id) REFERENCES devices (id) ON DELETE CASCADE ON UPDATE CASCADE
);

CREATE TABLE commands
(
    id              SERIAL PRIMARY KEY,
    organisation_id INT4 NOT NULL,
    originator_id   INT4,
    originator_type INT2 NOT NULL DEFAULT 0, -- 0 - user, 1 - device, 2 - device group, 3 - organisation, maybe more in the future
    command_type    INT2 NOT NULL, -- 0 - on, 1 - off, 2 - force on, 3 - force off, 4 - reboot, 5 - reset, etc.
    comment         VARCHAR(100),
    entity          INT4 NOT NULL,
    entity_type     INT2 NOT NULL DEFAULT 0, -- 0 - device, 1 - device group, 2 - organisation, 3 - user, maybe more in the future
    scheduled_at    TIMESTAMP(3), -- NULL if the command is executed immediately
    executed_at     TIMESTAMP(3), -- NULL if the command is not executed yet
    created_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (organisation_id) REFERENCES organisations (id) ON DELETE CASCADE ON UPDATE CASCADE
    -- No other FKs, because we don't want to delete the history when the entity is deleted
);

CREATE TABLE alerts
(
    id              SERIAL PRIMARY KEY,
    organisation_id INT4 NOT NULL,
    alert_type      INT2 NOT NULL, -- 0 - temperature, 1 - humidity, 2 - light, 3 - motion, 4 - voltage, 5 - power consumption, maybe more in the future
    entity          INT4 NOT NULL,
    entity_type     INT2 NOT NULL DEFAULT 0, -- 0 - device, 1 - device group, 2 - organisation, 3 - user, maybe more in the future
    created_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at      TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    FOREIGN KEY (organisation_id) REFERENCES organisations (id) ON DELETE CASCADE ON UPDATE CASCADE
    -- No other FKs, because we don't want to delete the history when the entity is deleted
);
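The `force_status`, schedule, and threshold comments above imply how runtime state would be derived. As an illustration only (not part of this commit, ignoring `lux_threshold` and overnight schedule ranges), a query for "which device groups should be lit right now" might look like:

```bash
# hypothetical sketch: list device groups that should currently be on,
# honouring force_status (1 = force on, 0 = follow today's schedule)
docker exec -it postgres psql -U gb-light -d gb-light -c "
SELECT dg.id, dg.name
FROM device_groups dg
WHERE dg.force_status = 1
   OR (dg.force_status = 0 AND EXISTS (
         SELECT 1
         FROM device_group_daily_schedule s
         WHERE s.device_group_id = dg.id
           AND s.day_of_week = EXTRACT(DOW FROM CURRENT_DATE)
           AND LOCALTIME BETWEEN s.start_time AND s.end_time));
"
```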

45
sql/purge.js Normal file

@@ -0,0 +1,45 @@
import { env } from './lib/env.js';
import { sql } from './lib/db.js';

/**
 * Drops all tables from the database.
 * Aborts when NODE_ENV is 'prod' or 'production' unless the --prod flag is passed.
 */
async function purge() {
  if (env.isProd && !process.argv.includes('--prod')) {
    console.error('❌ Cannot purge database in production');
    process.exit(1);
  }

  let tables = [];
  try {
    console.log('Fetching table names...');
    const result = await sql`
      SELECT table_name
      FROM information_schema.tables
      WHERE table_schema = 'public'
        AND table_type = 'BASE TABLE'
    `;
    tables = result.map(({ tableName }) => tableName);
  } catch (err) {
    console.error('❌ Failed to fetch table names', err);
    process.exit(1);
  }

  if (!tables.length) {
    console.log('\n✅ No tables to purge.');
    process.exit(0);
  }

  try {
    console.log(`Purging ${tables.length} tables:`);
    for (let table of tables) {
      console.log('- ' + table);
      await sql`DROP TABLE ${sql(table)} CASCADE`;
    }
    console.log('\n✅ Database purged.');
    process.exit(0);
  } catch (err) {
    console.error('\n❌ Failed to purge database\n', err);
    process.exit(1);
  }
}

void purge();
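Given the guard above, a plain purge is refused when `NODE_ENV` is `prod` or `production`; purging a production database requires the explicit flag. A usage sketch:

```bash
# drop all tables in the public schema (refused in production)
bun db:purge

# explicitly allow a production purge by passing --prod to the script
bun ./sql/purge.js --prod
```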