mirror of https://github.com/nicotsx/ironmount.git
synced 2025-12-10 12:10:51 +01:00
refactor: unify backend and frontend servers (#3)
* refactor: unify backend and frontend servers
* refactor: correct paths for openapi & drizzle
* refactor: move api-client to client
* fix: drizzle paths
* chore: fix linting issues
* fix: form reset issue
73 app/server/core/capabilities.ts Normal file
@@ -0,0 +1,73 @@
import * as fs from "node:fs/promises";
import Docker from "dockerode";
import { logger } from "../utils/logger";

export type SystemCapabilities = {
  docker: boolean;
  rclone: boolean;
};

let capabilitiesPromise: Promise<SystemCapabilities> | null = null;

/**
 * Returns the current system capabilities.
 * On first call, detects all capabilities and caches the promise.
 * Subsequent calls return the same cached promise, ensuring detection only happens once.
 */
export async function getCapabilities(): Promise<SystemCapabilities> {
  if (capabilitiesPromise === null) {
    // Start detection and cache the promise
    capabilitiesPromise = detectCapabilities();
  }

  return capabilitiesPromise;
}

/**
 * Detects which optional capabilities are available in the current environment
 */
async function detectCapabilities(): Promise<SystemCapabilities> {
  return {
    docker: await detectDocker(),
    rclone: await detectRclone(),
  };
}

/**
 * Checks if Docker is available by:
 * 1. Checking if /var/run/docker.sock exists and is accessible
 * 2. Attempting to ping the Docker daemon
 */
async function detectDocker(): Promise<boolean> {
  try {
    await fs.access("/var/run/docker.sock");

    const docker = new Docker();
    await docker.ping();

    logger.info("Docker capability: enabled");
    return true;
  } catch (_) {
    logger.warn(
      "Docker capability: disabled. " +
        "To enable: mount /var/run/docker.sock and /run/docker/plugins in docker-compose.yml",
    );
    return false;
  }
}

/**
 * Checks if rclone is available by:
 * 1. Checking if /root/.config/rclone directory exists and is accessible
 */
async function detectRclone(): Promise<boolean> {
  try {
    await fs.access("/root/.config/rclone");

    logger.info("rclone capability: enabled");
    return true;
  } catch (_) {
    logger.warn("rclone capability: disabled. " + "To enable: mount /root/.config/rclone in docker-compose.yml");
    return false;
  }
}
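A minimal usage sketch (the call site below is hypothetical; only getCapabilities comes from the file above). Because the promise itself is cached, concurrent callers share a single detection pass:

import { getCapabilities } from "./core/capabilities";

// Hypothetical call site: first call runs detection, later calls reuse the promise.
async function maybeStartDockerIntegration() {
  const { docker } = await getCapabilities();
  if (!docker) {
    return; // skip Docker-only features when the socket is unavailable
  }
  // ...start the volume plugin server, etc.
}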
23 app/server/core/config.ts Normal file
@@ -0,0 +1,23 @@
import { type } from "arktype";
import "dotenv/config";

const envSchema = type({
  NODE_ENV: type.enumerated("development", "production", "test").default("development"),
  SESSION_SECRET: "string?",
}).pipe((s) => ({
  __prod__: s.NODE_ENV === "production",
  environment: s.NODE_ENV,
  sessionSecret: s.SESSION_SECRET || "change-me-in-production-please",
}));

const parseConfig = (env: unknown) => {
  const result = envSchema(env);

  if (result instanceof type.errors) {
    throw new Error(`Invalid environment variables: ${result.toString()}`);
  }

  return result;
};

export const config = parseConfig(process.env);
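For illustration, consuming the parsed config looks like this (a sketch; the log line is invented). The .pipe() step means derived fields sit alongside the raw values:

import { config } from "./core/config";

// Derived fields from the pipe step:
if (!config.__prod__) {
  console.log(`Running in ${config.environment} mode`);
}
// An unknown NODE_ENV (e.g. "staging") fails the enumerated type,
// so parseConfig throws at import time rather than at first use.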
6 app/server/core/constants.ts Normal file
@@ -0,0 +1,6 @@
export const OPERATION_TIMEOUT = 5000;
export const VOLUME_MOUNT_BASE = "/var/lib/ironmount/volumes";
export const REPOSITORY_BASE = "/var/lib/ironmount/repositories";
export const DATABASE_URL = "/var/lib/ironmount/data/ironmount.db";
export const RESTIC_PASS_FILE = "/var/lib/ironmount/data/restic.pass";
export const SOCKET_PATH = "/run/docker/plugins/ironmount.sock";
37 app/server/core/events.ts Normal file
@@ -0,0 +1,37 @@
import { EventEmitter } from "node:events";
import type { TypedEmitter } from "tiny-typed-emitter";

/**
 * Event payloads for the SSE system
 */
interface ServerEvents {
  "backup:started": (data: { scheduleId: number; volumeName: string; repositoryName: string }) => void;
  "backup:progress": (data: {
    scheduleId: number;
    volumeName: string;
    repositoryName: string;
    seconds_elapsed: number;
    percent_done: number;
    total_files: number;
    files_done: number;
    total_bytes: number;
    bytes_done: number;
    current_files: string[];
  }) => void;
  "backup:completed": (data: {
    scheduleId: number;
    volumeName: string;
    repositoryName: string;
    status: "success" | "error" | "stopped";
  }) => void;
  "volume:mounted": (data: { volumeName: string }) => void;
  "volume:unmounted": (data: { volumeName: string }) => void;
  "volume:updated": (data: { volumeName: string }) => void;
  "volume:status_changed": (data: { volumeName: string; status: string }) => void;
}

/**
 * Global event emitter for server-side events
 * Use this to emit events that should be broadcasted to connected clients via SSE
 */
export const serverEvents = new EventEmitter() as TypedEmitter<ServerEvents>;
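A short sketch of how the typed emitter is used elsewhere (listener and emit call are illustrative):

import { serverEvents } from "./core/events";

// Listener and payload are both checked against ServerEvents at compile time.
serverEvents.on("backup:completed", ({ volumeName, status }) => {
  console.log(`Backup of ${volumeName} finished: ${status}`);
});

// Emitting with a wrong payload shape is a type error, not a runtime surprise.
serverEvents.emit("volume:mounted", { volumeName: "media" });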
44 app/server/core/scheduler.ts Normal file
@@ -0,0 +1,44 @@
import cron, { type ScheduledTask } from "node-cron";
import { logger } from "../utils/logger";

export abstract class Job {
  abstract run(): Promise<unknown>;
}

type JobConstructor = new () => Job;

class SchedulerClass {
  private tasks: ScheduledTask[] = [];

  async start() {
    logger.info("Scheduler started");
  }

  build(JobClass: JobConstructor) {
    const job = new JobClass();
    return {
      schedule: (cronExpression: string) => {
        const task = cron.schedule(cronExpression, async () => {
          try {
            await job.run();
          } catch (error) {
            logger.error(`Job ${JobClass.name} failed:`, error);
          }
        });

        this.tasks.push(task);
        logger.info(`Scheduled job ${JobClass.name} with cron: ${cronExpression}`);
      },
    };
  }

  async stop() {
    for (const task of this.tasks) {
      task.stop();
    }
    this.tasks = [];
    logger.info("Scheduler stopped");
  }
}

export const Scheduler = new SchedulerClass();
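Registering a job goes through the builder; a minimal sketch (PingJob and the cron expression are hypothetical):

import { Job, Scheduler } from "./core/scheduler";

// Hypothetical job used only for illustration.
class PingJob extends Job {
  async run() {
    console.log("ping");
  }
}

await Scheduler.start();
Scheduler.build(PingJob).schedule("*/5 * * * *"); // run every five minutes

Note the builder instantiates the job once and reuses it across ticks, and the wrapper catches per-run errors so one failure does not unschedule the task.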
26 app/server/db/db.ts Normal file
@@ -0,0 +1,26 @@
import "dotenv/config";
import { Database } from "bun:sqlite";
import path from "node:path";
import { drizzle } from "drizzle-orm/bun-sqlite";
import { migrate } from "drizzle-orm/bun-sqlite/migrator";
import { DATABASE_URL } from "../core/constants";
import * as schema from "./schema";
import fs from "node:fs/promises";

await fs.mkdir(path.dirname(DATABASE_URL), { recursive: true });

const sqlite = new Database(DATABASE_URL);
sqlite.run("PRAGMA foreign_keys = ON;");

export const db = drizzle({ client: sqlite, schema });

export const runDbMigrations = () => {
  let migrationsFolder = path.join("/app", "assets", "migrations");

  const { NODE_ENV } = process.env;
  if (NODE_ENV !== "production") {
    migrationsFolder = path.join("/app", "app", "drizzle");
  }

  migrate(db, { migrationsFolder });
};
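A minimal query sketch against the shared connection (the query itself is illustrative; table and column come from the schema below):

import { eq } from "drizzle-orm";
import { db } from "./db/db";
import { volumesTable } from "./db/schema";

// Fully typed select; foreign-key enforcement is already enabled above.
const mounted = await db.select().from(volumesTable).where(eq(volumesTable.status, "mounted"));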
103 app/server/db/schema.ts Normal file
@@ -0,0 +1,103 @@
import { relations, sql } from "drizzle-orm";
import { int, integer, sqliteTable, text } from "drizzle-orm/sqlite-core";
import type { CompressionMode, RepositoryBackend, repositoryConfigSchema, RepositoryStatus } from "~/schemas/restic";
import type { BackendStatus, BackendType, volumeConfigSchema } from "~/schemas/volumes";

/**
 * Volumes Table
 */
export const volumesTable = sqliteTable("volumes_table", {
  id: int().primaryKey({ autoIncrement: true }),
  name: text().notNull().unique(),
  type: text().$type<BackendType>().notNull(),
  status: text().$type<BackendStatus>().notNull().default("unmounted"),
  lastError: text("last_error"),
  lastHealthCheck: integer("last_health_check", { mode: "number" }).notNull().default(sql`(unixepoch())`),
  createdAt: integer("created_at", { mode: "number" }).notNull().default(sql`(unixepoch())`),
  updatedAt: integer("updated_at", { mode: "number" }).notNull().default(sql`(unixepoch())`),
  config: text("config", { mode: "json" }).$type<typeof volumeConfigSchema.inferOut>().notNull(),
  autoRemount: int("auto_remount", { mode: "boolean" }).notNull().default(true),
});
export type Volume = typeof volumesTable.$inferSelect;

/**
 * Users Table
 */
export const usersTable = sqliteTable("users_table", {
  id: int().primaryKey({ autoIncrement: true }),
  username: text().notNull().unique(),
  passwordHash: text("password_hash").notNull(),
  hasDownloadedResticPassword: int("has_downloaded_restic_password", { mode: "boolean" }).notNull().default(false),
  createdAt: int("created_at", { mode: "number" }).notNull().default(sql`(unixepoch())`),
  updatedAt: int("updated_at", { mode: "number" }).notNull().default(sql`(unixepoch())`),
});
export type User = typeof usersTable.$inferSelect;

export const sessionsTable = sqliteTable("sessions_table", {
  id: text().primaryKey(),
  userId: int("user_id")
    .notNull()
    .references(() => usersTable.id, { onDelete: "cascade" }),
  expiresAt: int("expires_at", { mode: "number" }).notNull(),
  createdAt: int("created_at", { mode: "number" }).notNull().default(sql`(unixepoch())`),
});
export type Session = typeof sessionsTable.$inferSelect;

/**
 * Repositories Table
 */
export const repositoriesTable = sqliteTable("repositories_table", {
  id: text().primaryKey(),
  name: text().notNull().unique(),
  type: text().$type<RepositoryBackend>().notNull(),
  config: text("config", { mode: "json" }).$type<typeof repositoryConfigSchema.inferOut>().notNull(),
  compressionMode: text("compression_mode").$type<CompressionMode>().default("auto"),
  status: text().$type<RepositoryStatus>().default("unknown"),
  lastChecked: int("last_checked", { mode: "number" }),
  lastError: text("last_error"),
  createdAt: int("created_at", { mode: "number" }).notNull().default(sql`(unixepoch())`),
  updatedAt: int("updated_at", { mode: "number" }).notNull().default(sql`(unixepoch())`),
});
export type Repository = typeof repositoriesTable.$inferSelect;

/**
 * Backup Schedules Table
 */
export const backupSchedulesTable = sqliteTable("backup_schedules_table", {
  id: int().primaryKey({ autoIncrement: true }),
  volumeId: int("volume_id")
    .notNull()
    .references(() => volumesTable.id, { onDelete: "cascade" }),
  repositoryId: text("repository_id")
    .notNull()
    .references(() => repositoriesTable.id, { onDelete: "cascade" }),
  enabled: int("enabled", { mode: "boolean" }).notNull().default(true),
  cronExpression: text("cron_expression").notNull(),
  retentionPolicy: text("retention_policy", { mode: "json" }).$type<{
    keepLast?: number;
    keepHourly?: number;
    keepDaily?: number;
    keepWeekly?: number;
    keepMonthly?: number;
    keepYearly?: number;
    keepWithinDuration?: string;
  }>(),
  excludePatterns: text("exclude_patterns", { mode: "json" }).$type<string[]>().default([]),
  includePatterns: text("include_patterns", { mode: "json" }).$type<string[]>().default([]),
  lastBackupAt: int("last_backup_at", { mode: "number" }),
  lastBackupStatus: text("last_backup_status").$type<"success" | "error" | "in_progress">(),
  lastBackupError: text("last_backup_error"),
  nextBackupAt: int("next_backup_at", { mode: "number" }),
  createdAt: int("created_at", { mode: "number" }).notNull().default(sql`(unixepoch())`),
  updatedAt: int("updated_at", { mode: "number" }).notNull().default(sql`(unixepoch())`),
});
export const backupScheduleRelations = relations(backupSchedulesTable, ({ one }) => ({
  volume: one(volumesTable, {
    fields: [backupSchedulesTable.volumeId],
    references: [volumesTable.id],
  }),
  repository: one(repositoriesTable, {
    fields: [backupSchedulesTable.repositoryId],
    references: [repositoriesTable.id],
  }),
}));
export type BackupSchedule = typeof backupSchedulesTable.$inferSelect;
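An insert sketch showing the column defaults in action. The NFS config object here is hypothetical; its real shape is defined by volumeConfigSchema in ~/schemas/volumes, which is not part of this commit view:

import { db } from "./db/db";
import { volumesTable } from "./db/schema";

// Hypothetical backend config; field names mirror what the NFS backend reads below.
const config = { backend: "nfs", server: "10.0.0.5", exportPath: "/export/media", port: 2049, version: "4.1" };

// Unset columns fall back to their defaults: status "unmounted",
// unixepoch timestamps, autoRemount true.
const [created] = await db
  .insert(volumesTable)
  .values({ name: "media", type: "nfs", config: config as (typeof volumesTable.$inferInsert)["config"] })
  .returning();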
104 app/server/index.ts Normal file
@@ -0,0 +1,104 @@
import { createHonoServer } from "react-router-hono-server/bun";
import * as fs from "node:fs/promises";
import { Scalar } from "@scalar/hono-api-reference";
import { Hono } from "hono";
import { logger as honoLogger } from "hono/logger";
import { openAPIRouteHandler } from "hono-openapi";
import { getCapabilities } from "./core/capabilities";
import { runDbMigrations } from "./db/db";
import { authController } from "./modules/auth/auth.controller";
import { requireAuth } from "./modules/auth/auth.middleware";
import { driverController } from "./modules/driver/driver.controller";
import { startup } from "./modules/lifecycle/startup";
import { repositoriesController } from "./modules/repositories/repositories.controller";
import { systemController } from "./modules/system/system.controller";
import { volumeController } from "./modules/volumes/volume.controller";
import { backupScheduleController } from "./modules/backups/backups.controller";
import { eventsController } from "./modules/events/events.controller";
import { handleServiceError } from "./utils/errors";
import { logger } from "./utils/logger";
import { shutdown } from "./modules/lifecycle/shutdown";
import { SOCKET_PATH } from "./core/constants";

export const generalDescriptor = (app: Hono) =>
  openAPIRouteHandler(app, {
    documentation: {
      info: {
        title: "Ironmount API",
        version: "1.0.0",
        description: "API for managing volumes",
      },
      servers: [{ url: "http://192.168.2.42:4096", description: "Development Server" }],
    },
  });

export const scalarDescriptor = Scalar({
  title: "Ironmount API Docs",
  pageTitle: "Ironmount API Docs",
  url: "/api/v1/openapi.json",
});

const driver = new Hono().use(honoLogger()).route("/", driverController);
const app = new Hono()
  .use(honoLogger())
  .get("healthcheck", (c) => c.json({ status: "ok" }))
  .route("/api/v1/auth", authController.basePath("/api/v1"))
  .route("/api/v1/volumes", volumeController.use(requireAuth))
  .route("/api/v1/repositories", repositoriesController.use(requireAuth))
  .route("/api/v1/backups", backupScheduleController.use(requireAuth))
  .route("/api/v1/system", systemController.use(requireAuth))
  .route("/api/v1/events", eventsController.use(requireAuth));

app.get("/api/v1/openapi.json", generalDescriptor(app));
app.get("/api/v1/docs", scalarDescriptor);

app.onError((err, c) => {
  logger.error(`${c.req.url}: ${err.message}`);

  if (err.cause instanceof Error) {
    logger.error(err.cause.message);
  }

  const { status, message } = handleServiceError(err);

  return c.json({ message }, status);
});

runDbMigrations();

const { docker } = await getCapabilities();

if (docker) {
  try {
    await fs.mkdir("/run/docker/plugins", { recursive: true });

    Bun.serve({
      unix: SOCKET_PATH,
      fetch: driver.fetch,
    });

    logger.info(`Docker volume plugin server running at ${SOCKET_PATH}`);
  } catch (error) {
    logger.error(`Failed to start Docker volume plugin server: ${error}`);
  }
}

startup();

logger.info(`Server is running at http://localhost:4096`);

export type AppType = typeof app;

process.on("SIGTERM", async () => {
  logger.info("SIGTERM received, starting graceful shutdown...");
  await shutdown();
  process.exit(0);
});

process.on("SIGINT", async () => {
  logger.info("SIGINT received, starting graceful shutdown...");
  await shutdown();
  process.exit(0);
});

export default await createHonoServer({ app, port: 4096 });
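Exporting AppType is what makes the unified server useful to the frontend: a typed RPC client can be derived with Hono's standard hc helper. A sketch; the import path and base URL are assumptions, and this lives in the client code rather than in this file:

import { hc } from "hono/client";
import type { AppType } from "../server"; // hypothetical path to the file above

const client = hc<AppType>("http://localhost:4096");

// Route names and response shapes mirror the route table above.
const res = await client.healthcheck.$get();
console.log(await res.json()); // { status: "ok" }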
29 app/server/jobs/backup-execution.ts Normal file
@@ -0,0 +1,29 @@
import { Job } from "../core/scheduler";
import { backupsService } from "../modules/backups/backups.service";
import { toMessage } from "../utils/errors";
import { logger } from "../utils/logger";

export class BackupExecutionJob extends Job {
  async run() {
    logger.debug("Checking for backup schedules to execute...");

    const scheduleIds = await backupsService.getSchedulesToExecute();

    if (scheduleIds.length === 0) {
      logger.debug("No backup schedules to execute");
      return { done: true, timestamp: new Date(), executed: 0 };
    }

    logger.info(`Found ${scheduleIds.length} backup schedule(s) to execute`);

    for (const scheduleId of scheduleIds) {
      try {
        await backupsService.executeBackup(scheduleId);
      } catch (error) {
        logger.error(`Failed to execute backup for schedule ${scheduleId}: ${toMessage(error)}`);
      }
    }

    return { done: true, timestamp: new Date(), executed: scheduleIds.length };
  }
}
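The actual registration lives in startup code not shown in this commit view, but the wiring through the scheduler builder would look roughly like this (the cron expression is an assumption):

import { Scheduler } from "../core/scheduler";
import { BackupExecutionJob } from "./backup-execution";

// Poll once a minute for schedules whose next run is due.
Scheduler.build(BackupExecutionJob).schedule("* * * * *");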
49 app/server/jobs/cleanup-dangling.ts Normal file
@@ -0,0 +1,49 @@
import { Job } from "../core/scheduler";
import path from "node:path";
import fs from "node:fs/promises";
import { volumeService } from "../modules/volumes/volume.service";
import { readMountInfo } from "../utils/mountinfo";
import { getVolumePath } from "../modules/volumes/helpers";
import { logger } from "../utils/logger";
import { executeUnmount } from "../modules/backends/utils/backend-utils";
import { toMessage } from "../utils/errors";
import { VOLUME_MOUNT_BASE } from "../core/constants";

export class CleanupDanglingMountsJob extends Job {
  async run() {
    const allVolumes = await volumeService.listVolumes();
    const allSystemMounts = await readMountInfo();

    for (const mount of allSystemMounts) {
      if (mount.mountPoint.includes("ironmount") && mount.mountPoint.endsWith("_data")) {
        const matchingVolume = allVolumes.find((v) => getVolumePath(v) === mount.mountPoint);
        if (!matchingVolume) {
          logger.info(`Found dangling mount at ${mount.mountPoint}, attempting to unmount...`);
          await executeUnmount(mount.mountPoint);

          await fs.rmdir(path.dirname(mount.mountPoint)).catch((err) => {
            logger.warn(
              `Failed to remove dangling mount directory ${path.dirname(mount.mountPoint)}: ${toMessage(err)}`,
            );
          });
        }
      }
    }

    const allIronmountDirs = await fs.readdir(VOLUME_MOUNT_BASE).catch(() => []);

    for (const dir of allIronmountDirs) {
      const volumePath = `${VOLUME_MOUNT_BASE}/${dir}/_data`;
      const matchingVolume = allVolumes.find((v) => getVolumePath(v) === volumePath);
      if (!matchingVolume) {
        const fullPath = path.join(VOLUME_MOUNT_BASE, dir);
        logger.info(`Found dangling mount directory at ${fullPath}, attempting to remove...`);
        await fs.rmdir(fullPath, { recursive: true }).catch((err) => {
          logger.warn(`Failed to remove dangling mount directory ${fullPath}: ${toMessage(err)}`);
        });
      }
    }

    return { done: true, timestamp: new Date() };
  }
}
10 app/server/jobs/cleanup-sessions.ts Normal file
@@ -0,0 +1,10 @@
import { Job } from "../core/scheduler";
import { authService } from "../modules/auth/auth.service";

export class CleanupSessionsJob extends Job {
  async run() {
    // Awaited so failures surface in the scheduler's per-run catch instead of as unhandled rejections.
    await authService.cleanupExpiredSessions();

    return { done: true, timestamp: new Date() };
  }
}
25 app/server/jobs/healthchecks.ts Normal file
@@ -0,0 +1,25 @@
import { Job } from "../core/scheduler";
import { volumeService } from "../modules/volumes/volume.service";
import { logger } from "../utils/logger";
import { db } from "../db/db";
import { eq, or } from "drizzle-orm";
import { volumesTable } from "../db/schema";

export class VolumeHealthCheckJob extends Job {
  async run() {
    logger.debug("Running health check for all volumes...");

    const volumes = await db.query.volumesTable.findMany({
      where: or(eq(volumesTable.status, "mounted"), eq(volumesTable.status, "error")),
    });

    for (const volume of volumes) {
      const { status } = await volumeService.checkHealth(volume.name);
      if (status === "error" && volume.autoRemount) {
        await volumeService.mountVolume(volume.name);
      }
    }

    return { done: true, timestamp: new Date() };
  }
}
26 app/server/jobs/repository-healthchecks.ts Normal file
@@ -0,0 +1,26 @@
import { Job } from "../core/scheduler";
import { repositoriesService } from "../modules/repositories/repositories.service";
import { logger } from "../utils/logger";
import { db } from "../db/db";
import { eq, or } from "drizzle-orm";
import { repositoriesTable } from "../db/schema";

export class RepositoryHealthCheckJob extends Job {
  async run() {
    logger.debug("Running health check for all repositories...");

    const repositories = await db.query.repositoriesTable.findMany({
      where: or(eq(repositoriesTable.status, "healthy"), eq(repositoriesTable.status, "error")),
    });

    for (const repository of repositories) {
      try {
        await repositoriesService.checkHealth(repository.id);
      } catch (error) {
        logger.error(`Health check failed for repository ${repository.name}:`, error);
      }
    }

    return { done: true, timestamp: new Date() };
  }
}
141 app/server/modules/auth/auth.controller.ts Normal file
@@ -0,0 +1,141 @@
import { validator } from "hono-openapi";

import { Hono } from "hono";
import { deleteCookie, getCookie, setCookie } from "hono/cookie";
import {
  changePasswordBodySchema,
  changePasswordDto,
  getMeDto,
  getStatusDto,
  loginBodySchema,
  loginDto,
  logoutDto,
  registerBodySchema,
  registerDto,
  type ChangePasswordDto,
  type GetMeDto,
  type GetStatusDto,
  type LoginDto,
  type LogoutDto,
  type RegisterDto,
} from "./auth.dto";
import { authService } from "./auth.service";
import { toMessage } from "../../utils/errors";

const COOKIE_NAME = "session_id";
const COOKIE_OPTIONS = {
  httpOnly: true,
  secure: process.env.NODE_ENV === "production",
  sameSite: "lax" as const,
  path: "/",
};

export const authController = new Hono()
  .post("/register", registerDto, validator("json", registerBodySchema), async (c) => {
    const body = c.req.valid("json");

    try {
      const { user, sessionId } = await authService.register(body.username, body.password);

      setCookie(c, COOKIE_NAME, sessionId, {
        ...COOKIE_OPTIONS,
        expires: new Date(Date.now() + 7 * 24 * 60 * 60 * 1000), // 7 days
      });

      return c.json<RegisterDto>(
        {
          success: true,
          message: "User registered successfully",
          user: {
            id: user.id,
            username: user.username,
            hasDownloadedResticPassword: user.hasDownloadedResticPassword,
          },
        },
        201,
      );
    } catch (error) {
      return c.json<RegisterDto>({ success: false, message: toMessage(error) }, 400);
    }
  })
  .post("/login", loginDto, validator("json", loginBodySchema), async (c) => {
    const body = c.req.valid("json");

    try {
      const { sessionId, user, expiresAt } = await authService.login(body.username, body.password);

      setCookie(c, COOKIE_NAME, sessionId, {
        ...COOKIE_OPTIONS,
        expires: new Date(expiresAt),
      });

      return c.json<LoginDto>({
        success: true,
        message: "Login successful",
        user: {
          id: user.id,
          username: user.username,
          hasDownloadedResticPassword: user.hasDownloadedResticPassword,
        },
      });
    } catch (error) {
      return c.json<LoginDto>({ success: false, message: toMessage(error) }, 401);
    }
  })
  .post("/logout", logoutDto, async (c) => {
    const sessionId = getCookie(c, COOKIE_NAME);

    if (sessionId) {
      await authService.logout(sessionId);
      deleteCookie(c, COOKIE_NAME, COOKIE_OPTIONS);
    }

    return c.json<LogoutDto>({ success: true });
  })
  .get("/me", getMeDto, async (c) => {
    const sessionId = getCookie(c, COOKIE_NAME);

    if (!sessionId) {
      return c.json<GetMeDto>({ success: false, message: "Not authenticated" }, 401);
    }

    const session = await authService.verifySession(sessionId);

    if (!session) {
      deleteCookie(c, COOKIE_NAME, COOKIE_OPTIONS);
      return c.json({ message: "Not authenticated" }, 401);
    }

    return c.json<GetMeDto>({
      success: true,
      user: session.user,
      message: "Authenticated",
    });
  })
  .get("/status", getStatusDto, async (c) => {
    const hasUsers = await authService.hasUsers();
    return c.json<GetStatusDto>({ hasUsers });
  })
  .post("/change-password", changePasswordDto, validator("json", changePasswordBodySchema), async (c) => {
    const sessionId = getCookie(c, COOKIE_NAME);

    if (!sessionId) {
      return c.json<ChangePasswordDto>({ success: false, message: "Not authenticated" }, 401);
    }

    const session = await authService.verifySession(sessionId);

    if (!session) {
      deleteCookie(c, COOKIE_NAME, COOKIE_OPTIONS);
      return c.json<ChangePasswordDto>({ success: false, message: "Not authenticated" }, 401);
    }

    const body = c.req.valid("json");

    try {
      await authService.changePassword(session.user.id, body.currentPassword, body.newPassword);
      return c.json<ChangePasswordDto>({ success: true, message: "Password changed successfully" });
    } catch (error) {
      return c.json<ChangePasswordDto>({ success: false, message: toMessage(error) }, 400);
    }
  });
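A hypothetical browser-side call against the login route above; the server sets the session_id cookie on success, so credentials must be included for later authenticated requests:

// Sketch: the credentials and the relative URL are illustrative.
const res = await fetch("/api/v1/auth/login", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  credentials: "include", // keep the session_id cookie
  body: JSON.stringify({ username: "admin", password: "a-long-password" }),
});

const body = await res.json(); // { success, message, user? } per LoginDto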
153 app/server/modules/auth/auth.dto.ts Normal file
@@ -0,0 +1,153 @@
import { type } from "arktype";
import { describeRoute, resolver } from "hono-openapi";

// Validation schemas
export const loginBodySchema = type({
  username: "string>0",
  password: "string>7",
});

export const registerBodySchema = type({
  username: "string>2",
  password: "string>7",
});

const loginResponseSchema = type({
  message: "string",
  success: "boolean",
  user: type({
    id: "number",
    username: "string",
    hasDownloadedResticPassword: "boolean",
  }).optional(),
});

export const loginDto = describeRoute({
  description: "Login with username and password",
  operationId: "login",
  tags: ["Auth"],
  responses: {
    200: {
      description: "Login successful",
      content: {
        "application/json": {
          schema: resolver(loginResponseSchema),
        },
      },
    },
  },
});

export type LoginDto = typeof loginResponseSchema.infer;

export const registerDto = describeRoute({
  description: "Register a new user",
  operationId: "register",
  tags: ["Auth"],
  responses: {
    201: {
      description: "User created successfully",
      content: {
        "application/json": {
          schema: resolver(loginResponseSchema),
        },
      },
    },
  },
});

export type RegisterDto = typeof loginResponseSchema.infer;

const logoutResponseSchema = type({
  success: "boolean",
});

export const logoutDto = describeRoute({
  description: "Logout current user",
  operationId: "logout",
  tags: ["Auth"],
  responses: {
    200: {
      description: "Logout successful",
      content: {
        "application/json": {
          schema: resolver(logoutResponseSchema),
        },
      },
    },
  },
});

export type LogoutDto = typeof logoutResponseSchema.infer;

export const getMeDto = describeRoute({
  description: "Get current authenticated user",
  operationId: "getMe",
  tags: ["Auth"],
  responses: {
    200: {
      description: "Current user information",
      content: {
        "application/json": {
          schema: resolver(loginResponseSchema),
        },
      },
    },
  },
});

export type GetMeDto = typeof loginResponseSchema.infer;

const statusResponseSchema = type({
  hasUsers: "boolean",
});

export const getStatusDto = describeRoute({
  description: "Get authentication system status",
  operationId: "getStatus",
  tags: ["Auth"],
  responses: {
    200: {
      description: "Authentication system status",
      content: {
        "application/json": {
          schema: resolver(statusResponseSchema),
        },
      },
    },
  },
});

export type GetStatusDto = typeof statusResponseSchema.infer;

export const changePasswordBodySchema = type({
  currentPassword: "string>0",
  newPassword: "string>7",
});

const changePasswordResponseSchema = type({
  success: "boolean",
  message: "string",
});

export const changePasswordDto = describeRoute({
  description: "Change current user password",
  operationId: "changePassword",
  tags: ["Auth"],
  responses: {
    200: {
      description: "Password changed successfully",
      content: {
        "application/json": {
          schema: resolver(changePasswordResponseSchema),
        },
      },
    },
  },
});

export type ChangePasswordDto = typeof changePasswordResponseSchema.infer;

export type LoginBody = typeof loginBodySchema.infer;
export type RegisterBody = typeof registerBodySchema.infer;
export type ChangePasswordBody = typeof changePasswordBodySchema.infer;
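The arktype string constraints encode minimum lengths: "string>7" accepts strings longer than 7 characters, i.e. at least 8. A validation sketch mirroring how config.ts checks results:

import { type } from "arktype";
import { loginBodySchema } from "./auth.dto";

// "short" is only 5 characters, so the password constraint fails.
const result = loginBodySchema({ username: "admin", password: "short" });

if (result instanceof type.errors) {
  console.error(result.toString());
}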
64 app/server/modules/auth/auth.middleware.ts Normal file
@@ -0,0 +1,64 @@
import { deleteCookie, getCookie } from "hono/cookie";
import { createMiddleware } from "hono/factory";
import { authService } from "./auth.service";

const COOKIE_NAME = "session_id";
const COOKIE_OPTIONS = {
  httpOnly: true,
  secure: process.env.NODE_ENV === "production",
  sameSite: "lax" as const,
  path: "/",
};

declare module "hono" {
  interface ContextVariableMap {
    user: {
      id: number;
      username: string;
      hasDownloadedResticPassword: boolean;
    };
  }
}

/**
 * Middleware to require authentication
 * Verifies the session cookie and attaches user to context
 */
export const requireAuth = createMiddleware(async (c, next) => {
  const sessionId = getCookie(c, COOKIE_NAME);

  if (!sessionId) {
    return c.json({ message: "Authentication required" }, 401);
  }

  const session = await authService.verifySession(sessionId);

  if (!session) {
    deleteCookie(c, COOKIE_NAME, COOKIE_OPTIONS);
    return c.json({ message: "Invalid or expired session" }, 401);
  }

  c.set("user", session.user);

  await next();
});

/**
 * Middleware to optionally attach user if authenticated
 * Does not block the request if not authenticated
 */
export const optionalAuth = createMiddleware(async (c, next) => {
  const sessionId = getCookie(c, COOKIE_NAME);

  if (sessionId) {
    const session = await authService.verifySession(sessionId);

    if (session) {
      c.set("user", session.user);
    } else {
      deleteCookie(c, COOKIE_NAME, COOKIE_OPTIONS);
    }
  }

  await next();
});
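A sketch of a protected router (the route itself is hypothetical): once requireAuth has run, c.get("user") is strongly typed thanks to the ContextVariableMap declaration above.

import { Hono } from "hono";
import { requireAuth } from "./auth.middleware";

const protectedRoutes = new Hono().use(requireAuth).get("/whoami", (c) => {
  const user = c.get("user"); // typed: { id, username, hasDownloadedResticPassword }
  return c.json({ username: user.username });
});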
179 app/server/modules/auth/auth.service.ts Normal file
@@ -0,0 +1,179 @@
import { eq, lt } from "drizzle-orm";
import { db } from "../../db/db";
import { sessionsTable, usersTable } from "../../db/schema";
import { logger } from "../../utils/logger";

const SESSION_DURATION = 1000 * 60 * 60 * 24 * 30; // 30 days

export class AuthService {
  /**
   * Register a new user with username and password
   */
  async register(username: string, password: string) {
    const [existingUser] = await db.select().from(usersTable);

    if (existingUser) {
      throw new Error("Admin user already exists");
    }

    const passwordHash = await Bun.password.hash(password, {
      algorithm: "argon2id",
      memoryCost: 19456,
      timeCost: 2,
    });

    const [user] = await db.insert(usersTable).values({ username, passwordHash }).returning();

    if (!user) {
      throw new Error("User registration failed");
    }

    logger.info(`User registered: ${username}`);
    const sessionId = crypto.randomUUID();
    const expiresAt = new Date(Date.now() + SESSION_DURATION).getTime();

    await db.insert(sessionsTable).values({
      id: sessionId,
      userId: user.id,
      expiresAt,
    });

    return {
      user: {
        id: user.id,
        username: user.username,
        createdAt: user.createdAt,
        hasDownloadedResticPassword: user.hasDownloadedResticPassword,
      },
      sessionId,
    };
  }

  /**
   * Login user with username and password
   */
  async login(username: string, password: string) {
    const [user] = await db.select().from(usersTable).where(eq(usersTable.username, username));

    if (!user) {
      throw new Error("Invalid credentials");
    }

    const isValid = await Bun.password.verify(password, user.passwordHash);

    if (!isValid) {
      throw new Error("Invalid credentials");
    }

    const sessionId = crypto.randomUUID();
    const expiresAt = new Date(Date.now() + SESSION_DURATION).getTime();

    await db.insert(sessionsTable).values({
      id: sessionId,
      userId: user.id,
      expiresAt,
    });

    logger.info(`User logged in: ${username}`);

    return {
      sessionId,
      user: {
        id: user.id,
        username: user.username,
        hasDownloadedResticPassword: user.hasDownloadedResticPassword,
      },
      expiresAt,
    };
  }

  /**
   * Logout user by deleting their session
   */
  async logout(sessionId: string) {
    await db.delete(sessionsTable).where(eq(sessionsTable.id, sessionId));
    logger.info(`User logged out: session ${sessionId}`);
  }

  /**
   * Verify a session and return the associated user
   */
  async verifySession(sessionId: string) {
    const [session] = await db
      .select({
        session: sessionsTable,
        user: usersTable,
      })
      .from(sessionsTable)
      .innerJoin(usersTable, eq(sessionsTable.userId, usersTable.id))
      .where(eq(sessionsTable.id, sessionId));

    if (!session) {
      return null;
    }

    if (session.session.expiresAt < Date.now()) {
      await db.delete(sessionsTable).where(eq(sessionsTable.id, sessionId));
      return null;
    }

    return {
      user: {
        id: session.user.id,
        username: session.user.username,
        hasDownloadedResticPassword: session.user.hasDownloadedResticPassword,
      },
      session: {
        id: session.session.id,
        expiresAt: session.session.expiresAt,
      },
    };
  }

  /**
   * Clean up expired sessions
   */
  async cleanupExpiredSessions() {
    const result = await db.delete(sessionsTable).where(lt(sessionsTable.expiresAt, Date.now())).returning();
    if (result.length > 0) {
      logger.info(`Cleaned up ${result.length} expired sessions`);
    }
  }

  /**
   * Check if any users exist in the system
   */
  async hasUsers(): Promise<boolean> {
    const [user] = await db.select({ id: usersTable.id }).from(usersTable).limit(1);
    return !!user;
  }

  /**
   * Change password for a user
   */
  async changePassword(userId: number, currentPassword: string, newPassword: string) {
    const [user] = await db.select().from(usersTable).where(eq(usersTable.id, userId));

    if (!user) {
      throw new Error("User not found");
    }

    const isValid = await Bun.password.verify(currentPassword, user.passwordHash);

    if (!isValid) {
      throw new Error("Current password is incorrect");
    }

    const newPasswordHash = await Bun.password.hash(newPassword, {
      algorithm: "argon2id",
      memoryCost: 19456,
      timeCost: 2,
    });

    await db.update(usersTable).set({ passwordHash: newPasswordHash }).where(eq(usersTable.id, userId));

    logger.info(`Password changed for user: ${user.username}`);
  }
}

export const authService = new AuthService();
37 app/server/modules/backends/backend.ts Normal file
@@ -0,0 +1,37 @@
import type { BackendStatus } from "~/schemas/volumes";
import type { Volume } from "../../db/schema";
import { getVolumePath } from "../volumes/helpers";
import { makeDirectoryBackend } from "./directory/directory-backend";
import { makeNfsBackend } from "./nfs/nfs-backend";
import { makeSmbBackend } from "./smb/smb-backend";
import { makeWebdavBackend } from "./webdav/webdav-backend";

type OperationResult = {
  error?: string;
  status: BackendStatus;
};

export type VolumeBackend = {
  mount: () => Promise<OperationResult>;
  unmount: () => Promise<OperationResult>;
  checkHealth: () => Promise<OperationResult>;
};

export const createVolumeBackend = (volume: Volume): VolumeBackend => {
  const path = getVolumePath(volume);

  switch (volume.config.backend) {
    case "nfs": {
      return makeNfsBackend(volume.config, path);
    }
    case "smb": {
      return makeSmbBackend(volume.config, path);
    }
    case "directory": {
      return makeDirectoryBackend(volume.config, path);
    }
    case "webdav": {
      return makeWebdavBackend(volume.config, path);
    }
  }
};
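A hypothetical call site showing the uniform interface: every backend exposes the same three operations, and errors come back as data rather than exceptions.

import type { Volume } from "../../db/schema";
import { createVolumeBackend } from "./backend";

async function mountOrThrow(volume: Volume) {
  const backend = createVolumeBackend(volume);
  const result = await backend.mount();
  if (result.status === "error") {
    throw new Error(result.error ?? "mount failed");
  }
}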
59 app/server/modules/backends/directory/directory-backend.ts Normal file
@@ -0,0 +1,59 @@
import * as fs from "node:fs/promises";
import * as npath from "node:path";
import { toMessage } from "../../../utils/errors";
import { logger } from "../../../utils/logger";
import type { VolumeBackend } from "../backend";
import { BACKEND_STATUS, type BackendConfig } from "~/schemas/volumes";

const mount = async (config: BackendConfig, _volumePath: string) => {
  if (config.backend !== "directory") {
    return { status: BACKEND_STATUS.error, error: "Invalid backend type" };
  }

  logger.info("Mounting directory volume from:", config.path);

  try {
    await fs.access(config.path);
    const stats = await fs.stat(config.path);

    if (!stats.isDirectory()) {
      return { status: BACKEND_STATUS.error, error: "Path is not a directory" };
    }

    return { status: BACKEND_STATUS.mounted };
  } catch (error) {
    logger.error("Failed to mount directory volume:", error);
    return { status: BACKEND_STATUS.error, error: toMessage(error) };
  }
};

const unmount = async () => {
  logger.info("Cannot unmount directory volume.");
  return { status: BACKEND_STATUS.unmounted };
};

const checkHealth = async (config: BackendConfig) => {
  if (config.backend !== "directory") {
    return { status: BACKEND_STATUS.error, error: "Invalid backend type" };
  }

  try {
    await fs.access(config.path);

    // Try to create a temporary file to ensure write access
    const tempFilePath = npath.join(config.path, `.healthcheck-${Date.now()}`);
    await fs.writeFile(tempFilePath, "healthcheck");
    await fs.unlink(tempFilePath);

    return { status: BACKEND_STATUS.mounted };
  } catch (error) {
    logger.error("Directory health check failed:", error);
    return { status: BACKEND_STATUS.error, error: toMessage(error) };
  }
};

export const makeDirectoryBackend = (config: BackendConfig, volumePath: string): VolumeBackend => ({
  mount: () => mount(config, volumePath),
  unmount,
  checkHealth: () => checkHealth(config),
});
121 app/server/modules/backends/nfs/nfs-backend.ts Normal file
@@ -0,0 +1,121 @@
import * as fs from "node:fs/promises";
import * as os from "node:os";
import { OPERATION_TIMEOUT } from "../../../core/constants";
import { toMessage } from "../../../utils/errors";
import { logger } from "../../../utils/logger";
import { getMountForPath } from "../../../utils/mountinfo";
import { withTimeout } from "../../../utils/timeout";
import type { VolumeBackend } from "../backend";
import { createTestFile, executeMount, executeUnmount } from "../utils/backend-utils";
import { BACKEND_STATUS, type BackendConfig } from "~/schemas/volumes";

const mount = async (config: BackendConfig, path: string) => {
  logger.debug(`Mounting volume ${path}...`);

  if (config.backend !== "nfs") {
    logger.error("Provided config is not for NFS backend");
    return { status: BACKEND_STATUS.error, error: "Provided config is not for NFS backend" };
  }

  if (os.platform() !== "linux") {
    logger.error("NFS mounting is only supported on Linux hosts.");
    return { status: BACKEND_STATUS.error, error: "NFS mounting is only supported on Linux hosts." };
  }

  const { status } = await checkHealth(path, config.readOnly ?? false);
  if (status === "mounted") {
    return { status: BACKEND_STATUS.mounted };
  }

  logger.debug(`Trying to unmount any existing mounts at ${path} before mounting...`);
  await unmount(path);

  const run = async () => {
    await fs.mkdir(path, { recursive: true });

    const source = `${config.server}:${config.exportPath}`;
    const options = [`vers=${config.version}`, `port=${config.port}`];
    if (config.readOnly) {
      options.push("ro");
    }
    const args = ["-t", "nfs", "-o", options.join(","), source, path];

    logger.debug(`Mounting volume ${path}...`);
    logger.info(`Executing mount: mount ${args.join(" ")}`);

    await executeMount(args);

    logger.info(`NFS volume at ${path} mounted successfully.`);
    return { status: BACKEND_STATUS.mounted };
  };

  try {
    return await withTimeout(run(), OPERATION_TIMEOUT, "NFS mount");
  } catch (err) {
    logger.error("Error mounting NFS volume", { error: toMessage(err) });
    return { status: BACKEND_STATUS.error, error: toMessage(err) };
  }
};

const unmount = async (path: string) => {
  if (os.platform() !== "linux") {
    logger.error("NFS unmounting is only supported on Linux hosts.");
    return { status: BACKEND_STATUS.error, error: "NFS unmounting is only supported on Linux hosts." };
  }

  const run = async () => {
    try {
      await fs.access(path);
    } catch {
      logger.warn(`Path ${path} does not exist. Skipping unmount.`);
      return { status: BACKEND_STATUS.unmounted };
    }

    await executeUnmount(path);

    await fs.rmdir(path);

    logger.info(`NFS volume at ${path} unmounted successfully.`);
    return { status: BACKEND_STATUS.unmounted };
  };

  try {
    return await withTimeout(run(), OPERATION_TIMEOUT, "NFS unmount");
  } catch (err) {
    logger.error("Error unmounting NFS volume", { path, error: toMessage(err) });
    return { status: BACKEND_STATUS.error, error: toMessage(err) };
  }
};

const checkHealth = async (path: string, readOnly: boolean) => {
  const run = async () => {
    logger.debug(`Checking health of NFS volume at ${path}...`);
    await fs.access(path);

    const mount = await getMountForPath(path);

    if (!mount || !mount.fstype.startsWith("nfs")) {
      throw new Error(`Path ${path} is not mounted as NFS.`);
    }

    if (!readOnly) {
      await createTestFile(path);
    }

    logger.debug(`NFS volume at ${path} is healthy and mounted.`);
    return { status: BACKEND_STATUS.mounted };
  };

  try {
    return await withTimeout(run(), OPERATION_TIMEOUT, "NFS health check");
  } catch (error) {
    logger.error("NFS volume health check failed:", toMessage(error));
    return { status: BACKEND_STATUS.error, error: toMessage(error) };
  }
};

export const makeNfsBackend = (config: BackendConfig, path: string): VolumeBackend => ({
  mount: () => mount(config, path),
  unmount: () => unmount(path),
  checkHealth: () => checkHealth(path, config.readOnly ?? false),
});
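For reference, a config like the one below (field values illustrative) makes mount() shell out to roughly this command:

// config: { backend: "nfs", server: "10.0.0.5", exportPath: "/export/media", version: "4.1", port: 2049, readOnly: true }
//
// Resulting invocation built by mount() above:
//   mount -t nfs -o vers=4.1,port=2049,ro 10.0.0.5:/export/media <volume path>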
134 app/server/modules/backends/smb/smb-backend.ts Normal file
@@ -0,0 +1,134 @@
import * as fs from "node:fs/promises";
import * as os from "node:os";
import { OPERATION_TIMEOUT } from "../../../core/constants";
import { toMessage } from "../../../utils/errors";
import { logger } from "../../../utils/logger";
import { getMountForPath } from "../../../utils/mountinfo";
import { withTimeout } from "../../../utils/timeout";
import type { VolumeBackend } from "../backend";
import { createTestFile, executeMount, executeUnmount } from "../utils/backend-utils";
import { BACKEND_STATUS, type BackendConfig } from "~/schemas/volumes";

const mount = async (config: BackendConfig, path: string) => {
  logger.debug(`Mounting SMB volume ${path}...`);

  if (config.backend !== "smb") {
    logger.error("Provided config is not for SMB backend");
    return { status: BACKEND_STATUS.error, error: "Provided config is not for SMB backend" };
  }

  if (os.platform() !== "linux") {
    logger.error("SMB mounting is only supported on Linux hosts.");
    return { status: BACKEND_STATUS.error, error: "SMB mounting is only supported on Linux hosts." };
  }

  const { status } = await checkHealth(path, config.readOnly ?? false);
  if (status === "mounted") {
    return { status: BACKEND_STATUS.mounted };
  }

  logger.debug(`Trying to unmount any existing mounts at ${path} before mounting...`);
  await unmount(path);

  const run = async () => {
    await fs.mkdir(path, { recursive: true });

    const source = `//${config.server}/${config.share}`;
    const options = [
      `user=${config.username}`,
      `pass=${config.password}`,
      `vers=${config.vers}`,
      `port=${config.port}`,
      "uid=1000",
      "gid=1000",
    ];

    if (config.domain) {
      options.push(`domain=${config.domain}`);
    }

    if (config.readOnly) {
      options.push("ro");
    }

    const args = ["-t", "cifs", "-o", options.join(","), source, path];

    logger.debug(`Mounting SMB volume ${path}...`);
    logger.info(`Executing mount: mount ${args.join(" ")}`);

    await executeMount(args);

    logger.info(`SMB volume at ${path} mounted successfully.`);
    return { status: BACKEND_STATUS.mounted };
  };

  try {
    return await withTimeout(run(), OPERATION_TIMEOUT, "SMB mount");
  } catch (error) {
    logger.error("Error mounting SMB volume", { error: toMessage(error) });
    return { status: BACKEND_STATUS.error, error: toMessage(error) };
  }
};

const unmount = async (path: string) => {
  if (os.platform() !== "linux") {
    logger.error("SMB unmounting is only supported on Linux hosts.");
    return { status: BACKEND_STATUS.error, error: "SMB unmounting is only supported on Linux hosts." };
  }

  const run = async () => {
    try {
      await fs.access(path);
    } catch {
      logger.warn(`Path ${path} does not exist. Skipping unmount.`);
      return { status: BACKEND_STATUS.unmounted };
    }

    await executeUnmount(path);

    await fs.rmdir(path);

    logger.info(`SMB volume at ${path} unmounted successfully.`);
    return { status: BACKEND_STATUS.unmounted };
  };

  try {
    return await withTimeout(run(), OPERATION_TIMEOUT, "SMB unmount");
  } catch (error) {
    logger.error("Error unmounting SMB volume", { path, error: toMessage(error) });
    return { status: BACKEND_STATUS.error, error: toMessage(error) };
  }
};

const checkHealth = async (path: string, readOnly: boolean) => {
  const run = async () => {
    logger.debug(`Checking health of SMB volume at ${path}...`);
    await fs.access(path);

    const mount = await getMountForPath(path);

    if (!mount || mount.fstype !== "cifs") {
      throw new Error(`Path ${path} is not mounted as CIFS/SMB.`);
    }

    if (!readOnly) {
      await createTestFile(path);
    }

    logger.debug(`SMB volume at ${path} is healthy and mounted.`);
    return { status: BACKEND_STATUS.mounted };
  };

  try {
    return await withTimeout(run(), OPERATION_TIMEOUT, "SMB health check");
  } catch (error) {
    logger.error("SMB volume health check failed:", toMessage(error));
    return { status: BACKEND_STATUS.error, error: toMessage(error) };
  }
};

export const makeSmbBackend = (config: BackendConfig, path: string): VolumeBackend => ({
  mount: () => mount(config, path),
  unmount: () => unmount(path),
  checkHealth: () => checkHealth(path, config.readOnly ?? false),
});
47 app/server/modules/backends/utils/backend-utils.ts Normal file
@@ -0,0 +1,47 @@
import * as fs from "node:fs/promises";
import * as npath from "node:path";
import { toMessage } from "../../../utils/errors";
import { logger } from "../../../utils/logger";
import { $ } from "bun";

export const executeMount = async (args: string[]): Promise<void> => {
  let stderr: string | undefined;

  const result = await $`mount ${args}`.nothrow();
  stderr = result.stderr.toString();

  if (stderr?.trim()) {
    logger.warn(stderr.trim());
  }
};

export const executeUnmount = async (path: string): Promise<void> => {
  let stderr: string | undefined;

  const result = await $`umount -l -f ${path}`.nothrow();
  stderr = result.stderr.toString();

  if (stderr?.trim()) {
    logger.warn(stderr.trim());
  }
};

export const createTestFile = async (path: string): Promise<void> => {
  const testFilePath = npath.join(path, `.healthcheck-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`);

  await fs.writeFile(testFilePath, "healthcheck");

  const files = await fs.readdir(path);
  await Promise.all(
    files.map(async (file) => {
      if (file.startsWith(".healthcheck-")) {
        const filePath = npath.join(path, file);
        try {
          await fs.unlink(filePath);
        } catch (err) {
          logger.warn(`Failed to stat or unlink file ${filePath}: ${toMessage(err)}`);
        }
      }
    }),
  );
};
168 app/server/modules/backends/webdav/webdav-backend.ts Normal file
@@ -0,0 +1,168 @@
import { execFile as execFileCb } from "node:child_process";
import * as fs from "node:fs/promises";
import * as os from "node:os";
import { promisify } from "node:util";
import { OPERATION_TIMEOUT } from "../../../core/constants";
import { toMessage } from "../../../utils/errors";
import { logger } from "../../../utils/logger";
import { getMountForPath } from "../../../utils/mountinfo";
import { withTimeout } from "../../../utils/timeout";
import type { VolumeBackend } from "../backend";
import { createTestFile, executeUnmount } from "../utils/backend-utils";
import { BACKEND_STATUS, type BackendConfig } from "~/schemas/volumes";

const execFile = promisify(execFileCb);

const mount = async (config: BackendConfig, path: string) => {
  logger.debug(`Mounting WebDAV volume ${path}...`);

  if (config.backend !== "webdav") {
    logger.error("Provided config is not for WebDAV backend");
    return { status: BACKEND_STATUS.error, error: "Provided config is not for WebDAV backend" };
  }

  if (os.platform() !== "linux") {
    logger.error("WebDAV mounting is only supported on Linux hosts.");
    return { status: BACKEND_STATUS.error, error: "WebDAV mounting is only supported on Linux hosts." };
  }

  const { status } = await checkHealth(path, config.readOnly ?? false);
  if (status === "mounted") {
    return { status: BACKEND_STATUS.mounted };
  }

  logger.debug(`Trying to unmount any existing mounts at ${path} before mounting...`);
  await unmount(path);

  const run = async () => {
    await fs.mkdir(path, { recursive: true }).catch((err) => {
      logger.warn(`Failed to create directory ${path}: ${err.message}`);
    });

    const protocol = config.ssl ? "https" : "http";
    const defaultPort = config.ssl ? 443 : 80;
    const port = config.port !== defaultPort ? `:${config.port}` : "";
    const source = `${protocol}://${config.server}${port}${config.path}`;

    const options = config.readOnly
      ? ["uid=1000", "gid=1000", "file_mode=0444", "dir_mode=0555", "ro"]
      : ["uid=1000", "gid=1000", "file_mode=0664", "dir_mode=0775"];

    if (config.username && config.password) {
      const secretsFile = "/etc/davfs2/secrets";
      const secretsContent = `${source} ${config.username} ${config.password}\n`;
      await fs.appendFile(secretsFile, secretsContent, { mode: 0o600 });
    }

    logger.debug(`Mounting WebDAV volume ${path}...`);

    const { stderr } = await execFile("mount", ["-t", "davfs", "-o", options.join(","), source, path], {
      timeout: OPERATION_TIMEOUT,
      maxBuffer: 1024 * 1024,
    });

    if (stderr?.trim()) {
      logger.warn(stderr.trim());
    }

    logger.info(`WebDAV volume at ${path} mounted successfully.`);
    return { status: BACKEND_STATUS.mounted };
  };

  try {
    return await withTimeout(run(), OPERATION_TIMEOUT, "WebDAV mount");
  } catch (error) {
    const errorMsg = toMessage(error);

    if (errorMsg.includes("already mounted")) {
      return { status: BACKEND_STATUS.mounted };
    }

    logger.error("Error mounting WebDAV volume", { error: errorMsg });

    if (errorMsg.includes("option") && errorMsg.includes("requires argument")) {
      return {
        status: BACKEND_STATUS.error,
        error: "Invalid mount options. Please check your WebDAV server configuration.",
      };
    } else if (errorMsg.includes("connection refused") || errorMsg.includes("Connection refused")) {
      return {
        status: BACKEND_STATUS.error,
        error: "Cannot connect to WebDAV server. Please check the server address and port.",
      };
    } else if (errorMsg.includes("unauthorized") || errorMsg.includes("Unauthorized")) {
      return {
        status: BACKEND_STATUS.error,
        error: "Authentication failed. Please check your username and password.",
      };
    }

    return { status: BACKEND_STATUS.error, error: errorMsg };
  }
};

const unmount = async (path: string) => {
  if (os.platform() !== "linux") {
    logger.error("WebDAV unmounting is only supported on Linux hosts.");
    return { status: BACKEND_STATUS.error, error: "WebDAV unmounting is only supported on Linux hosts." };
  }

  const run = async () => {
    try {
      await fs.access(path);
    } catch (e) {
      logger.warn(`Path ${path} does not exist. Skipping unmount.`, e);
      return { status: BACKEND_STATUS.unmounted };
    }

    await executeUnmount(path);

    await fs.rmdir(path);

    logger.info(`WebDAV volume at ${path} unmounted successfully.`);
    return { status: BACKEND_STATUS.unmounted };
  };

  try {
    return await withTimeout(run(), OPERATION_TIMEOUT, "WebDAV unmount");
  } catch (error) {
    logger.error("Error unmounting WebDAV volume", { path, error: toMessage(error) });
    return { status: BACKEND_STATUS.error, error: toMessage(error) };
  }
};

const checkHealth = async (path: string, readOnly: boolean) => {
  const run = async () => {
    logger.debug(`Checking health of WebDAV volume at ${path}...`);
    await fs.access(path);

    const mount = await getMountForPath(path);

    if (!mount || mount.fstype !== "fuse") {
      throw new Error(`Path ${path} is not mounted as WebDAV.`);
    }

    if (!readOnly) {
      await createTestFile(path);
    }

    logger.debug(`WebDAV volume at ${path} is healthy and mounted.`);
    return { status: BACKEND_STATUS.mounted };
  };

  try {
    return await withTimeout(run(), OPERATION_TIMEOUT, "WebDAV health check");
  } catch (error) {
    logger.error("WebDAV volume health check failed:", toMessage(error));
    return { status: BACKEND_STATUS.error, error: toMessage(error) };
  }
};

export const makeWebdavBackend = (config: BackendConfig, path: string): VolumeBackend => ({
  mount: () => mount(config, path),
  unmount: () => unmount(path),
  checkHealth: () => checkHealth(path, config.readOnly ?? false),
});
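Every backend operation above is wrapped in withTimeout, whose implementation sits outside this diff. A minimal sketch of what such a helper typically looks like, with the signature assumed purely from the call sites here:

// Minimal sketch, assuming withTimeout(promise, ms, label) rejects if the
// promise does not settle within ms milliseconds. Not the project's actual code.
export const withTimeout = async <T>(promise: Promise<T>, ms: number, label: string): Promise<T> => {
  let timer: ReturnType<typeof setTimeout> | undefined;

  const timeout = new Promise<never>((_, reject) => {
    timer = setTimeout(() => reject(new Error(`${label} timed out after ${ms}ms`)), ms);
  });

  try {
    return await Promise.race([promise, timeout]);
  } finally {
    clearTimeout(timer);
  }
};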
81
app/server/modules/backups/backups.controller.ts
Normal file
@@ -0,0 +1,81 @@
import { Hono } from "hono";
import { validator } from "hono-openapi";
import {
  createBackupScheduleBody,
  createBackupScheduleDto,
  deleteBackupScheduleDto,
  getBackupScheduleDto,
  getBackupScheduleForVolumeDto,
  listBackupSchedulesDto,
  runBackupNowDto,
  stopBackupDto,
  updateBackupScheduleDto,
  updateBackupScheduleBody,
  type CreateBackupScheduleDto,
  type DeleteBackupScheduleDto,
  type GetBackupScheduleDto,
  type GetBackupScheduleForVolumeResponseDto,
  type ListBackupSchedulesResponseDto,
  type RunBackupNowDto,
  type StopBackupDto,
  type UpdateBackupScheduleDto,
} from "./backups.dto";
import { backupsService } from "./backups.service";

export const backupScheduleController = new Hono()
  .get("/", listBackupSchedulesDto, async (c) => {
    const schedules = await backupsService.listSchedules();

    return c.json<ListBackupSchedulesResponseDto>(schedules, 200);
  })
  .get("/:scheduleId", getBackupScheduleDto, async (c) => {
    const scheduleId = c.req.param("scheduleId");

    const schedule = await backupsService.getSchedule(Number(scheduleId));

    return c.json<GetBackupScheduleDto>(schedule, 200);
  })
  .get("/volume/:volumeId", getBackupScheduleForVolumeDto, async (c) => {
    const volumeId = c.req.param("volumeId");
    const schedule = await backupsService.getScheduleForVolume(Number(volumeId));

    return c.json<GetBackupScheduleForVolumeResponseDto>(schedule, 200);
  })
  .post("/", createBackupScheduleDto, validator("json", createBackupScheduleBody), async (c) => {
    const body = c.req.valid("json");

    const schedule = await backupsService.createSchedule(body);

    return c.json<CreateBackupScheduleDto>(schedule, 201);
  })
  .patch("/:scheduleId", updateBackupScheduleDto, validator("json", updateBackupScheduleBody), async (c) => {
    const scheduleId = c.req.param("scheduleId");
    const body = c.req.valid("json");

    const schedule = await backupsService.updateSchedule(Number(scheduleId), body);

    return c.json<UpdateBackupScheduleDto>(schedule, 200);
  })
  .delete("/:scheduleId", deleteBackupScheduleDto, async (c) => {
    const scheduleId = c.req.param("scheduleId");

    await backupsService.deleteSchedule(Number(scheduleId));

    return c.json<DeleteBackupScheduleDto>({ success: true }, 200);
  })
  .post("/:scheduleId/run", runBackupNowDto, async (c) => {
    const scheduleId = c.req.param("scheduleId");

    backupsService.executeBackup(Number(scheduleId), true).catch((error) => {
      console.error("Backup execution failed:", error);
    });

    return c.json<RunBackupNowDto>({ success: true }, 200);
  })
  .post("/:scheduleId/stop", stopBackupDto, async (c) => {
    const scheduleId = c.req.param("scheduleId");

    await backupsService.stopBackup(Number(scheduleId));

    return c.json<StopBackupDto>({ success: true }, 200);
  });
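Note that the /:scheduleId/run handler deliberately does not await executeBackup: the HTTP response returns immediately and progress is reported over the SSE events stream that appears later in this diff. A hedged client-side sketch; the /api/backups route prefix is an assumption, since the controller's mount point is not part of this diff:

// Hedged sketch: adjust the prefix to wherever backupScheduleController is mounted.
const res = await fetch("/api/backups/42/run", { method: "POST" });
const body: { success: boolean } = await res.json();

if (body.success) {
  console.log("Backup started; watch the SSE stream for progress events.");
}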
253
app/server/modules/backups/backups.dto.ts
Normal file
@@ -0,0 +1,253 @@
import { type } from "arktype";
import { describeRoute, resolver } from "hono-openapi";
import { volumeSchema } from "../volumes/volume.dto";
import { repositorySchema } from "../repositories/repositories.dto";

const retentionPolicySchema = type({
  keepLast: "number?",
  keepHourly: "number?",
  keepDaily: "number?",
  keepWeekly: "number?",
  keepMonthly: "number?",
  keepYearly: "number?",
  keepWithinDuration: "string?",
});

export type RetentionPolicy = typeof retentionPolicySchema.infer;

const backupScheduleSchema = type({
  id: "number",
  volumeId: "number",
  repositoryId: "string",
  enabled: "boolean",
  cronExpression: "string",
  retentionPolicy: retentionPolicySchema.or("null"),
  excludePatterns: "string[] | null",
  includePatterns: "string[] | null",
  lastBackupAt: "number | null",
  lastBackupStatus: "'success' | 'error' | 'in_progress' | null",
  lastBackupError: "string | null",
  nextBackupAt: "number | null",
  createdAt: "number",
  updatedAt: "number",
}).and(
  type({
    volume: volumeSchema,
    repository: repositorySchema,
  }),
);

/**
 * List all backup schedules
 */
export const listBackupSchedulesResponse = backupScheduleSchema.array();

export type ListBackupSchedulesResponseDto = typeof listBackupSchedulesResponse.infer;

export const listBackupSchedulesDto = describeRoute({
  description: "List all backup schedules",
  tags: ["Backups"],
  operationId: "listBackupSchedules",
  responses: {
    200: {
      description: "List of backup schedules",
      content: {
        "application/json": {
          schema: resolver(listBackupSchedulesResponse),
        },
      },
    },
  },
});

/**
 * Get a single backup schedule
 */
export const getBackupScheduleResponse = backupScheduleSchema;

export type GetBackupScheduleDto = typeof getBackupScheduleResponse.infer;

export const getBackupScheduleDto = describeRoute({
  description: "Get a backup schedule by ID",
  tags: ["Backups"],
  operationId: "getBackupSchedule",
  responses: {
    200: {
      description: "Backup schedule details",
      content: {
        "application/json": {
          schema: resolver(getBackupScheduleResponse),
        },
      },
    },
  },
});

export const getBackupScheduleForVolumeResponse = backupScheduleSchema.or("null");

export type GetBackupScheduleForVolumeResponseDto = typeof getBackupScheduleForVolumeResponse.infer;

export const getBackupScheduleForVolumeDto = describeRoute({
  description: "Get a backup schedule for a specific volume",
  tags: ["Backups"],
  operationId: "getBackupScheduleForVolume",
  responses: {
    200: {
      description: "Backup schedule details for the volume",
      content: {
        "application/json": {
          schema: resolver(getBackupScheduleForVolumeResponse),
        },
      },
    },
  },
});

/**
 * Create a new backup schedule
 */
export const createBackupScheduleBody = type({
  volumeId: "number",
  repositoryId: "string",
  enabled: "boolean",
  cronExpression: "string",
  retentionPolicy: retentionPolicySchema.optional(),
  excludePatterns: "string[]?",
  includePatterns: "string[]?",
  tags: "string[]?",
});

export type CreateBackupScheduleBody = typeof createBackupScheduleBody.infer;

export const createBackupScheduleResponse = backupScheduleSchema.omit("volume", "repository");

export type CreateBackupScheduleDto = typeof createBackupScheduleResponse.infer;

export const createBackupScheduleDto = describeRoute({
  description: "Create a new backup schedule for a volume",
  operationId: "createBackupSchedule",
  tags: ["Backups"],
  responses: {
    201: {
      description: "Backup schedule created successfully",
      content: {
        "application/json": {
          schema: resolver(createBackupScheduleResponse),
        },
      },
    },
  },
});

/**
 * Update a backup schedule
 */
export const updateBackupScheduleBody = type({
  repositoryId: "string",
  enabled: "boolean?",
  cronExpression: "string",
  retentionPolicy: retentionPolicySchema.optional(),
  excludePatterns: "string[]?",
  includePatterns: "string[]?",
  tags: "string[]?",
});

export type UpdateBackupScheduleBody = typeof updateBackupScheduleBody.infer;

export const updateBackupScheduleResponse = backupScheduleSchema.omit("volume", "repository");

export type UpdateBackupScheduleDto = typeof updateBackupScheduleResponse.infer;

export const updateBackupScheduleDto = describeRoute({
  description: "Update a backup schedule",
  operationId: "updateBackupSchedule",
  tags: ["Backups"],
  responses: {
    200: {
      description: "Backup schedule updated successfully",
      content: {
        "application/json": {
          schema: resolver(updateBackupScheduleResponse),
        },
      },
    },
  },
});

/**
 * Delete a backup schedule
 */
export const deleteBackupScheduleResponse = type({
  success: "boolean",
});

export type DeleteBackupScheduleDto = typeof deleteBackupScheduleResponse.infer;

export const deleteBackupScheduleDto = describeRoute({
  description: "Delete a backup schedule",
  operationId: "deleteBackupSchedule",
  tags: ["Backups"],
  responses: {
    200: {
      description: "Backup schedule deleted successfully",
      content: {
        "application/json": {
          schema: resolver(deleteBackupScheduleResponse),
        },
      },
    },
  },
});

/**
 * Run a backup immediately
 */
export const runBackupNowResponse = type({
  success: "boolean",
});

export type RunBackupNowDto = typeof runBackupNowResponse.infer;

export const runBackupNowDto = describeRoute({
  description: "Trigger a backup immediately for a schedule",
  operationId: "runBackupNow",
  tags: ["Backups"],
  responses: {
    200: {
      description: "Backup started successfully",
      content: {
        "application/json": {
          schema: resolver(runBackupNowResponse),
        },
      },
    },
  },
});

/**
 * Stop a running backup
 */
export const stopBackupResponse = type({
  success: "boolean",
});

export type StopBackupDto = typeof stopBackupResponse.infer;

export const stopBackupDto = describeRoute({
  description: "Stop a backup that is currently in progress",
  operationId: "stopBackup",
  tags: ["Backups"],
  responses: {
    200: {
      description: "Backup stopped successfully",
      content: {
        "application/json": {
          schema: resolver(stopBackupResponse),
        },
      },
    },
    409: {
      description: "No backup is currently running for this schedule",
    },
  },
});
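These arktype definitions double as runtime validators: calling a schema with unknown input returns either the parsed value or a type.errors instance. A small hedged sketch of validating a create-schedule payload by hand, outside the Hono validator middleware (the payload values are illustrative):

import { type } from "arktype";
import { createBackupScheduleBody } from "./backups.dto";

// Sketch: validate an untrusted payload directly against the schema.
const out = createBackupScheduleBody({
  volumeId: 1,
  repositoryId: "repo-1", // illustrative id
  enabled: true,
  cronExpression: "0 3 * * *",
});

if (out instanceof type.errors) {
  console.error(out.summary); // human-readable list of validation problems
} else {
  console.log(`Schedule for volume ${out.volumeId} validated`);
}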
353
app/server/modules/backups/backups.service.ts
Normal file
@@ -0,0 +1,353 @@
import { eq } from "drizzle-orm";
import cron from "node-cron";
import { CronExpressionParser } from "cron-parser";
import { NotFoundError, BadRequestError, ConflictError } from "http-errors-enhanced";
import { db } from "../../db/db";
import { backupSchedulesTable, repositoriesTable, volumesTable } from "../../db/schema";
import { restic } from "../../utils/restic";
import { logger } from "../../utils/logger";
import { getVolumePath } from "../volumes/helpers";
import type { CreateBackupScheduleBody, UpdateBackupScheduleBody } from "./backups.dto";
import { toMessage } from "../../utils/errors";
import { serverEvents } from "../../core/events";

const runningBackups = new Map<number, AbortController>();

const calculateNextRun = (cronExpression: string): number => {
  try {
    const interval = CronExpressionParser.parse(cronExpression, {
      currentDate: new Date(),
      tz: "UTC",
    });

    return interval.next().getTime();
  } catch (error) {
    logger.error(`Failed to parse cron expression "${cronExpression}": ${error}`);
    const fallback = new Date();
    fallback.setMinutes(fallback.getMinutes() + 1);
    return fallback.getTime();
  }
};

const listSchedules = async () => {
  const schedules = await db.query.backupSchedulesTable.findMany({
    with: {
      volume: true,
      repository: true,
    },
  });
  return schedules;
};

const getSchedule = async (scheduleId: number) => {
  const schedule = await db.query.backupSchedulesTable.findFirst({
    where: eq(backupSchedulesTable.id, scheduleId),
    with: {
      volume: true,
      repository: true,
    },
  });

  if (!schedule) {
    throw new NotFoundError("Backup schedule not found");
  }

  return schedule;
};

const createSchedule = async (data: CreateBackupScheduleBody) => {
  if (!cron.validate(data.cronExpression)) {
    throw new BadRequestError("Invalid cron expression");
  }

  const volume = await db.query.volumesTable.findFirst({
    where: eq(volumesTable.id, data.volumeId),
  });

  if (!volume) {
    throw new NotFoundError("Volume not found");
  }

  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.id, data.repositoryId),
  });

  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  const nextBackupAt = calculateNextRun(data.cronExpression);

  const [newSchedule] = await db
    .insert(backupSchedulesTable)
    .values({
      volumeId: data.volumeId,
      repositoryId: data.repositoryId,
      enabled: data.enabled,
      cronExpression: data.cronExpression,
      retentionPolicy: data.retentionPolicy ?? null,
      excludePatterns: data.excludePatterns ?? [],
      includePatterns: data.includePatterns ?? [],
      nextBackupAt: nextBackupAt,
    })
    .returning();

  if (!newSchedule) {
    throw new Error("Failed to create backup schedule");
  }

  return newSchedule;
};

const updateSchedule = async (scheduleId: number, data: UpdateBackupScheduleBody) => {
  const schedule = await db.query.backupSchedulesTable.findFirst({
    where: eq(backupSchedulesTable.id, scheduleId),
  });

  if (!schedule) {
    throw new NotFoundError("Backup schedule not found");
  }

  if (data.cronExpression && !cron.validate(data.cronExpression)) {
    throw new BadRequestError("Invalid cron expression");
  }

  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.id, data.repositoryId),
  });

  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  const cronExpression = data.cronExpression ?? schedule.cronExpression;
  const nextBackupAt = data.cronExpression ? calculateNextRun(cronExpression) : schedule.nextBackupAt;

  const [updated] = await db
    .update(backupSchedulesTable)
    .set({ ...data, nextBackupAt, updatedAt: Date.now() })
    .where(eq(backupSchedulesTable.id, scheduleId))
    .returning();

  if (!updated) {
    throw new Error("Failed to update backup schedule");
  }

  return updated;
};

const deleteSchedule = async (scheduleId: number) => {
  const schedule = await db.query.backupSchedulesTable.findFirst({
    where: eq(backupSchedulesTable.id, scheduleId),
  });

  if (!schedule) {
    throw new NotFoundError("Backup schedule not found");
  }

  await db.delete(backupSchedulesTable).where(eq(backupSchedulesTable.id, scheduleId));
};

const executeBackup = async (scheduleId: number, manual = false) => {
  const schedule = await db.query.backupSchedulesTable.findFirst({
    where: eq(backupSchedulesTable.id, scheduleId),
  });

  if (!schedule) {
    throw new NotFoundError("Backup schedule not found");
  }

  if (!schedule.enabled && !manual) {
    logger.info(`Backup schedule ${scheduleId} is disabled. Skipping execution.`);
    return;
  }

  if (schedule.lastBackupStatus === "in_progress") {
    logger.info(`Backup schedule ${scheduleId} is already in progress. Skipping execution.`);
    return;
  }

  const volume = await db.query.volumesTable.findFirst({
    where: eq(volumesTable.id, schedule.volumeId),
  });

  if (!volume) {
    throw new NotFoundError("Volume not found");
  }

  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.id, schedule.repositoryId),
  });

  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  if (volume.status !== "mounted") {
    throw new BadRequestError("Volume is not mounted");
  }

  logger.info(`Starting backup for volume ${volume.name} to repository ${repository.name}`);

  serverEvents.emit("backup:started", {
    scheduleId,
    volumeName: volume.name,
    repositoryName: repository.name,
  });

  await db
    .update(backupSchedulesTable)
    .set({ lastBackupStatus: "in_progress", updatedAt: Date.now(), lastBackupError: null })
    .where(eq(backupSchedulesTable.id, scheduleId));

  const abortController = new AbortController();
  runningBackups.set(scheduleId, abortController);

  try {
    const volumePath = getVolumePath(volume);

    const backupOptions: {
      exclude?: string[];
      include?: string[];
      tags?: string[];
      signal?: AbortSignal;
    } = {
      tags: [schedule.id.toString()],
      signal: abortController.signal,
    };

    if (schedule.excludePatterns && schedule.excludePatterns.length > 0) {
      backupOptions.exclude = schedule.excludePatterns;
    }

    if (schedule.includePatterns && schedule.includePatterns.length > 0) {
      backupOptions.include = schedule.includePatterns;
    }

    await restic.backup(repository.config, volumePath, {
      ...backupOptions,
      onProgress: (progress) => {
        serverEvents.emit("backup:progress", {
          scheduleId,
          volumeName: volume.name,
          repositoryName: repository.name,
          ...progress,
        });
      },
    });

    if (schedule.retentionPolicy) {
      await restic.forget(repository.config, schedule.retentionPolicy, { tag: schedule.id.toString() });
    }

    const nextBackupAt = calculateNextRun(schedule.cronExpression);
    await db
      .update(backupSchedulesTable)
      .set({
        lastBackupAt: Date.now(),
        lastBackupStatus: "success",
        lastBackupError: null,
        nextBackupAt: nextBackupAt,
        updatedAt: Date.now(),
      })
      .where(eq(backupSchedulesTable.id, scheduleId));

    logger.info(`Backup completed successfully for volume ${volume.name} to repository ${repository.name}`);

    serverEvents.emit("backup:completed", {
      scheduleId,
      volumeName: volume.name,
      repositoryName: repository.name,
      status: "success",
    });
  } catch (error) {
    logger.error(`Backup failed for volume ${volume.name} to repository ${repository.name}: ${toMessage(error)}`);

    await db
      .update(backupSchedulesTable)
      .set({
        lastBackupAt: Date.now(),
        lastBackupStatus: "error",
        lastBackupError: toMessage(error),
        updatedAt: Date.now(),
      })
      .where(eq(backupSchedulesTable.id, scheduleId));

    serverEvents.emit("backup:completed", {
      scheduleId,
      volumeName: volume.name,
      repositoryName: repository.name,
      status: "error",
    });

    throw error;
  } finally {
    runningBackups.delete(scheduleId);
  }
};

const getSchedulesToExecute = async () => {
  const now = Date.now();
  const schedules = await db.query.backupSchedulesTable.findMany({
    where: eq(backupSchedulesTable.enabled, true),
  });

  const schedulesToRun: number[] = [];

  for (const schedule of schedules) {
    if (!schedule.nextBackupAt || schedule.nextBackupAt <= now) {
      schedulesToRun.push(schedule.id);
    }
  }

  return schedulesToRun;
};

const getScheduleForVolume = async (volumeId: number) => {
  const schedule = await db.query.backupSchedulesTable.findFirst({
    where: eq(backupSchedulesTable.volumeId, volumeId),
    with: { volume: true, repository: true },
  });

  return schedule ?? null;
};

const stopBackup = async (scheduleId: number) => {
  const schedule = await db.query.backupSchedulesTable.findFirst({
    where: eq(backupSchedulesTable.id, scheduleId),
  });

  if (!schedule) {
    throw new NotFoundError("Backup schedule not found");
  }

  await db
    .update(backupSchedulesTable)
    .set({
      lastBackupStatus: "error",
      lastBackupError: "Backup was stopped by user",
      updatedAt: Date.now(),
    })
    .where(eq(backupSchedulesTable.id, scheduleId));

  const abortController = runningBackups.get(scheduleId);
  if (!abortController) {
    throw new ConflictError("No backup is currently running for this schedule");
  }

  logger.info(`Stopping backup for schedule ${scheduleId}`);

  abortController.abort();
};

export const backupsService = {
  listSchedules,
  getSchedule,
  createSchedule,
  updateSchedule,
  deleteSchedule,
  executeBackup,
  getSchedulesToExecute,
  getScheduleForVolume,
  stopBackup,
};
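calculateNextRun pins every schedule to UTC, so nextBackupAt is a plain epoch-millisecond value that getSchedulesToExecute can compare directly against Date.now(). A quick illustration of the cron-parser behaviour it relies on, with an arbitrary example date:

import { CronExpressionParser } from "cron-parser";

// Illustration: "0 3 * * *" fires daily at 03:00 UTC.
const interval = CronExpressionParser.parse("0 3 * * *", {
  currentDate: new Date("2025-01-01T12:00:00Z"),
  tz: "UTC",
});

console.log(interval.next().toDate().toISOString()); // 2025-01-02T03:00:00.000Z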
87
app/server/modules/driver/driver.controller.ts
Normal file
@@ -0,0 +1,87 @@
import { Hono } from "hono";
import { volumeService } from "../volumes/volume.service";
import { getVolumePath } from "../volumes/helpers";

export const driverController = new Hono()
  .post("/VolumeDriver.Capabilities", (c) => {
    return c.json({
      Capabilities: {
        Scope: "global",
      },
    });
  })
  .post("/Plugin.Activate", (c) => {
    return c.json({
      Implements: ["VolumeDriver"],
    });
  })
  .post("/VolumeDriver.Create", (_) => {
    throw new Error("Volume creation is not supported via the driver");
  })
  .post("/VolumeDriver.Remove", (c) => {
    return c.json({
      Err: "",
    });
  })
  .post("/VolumeDriver.Mount", async (c) => {
    const body = await c.req.json();

    if (!body.Name) {
      return c.json({ Err: "Volume name is required" }, 400);
    }

    const volumeName = body.Name.replace(/^im-/, "");

    return c.json({
      Mountpoint: getVolumePath(volumeName),
    });
  })
  .post("/VolumeDriver.Unmount", (c) => {
    return c.json({
      Err: "",
    });
  })
  .post("/VolumeDriver.Path", async (c) => {
    const body = await c.req.json();

    if (!body.Name) {
      return c.json({ Err: "Volume name is required" }, 400);
    }

    const { volume } = await volumeService.getVolume(body.Name.replace(/^im-/, ""));

    return c.json({
      Mountpoint: getVolumePath(volume),
    });
  })
  .post("/VolumeDriver.Get", async (c) => {
    const body = await c.req.json();

    if (!body.Name) {
      return c.json({ Err: "Volume name is required" }, 400);
    }

    const { volume } = await volumeService.getVolume(body.Name.replace(/^im-/, ""));

    return c.json({
      Volume: {
        Name: `im-${volume.name}`,
        Mountpoint: getVolumePath(volume),
        Status: {},
      },
      Err: "",
    });
  })
  .post("/VolumeDriver.List", async (c) => {
    const volumes = await volumeService.listVolumes();

    const res = volumes.map((volume) => ({
      Name: `im-${volume.name}`,
      Mountpoint: getVolumePath(volume),
      Status: {},
    }));

    return c.json({
      Volumes: res,
    });
  });
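This controller speaks Docker's volume plugin protocol: Docker POSTs JSON to these endpoints over a unix socket and expects an empty Err string on success. A hedged smoke-test sketch using Bun's unix-socket fetch; the socket path below is an assumption (the real one comes from SOCKET_PATH in core/constants, whose value is not shown in this diff):

// Hedged sketch: Bun's fetch accepts a `unix` option for plugin-style sockets.
const response = await fetch("http://localhost/VolumeDriver.Get", {
  unix: "/run/docker/plugins/ironmount.sock", // assumed path
  method: "POST",
  body: JSON.stringify({ Name: "im-myvolume" }),
});

console.log(await response.json()); // { Volume: { Name, Mountpoint, Status }, Err: "" }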
101
app/server/modules/events/events.controller.ts
Normal file
@@ -0,0 +1,101 @@
import { Hono } from "hono";
import { streamSSE } from "hono/streaming";
import { logger } from "../../utils/logger";
import { serverEvents } from "../../core/events";

export const eventsController = new Hono().get("/", (c) => {
  logger.info("Client connected to SSE endpoint");

  return streamSSE(c, async (stream) => {
    await stream.writeSSE({
      data: JSON.stringify({ type: "connected", timestamp: Date.now() }),
      event: "connected",
    });

    const onBackupStarted = (data: { scheduleId: number; volumeName: string; repositoryName: string }) => {
      stream.writeSSE({
        data: JSON.stringify(data),
        event: "backup:started",
      });
    };

    const onBackupProgress = (data: {
      scheduleId: number;
      volumeName: string;
      repositoryName: string;
      seconds_elapsed: number;
      percent_done: number;
      total_files: number;
      files_done: number;
      total_bytes: number;
      bytes_done: number;
      current_files: string[];
    }) => {
      stream.writeSSE({
        data: JSON.stringify(data),
        event: "backup:progress",
      });
    };

    const onBackupCompleted = (data: {
      scheduleId: number;
      volumeName: string;
      repositoryName: string;
      status: "success" | "error" | "stopped";
    }) => {
      stream.writeSSE({
        data: JSON.stringify(data),
        event: "backup:completed",
      });
    };

    const onVolumeMounted = (data: { volumeName: string }) => {
      stream.writeSSE({
        data: JSON.stringify(data),
        event: "volume:mounted",
      });
    };

    const onVolumeUnmounted = (data: { volumeName: string }) => {
      stream.writeSSE({
        data: JSON.stringify(data),
        event: "volume:unmounted",
      });
    };

    const onVolumeUpdated = (data: { volumeName: string }) => {
      stream.writeSSE({
        data: JSON.stringify(data),
        event: "volume:updated",
      });
    };

    serverEvents.on("backup:started", onBackupStarted);
    serverEvents.on("backup:progress", onBackupProgress);
    serverEvents.on("backup:completed", onBackupCompleted);
    serverEvents.on("volume:mounted", onVolumeMounted);
    serverEvents.on("volume:unmounted", onVolumeUnmounted);
    serverEvents.on("volume:updated", onVolumeUpdated);

    let keepAlive = true;

    stream.onAbort(() => {
      logger.info("Client disconnected from SSE endpoint");
      keepAlive = false;
      serverEvents.off("backup:started", onBackupStarted);
      serverEvents.off("backup:progress", onBackupProgress);
      serverEvents.off("backup:completed", onBackupCompleted);
      serverEvents.off("volume:mounted", onVolumeMounted);
      serverEvents.off("volume:unmounted", onVolumeUnmounted);
      serverEvents.off("volume:updated", onVolumeUpdated);
    });

    while (keepAlive) {
      await stream.writeSSE({
        data: JSON.stringify({ timestamp: Date.now() }),
        event: "heartbeat",
      });
      await stream.sleep(5000);
    }
  });
});
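On the browser side these named events map directly onto an EventSource subscription, and the five-second heartbeat keeps idle proxies from closing the connection. A hedged client sketch; the /api/events URL is an assumption, since the controller's mount point is not part of this diff:

// Hedged sketch: adjust the URL to wherever eventsController is mounted.
const source = new EventSource("/api/events");

source.addEventListener("backup:progress", (e) => {
  const data = JSON.parse((e as MessageEvent).data);
  console.log(`${data.volumeName}: ${(data.percent_done * 100).toFixed(1)}%`);
});

source.addEventListener("backup:completed", (e) => {
  const data = JSON.parse((e as MessageEvent).data);
  console.log(`Backup for ${data.volumeName} finished with status ${data.status}`);
});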
28
app/server/modules/lifecycle/shutdown.ts
Normal file
@@ -0,0 +1,28 @@
import { Scheduler } from "../../core/scheduler";
import { eq } from "drizzle-orm";
import { db } from "../../db/db";
import { volumesTable } from "../../db/schema";
import { logger } from "../../utils/logger";
import { SOCKET_PATH } from "../../core/constants";
import { createVolumeBackend } from "../backends/backend";

export const shutdown = async () => {
  await Scheduler.stop();

  await Bun.file(SOCKET_PATH)
    .delete()
    .catch(() => {
      // Ignore errors if the socket file does not exist
    });

  const volumes = await db.query.volumesTable.findMany({
    where: eq(volumesTable.status, "mounted"),
  });

  for (const volume of volumes) {
    const backend = createVolumeBackend(volume);
    const { status, error } = await backend.unmount();

    logger.info(`Volume ${volume.name} unmount status: ${status}${error ? `, error: ${error}` : ""}`);
  }
};
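For the unmount-on-exit logic to run, shutdown has to be wired to the process signals. A minimal hedged sketch of that wiring; the actual entry point is not part of this diff and the import path is assumed:

import { shutdown } from "./modules/lifecycle/shutdown"; // assumed path

// Hedged sketch of how an entry point might invoke the lifecycle hook.
for (const signal of ["SIGINT", "SIGTERM"] as const) {
  process.on(signal, async () => {
    await shutdown();
    process.exit(0);
  });
}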
39
app/server/modules/lifecycle/startup.ts
Normal file
@@ -0,0 +1,39 @@
import { Scheduler } from "../../core/scheduler";
import { and, eq, or } from "drizzle-orm";
import { db } from "../../db/db";
import { volumesTable } from "../../db/schema";
import { logger } from "../../utils/logger";
import { restic } from "../../utils/restic";
import { volumeService } from "../volumes/volume.service";
import { CleanupDanglingMountsJob } from "../../jobs/cleanup-dangling";
import { VolumeHealthCheckJob } from "../../jobs/healthchecks";
import { RepositoryHealthCheckJob } from "../../jobs/repository-healthchecks";
import { BackupExecutionJob } from "../../jobs/backup-execution";
import { CleanupSessionsJob } from "../../jobs/cleanup-sessions";

export const startup = async () => {
  await Scheduler.start();

  await restic.ensurePassfile().catch((err) => {
    logger.error(`Error ensuring restic passfile exists: ${err.message}`);
  });

  const volumes = await db.query.volumesTable.findMany({
    where: or(
      eq(volumesTable.status, "mounted"),
      and(eq(volumesTable.autoRemount, true), eq(volumesTable.status, "error")),
    ),
  });

  for (const volume of volumes) {
    await volumeService.mountVolume(volume.name).catch((err) => {
      logger.error(`Error auto-remounting volume ${volume.name} on startup: ${err.message}`);
    });
  }

  Scheduler.build(CleanupDanglingMountsJob).schedule("0 * * * *");
  Scheduler.build(VolumeHealthCheckJob).schedule("*/5 * * * *");
  Scheduler.build(RepositoryHealthCheckJob).schedule("*/10 * * * *");
  Scheduler.build(BackupExecutionJob).schedule("* * * * *");
  Scheduler.build(CleanupSessionsJob).schedule("0 0 * * *");
};
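The cron strings at the end stagger the background work: dangling-mount cleanup hourly, volume health checks every five minutes, repository checks every ten, the backup-execution poller every minute, and session cleanup at midnight. A hedged sketch of the job shape Scheduler.build appears to expect; the interface is inferred from these call sites, not defined anywhere in this diff:

// Hypothetical job shape, inferred from the Scheduler.build(...).schedule(...) calls above.
interface Job {
  name: string;
  run(): Promise<void>;
}

const HelloJob: Job = {
  name: "hello",
  async run() {
    console.log("tick");
  },
};

// e.g. Scheduler.build(HelloJob).schedule("*/5 * * * *");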
145
app/server/modules/repositories/repositories.controller.ts
Normal file
@@ -0,0 +1,145 @@
import { Hono } from "hono";
import { validator } from "hono-openapi";
import {
  createRepositoryBody,
  createRepositoryDto,
  deleteRepositoryDto,
  doctorRepositoryDto,
  getRepositoryDto,
  getSnapshotDetailsDto,
  listRcloneRemotesDto,
  listRepositoriesDto,
  listSnapshotFilesDto,
  listSnapshotFilesQuery,
  listSnapshotsDto,
  listSnapshotsFilters,
  restoreSnapshotBody,
  restoreSnapshotDto,
  type DeleteRepositoryDto,
  type DoctorRepositoryDto,
  type GetRepositoryDto,
  type GetSnapshotDetailsDto,
  type ListRepositoriesDto,
  type ListSnapshotFilesDto,
  type ListSnapshotsDto,
  type RestoreSnapshotDto,
} from "./repositories.dto";
import { repositoriesService } from "./repositories.service";
import { getRcloneRemoteInfo, listRcloneRemotes } from "../../utils/rclone";

export const repositoriesController = new Hono()
  .get("/", listRepositoriesDto, async (c) => {
    const repositories = await repositoriesService.listRepositories();

    return c.json<ListRepositoriesDto>(repositories, 200);
  })
  .post("/", createRepositoryDto, validator("json", createRepositoryBody), async (c) => {
    const body = c.req.valid("json");
    const res = await repositoriesService.createRepository(body.name, body.config, body.compressionMode);

    return c.json({ message: "Repository created", repository: res.repository }, 201);
  })
  .get("/rclone-remotes", listRcloneRemotesDto, async (c) => {
    const remoteNames = await listRcloneRemotes();

    const remotes = await Promise.all(
      remoteNames.map(async (name) => {
        const info = await getRcloneRemoteInfo(name);
        return {
          name,
          type: info?.type ?? "unknown",
        };
      }),
    );

    return c.json(remotes);
  })
  .get("/:name", getRepositoryDto, async (c) => {
    const { name } = c.req.param();
    const res = await repositoriesService.getRepository(name);

    return c.json<GetRepositoryDto>(res.repository, 200);
  })
  .delete("/:name", deleteRepositoryDto, async (c) => {
    const { name } = c.req.param();
    await repositoriesService.deleteRepository(name);

    return c.json<DeleteRepositoryDto>({ message: "Repository deleted" }, 200);
  })
  .get("/:name/snapshots", listSnapshotsDto, validator("query", listSnapshotsFilters), async (c) => {
    const { name } = c.req.param();
    const { backupId } = c.req.valid("query");

    const res = await repositoriesService.listSnapshots(name, backupId);

    const snapshots = res.map((snapshot) => {
      const { summary } = snapshot;

      let duration = 0;
      if (summary) {
        const { backup_start, backup_end } = summary;
        duration = new Date(backup_end).getTime() - new Date(backup_start).getTime();
      }

      return {
        short_id: snapshot.short_id,
        duration,
        paths: snapshot.paths,
        size: summary?.total_bytes_processed || 0,
        time: new Date(snapshot.time).getTime(),
      };
    });

    return c.json<ListSnapshotsDto>(snapshots, 200);
  })
  .get("/:name/snapshots/:snapshotId", getSnapshotDetailsDto, async (c) => {
    const { name, snapshotId } = c.req.param();
    const snapshot = await repositoriesService.getSnapshotDetails(name, snapshotId);

    let duration = 0;
    if (snapshot.summary) {
      const { backup_start, backup_end } = snapshot.summary;
      duration = new Date(backup_end).getTime() - new Date(backup_start).getTime();
    }

    const response = {
      short_id: snapshot.short_id,
      duration,
      time: new Date(snapshot.time).getTime(),
      paths: snapshot.paths,
      size: snapshot.summary?.total_bytes_processed || 0,
      summary: snapshot.summary,
    };

    return c.json<GetSnapshotDetailsDto>(response, 200);
  })
  .get(
    "/:name/snapshots/:snapshotId/files",
    listSnapshotFilesDto,
    validator("query", listSnapshotFilesQuery),
    async (c) => {
      const { name, snapshotId } = c.req.param();
      const { path } = c.req.valid("query");

      const result = await repositoriesService.listSnapshotFiles(name, snapshotId, path);

      c.header("Cache-Control", "max-age=300, stale-while-revalidate=600");

      return c.json<ListSnapshotFilesDto>(result, 200);
    },
  )
  .post("/:name/restore", restoreSnapshotDto, validator("json", restoreSnapshotBody), async (c) => {
    const { name } = c.req.param();
    const { snapshotId, ...options } = c.req.valid("json");

    const result = await repositoriesService.restoreSnapshot(name, snapshotId, options);

    return c.json<RestoreSnapshotDto>(result, 200);
  })
  .post("/:name/doctor", doctorRepositoryDto, async (c) => {
    const { name } = c.req.param();

    const result = await repositoriesService.doctorRepository(name);

    return c.json<DoctorRepositoryDto>(result, 200);
  });
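The snapshot-files route is the only one here that sets a Cache-Control header, which makes sense because a snapshot's contents never change once written. A hedged client sketch of browsing a snapshot; the route prefix and the ids are illustrative assumptions:

// Hedged sketch: prefix assumed; snapshot ids come from the snapshots listing.
const res = await fetch(
  `/api/repositories/my-repo/snapshots/1a2b3c4d/files?path=${encodeURIComponent("/data")}`,
);
const { snapshot, files } = await res.json();

for (const file of files) {
  console.log(`${file.type}\t${file.path}`);
}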
328
app/server/modules/repositories/repositories.dto.ts
Normal file
@@ -0,0 +1,328 @@
import { type } from "arktype";
import { describeRoute, resolver } from "hono-openapi";
import { COMPRESSION_MODES, REPOSITORY_BACKENDS, REPOSITORY_STATUS, repositoryConfigSchema } from "~/schemas/restic";

export const repositorySchema = type({
  id: "string",
  name: "string",
  type: type.valueOf(REPOSITORY_BACKENDS),
  config: repositoryConfigSchema,
  compressionMode: type.valueOf(COMPRESSION_MODES).or("null"),
  status: type.valueOf(REPOSITORY_STATUS).or("null"),
  lastChecked: "number | null",
  lastError: "string | null",
  createdAt: "number",
  updatedAt: "number",
});

export type RepositoryDto = typeof repositorySchema.infer;

/**
 * List all repositories
 */
export const listRepositoriesResponse = repositorySchema.array();
export type ListRepositoriesDto = typeof listRepositoriesResponse.infer;

export const listRepositoriesDto = describeRoute({
  description: "List all repositories",
  tags: ["Repositories"],
  operationId: "listRepositories",
  responses: {
    200: {
      description: "List of repositories",
      content: {
        "application/json": {
          schema: resolver(listRepositoriesResponse),
        },
      },
    },
  },
});

/**
 * Create a new repository
 */
export const createRepositoryBody = type({
  name: "string",
  compressionMode: type.valueOf(COMPRESSION_MODES).optional(),
  config: repositoryConfigSchema,
});

export type CreateRepositoryBody = typeof createRepositoryBody.infer;

export const createRepositoryResponse = type({
  message: "string",
  repository: type({
    id: "string",
    name: "string",
  }),
});

export type CreateRepositoryDto = typeof createRepositoryResponse.infer;

export const createRepositoryDto = describeRoute({
  description: "Create a new restic repository",
  operationId: "createRepository",
  tags: ["Repositories"],
  responses: {
    201: {
      description: "Repository created successfully",
      content: {
        "application/json": {
          schema: resolver(createRepositoryResponse),
        },
      },
    },
  },
});

/**
 * Get a single repository
 */
export const getRepositoryResponse = repositorySchema;
export type GetRepositoryDto = typeof getRepositoryResponse.infer;

export const getRepositoryDto = describeRoute({
  description: "Get a single repository by name",
  tags: ["Repositories"],
  operationId: "getRepository",
  responses: {
    200: {
      description: "Repository details",
      content: {
        "application/json": {
          schema: resolver(getRepositoryResponse),
        },
      },
    },
  },
});

/**
 * Delete a repository
 */
export const deleteRepositoryResponse = type({
  message: "string",
});

export type DeleteRepositoryDto = typeof deleteRepositoryResponse.infer;

export const deleteRepositoryDto = describeRoute({
  description: "Delete a repository",
  tags: ["Repositories"],
  operationId: "deleteRepository",
  responses: {
    200: {
      description: "Repository deleted successfully",
      content: {
        "application/json": {
          schema: resolver(deleteRepositoryResponse),
        },
      },
    },
  },
});

/**
 * List snapshots in a repository
 */
export const snapshotSchema = type({
  short_id: "string",
  time: "number",
  paths: "string[]",
  size: "number",
  duration: "number",
});

const listSnapshotsResponse = snapshotSchema.array();

export type ListSnapshotsDto = typeof listSnapshotsResponse.infer;

export const listSnapshotsFilters = type({
  backupId: "string?",
});

export const listSnapshotsDto = describeRoute({
  description: "List all snapshots in a repository",
  tags: ["Repositories"],
  operationId: "listSnapshots",
  responses: {
    200: {
      description: "List of snapshots",
      content: {
        "application/json": {
          schema: resolver(listSnapshotsResponse),
        },
      },
    },
  },
});

/**
 * Get snapshot details
 */
export const getSnapshotDetailsResponse = snapshotSchema;

export type GetSnapshotDetailsDto = typeof getSnapshotDetailsResponse.infer;

export const getSnapshotDetailsDto = describeRoute({
  description: "Get details of a specific snapshot",
  tags: ["Repositories"],
  operationId: "getSnapshotDetails",
  responses: {
    200: {
      description: "Snapshot details",
      content: {
        "application/json": {
          schema: resolver(getSnapshotDetailsResponse),
        },
      },
    },
  },
});

/**
 * List files in a snapshot
 */
export const snapshotFileNodeSchema = type({
  name: "string",
  type: "string",
  path: "string",
  uid: "number?",
  gid: "number?",
  size: "number?",
  mode: "number?",
  mtime: "string?",
  atime: "string?",
  ctime: "string?",
});

export const listSnapshotFilesResponse = type({
  snapshot: type({
    id: "string",
    short_id: "string",
    time: "string",
    hostname: "string",
    paths: "string[]",
  }),
  files: snapshotFileNodeSchema.array(),
});

export type ListSnapshotFilesDto = typeof listSnapshotFilesResponse.infer;

export const listSnapshotFilesQuery = type({
  path: "string?",
});

export const listSnapshotFilesDto = describeRoute({
  description: "List files and directories in a snapshot",
  tags: ["Repositories"],
  operationId: "listSnapshotFiles",
  responses: {
    200: {
      description: "List of files and directories in the snapshot",
      content: {
        "application/json": {
          schema: resolver(listSnapshotFilesResponse),
        },
      },
    },
  },
});

/**
 * Restore a snapshot
 */
export const restoreSnapshotBody = type({
  snapshotId: "string",
  include: "string[]?",
  exclude: "string[]?",
  delete: "boolean?",
});

export type RestoreSnapshotBody = typeof restoreSnapshotBody.infer;

export const restoreSnapshotResponse = type({
  success: "boolean",
  message: "string",
  filesRestored: "number",
  filesSkipped: "number",
});

export type RestoreSnapshotDto = typeof restoreSnapshotResponse.infer;

export const restoreSnapshotDto = describeRoute({
  description: "Restore a snapshot to a target path on the filesystem",
  tags: ["Repositories"],
  operationId: "restoreSnapshot",
  responses: {
    200: {
      description: "Snapshot restored successfully",
      content: {
        "application/json": {
          schema: resolver(restoreSnapshotResponse),
        },
      },
    },
  },
});

/**
 * Doctor a repository (unlock, check, repair)
 */
export const doctorStepSchema = type({
  step: "string",
  success: "boolean",
  output: "string | null",
  error: "string | null",
});

export const doctorRepositoryResponse = type({
  success: "boolean",
  steps: doctorStepSchema.array(),
});

export type DoctorRepositoryDto = typeof doctorRepositoryResponse.infer;

export const doctorRepositoryDto = describeRoute({
  description:
    "Run doctor operations on a repository to fix common issues (unlock, check, repair index). Use this when the repository is locked or has errors.",
  tags: ["Repositories"],
  operationId: "doctorRepository",
  responses: {
    200: {
      description: "Doctor operation completed",
      content: {
        "application/json": {
          schema: resolver(doctorRepositoryResponse),
        },
      },
    },
  },
});

/**
 * List rclone available remotes
 */
const rcloneRemoteSchema = type({
  name: "string",
  type: "string",
});

const listRcloneRemotesResponse = rcloneRemoteSchema.array();

export const listRcloneRemotesDto = describeRoute({
  description: "List all configured rclone remotes on the host system",
  tags: ["Rclone"],
  operationId: "listRcloneRemotes",
  responses: {
    200: {
      description: "List of rclone remotes",
      content: {
        "application/json": {
          schema: resolver(listRcloneRemotesResponse),
        },
      },
    },
  },
});
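type.valueOf derives a literal union from the values of a const lookup such as REPOSITORY_BACKENDS, keeping the OpenAPI schema and the TypeScript type in lockstep. A hedged miniature of the pattern with a made-up constant; the shape of REPOSITORY_BACKENDS itself is not shown in this diff:

import { type } from "arktype";

// Made-up constant mirroring how REPOSITORY_BACKENDS is presumably declared.
const COLORS = { red: "red", blue: "blue" } as const;

const colorSchema = type.valueOf(COLORS); // validates "red" | "blue"
type Color = typeof colorSchema.infer;

const ok = colorSchema("red"); // passes
const bad = colorSchema("green"); // returns a type.errors instance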
328
app/server/modules/repositories/repositories.service.ts
Normal file
@@ -0,0 +1,328 @@
import crypto from "node:crypto";
import { eq } from "drizzle-orm";
import { ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced";
import slugify from "slugify";
import { db } from "../../db/db";
import { repositoriesTable } from "../../db/schema";
import { toMessage } from "../../utils/errors";
import { restic } from "../../utils/restic";
import { cryptoUtils } from "../../utils/crypto";
import type { CompressionMode, RepositoryConfig } from "~/schemas/restic";

const listRepositories = async () => {
  const repositories = await db.query.repositoriesTable.findMany({});
  return repositories;
};

const encryptConfig = async (config: RepositoryConfig): Promise<RepositoryConfig> => {
  const encryptedConfig: Record<string, string> = { ...config };

  switch (config.backend) {
    case "s3":
      encryptedConfig.accessKeyId = await cryptoUtils.encrypt(config.accessKeyId);
      encryptedConfig.secretAccessKey = await cryptoUtils.encrypt(config.secretAccessKey);
      break;
    case "gcs":
      encryptedConfig.credentialsJson = await cryptoUtils.encrypt(config.credentialsJson);
      break;
    case "azure":
      encryptedConfig.accountKey = await cryptoUtils.encrypt(config.accountKey);
      break;
  }

  return encryptedConfig as RepositoryConfig;
};

const createRepository = async (name: string, config: RepositoryConfig, compressionMode?: CompressionMode) => {
  const slug = slugify(name, { lower: true, strict: true });

  const existing = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.name, slug),
  });

  if (existing) {
    throw new ConflictError("Repository with this name already exists");
  }

  const id = crypto.randomUUID();

  const encryptedConfig = await encryptConfig(config);

  const [created] = await db
    .insert(repositoriesTable)
    .values({
      id,
      name: slug,
      type: config.backend,
      config: encryptedConfig,
      compressionMode: compressionMode ?? "auto",
      status: "unknown",
    })
    .returning();

  if (!created) {
    throw new InternalServerError("Failed to create repository");
  }

  const { success, error } = await restic.init(encryptedConfig);

  if (success) {
    await db
      .update(repositoriesTable)
      .set({
        status: "healthy",
        lastChecked: Date.now(),
        lastError: null,
      })
      .where(eq(repositoriesTable.id, id));

    return { repository: created, status: 201 };
  }

  const errorMessage = toMessage(error);
  await db.delete(repositoriesTable).where(eq(repositoriesTable.id, id));

  throw new InternalServerError(`Failed to initialize repository: ${errorMessage}`);
};

const getRepository = async (name: string) => {
  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.name, name),
  });

  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  return { repository };
};

const deleteRepository = async (name: string) => {
  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.name, name),
  });

  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  // TODO: Add cleanup logic for the actual restic repository files

  await db.delete(repositoriesTable).where(eq(repositoriesTable.name, name));
};

/**
 * List snapshots for a given repository.
 * If backupId is provided, filter snapshots by that backup ID (tag).
 * @param name Repository name
 * @param backupId Optional backup ID to filter snapshots for a specific backup schedule
 *
 * @returns List of snapshots
 */
const listSnapshots = async (name: string, backupId?: string) => {
  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.name, name),
  });

  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  const snapshots = backupId
    ? await restic.snapshots(repository.config, { tags: [backupId] })
    : await restic.snapshots(repository.config);

  return snapshots;
};

const listSnapshotFiles = async (name: string, snapshotId: string, path?: string) => {
  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.name, name),
  });

  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  const result = await restic.ls(repository.config, snapshotId, path);

  if (!result.snapshot) {
    throw new NotFoundError("Snapshot not found or empty");
  }

  return {
    snapshot: {
      id: result.snapshot.id,
      short_id: result.snapshot.short_id,
      time: result.snapshot.time,
      hostname: result.snapshot.hostname,
      paths: result.snapshot.paths,
    },
    files: result.nodes,
  };
};

const restoreSnapshot = async (
  name: string,
  snapshotId: string,
  options?: { include?: string[]; exclude?: string[]; delete?: boolean },
) => {
  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.name, name),
  });

  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  const result = await restic.restore(repository.config, snapshotId, "/", options);

  return {
    success: true,
    message: "Snapshot restored successfully",
    filesRestored: result.files_restored,
    filesSkipped: result.files_skipped,
  };
};

const getSnapshotDetails = async (name: string, snapshotId: string) => {
  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.name, name),
  });

  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  const snapshots = await restic.snapshots(repository.config);
  const snapshot = snapshots.find((snap) => snap.id === snapshotId || snap.short_id === snapshotId);

  if (!snapshot) {
    throw new NotFoundError("Snapshot not found");
  }

  return snapshot;
};

const checkHealth = async (repositoryId: string) => {
  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.id, repositoryId),
  });

  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  const { error, status } = await restic
    .snapshots(repository.config)
    .then(() => ({ error: null, status: "healthy" as const }))
    .catch((error) => ({ error: toMessage(error), status: "error" as const }));

  await db
    .update(repositoriesTable)
    .set({
      status,
      lastChecked: Date.now(),
      lastError: error,
    })
    .where(eq(repositoriesTable.id, repository.id));

  return { status, lastError: error };
};

const doctorRepository = async (name: string) => {
  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.name, name),
  });

  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  const steps: Array<{ step: string; success: boolean; output: string | null; error: string | null }> = [];

  const unlockResult = await restic.unlock(repository.config).then(
    (result) => ({ success: true, message: result.message, error: null }),
    (error) => ({ success: false, message: null, error: toMessage(error) }),
  );

  steps.push({
||||
step: "unlock",
|
||||
success: unlockResult.success,
|
||||
output: unlockResult.message,
|
||||
error: unlockResult.error,
|
||||
});
|
||||
|
||||
const checkResult = await restic.check(repository.config, { readData: false }).then(
|
||||
(result) => result,
|
||||
(error) => ({ success: false, output: null, error: toMessage(error), hasErrors: true }),
|
||||
);
|
||||
|
||||
steps.push({
|
||||
step: "check",
|
||||
success: checkResult.success,
|
||||
output: checkResult.output,
|
||||
error: checkResult.error,
|
||||
});
|
||||
|
||||
if (checkResult.hasErrors) {
|
||||
const repairResult = await restic.repairIndex(repository.config).then(
|
||||
(result) => ({ success: true, output: result.output, error: null }),
|
||||
(error) => ({ success: false, output: null, error: toMessage(error) }),
|
||||
);
|
||||
|
||||
steps.push({
|
||||
step: "repair_index",
|
||||
success: repairResult.success,
|
||||
output: repairResult.output,
|
||||
error: repairResult.error,
|
||||
});
|
||||
|
||||
const recheckResult = await restic.check(repository.config, { readData: false }).then(
|
||||
(result) => result,
|
||||
(error) => ({ success: false, output: null, error: toMessage(error), hasErrors: true }),
|
||||
);
|
||||
|
||||
steps.push({
|
||||
step: "recheck",
|
||||
success: recheckResult.success,
|
||||
output: recheckResult.output,
|
||||
error: recheckResult.error,
|
||||
});
|
||||
}
|
||||
|
||||
const allSuccessful = steps.every((s) => s.success);
|
||||
|
||||
console.log("Doctor steps:", steps);
|
||||
|
||||
await db
|
||||
.update(repositoriesTable)
|
||||
.set({
|
||||
status: allSuccessful ? "healthy" : "error",
|
||||
lastChecked: Date.now(),
|
||||
lastError: allSuccessful ? null : steps.find((s) => !s.success)?.error,
|
||||
})
|
||||
.where(eq(repositoriesTable.id, repository.id));
|
||||
|
||||
return {
|
||||
success: allSuccessful,
|
||||
steps,
|
||||
};
|
||||
};
|
||||
|
||||
export const repositoriesService = {
|
||||
listRepositories,
|
||||
createRepository,
|
||||
getRepository,
|
||||
deleteRepository,
|
||||
listSnapshots,
|
||||
listSnapshotFiles,
|
||||
restoreSnapshot,
|
||||
getSnapshotDetails,
|
||||
checkHealth,
|
||||
doctorRepository,
|
||||
};
|
||||
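For context, a minimal sketch of how a controller might consume this service. The route path, mounting, and error handling here are illustrative assumptions, not part of this commit:

import { Hono } from "hono";
import { repositoriesService } from "./repositories.service";
import { handleServiceError } from "../../utils/errors";

// Hypothetical route: runs the doctor flow for a repository and returns the step log
export const doctorRoute = new Hono().post("/:name/doctor", async (c) => {
  try {
    const result = await repositoriesService.doctorRepository(c.req.param("name"));
    return c.json(result, result.success ? 200 : 500);
  } catch (error) {
    const { message, status } = handleServiceError(error);
    return c.json({ message }, status);
  }
});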
57
app/server/modules/system/system.controller.ts
Normal file
@@ -0,0 +1,57 @@
import { Hono } from "hono";
import { validator } from "hono-openapi";
import {
  downloadResticPasswordBodySchema,
  downloadResticPasswordDto,
  systemInfoDto,
  type SystemInfoDto,
} from "./system.dto";
import { systemService } from "./system.service";
import { requireAuth } from "../auth/auth.middleware";
import { RESTIC_PASS_FILE } from "../../core/constants";
import { db } from "../../db/db";
import { usersTable } from "../../db/schema";
import { eq } from "drizzle-orm";

export const systemController = new Hono()
  .get("/info", systemInfoDto, async (c) => {
    const info = await systemService.getSystemInfo();

    return c.json<SystemInfoDto>(info, 200);
  })
  .post(
    "/restic-password",
    downloadResticPasswordDto,
    requireAuth,
    validator("json", downloadResticPasswordBodySchema),
    async (c) => {
      const user = c.get("user");
      const body = c.req.valid("json");

      const [dbUser] = await db.select().from(usersTable).where(eq(usersTable.id, user.id));

      if (!dbUser) {
        return c.json({ message: "User not found" }, 401);
      }

      const isValid = await Bun.password.verify(body.password, dbUser.passwordHash);

      if (!isValid) {
        return c.json({ message: "Incorrect password" }, 401);
      }

      try {
        const file = Bun.file(RESTIC_PASS_FILE);
        const content = await file.text();

        await db.update(usersTable).set({ hasDownloadedResticPassword: true }).where(eq(usersTable.id, user.id));

        c.header("Content-Type", "text/plain");
        c.header("Content-Disposition", 'attachment; filename="restic.pass"');

        return c.text(content);
      } catch (_error) {
        return c.json({ message: "Failed to read Restic password file" }, 500);
      }
    },
  );
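A hedged client-side sketch of the download flow. The /api/system prefix is an assumption; the route mounting is not shown in this commit:

// Hypothetical client call: re-authenticates, then downloads restic.pass
const res = await fetch("/api/system/restic-password", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ password: "hunter2" }),
});
if (res.ok) {
  const passfile = await res.text(); // served as a text/plain attachment
}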
49
app/server/modules/system/system.dto.ts
Normal file
@@ -0,0 +1,49 @@
import { type } from "arktype";
import { describeRoute, resolver } from "hono-openapi";

export const capabilitiesSchema = type({
  docker: "boolean",
  rclone: "boolean",
});

export const systemInfoResponse = type({
  capabilities: capabilitiesSchema,
});

export type SystemInfoDto = typeof systemInfoResponse.infer;

export const systemInfoDto = describeRoute({
  description: "Get system information including available capabilities",
  tags: ["System"],
  operationId: "getSystemInfo",
  responses: {
    200: {
      description: "System information with enabled capabilities",
      content: {
        "application/json": {
          schema: resolver(systemInfoResponse),
        },
      },
    },
  },
});

export const downloadResticPasswordBodySchema = type({
  password: "string",
});

export const downloadResticPasswordDto = describeRoute({
  description: "Download the Restic password file for backup recovery. Requires password re-authentication.",
  tags: ["System"],
  operationId: "downloadResticPassword",
  responses: {
    200: {
      description: "Restic password file content",
      content: {
        "text/plain": {
          schema: { type: "string" },
        },
      },
    },
  },
});
11
app/server/modules/system/system.service.ts
Normal file
@@ -0,0 +1,11 @@
import { getCapabilities } from "../../core/capabilities";

const getSystemInfo = async () => {
  return {
    capabilities: await getCapabilities(),
  };
};

export const systemService = {
  getSystemInfo,
};
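A small usage sketch grounded in the DTOs above — the response carries docker and rclone booleans that callers can use to gate optional features:

import { systemService } from "./system.service";

// Skip Docker-dependent work when the capability is absent
const { capabilities } = await systemService.getSystemInfo();
if (!capabilities.docker) {
  console.warn("Docker integration disabled for this deployment");
}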
10
app/server/modules/volumes/helpers.ts
Normal file
@@ -0,0 +1,10 @@
import { VOLUME_MOUNT_BASE } from "../../core/constants";
import type { Volume } from "../../db/schema";

export const getVolumePath = (volume: Volume) => {
  if (volume.config.backend === "directory") {
    return volume.config.path;
  }

  return `${VOLUME_MOUNT_BASE}/${volume.name}/_data`;
};
137
app/server/modules/volumes/volume.controller.ts
Normal file
@@ -0,0 +1,137 @@
import { Hono } from "hono";
import { validator } from "hono-openapi";
import {
  createVolumeBody,
  createVolumeDto,
  deleteVolumeDto,
  getContainersDto,
  getVolumeDto,
  healthCheckDto,
  type ListVolumesDto,
  listFilesDto,
  listVolumesDto,
  mountVolumeDto,
  testConnectionBody,
  testConnectionDto,
  unmountVolumeDto,
  updateVolumeBody,
  updateVolumeDto,
  type CreateVolumeDto,
  type GetVolumeDto,
  type ListContainersDto,
  type UpdateVolumeDto,
  type ListFilesDto,
  browseFilesystemDto,
  type BrowseFilesystemDto,
} from "./volume.dto";
import { volumeService } from "./volume.service";
import { getVolumePath } from "./helpers";

export const volumeController = new Hono()
  .get("/", listVolumesDto, async (c) => {
    const volumes = await volumeService.listVolumes();

    return c.json<ListVolumesDto>(volumes, 200);
  })
  .post("/", createVolumeDto, validator("json", createVolumeBody), async (c) => {
    const body = c.req.valid("json");
    const res = await volumeService.createVolume(body.name, body.config);

    const response = {
      ...res.volume,
      path: getVolumePath(res.volume),
    };

    return c.json<CreateVolumeDto>(response, 201);
  })
  .post("/test-connection", testConnectionDto, validator("json", testConnectionBody), async (c) => {
    const body = c.req.valid("json");
    const result = await volumeService.testConnection(body.config);

    return c.json(result, 200);
  })
  .delete("/:name", deleteVolumeDto, async (c) => {
    const { name } = c.req.param();
    await volumeService.deleteVolume(name);

    return c.json({ message: "Volume deleted" }, 200);
  })
  .get("/:name", getVolumeDto, async (c) => {
    const { name } = c.req.param();
    const res = await volumeService.getVolume(name);

    const response = {
      volume: {
        ...res.volume,
        path: getVolumePath(res.volume),
      },
      statfs: {
        total: res.statfs.total ?? 0,
        used: res.statfs.used ?? 0,
        free: res.statfs.free ?? 0,
      },
    };

    return c.json<GetVolumeDto>(response, 200);
  })
  .get("/:name/containers", getContainersDto, async (c) => {
    const { name } = c.req.param();
    const { containers } = await volumeService.getContainersUsingVolume(name);

    return c.json<ListContainersDto>(containers, 200);
  })
  .put("/:name", updateVolumeDto, validator("json", updateVolumeBody), async (c) => {
    const { name } = c.req.param();
    const body = c.req.valid("json");
    const res = await volumeService.updateVolume(name, body);

    const response = {
      ...res.volume,
      path: getVolumePath(res.volume),
    };

    return c.json<UpdateVolumeDto>(response, 200);
  })
  .post("/:name/mount", mountVolumeDto, async (c) => {
    const { name } = c.req.param();
    const { error, status } = await volumeService.mountVolume(name);

    return c.json({ error, status }, error ? 500 : 200);
  })
  .post("/:name/unmount", unmountVolumeDto, async (c) => {
    const { name } = c.req.param();
    const { error, status } = await volumeService.unmountVolume(name);

    return c.json({ error, status }, error ? 500 : 200);
  })
  .post("/:name/health-check", healthCheckDto, async (c) => {
    const { name } = c.req.param();
    const { error, status } = await volumeService.checkHealth(name);

    return c.json({ error, status }, 200);
  })
  .get("/:name/files", listFilesDto, async (c) => {
    const { name } = c.req.param();
    const subPath = c.req.query("path");
    const result = await volumeService.listFiles(name, subPath);

    const response = {
      files: result.files,
      path: result.path,
    };

    c.header("Cache-Control", "public, max-age=10, stale-while-revalidate=60");

    return c.json<ListFilesDto>(response, 200);
  })
  .get("/filesystem/browse", browseFilesystemDto, async (c) => {
    const path = c.req.query("path") || "/";
    const result = await volumeService.browseFilesystem(path);

    const response = {
      directories: result.directories,
      path: result.path,
    };

    return c.json<BrowseFilesystemDto>(response, 200);
  });
373
app/server/modules/volumes/volume.dto.ts
Normal file
@@ -0,0 +1,373 @@
import { type } from "arktype";
import { describeRoute, resolver } from "hono-openapi";
import { BACKEND_STATUS, BACKEND_TYPES, volumeConfigSchema } from "~/schemas/volumes";

export const volumeSchema = type({
  id: "number",
  name: "string",
  type: type.valueOf(BACKEND_TYPES),
  status: type.valueOf(BACKEND_STATUS),
  lastError: "string | null",
  createdAt: "number",
  updatedAt: "number",
  lastHealthCheck: "number",
  config: volumeConfigSchema,
  autoRemount: "boolean",
});

export type VolumeDto = typeof volumeSchema.infer;

/**
 * List all volumes
 */
export const listVolumesResponse = volumeSchema.array();
export type ListVolumesDto = typeof listVolumesResponse.infer;

export const listVolumesDto = describeRoute({
  description: "List all volumes",
  tags: ["Volumes"],
  operationId: "listVolumes",
  responses: {
    200: {
      description: "A list of volumes",
      content: {
        "application/json": {
          schema: resolver(listVolumesResponse),
        },
      },
    },
  },
});

/**
 * Create a new volume
 */
export const createVolumeBody = type({
  name: "string",
  config: volumeConfigSchema,
});

export const createVolumeResponse = volumeSchema;
export type CreateVolumeDto = typeof createVolumeResponse.infer;

export const createVolumeDto = describeRoute({
  description: "Create a new volume",
  operationId: "createVolume",
  tags: ["Volumes"],
  responses: {
    201: {
      description: "Volume created successfully",
      content: {
        "application/json": {
          schema: resolver(createVolumeResponse),
        },
      },
    },
  },
});

/**
 * Delete a volume
 */
export const deleteVolumeResponse = type({
  message: "string",
});
export type DeleteVolumeDto = typeof deleteVolumeResponse.infer;

export const deleteVolumeDto = describeRoute({
  description: "Delete a volume",
  operationId: "deleteVolume",
  tags: ["Volumes"],
  responses: {
    200: {
      description: "Volume deleted successfully",
      content: {
        "application/json": {
          schema: resolver(deleteVolumeResponse),
        },
      },
    },
  },
});

const statfsSchema = type({
  total: "number",
  used: "number",
  free: "number",
});

const getVolumeResponse = type({
  volume: volumeSchema,
  statfs: statfsSchema,
});

export type GetVolumeDto = typeof getVolumeResponse.infer;

/**
 * Get a volume
 */
export const getVolumeDto = describeRoute({
  description: "Get a volume by name",
  operationId: "getVolume",
  tags: ["Volumes"],
  responses: {
    200: {
      description: "Volume details",
      content: {
        "application/json": {
          schema: resolver(getVolumeResponse),
        },
      },
    },
    404: {
      description: "Volume not found",
    },
  },
});

/**
 * Update a volume
 */
export const updateVolumeBody = type({
  autoRemount: "boolean?",
  config: volumeConfigSchema.optional(),
});

export type UpdateVolumeBody = typeof updateVolumeBody.infer;

export const updateVolumeResponse = volumeSchema;
export type UpdateVolumeDto = typeof updateVolumeResponse.infer;

export const updateVolumeDto = describeRoute({
  description: "Update a volume's configuration",
  operationId: "updateVolume",
  tags: ["Volumes"],
  responses: {
    200: {
      description: "Volume updated successfully",
      content: {
        "application/json": {
          schema: resolver(updateVolumeResponse),
        },
      },
    },
    404: {
      description: "Volume not found",
    },
  },
});

/**
 * Test connection
 */
export const testConnectionBody = type({
  config: volumeConfigSchema,
});

export const testConnectionResponse = type({
  success: "boolean",
  message: "string",
});
export type TestConnectionDto = typeof testConnectionResponse.infer;

export const testConnectionDto = describeRoute({
  description: "Test connection to backend",
  operationId: "testConnection",
  tags: ["Volumes"],
  responses: {
    200: {
      description: "Connection test result",
      content: {
        "application/json": {
          schema: resolver(testConnectionResponse),
        },
      },
    },
  },
});

/**
 * Mount volume
 */
export const mountVolumeResponse = type({
  error: "string?",
  status: type.valueOf(BACKEND_STATUS),
});
export type MountVolumeDto = typeof mountVolumeResponse.infer;

export const mountVolumeDto = describeRoute({
  description: "Mount a volume",
  operationId: "mountVolume",
  tags: ["Volumes"],
  responses: {
    200: {
      description: "Volume mounted successfully",
      content: {
        "application/json": {
          schema: resolver(mountVolumeResponse),
        },
      },
    },
  },
});

/**
 * Unmount volume
 */
export const unmountVolumeResponse = type({
  error: "string?",
  status: type.valueOf(BACKEND_STATUS),
});
export type UnmountVolumeDto = typeof unmountVolumeResponse.infer;

export const unmountVolumeDto = describeRoute({
  description: "Unmount a volume",
  operationId: "unmountVolume",
  tags: ["Volumes"],
  responses: {
    200: {
      description: "Volume unmounted successfully",
      content: {
        "application/json": {
          schema: resolver(unmountVolumeResponse),
        },
      },
    },
  },
});

export const healthCheckResponse = type({
  error: "string?",
  status: type.valueOf(BACKEND_STATUS),
});
export type HealthCheckDto = typeof healthCheckResponse.infer;

export const healthCheckDto = describeRoute({
  description: "Perform a health check on a volume",
  operationId: "healthCheckVolume",
  tags: ["Volumes"],
  responses: {
    200: {
      description: "Volume health check result",
      content: {
        "application/json": {
          schema: resolver(healthCheckResponse),
        },
      },
    },
    404: {
      description: "Volume not found",
    },
  },
});

/**
 * Get containers using a volume
 */
const containerSchema = type({
  id: "string",
  name: "string",
  state: "string",
  image: "string",
});

export const listContainersResponse = containerSchema.array();
export type ListContainersDto = typeof listContainersResponse.infer;

export const getContainersDto = describeRoute({
  description: "Get containers using a volume by name",
  operationId: "getContainersUsingVolume",
  tags: ["Volumes"],
  responses: {
    200: {
      description: "List of containers using the volume",
      content: {
        "application/json": {
          schema: resolver(listContainersResponse),
        },
      },
    },
    404: {
      description: "Volume not found",
    },
  },
});

/**
 * List files in a volume
 */
const fileEntrySchema = type({
  name: "string",
  path: "string",
  type: type.enumerated("file", "directory"),
  size: "number?",
  modifiedAt: "number?",
});

export const listFilesResponse = type({
  files: fileEntrySchema.array(),
  path: "string",
});
export type ListFilesDto = typeof listFilesResponse.infer;

export const listFilesDto = describeRoute({
  description: "List files in a volume directory",
  operationId: "listFiles",
  tags: ["Volumes"],
  parameters: [
    {
      in: "query",
      name: "path",
      required: false,
      schema: {
        type: "string",
      },
      description: "Subdirectory path to list (relative to volume root)",
    },
  ],
  responses: {
    200: {
      description: "List of files in the volume",
      content: {
        "application/json": {
          schema: resolver(listFilesResponse),
        },
      },
    },
  },
});

/**
 * Browse filesystem directories
 */
export const browseFilesystemResponse = type({
  directories: fileEntrySchema.array(),
  path: "string",
});
export type BrowseFilesystemDto = typeof browseFilesystemResponse.infer;

export const browseFilesystemDto = describeRoute({
  description: "Browse directories on the host filesystem",
  operationId: "browseFilesystem",
  tags: ["Volumes"],
  parameters: [
    {
      in: "query",
      name: "path",
      required: false,
      schema: {
        type: "string",
      },
      description: "Directory path to browse (absolute path, defaults to /)",
    },
  ],
  responses: {
    200: {
      description: "List of directories in the specified path",
      content: {
        "application/json": {
          schema: resolver(browseFilesystemResponse),
        },
      },
    },
  },
});
406
app/server/modules/volumes/volume.service.ts
Normal file
@@ -0,0 +1,406 @@
import * as fs from "node:fs/promises";
import * as os from "node:os";
import * as path from "node:path";
import Docker from "dockerode";
import { eq } from "drizzle-orm";
import { ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced";
import slugify from "slugify";
import { getCapabilities } from "../../core/capabilities";
import { db } from "../../db/db";
import { volumesTable } from "../../db/schema";
import { toMessage } from "../../utils/errors";
import { getStatFs, type StatFs } from "../../utils/mountinfo";
import { withTimeout } from "../../utils/timeout";
import { createVolumeBackend } from "../backends/backend";
import type { UpdateVolumeBody } from "./volume.dto";
import { getVolumePath } from "./helpers";
import { logger } from "../../utils/logger";
import { serverEvents } from "../../core/events";
import type { BackendConfig } from "~/schemas/volumes";

const listVolumes = async () => {
  const volumes = await db.query.volumesTable.findMany({});

  return volumes;
};

const createVolume = async (name: string, backendConfig: BackendConfig) => {
  const slug = slugify(name, { lower: true, strict: true });

  const existing = await db.query.volumesTable.findFirst({
    where: eq(volumesTable.name, slug),
  });

  if (existing) {
    throw new ConflictError("Volume already exists");
  }

  const [created] = await db
    .insert(volumesTable)
    .values({
      name: slug,
      config: backendConfig,
      type: backendConfig.backend,
    })
    .returning();

  if (!created) {
    throw new InternalServerError("Failed to create volume");
  }

  const backend = createVolumeBackend(created);
  const { error, status } = await backend.mount();

  await db
    .update(volumesTable)
    .set({ status, lastError: error ?? null, lastHealthCheck: Date.now() })
    .where(eq(volumesTable.name, slug));

  return { volume: created, status: 201 };
};

const deleteVolume = async (name: string) => {
  const volume = await db.query.volumesTable.findFirst({
    where: eq(volumesTable.name, name),
  });

  if (!volume) {
    throw new NotFoundError("Volume not found");
  }

  const backend = createVolumeBackend(volume);
  await backend.unmount();
  await db.delete(volumesTable).where(eq(volumesTable.name, name));
};

const mountVolume = async (name: string) => {
  const volume = await db.query.volumesTable.findFirst({
    where: eq(volumesTable.name, name),
  });

  if (!volume) {
    throw new NotFoundError("Volume not found");
  }

  const backend = createVolumeBackend(volume);
  const { error, status } = await backend.mount();

  await db
    .update(volumesTable)
    .set({ status, lastError: error ?? null, lastHealthCheck: Date.now() })
    .where(eq(volumesTable.name, name));

  if (status === "mounted") {
    serverEvents.emit("volume:mounted", { volumeName: name });
  }

  return { error, status };
};

const unmountVolume = async (name: string) => {
  const volume = await db.query.volumesTable.findFirst({
    where: eq(volumesTable.name, name),
  });

  if (!volume) {
    throw new NotFoundError("Volume not found");
  }

  const backend = createVolumeBackend(volume);
  const { status, error } = await backend.unmount();

  await db.update(volumesTable).set({ status }).where(eq(volumesTable.name, name));

  if (status === "unmounted") {
    serverEvents.emit("volume:unmounted", { volumeName: name });
  }

  return { error, status };
};

const getVolume = async (name: string) => {
  const volume = await db.query.volumesTable.findFirst({
    where: eq(volumesTable.name, name),
  });

  if (!volume) {
    throw new NotFoundError("Volume not found");
  }

  let statfs: Partial<StatFs> = {};
  if (volume.status === "mounted") {
    statfs = await withTimeout(getStatFs(getVolumePath(volume)), 1000, "getStatFs").catch((error) => {
      logger.warn(`Failed to get statfs for volume ${name}: ${toMessage(error)}`);
      return {};
    });
  }

  return { volume, statfs };
};

const updateVolume = async (name: string, volumeData: UpdateVolumeBody) => {
  const existing = await db.query.volumesTable.findFirst({
    where: eq(volumesTable.name, name),
  });

  if (!existing) {
    throw new NotFoundError("Volume not found");
  }

  const configChanged =
    JSON.stringify(existing.config) !== JSON.stringify(volumeData.config) && volumeData.config !== undefined;

  if (configChanged) {
    logger.debug("Unmounting existing volume before applying new config");
    const backend = createVolumeBackend(existing);
    await backend.unmount();
  }

  const [updated] = await db
    .update(volumesTable)
    .set({
      config: volumeData.config,
      type: volumeData.config?.backend,
      autoRemount: volumeData.autoRemount,
      updatedAt: Date.now(),
    })
    .where(eq(volumesTable.name, name))
    .returning();

  if (!updated) {
    throw new InternalServerError("Failed to update volume");
  }

  if (configChanged) {
    const backend = createVolumeBackend(updated);
    const { error, status } = await backend.mount();
    await db
      .update(volumesTable)
      .set({ status, lastError: error ?? null, lastHealthCheck: Date.now() })
      .where(eq(volumesTable.name, name));

    serverEvents.emit("volume:updated", { volumeName: name });
  }

  return { volume: updated };
};

const testConnection = async (backendConfig: BackendConfig) => {
  const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "ironmount-test-"));

  const mockVolume = {
    id: 0,
    name: "test-connection",
    path: tempDir,
    config: backendConfig,
    createdAt: Date.now(),
    updatedAt: Date.now(),
    lastHealthCheck: Date.now(),
    type: backendConfig.backend,
    status: "unmounted" as const,
    lastError: null,
    autoRemount: true,
  };

  const backend = createVolumeBackend(mockVolume);
  const { error } = await backend.mount();

  await backend.unmount();

  await fs.access(tempDir);
  await fs.rm(tempDir, { recursive: true, force: true });

  return {
    success: !error,
    message: error ? toMessage(error) : "Connection successful",
  };
};

const checkHealth = async (name: string) => {
  const volume = await db.query.volumesTable.findFirst({
    where: eq(volumesTable.name, name),
  });

  if (!volume) {
    throw new NotFoundError("Volume not found");
  }

  const backend = createVolumeBackend(volume);
  const { error, status } = await backend.checkHealth();

  if (status !== volume.status) {
    serverEvents.emit("volume:status_changed", { volumeName: name, status });
  }

  await db
    .update(volumesTable)
    .set({ lastHealthCheck: Date.now(), status, lastError: error ?? null })
    .where(eq(volumesTable.name, volume.name));

  return { status, error };
};

const getContainersUsingVolume = async (name: string) => {
  const volume = await db.query.volumesTable.findFirst({
    where: eq(volumesTable.name, name),
  });

  if (!volume) {
    throw new NotFoundError("Volume not found");
  }

  const { docker } = await getCapabilities();
  if (!docker) {
    logger.debug("Docker capability not available, returning empty containers list");
    return { containers: [] };
  }

  try {
    const docker = new Docker();
    const containers = await docker.listContainers({ all: true });

    const usingContainers = [];
    for (const info of containers) {
      const container = docker.getContainer(info.Id);
      const inspect = await container.inspect();
      const mounts = inspect.Mounts || [];
      const usesVolume = mounts.some((mount) => mount.Type === "volume" && mount.Name === `im-${volume.name}`);
      if (usesVolume) {
        usingContainers.push({
          id: inspect.Id,
          name: inspect.Name,
          state: inspect.State.Status,
          image: inspect.Config.Image,
        });
      }
    }

    return { containers: usingContainers };
  } catch (error) {
    logger.error(`Failed to get containers using volume: ${toMessage(error)}`);
    return { containers: [] };
  }
};

const listFiles = async (name: string, subPath?: string) => {
  const volume = await db.query.volumesTable.findFirst({
    where: eq(volumesTable.name, name),
  });

  if (!volume) {
    throw new NotFoundError("Volume not found");
  }

  if (volume.status !== "mounted") {
    throw new InternalServerError("Volume is not mounted");
  }

  // For directory volumes, use the configured path directly
  const volumePath = getVolumePath(volume);

  const requestedPath = subPath ? path.join(volumePath, subPath) : volumePath;

  const normalizedPath = path.normalize(requestedPath);
  if (!normalizedPath.startsWith(volumePath)) {
    throw new InternalServerError("Invalid path");
  }

  try {
    const entries = await fs.readdir(normalizedPath, { withFileTypes: true });

    const files = await Promise.all(
      entries.map(async (entry) => {
        const fullPath = path.join(normalizedPath, entry.name);
        const relativePath = path.relative(volumePath, fullPath);

        try {
          const stats = await fs.stat(fullPath);
          return {
            name: entry.name,
            path: `/${relativePath}`,
            type: entry.isDirectory() ? ("directory" as const) : ("file" as const),
            size: entry.isFile() ? stats.size : undefined,
            modifiedAt: stats.mtimeMs,
          };
        } catch {
          return {
            name: entry.name,
            path: `/${relativePath}`,
            type: entry.isDirectory() ? ("directory" as const) : ("file" as const),
            size: undefined,
            modifiedAt: undefined,
          };
        }
      }),
    );

    return {
      files: files.sort((a, b) => {
        if (a.type !== b.type) {
          return a.type === "directory" ? -1 : 1;
        }
        return a.name.localeCompare(b.name);
      }),
      path: subPath || "/",
    };
  } catch (error) {
    throw new InternalServerError(`Failed to list files: ${toMessage(error)}`);
  }
};

const browseFilesystem = async (browsePath: string) => {
  const normalizedPath = path.normalize(browsePath);

  try {
    const entries = await fs.readdir(normalizedPath, { withFileTypes: true });

    const directories = await Promise.all(
      entries
        .filter((entry) => entry.isDirectory())
        .map(async (entry) => {
          const fullPath = path.join(normalizedPath, entry.name);

          try {
            const stats = await fs.stat(fullPath);
            return {
              name: entry.name,
              path: fullPath,
              type: "directory" as const,
              size: undefined,
              modifiedAt: stats.mtimeMs,
            };
          } catch {
            return {
              name: entry.name,
              path: fullPath,
              type: "directory" as const,
              size: undefined,
              modifiedAt: undefined,
            };
          }
        }),
    );

    return {
      directories: directories.sort((a, b) => a.name.localeCompare(b.name)),
      path: normalizedPath,
    };
  } catch (error) {
    throw new InternalServerError(`Failed to browse filesystem: ${toMessage(error)}`);
  }
};

export const volumeService = {
  listVolumes,
  createVolume,
  mountVolume,
  deleteVolume,
  getVolume,
  updateVolume,
  testConnection,
  unmountVolume,
  checkHealth,
  getContainersUsingVolume,
  listFiles,
  browseFilesystem,
};
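A minimal sketch of driving testConnection directly. The config literal is a hypothetical directory-backend shape; the real field names come from volumeConfigSchema, which is outside this diff:

import { volumeService } from "./volume.service";
import type { BackendConfig } from "~/schemas/volumes";

// Dry-run a backend config against a throwaway mount point before saving it
const config = { backend: "directory", path: "/mnt/media" } as BackendConfig;

const { success, message } = await volumeService.testConnection(config);
console.log(success ? "backend reachable" : `failed: ${message}`);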
61
app/server/utils/crypto.ts
Normal file
@@ -0,0 +1,61 @@
import crypto from "node:crypto";
import { RESTIC_PASS_FILE } from "../core/constants";

const algorithm = "aes-256-gcm" as const;
const keyLength = 32;
const encryptionPrefix = "encv1";

/**
 * Given a string, encrypts it using a randomly generated salt
 */
const encrypt = async (data: string) => {
  if (!data) {
    return data;
  }

  if (data.startsWith(encryptionPrefix)) {
    return data;
  }

  const secret = (await Bun.file(RESTIC_PASS_FILE).text()).trim();

  const salt = crypto.randomBytes(16);
  const key = crypto.pbkdf2Sync(secret, salt, 100000, keyLength, "sha256");
  const iv = crypto.randomBytes(12);

  const cipher = crypto.createCipheriv(algorithm, key, iv);
  const encrypted = Buffer.concat([cipher.update(data), cipher.final()]);

  const tag = cipher.getAuthTag();
  return `${encryptionPrefix}:${salt.toString("hex")}:${iv.toString("hex")}:${encrypted.toString("hex")}:${tag.toString("hex")}`;
};

/**
 * Given an encrypted string, decrypts it using the salt stored in the string
 */
const decrypt = async (encryptedData: string) => {
  // Trim to match encrypt(), which derives the key from the trimmed secret
  const secret = (await Bun.file(RESTIC_PASS_FILE).text()).trim();

  const parts = encryptedData.split(":").slice(1); // Remove prefix
  const saltHex = parts.shift() as string;
  const salt = Buffer.from(saltHex, "hex");

  const key = crypto.pbkdf2Sync(secret, salt, 100000, keyLength, "sha256");

  const iv = Buffer.from(parts.shift() as string, "hex");
  const encrypted = Buffer.from(parts.shift() as string, "hex");
  const tag = Buffer.from(parts.shift() as string, "hex");
  const decipher = crypto.createDecipheriv(algorithm, key, iv);

  decipher.setAuthTag(tag);

  let decrypted = decipher.update(encrypted);
  decrypted = Buffer.concat([decrypted, decipher.final()]);

  return decrypted.toString();
};

export const cryptoUtils = {
  encrypt,
  decrypt,
};
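A round-trip sketch of the API above (assumes the Bun runtime and an existing RESTIC_PASS_FILE):

import { cryptoUtils } from "./crypto";

// encrypt() returns an "encv1:salt:iv:ciphertext:tag" hex string, and
// decrypt() recovers the plaintext using the salt and IV stored in it
const ciphertext = await cryptoUtils.encrypt("my-s3-secret-key");
console.log(ciphertext.startsWith("encv1:")); // true
console.log(await cryptoUtils.decrypt(ciphertext)); // "my-s3-secret-key"

// encrypt() is idempotent on already-encrypted values
console.log((await cryptoUtils.encrypt(ciphertext)) === ciphertext); // true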
19
app/server/utils/errors.ts
Normal file
@@ -0,0 +1,19 @@
import { ConflictError, NotFoundError } from "http-errors-enhanced";
import { sanitizeSensitiveData } from "./sanitize";

export const handleServiceError = (error: unknown) => {
  if (error instanceof ConflictError) {
    return { message: sanitizeSensitiveData(error.message), status: 409 as const };
  }

  if (error instanceof NotFoundError) {
    return { message: sanitizeSensitiveData(error.message), status: 404 as const };
  }

  return { message: sanitizeSensitiveData(toMessage(error)), status: 500 as const };
};

export const toMessage = (err: unknown): string => {
  const message = err instanceof Error ? err.message : String(err);
  return sanitizeSensitiveData(message);
};
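A quick usage sketch, grounded in the mapping above:

import { handleServiceError } from "./errors";
import { NotFoundError } from "http-errors-enhanced";

// Known service errors map onto HTTP status codes, with sanitized messages
const { message, status } = handleServiceError(new NotFoundError("Volume not found"));
console.log(status); // 404
console.log(message); // "Volume not found"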
36
app/server/utils/logger.ts
Normal file
@@ -0,0 +1,36 @@
import { createLogger, format, transports } from "winston";
import { sanitizeSensitiveData } from "./sanitize";

const { printf, combine, colorize } = format;

const printConsole = printf((info) => `${info.level} > ${info.message}`);
const consoleFormat = combine(colorize(), printConsole);

const winstonLogger = createLogger({
  level: "debug",
  format: format.json(),
  transports: [new transports.Console({ level: "debug", format: consoleFormat })],
});

const log = (level: "info" | "warn" | "error" | "debug", messages: unknown[]) => {
  const stringMessages = messages.flatMap((m) => {
    if (m instanceof Error) {
      return [sanitizeSensitiveData(m.message), m.stack ? sanitizeSensitiveData(m.stack) : undefined].filter(Boolean);
    }

    if (typeof m === "object") {
      return sanitizeSensitiveData(JSON.stringify(m, null, 2));
    }

    return sanitizeSensitiveData(String(m));
  });

  winstonLogger.log(level, stringMessages.join(" "));
};

export const logger = {
  debug: (...messages: unknown[]) => log("debug", messages),
  info: (...messages: unknown[]) => log("info", messages),
  warn: (...messages: unknown[]) => log("warn", messages),
  error: (...messages: unknown[]) => log("error", messages),
};
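Callers pass arbitrary values; each is stringified (Errors include their stack) and sanitized before hitting winston:

import { logger } from "./logger";

logger.info("Mounting volume", { name: "media" });
logger.error("Mount failed", new Error("Transport endpoint is not connected"));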
85
app/server/utils/mountinfo.ts
Normal file
@@ -0,0 +1,85 @@
import fs from "node:fs/promises";
import path from "node:path";

type MountInfo = {
  mountPoint: string;
  fstype: string;
};

export type StatFs = {
  total: number;
  used: number;
  free: number;
};

function isPathWithin(base: string, target: string): boolean {
  const rel = path.posix.relative(base, target);
  return rel === "" || (!rel.startsWith("..") && !path.isAbsolute(rel));
}

function unescapeMount(s: string): string {
  return s.replace(/\\([0-7]{3})/g, (_, oct) => String.fromCharCode(parseInt(oct, 8)));
}

export async function readMountInfo(): Promise<MountInfo[]> {
  const text = await fs.readFile("/proc/self/mountinfo", "utf-8");
  const result: MountInfo[] = [];

  for (const line of text.split("\n")) {
    if (!line) continue;
    const sep = line.indexOf(" - ");

    if (sep === -1) continue;

    const left = line.slice(0, sep).split(" ");
    const right = line.slice(sep + 3).split(" ");

    // [0]=mount ID, [1]=parent ID, [2]=major:minor, [3]=root, [4]=mount point, [5]=mount options, ...
    const mpRaw = left[4];
    const fstype = right[0];

    if (!mpRaw || !fstype) continue;

    result.push({ mountPoint: unescapeMount(mpRaw), fstype });
  }
  return result;
}

export async function getMountForPath(p: string): Promise<MountInfo | undefined> {
  const mounts = await readMountInfo();

  let best: MountInfo | undefined;
  for (const m of mounts) {
    if (!isPathWithin(m.mountPoint, p)) continue;
    if (!best || m.mountPoint.length > best.mountPoint.length) {
      best = m;
    }
  }
  return best;
}

export async function getStatFs(mountPoint: string) {
  const s = await fs.statfs(mountPoint, { bigint: true });

  const unit = s.bsize > 0n ? s.bsize : 1n;

  const blocks = s.blocks > 0n ? s.blocks : 0n;

  let bfree = s.bfree > 0n ? s.bfree : 0n;
  if (bfree > blocks) bfree = blocks;

  const bavail = s.bavail > 0n ? s.bavail : 0n;

  const totalB = blocks * unit;
  const usedB = (blocks - bfree) * unit;
  const freeB = bavail * unit;

  const MAX = BigInt(Number.MAX_SAFE_INTEGER);
  const toNumber = (x: bigint) => (x > MAX ? Number.MAX_SAFE_INTEGER : Number(x));

  return {
    total: toNumber(totalB),
    used: toNumber(usedB),
    free: toNumber(freeB),
  };
}
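Combining the two helpers (the volume path below is illustrative):

import { getMountForPath, getStatFs } from "./mountinfo";

// Find the longest-prefix mount covering a path, then report its disk usage
const mount = await getMountForPath("/var/lib/ironmount/volumes/media/_data");
if (mount) {
  const { total, used, free } = await getStatFs(mount.mountPoint);
  console.log(`${mount.fstype}: ${used}/${total} bytes used, ${free} free`);
}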
76
app/server/utils/rclone.ts
Normal file
@@ -0,0 +1,76 @@
import { $ } from "bun";
import { logger } from "./logger";

/**
 * List all configured rclone remotes
 * @returns Array of remote names
 */
export async function listRcloneRemotes(): Promise<string[]> {
  const result = await $`rclone listremotes`.nothrow();

  if (result.exitCode !== 0) {
    logger.error(`Failed to list rclone remotes: ${result.stderr}`);
    return [];
  }

  // Parse output - each line is a remote name ending with ":"
  const remotes = result.stdout
    .toString()
    .split("\n")
    .map((line) => line.trim())
    .filter((line) => line.endsWith(":"))
    .map((line) => line.slice(0, -1)); // Remove trailing ":"

  return remotes;
}

/**
 * Get information about a specific rclone remote
 * @param remote Remote name
 * @returns Remote type and configuration info
 */
export async function getRcloneRemoteInfo(
  remote: string,
): Promise<{ type: string; config: Record<string, string> } | null> {
  try {
    const result = await $`rclone config show ${remote}`.quiet();

    if (result.exitCode !== 0) {
      logger.error(`Failed to get info for remote ${remote}: ${result.stderr}`);
      return null;
    }

    // Parse the output to extract type and config
    const output = result.stdout.toString();
    const lines = output
      .split("\n")
      .map((l) => l.trim())
      .filter((l) => l);

    const config: Record<string, string> = {};
    let type = "unknown";

    for (const line of lines) {
      if (line.includes("=")) {
        const parts = line.split("=");
        const key = parts[0];
        if (!key) continue;

        const valueParts = parts.slice(1);
        const value = valueParts.join("=").trim();
        const cleanKey = key.trim();

        if (cleanKey === "type") {
          type = value;
        }

        config[cleanKey] = value;
      }
    }

    return { type, config };
  } catch (error) {
    logger.error(`Error getting remote info for ${remote}: ${error}`);
    return null;
  }
}
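Putting the two together (assumes the rclone binary is on PATH and its config is mounted):

import { listRcloneRemotes, getRcloneRemoteInfo } from "./rclone";

// Enumerate remotes from the mounted rclone config and inspect each one
for (const remote of await listRcloneRemotes()) {
  const info = await getRcloneRemoteInfo(remote);
  console.log(remote, info?.type ?? "unknown");
}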
589
app/server/utils/restic.ts
Normal file
@@ -0,0 +1,589 @@
|
||||
import crypto from "node:crypto";
|
||||
import fs from "node:fs/promises";
|
||||
import path from "node:path";
|
||||
import { throttle } from "es-toolkit";
|
||||
import { type } from "arktype";
|
||||
import { $ } from "bun";
|
||||
import { REPOSITORY_BASE, RESTIC_PASS_FILE } from "../core/constants";
|
||||
import { logger } from "./logger";
|
||||
import { cryptoUtils } from "./crypto";
|
||||
import type { RetentionPolicy } from "../modules/backups/backups.dto";
|
||||
import { safeSpawn } from "./spawn";
|
||||
import type { RepositoryConfig } from "~/schemas/restic";
|
||||
|
||||
const backupOutputSchema = type({
|
||||
message_type: "'summary'",
|
||||
files_new: "number",
|
||||
files_changed: "number",
|
||||
files_unmodified: "number",
|
||||
dirs_new: "number",
|
||||
dirs_changed: "number",
|
||||
dirs_unmodified: "number",
|
||||
data_blobs: "number",
|
||||
tree_blobs: "number",
|
||||
data_added: "number",
|
||||
total_files_processed: "number",
|
||||
total_bytes_processed: "number",
|
||||
total_duration: "number",
|
||||
snapshot_id: "string",
|
||||
});
|
||||
|
||||
const snapshotInfoSchema = type({
|
||||
gid: "number?",
|
||||
hostname: "string",
|
||||
id: "string",
|
||||
parent: "string?",
|
||||
paths: "string[]",
|
||||
program_version: "string?",
|
||||
short_id: "string",
|
||||
time: "string",
|
||||
uid: "number?",
|
||||
username: "string",
|
||||
summary: type({
|
||||
backup_end: "string",
|
||||
backup_start: "string",
|
||||
data_added: "number",
|
||||
data_added_packed: "number",
|
||||
data_blobs: "number",
|
||||
dirs_changed: "number",
|
||||
dirs_new: "number",
|
||||
dirs_unmodified: "number",
|
||||
files_changed: "number",
|
||||
files_new: "number",
|
||||
files_unmodified: "number",
|
||||
total_bytes_processed: "number",
|
||||
total_files_processed: "number",
|
||||
tree_blobs: "number",
|
||||
}).optional(),
|
||||
});
|
||||
|
||||
const ensurePassfile = async () => {
|
||||
await fs.mkdir(path.dirname(RESTIC_PASS_FILE), { recursive: true });
|
||||
|
||||
try {
|
||||
await fs.access(RESTIC_PASS_FILE);
|
||||
} catch {
|
||||
logger.info("Restic passfile not found, creating a new one...");
|
||||
await fs.writeFile(RESTIC_PASS_FILE, crypto.randomBytes(32).toString("hex"), { mode: 0o600 });
|
||||
}
|
||||
};
|
||||
|
||||
const buildRepoUrl = (config: RepositoryConfig): string => {
|
||||
switch (config.backend) {
|
||||
case "local":
|
||||
return `${REPOSITORY_BASE}/${config.name}`;
|
||||
case "s3":
|
||||
return `s3:${config.endpoint}/${config.bucket}`;
|
||||
case "gcs":
|
||||
return `gs:${config.bucket}:/`;
|
||||
case "azure":
|
||||
return `azure:${config.container}:/`;
|
||||
case "rclone":
|
||||
return `rclone:${config.remote}:${config.path}`;
|
||||
default: {
|
||||
throw new Error(`Unsupported repository backend: ${JSON.stringify(config)}`);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const buildEnv = async (config: RepositoryConfig) => {
|
||||
const env: Record<string, string> = {
|
||||
RESTIC_CACHE_DIR: "/var/lib/ironmount/restic/cache",
|
||||
RESTIC_PASSWORD_FILE: RESTIC_PASS_FILE,
|
||||
PATH: process.env.PATH || "/usr/local/bin:/usr/bin:/bin",
|
||||
};
|
||||
|
||||
switch (config.backend) {
|
||||
case "s3":
|
||||
env.AWS_ACCESS_KEY_ID = await cryptoUtils.decrypt(config.accessKeyId);
|
||||
env.AWS_SECRET_ACCESS_KEY = await cryptoUtils.decrypt(config.secretAccessKey);
|
||||
break;
|
||||
case "gcs": {
|
||||
const decryptedCredentials = await cryptoUtils.decrypt(config.credentialsJson);
|
||||
const credentialsPath = path.join("/tmp", `gcs-credentials-${crypto.randomBytes(8).toString("hex")}.json`);
|
||||
await fs.writeFile(credentialsPath, decryptedCredentials, { mode: 0o600 });
|
||||
env.GOOGLE_PROJECT_ID = config.projectId;
|
||||
env.GOOGLE_APPLICATION_CREDENTIALS = credentialsPath;
|
||||
break;
|
||||
}
|
||||
case "azure": {
|
||||
env.AZURE_ACCOUNT_NAME = config.accountName;
|
||||
env.AZURE_ACCOUNT_KEY = await cryptoUtils.decrypt(config.accountKey);
|
||||
if (config.endpointSuffix) {
|
||||
env.AZURE_ENDPOINT_SUFFIX = config.endpointSuffix;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return env;
|
||||
};
|
||||
|
||||
const init = async (config: RepositoryConfig) => {
|
||||
await ensurePassfile();
|
||||
|
||||
const repoUrl = buildRepoUrl(config);
|
||||
const env = await buildEnv(config);
|
||||
|
||||
const res = await $`restic init --repo ${repoUrl} --json`.env(env).nothrow();
|
||||
|
||||
if (res.exitCode !== 0) {
|
||||
logger.error(`Restic init failed: ${res.stderr}`);
|
||||
return { success: false, error: res.stderr };
|
||||
}
|
||||
|
||||
logger.info(`Restic repository initialized: ${repoUrl}`);
|
||||
return { success: true, error: null };
|
||||
};
|
||||
|
||||
const backupProgressSchema = type({
|
||||
message_type: "'status'",
|
||||
seconds_elapsed: "number",
|
||||
percent_done: "number",
|
||||
total_files: "number",
|
||||
files_done: "number",
|
||||
total_bytes: "number",
|
||||
bytes_done: "number",
|
||||
current_files: "string[]",
|
||||
});
|
||||
|
||||
export type BackupProgress = typeof backupProgressSchema.infer;
|
||||
|
||||
const backup = async (
|
||||
config: RepositoryConfig,
|
||||
source: string,
|
||||
options?: {
|
||||
exclude?: string[];
|
||||
include?: string[];
|
||||
tags?: string[];
|
||||
signal?: AbortSignal;
|
||||
onProgress?: (progress: BackupProgress) => void;
|
||||
},
|
||||
) => {
|
||||
const repoUrl = buildRepoUrl(config);
|
||||
const env = await buildEnv(config);
|
||||
|
||||
const args: string[] = ["--repo", repoUrl, "backup", "--one-file-system"];
|
||||
|
||||
if (options?.tags && options.tags.length > 0) {
|
||||
for (const tag of options.tags) {
|
||||
args.push("--tag", tag);
|
||||
}
|
||||
}
|
||||
|
||||
let includeFile: string | null = null;
|
||||
if (options?.include && options.include.length > 0) {
|
||||
const tmp = await fs.mkdtemp("restic-include");
|
||||
includeFile = path.join(tmp, `include.txt`);
|
||||
const includePaths = options.include.map((p) => path.join(source, p));
|
||||
|
||||
await fs.writeFile(includeFile, includePaths.join("\n"), "utf-8");
|
||||
|
||||
args.push("--files-from", includeFile);
|
||||
} else {
|
||||
args.push(source);
|
||||
}
|
||||
|
||||
if (options?.exclude && options.exclude.length > 0) {
|
||||
for (const pattern of options.exclude) {
|
||||
args.push("--exclude", pattern);
|
||||
}
|
||||
}
|
||||
|
||||
args.push("--json");
|
||||
|
||||
const logData = throttle((data: string) => {
|
||||
logger.info(data.trim());
|
||||
}, 5000);
|
||||
|
||||
const streamProgress = throttle((data: string) => {
|
||||
if (options?.onProgress) {
|
||||
try {
|
||||
const jsonData = JSON.parse(data);
|
||||
const progress = backupProgressSchema(jsonData);
|
||||
if (!(progress instanceof type.errors)) {
|
||||
options.onProgress(progress);
|
||||
}
|
||||
} catch (_) {
|
||||
// Ignore JSON parse errors for non-JSON lines
|
||||
}
|
||||
}
|
||||
}, 1000);
|
||||
|
||||
let stdout = "";
|
||||
|
||||
const res = await safeSpawn({
|
||||
command: "restic",
|
||||
args,
|
||||
env,
|
||||
signal: options?.signal,
|
||||
onStdout: (data) => {
|
||||
stdout = data;
|
||||
logData(data);
|
||||
|
||||
if (options?.onProgress) {
|
||||
streamProgress(data);
|
||||
}
|
||||
},
|
||||
onStderr: (error) => {
|
||||
logger.error(error.trim());
|
||||
},
|
||||
finally: async () => {
|
||||
includeFile && (await fs.unlink(includeFile).catch(() => {}));
|
||||
},
|
||||
});
|
||||
|
||||
if (res.exitCode !== 0) {
|
||||
logger.error(`Restic backup failed: ${res.stderr}`);
|
||||
logger.error(`Command executed: restic ${args.join(" ")}`);
|
||||
|
||||
throw new Error(`Restic backup failed: ${res.stderr}`);
|
||||
}
|
||||
|
||||
const lastLine = stdout.trim();
|
||||
const resSummary = JSON.parse(lastLine ?? "{}");
|
||||
|
||||
const result = backupOutputSchema(resSummary);
|
||||
|
||||
if (result instanceof type.errors) {
|
||||
logger.error(`Restic backup output validation failed: ${result}`);
|
||||
|
||||
throw new Error(`Restic backup output validation failed: ${result}`);
|
||||
}
|
||||
|
||||
return result;
|
||||
};
|
||||
|
||||
const restoreOutputSchema = type({
|
||||
message_type: "'summary'",
|
||||
total_files: "number",
|
||||
files_restored: "number",
|
||||
files_skipped: "number",
|
||||
total_bytes: "number?",
|
||||
bytes_restored: "number?",
|
||||
bytes_skipped: "number",
|
||||
});
|
||||
|
||||
const restore = async (
  config: RepositoryConfig,
  snapshotId: string,
  target: string,
  options?: {
    include?: string[];
    exclude?: string[];
    path?: string;
    delete?: boolean;
  },
) => {
  const repoUrl = buildRepoUrl(config);
  const env = await buildEnv(config);

  // Restic accepts "<snapshot>:<path>" to restore a sub-path of a snapshot.
  // (Building the spec up front replaces a fragile positional rewrite of the
  // args array that overwrote the wrong element.)
  const snapshotSpec = options?.path ? `${snapshotId}:${options.path}` : snapshotId;
  const args: string[] = ["--repo", repoUrl, "restore", snapshotSpec, "--target", target];

  if (options?.delete) {
    args.push("--delete");
  }

  if (options?.include?.length) {
    for (const pattern of options.include) {
      args.push("--include", pattern);
    }
  }

  if (options?.exclude?.length) {
    for (const pattern of options.exclude) {
      args.push("--exclude", pattern);
    }
  }

  args.push("--json");

  logger.info(`Restic restore command: ${["restic", ...args].join(" ")}`);

  const res = await $`restic ${args}`.env(env).nothrow();

  if (res.exitCode !== 0) {
    logger.error(`Restic restore failed: ${res.stderr}`);
    throw new Error(`Restic restore failed: ${res.stderr}`);
  }

  // Fallback summary, returned when restic produces no parsable summary line.
  const emptySummary = {
    message_type: "summary" as const,
    total_files: 0,
    files_restored: 0,
    files_skipped: 0,
    bytes_skipped: 0,
  };

  const stdout = res.text();
  const outputLines = stdout.trim().split("\n");
  const lastLine = outputLines[outputLines.length - 1];

  if (!lastLine) {
    logger.info(`Restic restore completed for snapshot ${snapshotId} to target ${target}`);
    return emptySummary;
  }

  const resSummary = JSON.parse(lastLine);
  const result = restoreOutputSchema(resSummary);

  if (result instanceof type.errors) {
    logger.warn(`Restic restore output validation failed: ${result}`);
    logger.info(`Restic restore completed for snapshot ${snapshotId} to target ${target}`);
    return emptySummary;
  }

  logger.info(
    `Restic restore completed for snapshot ${snapshotId} to target ${target}: ${result.files_restored} restored, ${result.files_skipped} skipped`,
  );

  return result;
};

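// Usage sketch (illustrative, not part of this commit; the snapshot id and
// paths are hypothetical): restoring one sub-path of a snapshot, deleting
// files in the target that are absent from the snapshot.
//
//   const summary = await restore(config, "a1b2c3d4", "/tmp/restore-target", {
//     path: "/data/config",
//     delete: true,
//   });
//   logger.info(`${summary.files_restored} files restored`);
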
const snapshots = async (config: RepositoryConfig, options: { tags?: string[] } = {}) => {
  const { tags } = options;

  const repoUrl = buildRepoUrl(config);
  const env = await buildEnv(config);

  const args = ["--repo", repoUrl, "snapshots"];

  if (tags && tags.length > 0) {
    for (const tag of tags) {
      args.push("--tag", tag);
    }
  }

  args.push("--json");

  const res = await $`restic ${args}`.env(env).nothrow().quiet();

  if (res.exitCode !== 0) {
    logger.error(`Restic snapshots retrieval failed: ${res.stderr}`);
    throw new Error(`Restic snapshots retrieval failed: ${res.stderr}`);
  }

  const result = snapshotInfoSchema.array()(res.json());

  if (result instanceof type.errors) {
    logger.error(`Restic snapshots output validation failed: ${result}`);
    throw new Error(`Restic snapshots output validation failed: ${result}`);
  }

  return result;
};

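// Usage sketch (illustrative, not part of this commit; assumes the
// snapshotInfoSchema defined earlier exposes restic's short_id and time
// fields): listing the snapshots of one backup group by tag.
//
//   const snaps = await snapshots(config, { tags: ["volume:data"] });
//   for (const snap of snaps) {
//     logger.info(`${snap.short_id} taken at ${snap.time}`);
//   }
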
const forget = async (config: RepositoryConfig, options: RetentionPolicy, extra: { tag: string }) => {
  const repoUrl = buildRepoUrl(config);
  const env = await buildEnv(config);

  const args: string[] = ["--repo", repoUrl, "forget", "--group-by", "tags", "--tag", extra.tag];

  if (options.keepLast) {
    args.push("--keep-last", String(options.keepLast));
  }
  if (options.keepHourly) {
    args.push("--keep-hourly", String(options.keepHourly));
  }
  if (options.keepDaily) {
    args.push("--keep-daily", String(options.keepDaily));
  }
  if (options.keepWeekly) {
    args.push("--keep-weekly", String(options.keepWeekly));
  }
  if (options.keepMonthly) {
    args.push("--keep-monthly", String(options.keepMonthly));
  }
  if (options.keepYearly) {
    args.push("--keep-yearly", String(options.keepYearly));
  }
  if (options.keepWithinDuration) {
    // restic's flag for duration-based retention is --keep-within.
    args.push("--keep-within", options.keepWithinDuration);
  }

  args.push("--prune");
  args.push("--json");

  const res = await $`restic ${args}`.env(env).nothrow();

  if (res.exitCode !== 0) {
    logger.error(`Restic forget failed: ${res.stderr}`);
    throw new Error(`Restic forget failed: ${res.stderr}`);
  }

  return { success: true };
};

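// Usage sketch (illustrative, not part of this commit): applying a policy
// that keeps the last 7 daily and 4 weekly snapshots of one tag group, then
// prunes unreferenced data.
//
//   await forget(config, { keepDaily: 7, keepWeekly: 4 }, { tag: "volume:data" });
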
const lsNodeSchema = type({
  name: "string",
  type: "string",
  path: "string",
  uid: "number?",
  gid: "number?",
  size: "number?",
  mode: "number?",
  mtime: "string?",
  atime: "string?",
  ctime: "string?",
  struct_type: "'node'",
});

const lsSnapshotInfoSchema = type({
  time: "string",
  parent: "string?",
  tree: "string",
  paths: "string[]",
  hostname: "string",
  username: "string?",
  id: "string",
  short_id: "string",
  struct_type: "'snapshot'",
  message_type: "'snapshot'",
});

const ls = async (config: RepositoryConfig, snapshotId: string, path?: string) => {
  const repoUrl = buildRepoUrl(config);
  const env = await buildEnv(config);

  const args: string[] = ["--repo", repoUrl, "ls", snapshotId, "--json", "--long"];

  if (path) {
    args.push(path);
  }

  const res = await $`restic ${args}`.env(env).nothrow().quiet();

  if (res.exitCode !== 0) {
    logger.error(`Restic ls failed: ${res.stderr}`);
    throw new Error(`Restic ls failed: ${res.stderr}`);
  }

  // The output is a stream of JSON objects: the first is snapshot info, the rest are file/dir nodes
  const stdout = res.text();
  const lines = stdout
    .trim()
    .split("\n")
    .filter((line) => line.trim());

  if (lines.length === 0) {
    return { snapshot: null, nodes: [] };
  }

  // First line is snapshot info
  const snapshotLine = JSON.parse(lines[0] ?? "{}");
  const snapshot = lsSnapshotInfoSchema(snapshotLine);

  if (snapshot instanceof type.errors) {
    logger.error(`Restic ls snapshot info validation failed: ${snapshot}`);
    throw new Error(`Restic ls snapshot info validation failed: ${snapshot}`);
  }

  const nodes: Array<typeof lsNodeSchema.infer> = [];
  for (let i = 1; i < lines.length; i++) {
    const nodeLine = JSON.parse(lines[i] ?? "{}");
    const nodeValidation = lsNodeSchema(nodeLine);

    if (nodeValidation instanceof type.errors) {
      logger.warn(`Skipping invalid node: ${nodeValidation}`);
      continue;
    }

    nodes.push(nodeValidation);
  }

  return { snapshot, nodes };
};

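// Usage sketch (illustrative, not part of this commit; "latest" is restic's
// alias for the most recent snapshot): browsing one directory level of a
// snapshot.
//
//   const { snapshot, nodes } = await ls(config, "latest", "/data");
//   for (const node of nodes) {
//     logger.info(`${node.type} ${node.path}`);
//   }
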
const unlock = async (config: RepositoryConfig) => {
  const repoUrl = buildRepoUrl(config);
  const env = await buildEnv(config);

  const res = await $`restic unlock --repo ${repoUrl} --remove-all --json`.env(env).nothrow();

  if (res.exitCode !== 0) {
    logger.error(`Restic unlock failed: ${res.stderr}`);
    throw new Error(`Restic unlock failed: ${res.stderr}`);
  }

  logger.info(`Restic unlock succeeded for repository: ${repoUrl}`);
  return { success: true, message: "Repository unlocked successfully" };
};

const check = async (config: RepositoryConfig, options?: { readData?: boolean }) => {
  const repoUrl = buildRepoUrl(config);
  const env = await buildEnv(config);

  const args: string[] = ["--repo", repoUrl, "check"];

  if (options?.readData) {
    args.push("--read-data");
  }

  const res = await $`restic ${args}`.env(env).nothrow();

  const stdout = res.text();
  const stderr = res.stderr.toString();

  if (res.exitCode !== 0) {
    logger.error(`Restic check failed: ${stderr}`);
    return {
      success: false,
      hasErrors: true,
      output: stdout,
      error: stderr,
    };
  }

  // Belt-and-braces: even with a zero exit code, scan the output for fatal
  // errors reported by restic.
  const hasErrors = stdout.includes("Fatal");

  logger.info(`Restic check completed for repository: ${repoUrl}`);
  return {
    success: !hasErrors,
    hasErrors,
    output: stdout,
    error: hasErrors ? "Repository contains errors" : null,
  };
};

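// Usage sketch (illustrative, not part of this commit): a structural check;
// pass readData: true to also verify pack file contents (slower, reads every
// blob).
//
//   const report = await check(config);
//   if (!report.success) {
//     logger.error(`repository check failed: ${report.error}`);
//   }
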
const repairIndex = async (config: RepositoryConfig) => {
  const repoUrl = buildRepoUrl(config);
  const env = await buildEnv(config);

  const res = await $`restic repair index --repo ${repoUrl}`.env(env).nothrow();

  const stdout = res.text();
  const stderr = res.stderr.toString();

  if (res.exitCode !== 0) {
    logger.error(`Restic repair index failed: ${stderr}`);
    throw new Error(`Restic repair index failed: ${stderr}`);
  }

  logger.info(`Restic repair index completed for repository: ${repoUrl}`);
  return {
    success: true,
    output: stdout,
    message: "Index repaired successfully",
  };
};

export const restic = {
  ensurePassfile,
  init,
  backup,
  restore,
  snapshots,
  forget,
  unlock,
  ls,
  check,
  repairIndex,
};

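// Usage sketch (illustrative, not part of this commit): a maintenance pass
// that clears stale locks, verifies the repository, and repairs the index
// when the check reports errors.
//
//   await restic.unlock(config);
//   const report = await restic.check(config);
//   if (report.hasErrors) {
//     await restic.repairIndex(config);
//   }
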
18
app/server/utils/sanitize.ts
Normal file
@@ -0,0 +1,18 @@
/**
 * Sanitizes sensitive information from strings.
 * Removes passwords and credentials from logs and error messages.
 */
export const sanitizeSensitiveData = (text: string): string => {
  // Mask key=value pairs such as "pass=secret" or "password=secret".
  let sanitized = text.replace(/\b(pass|password)=([^\s,]+)/gi, "$1=***");

  // Mask the password portion of URLs like "scheme://user:secret@host".
  sanitized = sanitized.replace(/\/\/([^:@\s]+):([^@\s]+)@/g, "//$1:***@");

  // Mask the third token of "<url> <user> <password>" triples.
  sanitized = sanitized.replace(/(\S+)\s+(\S+)\s+(\S+)/g, (match, url, user, _pass) => {
    if (url.startsWith("http://") || url.startsWith("https://")) {
      return `${url} ${user} ***`;
    }
    return match;
  });

  return sanitized;
};

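// Usage sketch (illustrative, not part of this commit): the URL-credential
// rule masks the password portion of embedded URLs.
//
//   sanitizeSensitiveData("repo rest:https://alice:hunter2@backup.local/repo");
//   // => "repo rest:https://alice:***@backup.local/repo"
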
79
app/server/utils/spawn.ts
Normal file
@@ -0,0 +1,79 @@
import { spawn } from "node:child_process";

interface Params {
  command: string;
  args: string[];
  env?: NodeJS.ProcessEnv;
  signal?: AbortSignal;
  onStdout?: (data: string) => void;
  onStderr?: (error: string) => void;
  onError?: (error: Error) => Promise<void> | void;
  onClose?: (code: number | null) => Promise<void> | void;
  finally?: () => Promise<void> | void;
}

type SpawnResult = {
  exitCode: number;
  stdout: string;
  stderr: string;
};

export const safeSpawn = (params: Params) => {
  const { command, args, env = {}, signal, ...callbacks } = params;

  return new Promise<SpawnResult>((resolve) => {
    let stdoutData = "";
    let stderrData = "";

    // Guard against settling twice: "error" and "close" can both fire for the
    // same child (e.g. when the executable is missing), and the finally
    // callback should only run once.
    let settled = false;
    const settle = async (exitCode: number) => {
      if (settled) return;
      settled = true;

      if (callbacks.finally) {
        await callbacks.finally();
      }

      resolve({ exitCode, stdout: stdoutData, stderr: stderrData });
    };

    const child = spawn(command, args, {
      env: { ...process.env, ...env },
      signal,
    });

    // When a stdout/stderr callback is provided, stream chunks to it instead
    // of buffering them in memory.
    child.stdout.on("data", (data) => {
      if (callbacks.onStdout) {
        callbacks.onStdout(data.toString());
      } else {
        stdoutData += data.toString();
      }
    });

    child.stderr.on("data", (data) => {
      if (callbacks.onStderr) {
        callbacks.onStderr(data.toString());
      } else {
        stderrData += data.toString();
      }
    });

    child.on("error", async (error) => {
      if (callbacks.onError) {
        await callbacks.onError(error);
      }
      await settle(-1);
    });

    child.on("close", async (code) => {
      if (callbacks.onClose) {
        await callbacks.onClose(code);
      }
      await settle(code === null ? -1 : code);
    });
  });
};

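// Usage sketch (illustrative, not part of this commit): streaming a command's
// output while allowing the caller to abort it.
//
//   const controller = new AbortController();
//   const res = await safeSpawn({
//     command: "restic",
//     args: ["version"],
//     signal: controller.signal,
//     onStdout: (chunk) => console.log(chunk.trim()),
//   });
//   console.log(`exit code: ${res.exitCode}`);
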
17
app/server/utils/timeout.ts
Normal file
@@ -0,0 +1,17 @@
class TimeoutError extends Error {
  code = "ETIMEOUT";

  constructor(message: string) {
    super(message);
    this.name = "TimeoutError";
  }
}

export async function withTimeout<T>(promise: Promise<T>, ms: number, label = "operation"): Promise<T> {
  let timer: NodeJS.Timeout | undefined;

  const timeout = new Promise<T>((_, reject) => {
    timer = setTimeout(() => reject(new TimeoutError(`${label} timed out after ${ms}ms`)), ms);
  });

  return Promise.race([promise, timeout]).finally(() => {
    if (timer) clearTimeout(timer);
  });
}

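// Usage sketch (illustrative, not part of this commit): bounding a restic
// call so a hung repository cannot stall the caller; on expiry the promise
// rejects with a TimeoutError (code "ETIMEOUT").
//
//   const snaps = await withTimeout(restic.snapshots(config), 15_000, "restic snapshots");
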