Mirror of https://github.com/nicotsx/ironmount.git (synced 2025-12-10 12:10:51 +01:00)
refactor: unify backend and frontend servers (#3)
* refactor: unify backend and frontend servers
* refactor: correct paths for openapi & drizzle
* refactor: move api-client to client
* fix: drizzle paths
* chore: fix linting issues
* fix: form reset issue
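The diff below adds only the two lifecycle modules; the unified entrypoint that serves both the API and the built client is not part of this commit. As a minimal, purely illustrative sketch (file name, port, route prefix, and asset paths are assumptions, not taken from this repository), a single Bun process serving both could look roughly like this:

// Hypothetical sketch of a unified entrypoint (not code from this commit).
// Bun.serve and Bun.file are real Bun APIs; every path and route below is an assumption.
import { startup } from "./modules/lifecycle/startup";

await startup();

Bun.serve({
  port: 3000, // assumed port
  async fetch(req) {
    const url = new URL(req.url);
    if (url.pathname.startsWith("/api/")) {
      // The backend router would handle API requests here (router not shown in this diff).
      return new Response("api placeholder");
    }
    // Otherwise fall back to the built frontend assets (directory is an assumption).
    const asset = Bun.file(`./client/dist${url.pathname === "/" ? "/index.html" : url.pathname}`);
    return (await asset.exists()) ? new Response(asset) : new Response("Not found", { status: 404 });
  },
});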
app/server/modules/lifecycle/shutdown.ts (new file, 28 lines)
@@ -0,0 +1,28 @@
import { Scheduler } from "../../core/scheduler";
import { eq, or } from "drizzle-orm";
import { db } from "../../db/db";
import { volumesTable } from "../../db/schema";
import { logger } from "../../utils/logger";
import { SOCKET_PATH } from "../../core/constants";
import { createVolumeBackend } from "../backends/backend";

export const shutdown = async () => {
  await Scheduler.stop();

  await Bun.file(SOCKET_PATH)
    .delete()
    .catch(() => {
      // Ignore errors if the socket file does not exist
    });

  const volumes = await db.query.volumesTable.findMany({
    where: or(eq(volumesTable.status, "mounted")),
  });

  for (const volume of volumes) {
    const backend = createVolumeBackend(volume);
    const { status, error } = await backend.unmount();

    logger.info(`Volume ${volume.name} unmount status: ${status}${error ? `, error: ${error}` : ""}`);
  }
};
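The diff exports shutdown but does not show where it is invoked. A minimal sketch of one way to wire it up, assuming it is registered against process signals in the server entrypoint (the file location and the choice of signals are assumptions, not confirmed by this commit):

// Hypothetical wiring, not part of this diff: run shutdown() on termination signals.
import { shutdown } from "./modules/lifecycle/shutdown";

for (const signal of ["SIGINT", "SIGTERM"] as const) {
  process.on(signal, async () => {
    await shutdown(); // stops the scheduler, removes the socket file, unmounts volumes
    process.exit(0);
  });
}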
app/server/modules/lifecycle/startup.ts (new file, 39 lines)
@@ -0,0 +1,39 @@
import { Scheduler } from "../../core/scheduler";
import { and, eq, or } from "drizzle-orm";
import { db } from "../../db/db";
import { volumesTable } from "../../db/schema";
import { logger } from "../../utils/logger";
import { restic } from "../../utils/restic";
import { volumeService } from "../volumes/volume.service";
import { CleanupDanglingMountsJob } from "../../jobs/cleanup-dangling";
import { VolumeHealthCheckJob } from "../../jobs/healthchecks";
import { RepositoryHealthCheckJob } from "../../jobs/repository-healthchecks";
import { BackupExecutionJob } from "../../jobs/backup-execution";
import { CleanupSessionsJob } from "../../jobs/cleanup-sessions";

export const startup = async () => {
  await Scheduler.start();

  await restic.ensurePassfile().catch((err) => {
    logger.error(`Error ensuring restic passfile exists: ${err.message}`);
  });

  const volumes = await db.query.volumesTable.findMany({
    where: or(
      eq(volumesTable.status, "mounted"),
      and(eq(volumesTable.autoRemount, true), eq(volumesTable.status, "error")),
    ),
  });

  for (const volume of volumes) {
    await volumeService.mountVolume(volume.name).catch((err) => {
      logger.error(`Error auto-remounting volume ${volume.name} on startup: ${err.message}`);
    });
  }

  Scheduler.build(CleanupDanglingMountsJob).schedule("0 * * * *");
  Scheduler.build(VolumeHealthCheckJob).schedule("*/5 * * * *");
  Scheduler.build(RepositoryHealthCheckJob).schedule("*/10 * * * *");
  Scheduler.build(BackupExecutionJob).schedule("* * * * *");
  Scheduler.build(CleanupSessionsJob).schedule("0 0 * * *");
};
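For reference, the schedule strings passed above follow standard five-field cron semantics:

// Standard cron cadences of the jobs registered above (reference only, not code from this commit):
// "0 * * * *"    -> hourly, at minute 0   (CleanupDanglingMountsJob)
// "*/5 * * * *"  -> every 5 minutes       (VolumeHealthCheckJob)
// "*/10 * * * *" -> every 10 minutes      (RepositoryHealthCheckJob)
// "* * * * *"    -> every minute          (BackupExecutionJob)
// "0 0 * * *"    -> daily at midnight     (CleanupSessionsJob)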