mirror of
https://github.com/nicotsx/ironmount.git
synced 2025-12-10 12:10:51 +01:00
refactor: unify backend and frontend servers (#3)
* refactor: unify backend and frontend servers
* refactor: correct paths for openapi & drizzle
* refactor: move api-client to client
* fix: drizzle paths
* chore: fix linting issues
* fix: form reset issue
This commit is contained in:
29
app/server/jobs/backup-execution.ts
Normal file
29
app/server/jobs/backup-execution.ts
Normal file
@@ -0,0 +1,29 @@
|
||||
import { Job } from "../core/scheduler";
|
||||
import { backupsService } from "../modules/backups/backups.service";
|
||||
import { toMessage } from "../utils/errors";
|
||||
import { logger } from "../utils/logger";
|
||||
|
||||
export class BackupExecutionJob extends Job {
|
||||
async run() {
|
||||
logger.debug("Checking for backup schedules to execute...");
|
||||
|
||||
const scheduleIds = await backupsService.getSchedulesToExecute();
|
||||
|
||||
if (scheduleIds.length === 0) {
|
||||
logger.debug("No backup schedules to execute");
|
||||
return { done: true, timestamp: new Date(), executed: 0 };
|
||||
}
|
||||
|
||||
logger.info(`Found ${scheduleIds.length} backup schedule(s) to execute`);
|
||||
|
||||
for (const scheduleId of scheduleIds) {
|
||||
try {
|
||||
await backupsService.executeBackup(scheduleId);
|
||||
} catch (error) {
|
||||
logger.error(`Failed to execute backup for schedule ${scheduleId}: ${toMessage(error)}`);
|
||||
}
|
||||
}
|
||||
|
||||
return { done: true, timestamp: new Date(), executed: scheduleIds.length };
|
||||
}
|
||||
}
|
||||
49
app/server/jobs/cleanup-dangling.ts
Normal file
49
app/server/jobs/cleanup-dangling.ts
Normal file
@@ -0,0 +1,49 @@
|
||||
import { Job } from "../core/scheduler";
|
||||
import path from "node:path";
|
||||
import fs from "node:fs/promises";
|
||||
import { volumeService } from "../modules/volumes/volume.service";
|
||||
import { readMountInfo } from "../utils/mountinfo";
|
||||
import { getVolumePath } from "../modules/volumes/helpers";
|
||||
import { logger } from "../utils/logger";
|
||||
import { executeUnmount } from "../modules/backends/utils/backend-utils";
|
||||
import { toMessage } from "../utils/errors";
|
||||
import { VOLUME_MOUNT_BASE } from "../core/constants";
|
||||
|
||||
export class CleanupDanglingMountsJob extends Job {
|
||||
async run() {
|
||||
const allVolumes = await volumeService.listVolumes();
|
||||
const allSystemMounts = await readMountInfo();
|
||||
|
||||
for (const mount of allSystemMounts) {
|
||||
if (mount.mountPoint.includes("ironmount") && mount.mountPoint.endsWith("_data")) {
|
||||
const matchingVolume = allVolumes.find((v) => getVolumePath(v) === mount.mountPoint);
|
||||
if (!matchingVolume) {
|
||||
logger.info(`Found dangling mount at ${mount.mountPoint}, attempting to unmount...`);
|
||||
await executeUnmount(mount.mountPoint);
|
||||
|
||||
await fs.rmdir(path.dirname(mount.mountPoint)).catch((err) => {
|
||||
logger.warn(
|
||||
`Failed to remove dangling mount directory ${path.dirname(mount.mountPoint)}: ${toMessage(err)}`,
|
||||
);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const allIronmountDirs = await fs.readdir(VOLUME_MOUNT_BASE).catch(() => []);
|
||||
|
||||
for (const dir of allIronmountDirs) {
|
||||
const volumePath = `${VOLUME_MOUNT_BASE}/${dir}/_data`;
|
||||
const matchingVolume = allVolumes.find((v) => getVolumePath(v) === volumePath);
|
||||
if (!matchingVolume) {
|
||||
const fullPath = path.join(VOLUME_MOUNT_BASE, dir);
|
||||
logger.info(`Found dangling mount directory at ${fullPath}, attempting to remove...`);
|
||||
await fs.rmdir(fullPath, { recursive: true }).catch((err) => {
|
||||
logger.warn(`Failed to remove dangling mount directory ${fullPath}: ${toMessage(err)}`);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return { done: true, timestamp: new Date() };
|
||||
}
|
||||
}
|
||||
10
app/server/jobs/cleanup-sessions.ts
Normal file
10
app/server/jobs/cleanup-sessions.ts
Normal file
@@ -0,0 +1,10 @@
|
||||
import { Job } from "../core/scheduler";
|
||||
import { authService } from "../modules/auth/auth.service";
|
||||
|
||||
export class CleanupSessionsJob extends Job {
|
||||
async run() {
|
||||
authService.cleanupExpiredSessions();
|
||||
|
||||
return { done: true, timestamp: new Date() };
|
||||
}
|
||||
}
|
||||
25
app/server/jobs/healthchecks.ts
Normal file
25
app/server/jobs/healthchecks.ts
Normal file
@@ -0,0 +1,25 @@
|
||||
import { Job } from "../core/scheduler";
|
||||
import { volumeService } from "../modules/volumes/volume.service";
|
||||
import { logger } from "../utils/logger";
|
||||
import { db } from "../db/db";
|
||||
import { eq, or } from "drizzle-orm";
|
||||
import { volumesTable } from "../db/schema";
|
||||
|
||||
export class VolumeHealthCheckJob extends Job {
|
||||
async run() {
|
||||
logger.debug("Running health check for all volumes...");
|
||||
|
||||
const volumes = await db.query.volumesTable.findMany({
|
||||
where: or(eq(volumesTable.status, "mounted"), eq(volumesTable.status, "error")),
|
||||
});
|
||||
|
||||
for (const volume of volumes) {
|
||||
const { status } = await volumeService.checkHealth(volume.name);
|
||||
if (status === "error" && volume.autoRemount) {
|
||||
await volumeService.mountVolume(volume.name);
|
||||
}
|
||||
}
|
||||
|
||||
return { done: true, timestamp: new Date() };
|
||||
}
|
||||
}
|
||||
26
app/server/jobs/repository-healthchecks.ts
Normal file
26
app/server/jobs/repository-healthchecks.ts
Normal file
@@ -0,0 +1,26 @@
|
||||
import { Job } from "../core/scheduler";
|
||||
import { repositoriesService } from "../modules/repositories/repositories.service";
|
||||
import { logger } from "../utils/logger";
|
||||
import { db } from "../db/db";
|
||||
import { eq, or } from "drizzle-orm";
|
||||
import { repositoriesTable } from "../db/schema";
|
||||
|
||||
export class RepositoryHealthCheckJob extends Job {
|
||||
async run() {
|
||||
logger.debug("Running health check for all repositories...");
|
||||
|
||||
const repositories = await db.query.repositoriesTable.findMany({
|
||||
where: or(eq(repositoriesTable.status, "healthy"), eq(repositoriesTable.status, "error")),
|
||||
});
|
||||
|
||||
for (const repository of repositories) {
|
||||
try {
|
||||
await repositoriesService.checkHealth(repository.id);
|
||||
} catch (error) {
|
||||
logger.error(`Health check failed for repository ${repository.name}:`, error);
|
||||
}
|
||||
}
|
||||
|
||||
return { done: true, timestamp: new Date() };
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user