refactor: use short ids to allow changing the name of volumes & repos (#67)

* refactor: use short ids to allow changing the name of volumes & repos

* refactor: address PR feedback

* fix: make short_id non null after initial population
This commit is contained in:
Nico
2025-11-26 19:47:09 +01:00
committed by GitHub
parent d190d9c8cd
commit b26a062648
29 changed files with 3432 additions and 31 deletions

View File

@@ -1,6 +1,9 @@
import { Hono } from "hono";
import { volumeService } from "../volumes/volume.service";
import { getVolumePath } from "../volumes/helpers";
import { eq } from "drizzle-orm";
import { db } from "../../db/db";
import { volumesTable } from "../../db/schema";
export const driverController = new Hono()
.post("/VolumeDriver.Capabilities", (c) => {
@@ -30,10 +33,18 @@ export const driverController = new Hono()
return c.json({ Err: "Volume name is required" }, 400);
}
const volumeName = body.Name.replace(/^zb-/, "");
const shortId = body.Name.replace(/^zb-/, "");
const volume = await db.query.volumesTable.findFirst({
where: eq(volumesTable.shortId, shortId),
});
if (!volume) {
return c.json({ Err: `Volume with shortId ${shortId} not found` }, 404);
}
return c.json({
Mountpoint: getVolumePath(volumeName),
Mountpoint: getVolumePath(volume),
});
})
.post("/VolumeDriver.Unmount", (c) => {
@@ -48,7 +59,15 @@ export const driverController = new Hono()
return c.json({ Err: "Volume name is required" }, 400);
}
const { volume } = await volumeService.getVolume(body.Name.replace(/^zb-/, ""));
const shortId = body.Name.replace(/^zb-/, "");
const volume = await db.query.volumesTable.findFirst({
where: eq(volumesTable.shortId, shortId),
});
if (!volume) {
return c.json({ Err: `Volume with shortId ${shortId} not found` }, 404);
}
return c.json({
Mountpoint: getVolumePath(volume),
@@ -61,11 +80,19 @@ export const driverController = new Hono()
return c.json({ Err: "Volume name is required" }, 400);
}
const { volume } = await volumeService.getVolume(body.Name.replace(/^zb-/, ""));
const shortId = body.Name.replace(/^zb-/, "");
const volume = await db.query.volumesTable.findFirst({
where: eq(volumesTable.shortId, shortId),
});
if (!volume) {
return c.json({ Err: `Volume with shortId ${shortId} not found` }, 404);
}
return c.json({
Volume: {
Name: `zb-${volume.name}`,
Name: `zb-${volume.shortId}`,
Mountpoint: getVolumePath(volume),
Status: {},
},
@@ -76,7 +103,7 @@ export const driverController = new Hono()
const volumes = await volumeService.listVolumes();
const res = volumes.map((volume) => ({
Name: `zb-${volume.name}`,
Name: `zb-${volume.shortId}`,
Mountpoint: getVolumePath(volume),
Status: {},
}));

View File

@@ -0,0 +1,89 @@
import { eq, sql } from "drizzle-orm";
import { db } from "../../db/db";
import { appMetadataTable, usersTable } from "../../db/schema";
import { logger } from "../../utils/logger";
import { REQUIRED_MIGRATIONS } from "~/server/core/constants";
const MIGRATION_KEY_PREFIX = "migration:";
/**
 * Persist (or refresh) the checkpoint row marking `version`'s migration as done.
 * The value stores the completion time as an ISO-8601 string; created/updated
 * timestamps are epoch seconds.
 */
export const recordMigrationCheckpoint = async (version: string): Promise<void> => {
  const key = `${MIGRATION_KEY_PREFIX}${version}`;
  const epochSeconds = Math.floor(Date.now() / 1000);
  const payload = JSON.stringify({ completedAt: new Date().toISOString() });

  // Upsert: re-recording an existing checkpoint refreshes its value/updatedAt
  // instead of failing on the unique key.
  await db
    .insert(appMetadataTable)
    .values({ key, value: payload, createdAt: epochSeconds, updatedAt: epochSeconds })
    .onConflictDoUpdate({
      target: appMetadataTable.key,
      set: { value: payload, updatedAt: epochSeconds },
    });

  logger.info(`Recorded migration checkpoint for ${version}`);
};
/** True when a checkpoint row for `version` already exists in app metadata. */
export const hasMigrationCheckpoint = async (version: string): Promise<boolean> => {
  const checkpoint = await db.query.appMetadataTable.findFirst({
    where: eq(appMetadataTable.key, `${MIGRATION_KEY_PREFIX}${version}`),
  });
  return checkpoint !== undefined;
};
/**
 * Ensure every required migration checkpoint is present before startup.
 *
 * Fresh installs (no users yet) have nothing to migrate, so their checkpoints
 * are seeded directly. On existing installs, a missing checkpoint means a
 * version was skipped during upgrade: log instructions and exit the process.
 */
export const validateRequiredMigrations = async (requiredVersions: string[]): Promise<void> => {
  const [row] = await db.select({ count: sql<number>`count(*)` }).from(usersTable);
  const isFreshInstall = row?.count === 0;

  if (isFreshInstall) {
    logger.info("Fresh installation detected, skipping migration checkpoint validation.");
    // Seed all required checkpoints so future upgrades validate cleanly.
    for (const version of requiredVersions) {
      if (!(await hasMigrationCheckpoint(version))) {
        await recordMigrationCheckpoint(version);
      }
    }
    return;
  }

  for (const version of requiredVersions) {
    if (await hasMigrationCheckpoint(version)) {
      continue;
    }
    logger.error(`
================================================================================
MIGRATION ERROR: Required migration ${version} has not been run.
You are attempting to start a version of Zerobyte that requires migration
checkpoints from previous versions. This typically happens when you skip
versions during an upgrade.
To fix this:
1. First upgrade to version ${version} and run the application once
2. Validate that everything is still working correctly
3. Then upgrade to the current version
================================================================================
`);
    // Intentionally fatal: starting with skipped migrations risks data damage.
    process.exit(1);
  }
};
/**
 * List every recorded migration checkpoint with its completion timestamp.
 *
 * Fix: the stored value is parsed defensively — previously a single corrupt
 * metadata row made `JSON.parse` throw and crash the whole listing. A row
 * whose value is malformed now reports `completedAt: "unknown"` instead.
 */
export const getMigrationCheckpoints = async (): Promise<{ version: string; completedAt: string }[]> => {
  const rows = await db.query.appMetadataTable.findMany({
    where: (table, { like }) => like(table.key, `${MIGRATION_KEY_PREFIX}%`),
  });

  return rows.map((row) => {
    // Values are written by recordMigrationCheckpoint as {"completedAt": <ISO-8601>}.
    let completedAt = "unknown";
    try {
      const parsed: unknown = JSON.parse(row.value);
      if (
        parsed !== null &&
        typeof parsed === "object" &&
        typeof (parsed as { completedAt?: unknown }).completedAt === "string"
      ) {
        completedAt = (parsed as { completedAt: string }).completedAt;
      }
    } catch {
      // Malformed JSON in the row — fall through with "unknown".
    }
    return {
      // The `like` filter guarantees the prefix, so slicing it off is safe.
      version: row.key.slice(MIGRATION_KEY_PREFIX.length),
      completedAt,
    };
  });
};

View File

@@ -0,0 +1,193 @@
import * as fs from "node:fs/promises";
import * as path from "node:path";
import { eq } from "drizzle-orm";
import { db } from "../../db/db";
import { repositoriesTable } from "../../db/schema";
import { VOLUME_MOUNT_BASE, REPOSITORY_BASE } from "../../core/constants";
import { logger } from "../../utils/logger";
import { hasMigrationCheckpoint, recordMigrationCheckpoint } from "./checkpoint";
import type { RepositoryConfig } from "~/schemas/restic";
const MIGRATION_VERSION = "v0.14.0";
interface MigrationResult {
success: boolean;
errors: Array<{ name: string; error: string }>;
}
/**
 * Error raised when one or more items could not be migrated.
 * Carries the migration version and the per-item failure details so callers
 * can report exactly what needs manual attention.
 */
export class MigrationError extends Error {
  version: string;
  failedItems: Array<{ name: string; error: string }>;

  constructor(version: string, failedItems: Array<{ name: string; error: string }>) {
    // Summarize the failing item names in the message for quick log scanning.
    super(`Migration ${version} failed for: ${failedItems.map((item) => item.name).join(", ")}`);
    this.name = "MigrationError";
    this.version = version;
    this.failedItems = failedItems;
  }
}
/**
 * Run the one-time short-ID folder migration for volumes and repositories.
 * Idempotent: skips entirely when the checkpoint for MIGRATION_VERSION exists,
 * and only records the checkpoint after both sub-migrations succeed.
 * @throws MigrationError when any volume or repository fails to migrate.
 */
export const migrateToShortIds = async () => {
  if (await hasMigrationCheckpoint(MIGRATION_VERSION)) {
    logger.debug(`Migration ${MIGRATION_VERSION} already completed, skipping.`);
    return;
  }

  logger.info(`Starting short ID migration (${MIGRATION_VERSION})...`);

  const volumeOutcome = await migrateVolumeFolders();
  const repoOutcome = await migrateRepositoryFolders();
  const failures = [...volumeOutcome.errors, ...repoOutcome.errors];

  if (failures.length > 0) {
    for (const failure of failures) {
      logger.error(`Migration failure - ${failure.name}: ${failure.error}`);
    }
    // No checkpoint is recorded on failure, so the migration retries next boot.
    throw new MigrationError(MIGRATION_VERSION, failures);
  }

  await recordMigrationCheckpoint(MIGRATION_VERSION);
  logger.info(`Short ID migration (${MIGRATION_VERSION}) complete.`);
};
/**
 * Rename each volume's mount folder from its name-based path to its
 * shortId-based path. Failures are collected per volume rather than aborting
 * the whole run.
 */
const migrateVolumeFolders = async (): Promise<MigrationResult> => {
  const errors: Array<{ name: string; error: string }> = [];
  const volumes = await db.query.volumesTable.findMany({});

  for (const volume of volumes) {
    // "directory" volumes are skipped — presumably they live at a
    // user-supplied path outside VOLUME_MOUNT_BASE (TODO confirm vs helpers).
    if (volume.config.backend === "directory") {
      continue;
    }

    const oldPath = path.join(VOLUME_MOUNT_BASE, volume.name);
    const newPath = path.join(VOLUME_MOUNT_BASE, volume.shortId);
    const oldExists = await pathExists(oldPath);
    const newExists = await pathExists(newPath);

    if (oldExists && newExists) {
      // Ambiguous state — don't guess which folder holds the real data.
      logger.warn(
        `Both old (${oldPath}) and new (${newPath}) paths exist for volume "${volume.name}". Manual intervention may be required.`,
      );
    } else if (oldExists) {
      try {
        logger.info(`Migrating volume folder: ${oldPath} -> ${newPath}`);
        await fs.rename(oldPath, newPath);
        logger.info(`Successfully migrated volume folder for "${volume.name}"`);
      } catch (error) {
        errors.push({
          name: `volume:${volume.name}`,
          error: error instanceof Error ? error.message : String(error),
        });
      }
    }
    // Neither path existing means there is nothing on disk to move.
  }

  return { success: errors.length === 0, errors };
};
/** Persist `repo.shortId` as the stored folder name in a local repository's config. */
const persistShortIdConfig = async (
  repo: { id: string; shortId: string },
  config: Extract<RepositoryConfig, { backend: "local" }>,
): Promise<void> => {
  const updatedConfig: RepositoryConfig = { ...config, name: repo.shortId };
  await db
    .update(repositoriesTable)
    .set({
      config: updatedConfig,
      updatedAt: Math.floor(Date.now() / 1000),
    })
    .where(eq(repositoriesTable.id, repo.id));
};

/**
 * Rename each local repository's on-disk folder from its name-based path to
 * its shortId-based path and keep `config.name` in sync with `shortId`.
 *
 * Improvements: the config-update statement, previously duplicated verbatim in
 * three branches, is extracted into `persistShortIdConfig`; the redundant
 * `config.name !== repo.shortId` test in the "folder already at new path"
 * branch is dropped (the early `continue` already guarantees it).
 */
const migrateRepositoryFolders = async (): Promise<MigrationResult> => {
  const errors: Array<{ name: string; error: string }> = [];
  const repositories = await db.query.repositoriesTable.findMany({});

  for (const repo of repositories) {
    // Only local repositories have an on-disk folder we manage.
    if (repo.config.backend !== "local") {
      continue;
    }
    const config = repo.config as Extract<RepositoryConfig, { backend: "local" }>;
    if (config.name === repo.shortId) {
      continue; // Already migrated.
    }

    const basePath = config.path || REPOSITORY_BASE;
    const oldPath = path.join(basePath, config.name);
    const newPath = path.join(basePath, repo.shortId);
    const oldExists = await pathExists(oldPath);
    const newExists = await pathExists(newPath);

    if (oldExists && newExists) {
      // Ambiguous state — don't guess which folder holds the real data.
      logger.warn(
        `Both old (${oldPath}) and new (${newPath}) paths exist for repository "${repo.name}". Manual intervention may be required.`,
      );
      continue;
    }

    try {
      if (oldExists) {
        // Normal case: move the folder, then record the new name.
        logger.info(`Migrating repository folder: ${oldPath} -> ${newPath}`);
        await fs.rename(oldPath, newPath);
        await persistShortIdConfig(repo, config);
        logger.info(`Successfully migrated repository folder and config for "${repo.name}"`);
      } else if (newExists) {
        // Folder already moved (e.g. earlier partial run) — only config lags.
        logger.info(`Folder already at new path, updating config.name for repository "${repo.name}"`);
        await persistShortIdConfig(repo, config);
      } else {
        // Nothing on disk yet; just record the new name.
        logger.info(`Updating config.name for repository "${repo.name}" (no folder exists yet)`);
        await persistShortIdConfig(repo, config);
      }
    } catch (error) {
      errors.push({
        name: `repository:${repo.name}`,
        error: error instanceof Error ? error.message : String(error),
      });
    }
  }

  return { success: errors.length === 0, errors };
};
/** Resolve true when `p` is accessible on disk, false otherwise (never throws). */
const pathExists = (p: string): Promise<boolean> =>
  fs.access(p).then(
    () => true,
    () => false,
  );

View File

@@ -1,8 +1,6 @@
import type { NotificationConfig } from "~/schemas/notifications";
export function buildPushoverShoutrrrUrl(
config: Extract<NotificationConfig, { type: "pushover" }>,
): string {
export function buildPushoverShoutrrrUrl(config: Extract<NotificationConfig, { type: "pushover" }>): string {
const params = new URLSearchParams();
if (config.devices) {

View File

@@ -1,4 +1,4 @@
import { eq, and } from "drizzle-orm";
import { eq, and, ne } from "drizzle-orm";
import { ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced";
import slugify from "slugify";
import { db } from "../../db/db";
@@ -164,10 +164,10 @@ const updateDestination = async (
const slug = slugify(updates.name, { lower: true, strict: true });
const conflict = await db.query.notificationDestinationsTable.findFirst({
where: and(eq(notificationDestinationsTable.name, slug), eq(notificationDestinationsTable.id, id)),
where: and(eq(notificationDestinationsTable.name, slug), ne(notificationDestinationsTable.id, id)),
});
if (conflict && conflict.id !== id) {
if (conflict) {
throw new ConflictError("Notification destination with this name already exists");
}
updateData.name = slug;

View File

@@ -16,6 +16,8 @@ import {
listSnapshotsFilters,
restoreSnapshotBody,
restoreSnapshotDto,
updateRepositoryBody,
updateRepositoryDto,
type DeleteRepositoryDto,
type DeleteSnapshotDto,
type DoctorRepositoryDto,
@@ -25,6 +27,7 @@ import {
type ListSnapshotFilesDto,
type ListSnapshotsDto,
type RestoreSnapshotDto,
type UpdateRepositoryDto,
} from "./repositories.dto";
import { repositoriesService } from "./repositories.service";
import { getRcloneRemoteInfo, listRcloneRemotes } from "../../utils/rclone";
@@ -152,4 +155,12 @@ export const repositoriesController = new Hono()
await repositoriesService.deleteSnapshot(name, snapshotId);
return c.json<DeleteSnapshotDto>({ message: "Snapshot deleted" }, 200);
})
.patch("/:name", updateRepositoryDto, validator("json", updateRepositoryBody), async (c) => {
const { name } = c.req.param();
const body = c.req.valid("json");
const res = await repositoriesService.updateRepository(name, body);
return c.json<UpdateRepositoryDto>(res.repository, 200);
});

View File

@@ -4,6 +4,7 @@ import { COMPRESSION_MODES, REPOSITORY_BACKENDS, REPOSITORY_STATUS, repositoryCo
export const repositorySchema = type({
id: "string",
shortId: "string",
name: "string",
type: type.valueOf(REPOSITORY_BACKENDS),
config: repositoryConfigSchema,
@@ -123,6 +124,41 @@ export const deleteRepositoryDto = describeRoute({
},
});
/**
* Update a repository
*/
export const updateRepositoryBody = type({
name: "string?",
compressionMode: type.valueOf(COMPRESSION_MODES).optional(),
});
export type UpdateRepositoryBody = typeof updateRepositoryBody.infer;
export const updateRepositoryResponse = repositorySchema;
export type UpdateRepositoryDto = typeof updateRepositoryResponse.infer;
export const updateRepositoryDto = describeRoute({
description: "Update a repository's name or settings",
tags: ["Repositories"],
operationId: "updateRepository",
responses: {
200: {
description: "Repository updated successfully",
content: {
"application/json": {
schema: resolver(updateRepositoryResponse),
},
},
},
404: {
description: "Repository not found",
},
409: {
description: "Repository with this name already exists",
},
},
});
/**
* List snapshots in a repository
*/

View File

@@ -1,10 +1,11 @@
import crypto from "node:crypto";
import { eq } from "drizzle-orm";
import { and, eq, ne } from "drizzle-orm";
import { ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced";
import slugify from "slugify";
import { db } from "../../db/db";
import { repositoriesTable } from "../../db/schema";
import { toMessage } from "../../utils/errors";
import { generateShortId } from "../../utils/id";
import { restic } from "../../utils/restic";
import { cryptoUtils } from "../../utils/crypto";
import type { CompressionMode, RepositoryConfig } from "~/schemas/restic";
@@ -61,13 +62,20 @@ const createRepository = async (name: string, config: RepositoryConfig, compress
}
const id = crypto.randomUUID();
const shortId = generateShortId();
const encryptedConfig = await encryptConfig(config);
let processedConfig = config;
if (config.backend === "local") {
processedConfig = { ...config, name: shortId };
}
const encryptedConfig = await encryptConfig(processedConfig);
const [created] = await db
.insert(repositoriesTable)
.values({
id,
shortId,
name: slug,
type: config.backend,
config: encryptedConfig,
@@ -350,11 +358,53 @@ const deleteSnapshot = async (name: string, snapshotId: string) => {
await restic.deleteSnapshot(repository.config, snapshotId);
};
/**
 * Update a repository's display name and/or compression mode.
 * @param name current (slugified) repository name used to look it up
 * @param updates optional new name and/or compression mode
 * @throws NotFoundError when no repository matches `name`
 * @throws ConflictError when the new slug collides with another repository
 * @throws InternalServerError when the update returns no row
 */
const updateRepository = async (name: string, updates: { name?: string; compressionMode?: CompressionMode }) => {
  const repository = await db.query.repositoriesTable.findFirst({
    where: eq(repositoriesTable.name, name),
  });
  if (!repository) {
    throw new NotFoundError("Repository not found");
  }

  // Keep the current name unless a different one was supplied.
  let nextName = repository.name;
  if (updates.name !== undefined && updates.name !== repository.name) {
    const slug = slugify(updates.name, { lower: true, strict: true });
    // Exclude this repository's own row so renaming to an equivalent slug is a no-op.
    const taken = await db.query.repositoriesTable.findFirst({
      where: and(eq(repositoriesTable.name, slug), ne(repositoriesTable.id, repository.id)),
    });
    if (taken) {
      throw new ConflictError("A repository with this name already exists");
    }
    nextName = slug;
  }

  const [updated] = await db
    .update(repositoriesTable)
    .set({
      name: nextName,
      compressionMode: updates.compressionMode ?? repository.compressionMode,
      updatedAt: Math.floor(Date.now() / 1000),
    })
    .where(eq(repositoriesTable.id, repository.id))
    .returning();

  if (!updated) {
    throw new InternalServerError("Failed to update repository");
  }
  return { repository: updated };
};
export const repositoriesService = {
listRepositories,
createRepository,
getRepository,
deleteRepository,
updateRepository,
listSnapshots,
listSnapshotFiles,
restoreSnapshot,

View File

@@ -6,5 +6,5 @@ export const getVolumePath = (volume: Volume) => {
return volume.config.path;
}
return `${VOLUME_MOUNT_BASE}/${volume.name}/_data`;
return `${VOLUME_MOUNT_BASE}/${volume.shortId}/_data`;
};

View File

@@ -4,6 +4,7 @@ import { BACKEND_STATUS, BACKEND_TYPES, volumeConfigSchema } from "~/schemas/vol
export const volumeSchema = type({
id: "number",
shortId: "string",
name: "string",
type: type.valueOf(BACKEND_TYPES),
status: type.valueOf(BACKEND_STATUS),
@@ -128,6 +129,7 @@ export const getVolumeDto = describeRoute({
* Update a volume
*/
export const updateVolumeBody = type({
name: "string?",
autoRemount: "boolean?",
config: volumeConfigSchema.optional(),
});

View File

@@ -2,13 +2,14 @@ import * as fs from "node:fs/promises";
import * as os from "node:os";
import * as path from "node:path";
import Docker from "dockerode";
import { eq } from "drizzle-orm";
import { and, eq, ne } from "drizzle-orm";
import { ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced";
import slugify from "slugify";
import { getCapabilities } from "../../core/capabilities";
import { db } from "../../db/db";
import { volumesTable } from "../../db/schema";
import { toMessage } from "../../utils/errors";
import { generateShortId } from "../../utils/id";
import { getStatFs, type StatFs } from "../../utils/mountinfo";
import { withTimeout } from "../../utils/timeout";
import { createVolumeBackend } from "../backends/backend";
@@ -35,9 +36,12 @@ const createVolume = async (name: string, backendConfig: BackendConfig) => {
throw new ConflictError("Volume already exists");
}
const shortId = generateShortId();
const [created] = await db
.insert(volumesTable)
.values({
shortId,
name: slug,
config: backendConfig,
type: backendConfig.backend,
@@ -147,6 +151,21 @@ const updateVolume = async (name: string, volumeData: UpdateVolumeBody) => {
throw new NotFoundError("Volume not found");
}
let newName = existing.name;
if (volumeData.name !== undefined && volumeData.name !== existing.name) {
const newSlug = slugify(volumeData.name, { lower: true, strict: true });
const conflict = await db.query.volumesTable.findFirst({
where: and(eq(volumesTable.name, newSlug), ne(volumesTable.id, existing.id)),
});
if (conflict) {
throw new ConflictError("A volume with this name already exists");
}
newName = newSlug;
}
const configChanged =
JSON.stringify(existing.config) !== JSON.stringify(volumeData.config) && volumeData.config !== undefined;
@@ -159,12 +178,13 @@ const updateVolume = async (name: string, volumeData: UpdateVolumeBody) => {
const [updated] = await db
.update(volumesTable)
.set({
name: newName,
config: volumeData.config,
type: volumeData.config?.backend,
autoRemount: volumeData.autoRemount,
updatedAt: Date.now(),
})
.where(eq(volumesTable.name, name))
.where(eq(volumesTable.id, existing.id))
.returning();
if (!updated) {
@@ -177,9 +197,9 @@ const updateVolume = async (name: string, volumeData: UpdateVolumeBody) => {
await db
.update(volumesTable)
.set({ status, lastError: error ?? null, lastHealthCheck: Date.now() })
.where(eq(volumesTable.name, name));
.where(eq(volumesTable.id, existing.id));
serverEvents.emit("volume:updated", { volumeName: name });
serverEvents.emit("volume:updated", { volumeName: updated.name });
}
return { volume: updated };
@@ -190,6 +210,7 @@ const testConnection = async (backendConfig: BackendConfig) => {
const mockVolume = {
id: 0,
shortId: "test",
name: "test-connection",
path: tempDir,
config: backendConfig,
@@ -264,7 +285,7 @@ const getContainersUsingVolume = async (name: string) => {
const container = docker.getContainer(info.Id);
const inspect = await container.inspect();
const mounts = inspect.Mounts || [];
const usesVolume = mounts.some((mount) => mount.Type === "volume" && mount.Name === `im-${volume.name}`);
const usesVolume = mounts.some((mount) => mount.Type === "volume" && mount.Name === `zb-${volume.shortId}`);
if (usesVolume) {
usingContainers.push({
id: inspect.Id,