feat: mirror backup repositories

feat: mirror backup repositories
This commit is contained in:
Nicolas Meienberger
2025-11-28 19:17:05 +01:00
parent 7ff38f0128
commit 16b8be2cd9
22 changed files with 2265 additions and 89 deletions

View File

@@ -24,6 +24,14 @@ interface ServerEvents {
repositoryName: string;
status: "success" | "error" | "stopped" | "warning";
}) => void;
"mirror:started": (data: { scheduleId: number; repositoryId: string; repositoryName: string }) => void;
"mirror:completed": (data: {
scheduleId: number;
repositoryId: string;
repositoryName: string;
status: "success" | "error";
error?: string;
}) => void;
"volume:mounted": (data: { volumeName: string }) => void;
"volume:unmounted": (data: { volumeName: string }) => void;
"volume:updated": (data: { volumeName: string }) => void;

View File

@@ -93,6 +93,7 @@ export const backupSchedulesTable = sqliteTable("backup_schedules_table", {
createdAt: int("created_at", { mode: "number" }).notNull().default(sql`(unixepoch() * 1000)`),
updatedAt: int("updated_at", { mode: "number" }).notNull().default(sql`(unixepoch() * 1000)`),
});
export const backupScheduleRelations = relations(backupSchedulesTable, ({ one, many }) => ({
volume: one(volumesTable, {
fields: [backupSchedulesTable.volumeId],
@@ -103,6 +104,7 @@ export const backupScheduleRelations = relations(backupSchedulesTable, ({ one, m
references: [repositoriesTable.id],
}),
notifications: many(backupScheduleNotificationsTable),
mirrors: many(backupScheduleMirrorsTable),
}));
export type BackupSchedule = typeof backupSchedulesTable.$inferSelect;
@@ -154,6 +156,37 @@ export const backupScheduleNotificationRelations = relations(backupScheduleNotif
}));
export type BackupScheduleNotification = typeof backupScheduleNotificationsTable.$inferSelect;
/**
* Backup Schedule Mirrors Junction Table (Many-to-Many)
* Allows copying snapshots to secondary repositories after backup completes
*/
export const backupScheduleMirrorsTable = sqliteTable("backup_schedule_mirrors_table", {
  id: int().primaryKey({ autoIncrement: true }),
  // Owning schedule; rows are removed automatically when the schedule is deleted.
  scheduleId: int("schedule_id")
    .notNull()
    .references(() => backupSchedulesTable.id, { onDelete: "cascade" }),
  // Secondary repository snapshots are copied to; cascades with the repository.
  repositoryId: text("repository_id")
    .notNull()
    .references(() => repositoriesTable.id, { onDelete: "cascade" }),
  // Disabled assignments stay configured but are skipped during mirroring.
  enabled: int("enabled", { mode: "boolean" }).notNull().default(true),
  // Outcome of the most recent copy attempt (epoch ms / status / error text).
  lastCopyAt: int("last_copy_at", { mode: "number" }),
  lastCopyStatus: text("last_copy_status").$type<"success" | "error">(),
  lastCopyError: text("last_copy_error"),
  createdAt: int("created_at", { mode: "number" }).notNull().default(sql`(unixepoch() * 1000)`),
});
// Drizzle relation metadata so mirror rows can be queried `with` their
// parent schedule and target repository in a single relational query.
export const backupScheduleMirrorRelations = relations(backupScheduleMirrorsTable, ({ one }) => ({
  schedule: one(backupSchedulesTable, {
    fields: [backupScheduleMirrorsTable.scheduleId],
    references: [backupSchedulesTable.id],
  }),
  repository: one(repositoriesTable, {
    fields: [backupScheduleMirrorsTable.repositoryId],
    references: [repositoriesTable.id],
  }),
}));
export type BackupScheduleMirror = typeof backupScheduleMirrorsTable.$inferSelect;
/**
* App Metadata Table
* Used for storing key-value pairs like migration checkpoints

View File

@@ -12,6 +12,10 @@ import {
stopBackupDto,
updateBackupScheduleDto,
updateBackupScheduleBody,
getScheduleMirrorsDto,
updateScheduleMirrorsDto,
updateScheduleMirrorsBody,
getMirrorCompatibilityDto,
type CreateBackupScheduleDto,
type DeleteBackupScheduleDto,
type GetBackupScheduleDto,
@@ -21,6 +25,9 @@ import {
type RunForgetDto,
type StopBackupDto,
type UpdateBackupScheduleDto,
type GetScheduleMirrorsDto,
type UpdateScheduleMirrorsDto,
type GetMirrorCompatibilityDto,
} from "./backups.dto";
import { backupsService } from "./backups.service";
import {
@@ -113,4 +120,23 @@ export const backupScheduleController = new Hono()
return c.json<UpdateScheduleNotificationsDto>(assignments, 200);
},
);
)
.get("/:scheduleId/mirrors", getScheduleMirrorsDto, async (c) => {
const scheduleId = Number.parseInt(c.req.param("scheduleId"), 10);
const mirrors = await backupsService.getMirrors(scheduleId);
return c.json<GetScheduleMirrorsDto>(mirrors, 200);
})
.put("/:scheduleId/mirrors", updateScheduleMirrorsDto, validator("json", updateScheduleMirrorsBody), async (c) => {
const scheduleId = Number.parseInt(c.req.param("scheduleId"), 10);
const body = c.req.valid("json");
const mirrors = await backupsService.updateMirrors(scheduleId, body);
return c.json<UpdateScheduleMirrorsDto>(mirrors, 200);
})
.get("/:scheduleId/mirrors/compatibility", getMirrorCompatibilityDto, async (c) => {
const scheduleId = Number.parseInt(c.req.param("scheduleId"), 10);
const compatibility = await backupsService.getMirrorCompatibility(scheduleId);
return c.json<GetMirrorCompatibilityDto>(compatibility, 200);
});

View File

@@ -37,6 +37,19 @@ const backupScheduleSchema = type({
}),
);
/**
 * Mirror assignment as returned by the API: the junction-table row joined
 * with the full repository record it points at.
 */
const scheduleMirrorSchema = type({
  scheduleId: "number",
  repositoryId: "string",
  enabled: "boolean",
  // Outcome of the most recent copy attempt; null until a copy has run.
  lastCopyAt: "number | null",
  lastCopyStatus: "'success' | 'error' | null",
  lastCopyError: "string | null",
  createdAt: "number",
  repository: repositorySchema,
});

export type ScheduleMirrorDto = typeof scheduleMirrorSchema.infer;
/**
* List all backup schedules
*/
@@ -276,3 +289,75 @@ export const runForgetDto = describeRoute({
},
},
});
// Response schema: every mirror assignment configured for one schedule.
export const getScheduleMirrorsResponse = scheduleMirrorSchema.array();
export type GetScheduleMirrorsDto = typeof getScheduleMirrorsResponse.infer;

// OpenAPI metadata for GET /:scheduleId/mirrors.
export const getScheduleMirrorsDto = describeRoute({
  description: "Get mirror repository assignments for a backup schedule",
  operationId: "getScheduleMirrors",
  tags: ["Backups"],
  responses: {
    200: {
      description: "List of mirror repository assignments for the schedule",
      content: {
        "application/json": {
          schema: resolver(getScheduleMirrorsResponse),
        },
      },
    },
  },
});
// Request body: the full desired set of mirror assignments. The service
// replaces any existing assignments with this list.
export const updateScheduleMirrorsBody = type({
  mirrors: type({
    repositoryId: "string",
    enabled: "boolean",
  }).array(),
});

export type UpdateScheduleMirrorsBody = typeof updateScheduleMirrorsBody.infer;

// Response schema matches the GET endpoint: the assignments after the update.
export const updateScheduleMirrorsResponse = scheduleMirrorSchema.array();
export type UpdateScheduleMirrorsDto = typeof updateScheduleMirrorsResponse.infer;

// OpenAPI metadata for PUT /:scheduleId/mirrors.
export const updateScheduleMirrorsDto = describeRoute({
  description: "Update mirror repository assignments for a backup schedule",
  operationId: "updateScheduleMirrors",
  tags: ["Backups"],
  responses: {
    200: {
      description: "Mirror assignments updated successfully",
      content: {
        "application/json": {
          schema: resolver(updateScheduleMirrorsResponse),
        },
      },
    },
  },
});
// Per-repository compatibility verdict; `reason` is non-null only when
// the repository cannot be used as a mirror for the schedule's primary.
const mirrorCompatibilitySchema = type({
  repositoryId: "string",
  compatible: "boolean",
  reason: "string | null",
});

export const getMirrorCompatibilityResponse = mirrorCompatibilitySchema.array();
export type GetMirrorCompatibilityDto = typeof getMirrorCompatibilityResponse.infer;

// OpenAPI metadata for GET /:scheduleId/mirrors/compatibility.
export const getMirrorCompatibilityDto = describeRoute({
  description: "Get mirror compatibility info for all repositories relative to a backup schedule's primary repository",
  operationId: "getMirrorCompatibility",
  tags: ["Backups"],
  responses: {
    200: {
      description: "List of repositories with their mirror compatibility status",
      content: {
        "application/json": {
          schema: resolver(getMirrorCompatibilityResponse),
        },
      },
    },
  },
});

View File

@@ -3,15 +3,16 @@ import cron from "node-cron";
import { CronExpressionParser } from "cron-parser";
import { NotFoundError, BadRequestError, ConflictError } from "http-errors-enhanced";
import { db } from "../../db/db";
import { backupSchedulesTable, repositoriesTable, volumesTable } from "../../db/schema";
import { backupSchedulesTable, backupScheduleMirrorsTable, repositoriesTable, volumesTable } from "../../db/schema";
import { restic } from "../../utils/restic";
import { logger } from "../../utils/logger";
import { getVolumePath } from "../volumes/helpers";
import type { CreateBackupScheduleBody, UpdateBackupScheduleBody } from "./backups.dto";
import type { CreateBackupScheduleBody, UpdateBackupScheduleBody, UpdateScheduleMirrorsBody } from "./backups.dto";
import { toMessage } from "../../utils/errors";
import { serverEvents } from "../../core/events";
import { notificationsService } from "../notifications/notifications.service";
import { repoMutex } from "../../core/repository-mutex";
import { checkMirrorCompatibility, getIncompatibleMirrorError } from "~/server/utils/backend-compatibility";
const runningBackups = new Map<number, AbortController>();
@@ -266,19 +267,25 @@ const executeBackup = async (scheduleId: number, manual = false) => {
void runForget(schedule.id);
}
copyToMirrors(scheduleId, repository, schedule.retentionPolicy).catch((error) => {
logger.error(`Background mirror copy failed for schedule ${scheduleId}: ${toMessage(error)}`);
});
const finalStatus = exitCode === 0 ? "success" : "warning";
const nextBackupAt = calculateNextRun(schedule.cronExpression);
await db
.update(backupSchedulesTable)
.set({
lastBackupAt: Date.now(),
lastBackupStatus: exitCode === 0 ? "success" : "warning",
lastBackupStatus: finalStatus,
lastBackupError: null,
nextBackupAt: nextBackupAt,
updatedAt: Date.now(),
})
.where(eq(backupSchedulesTable.id, scheduleId));
if (exitCode !== 0) {
if (finalStatus === "warning") {
logger.warn(`Backup completed with warnings for volume ${volume.name} to repository ${repository.name}`);
} else {
logger.info(`Backup completed successfully for volume ${volume.name} to repository ${repository.name}`);
@@ -288,11 +295,11 @@ const executeBackup = async (scheduleId: number, manual = false) => {
scheduleId,
volumeName: volume.name,
repositoryName: repository.name,
status: exitCode === 0 ? "success" : "warning",
status: finalStatus,
});
notificationsService
.sendBackupNotification(scheduleId, exitCode === 0 ? "success" : "warning", {
.sendBackupNotification(scheduleId, finalStatus === "success" ? "success" : "warning", {
volumeName: volume.name,
repositoryName: repository.name,
})
@@ -421,6 +428,162 @@ const runForget = async (scheduleId: number) => {
logger.info(`Retention policy applied successfully for schedule ${scheduleId}`);
};
/**
 * Fetch all mirror assignments for a schedule, each joined with its
 * repository row.
 *
 * @throws NotFoundError when the schedule does not exist.
 */
const getMirrors = async (scheduleId: number) => {
  const schedule = await db.query.backupSchedulesTable.findFirst({
    where: eq(backupSchedulesTable.id, scheduleId),
  });

  if (!schedule) {
    throw new NotFoundError("Backup schedule not found");
  }

  return db.query.backupScheduleMirrorsTable.findMany({
    where: eq(backupScheduleMirrorsTable.scheduleId, scheduleId),
    with: { repository: true },
  });
};
/**
 * Replace the full set of mirror assignments for a schedule.
 *
 * Every requested mirror is validated first (must exist, must not be the
 * primary repository, must be credential-compatible with the primary); only
 * then are the old assignments deleted and the new ones inserted.
 *
 * @throws NotFoundError when the schedule or a referenced repository is missing.
 * @throws BadRequestError when a mirror is the primary repo or incompatible.
 */
const updateMirrors = async (scheduleId: number, data: UpdateScheduleMirrorsBody) => {
  const schedule = await db.query.backupSchedulesTable.findFirst({
    where: eq(backupSchedulesTable.id, scheduleId),
    with: { repository: true },
  });

  if (!schedule) {
    throw new NotFoundError("Backup schedule not found");
  }

  // Validate all requested mirrors up front. The per-mirror lookups are
  // independent, so run them in parallel instead of one awaited query per
  // loop iteration; the first failing mirror's error is propagated.
  await Promise.all(
    data.mirrors.map(async (mirror) => {
      if (mirror.repositoryId === schedule.repositoryId) {
        throw new BadRequestError("Cannot add the primary repository as a mirror");
      }

      const repo = await db.query.repositoriesTable.findFirst({
        where: eq(repositoriesTable.id, mirror.repositoryId),
      });

      if (!repo) {
        throw new NotFoundError(`Repository ${mirror.repositoryId} not found`);
      }

      const compatibility = await checkMirrorCompatibility(schedule.repository.config, repo.config, repo.id);
      if (!compatibility.compatible) {
        throw new BadRequestError(
          getIncompatibleMirrorError(repo.name, schedule.repository.config.backend, repo.config.backend),
        );
      }
    }),
  );

  // NOTE(review): the delete + insert below is not wrapped in a transaction;
  // a failure between the two statements would drop the existing assignments.
  // Consider db.transaction(...) if the driver supports it — confirm.
  await db.delete(backupScheduleMirrorsTable).where(eq(backupScheduleMirrorsTable.scheduleId, scheduleId));

  if (data.mirrors.length > 0) {
    await db.insert(backupScheduleMirrorsTable).values(
      data.mirrors.map((mirror) => ({
        scheduleId,
        repositoryId: mirror.repositoryId,
        enabled: mirror.enabled,
      })),
    );
  }

  // Return the freshly persisted state (joined with repository rows).
  return getMirrors(scheduleId);
};
/**
 * Copy this schedule's snapshots to every enabled mirror repository.
 *
 * Runs in the background after a backup completes (the caller does not await
 * it). Mirrors are processed sequentially; a failure on one mirror is
 * recorded on its row and reported via events, then the loop continues with
 * the next mirror.
 *
 * @param scheduleId - schedule whose snapshots (tagged with the schedule id) are copied
 * @param sourceRepository - primary repository id and config
 * @param retentionPolicy - when set, the same forget policy is applied to each mirror after the copy
 */
const copyToMirrors = async (
  scheduleId: number,
  sourceRepository: { id: string; config: (typeof repositoriesTable.$inferSelect)["config"] },
  retentionPolicy: (typeof backupSchedulesTable.$inferSelect)["retentionPolicy"],
) => {
  const mirrors = await db.query.backupScheduleMirrorsTable.findMany({
    where: eq(backupScheduleMirrorsTable.scheduleId, scheduleId),
    with: { repository: true },
  });

  // Disabled assignments stay configured but are skipped.
  const enabledMirrors = mirrors.filter((m) => m.enabled);

  if (enabledMirrors.length === 0) {
    return;
  }

  logger.info(
    `[Background] Copying snapshots to ${enabledMirrors.length} mirror repositories for schedule ${scheduleId}`,
  );

  for (const mirror of enabledMirrors) {
    try {
      logger.info(`[Background] Copying to mirror repository: ${mirror.repository.name}`);

      serverEvents.emit("mirror:started", {
        scheduleId,
        repositoryId: mirror.repositoryId,
        repositoryName: mirror.repository.name,
      });

      // The tag filter restricts the copy to this schedule's snapshots only.
      await restic.copy(sourceRepository.config, mirror.repository.config, {
        tag: scheduleId.toString(),
      });

      if (retentionPolicy) {
        // Keep the mirror pruned with the same policy as the primary.
        logger.info(`[Background] Applying retention policy to mirror repository: ${mirror.repository.name}`);
        await restic.forget(mirror.repository.config, retentionPolicy, { tag: scheduleId.toString() });
      }

      // Persist per-mirror status so the UI can show the last copy outcome.
      await db
        .update(backupScheduleMirrorsTable)
        .set({ lastCopyAt: Date.now(), lastCopyStatus: "success", lastCopyError: null })
        .where(eq(backupScheduleMirrorsTable.id, mirror.id));

      logger.info(`[Background] Successfully copied to mirror repository: ${mirror.repository.name}`);

      serverEvents.emit("mirror:completed", {
        scheduleId,
        repositoryId: mirror.repositoryId,
        repositoryName: mirror.repository.name,
        status: "success",
      });
    } catch (error) {
      const errorMessage = toMessage(error);
      logger.error(`[Background] Failed to copy to mirror repository ${mirror.repository.name}: ${errorMessage}`);

      // NOTE(review): if this status write itself throws, the loop aborts and
      // remaining mirrors are skipped — confirm that is acceptable.
      await db
        .update(backupScheduleMirrorsTable)
        .set({ lastCopyAt: Date.now(), lastCopyStatus: "error", lastCopyError: errorMessage })
        .where(eq(backupScheduleMirrorsTable.id, mirror.id));

      serverEvents.emit("mirror:completed", {
        scheduleId,
        repositoryId: mirror.repositoryId,
        repositoryName: mirror.repository.name,
        status: "error",
        error: errorMessage,
      });
    }
  }
};
/**
 * Compute, for every repository other than the schedule's primary, whether it
 * could serve as a mirror target (i.e. its credentials do not clash with the
 * primary's backend credentials in a single restic copy invocation).
 *
 * @throws NotFoundError when the schedule does not exist.
 */
const getMirrorCompatibility = async (scheduleId: number) => {
  const schedule = await db.query.backupSchedulesTable.findFirst({
    where: eq(backupSchedulesTable.id, scheduleId),
    with: { repository: true },
  });

  if (!schedule) {
    throw new NotFoundError("Backup schedule not found");
  }

  // Every repository except the primary is a candidate mirror.
  const allRepositories = await db.query.repositoriesTable.findMany();
  const candidates = allRepositories.filter((repo) => repo.id !== schedule.repositoryId);

  // Checks are independent of one another; evaluate them in parallel.
  return Promise.all(
    candidates.map((repo) => checkMirrorCompatibility(schedule.repository.config, repo.config, repo.id)),
  );
};
export const backupsService = {
listSchedules,
getSchedule,
@@ -432,4 +595,7 @@ export const backupsService = {
getScheduleForVolume,
stopBackup,
runForget,
getMirrors,
updateMirrors,
getMirrorCompatibility,
};

View File

@@ -70,12 +70,34 @@ export const eventsController = new Hono().get("/", (c) => {
});
};
// Relay mirror-copy lifecycle events from the in-process event bus to this
// SSE client; `stream` is the SSE stream of the surrounding request handler.
const onMirrorStarted = (data: { scheduleId: number; repositoryId: string; repositoryName: string }) => {
  // NOTE(review): writeSSE returns a promise that is not awaited here —
  // presumably matching the other event forwarders; confirm write failures
  // are handled upstream.
  stream.writeSSE({
    data: JSON.stringify(data),
    event: "mirror:started",
  });
};

// Completion events carry the final status and, on failure, the error text.
const onMirrorCompleted = (data: {
  scheduleId: number;
  repositoryId: string;
  repositoryName: string;
  status: "success" | "error";
  error?: string;
}) => {
  stream.writeSSE({
    data: JSON.stringify(data),
    event: "mirror:completed",
  });
};
serverEvents.on("backup:started", onBackupStarted);
serverEvents.on("backup:progress", onBackupProgress);
serverEvents.on("backup:completed", onBackupCompleted);
serverEvents.on("volume:mounted", onVolumeMounted);
serverEvents.on("volume:unmounted", onVolumeUnmounted);
serverEvents.on("volume:updated", onVolumeUpdated);
serverEvents.on("mirror:started", onMirrorStarted);
serverEvents.on("mirror:completed", onMirrorCompleted);
let keepAlive = true;
@@ -88,6 +110,8 @@ export const eventsController = new Hono().get("/", (c) => {
serverEvents.off("volume:mounted", onVolumeMounted);
serverEvents.off("volume:unmounted", onVolumeUnmounted);
serverEvents.off("volume:updated", onVolumeUpdated);
serverEvents.off("mirror:started", onMirrorStarted);
serverEvents.off("mirror:completed", onMirrorCompleted);
});
while (keepAlive) {

View File

@@ -90,6 +90,7 @@ export const repositoriesController = new Hono()
short_id: snapshot.short_id,
duration,
paths: snapshot.paths,
tags: snapshot.tags ?? [],
size: summary?.total_bytes_processed || 0,
time: new Date(snapshot.time).getTime(),
};
@@ -113,6 +114,7 @@ export const repositoriesController = new Hono()
time: new Date(snapshot.time).getTime(),
paths: snapshot.paths,
size: snapshot.summary?.total_bytes_processed || 0,
tags: snapshot.tags ?? [],
summary: snapshot.summary,
};

View File

@@ -174,6 +174,7 @@ export const snapshotSchema = type({
paths: "string[]",
size: "number",
duration: "number",
tags: "string[]",
});
const listSnapshotsResponse = snapshotSchema.array();

View File

@@ -0,0 +1,148 @@
import type { RepositoryConfig } from "~/schemas/restic";
import { cryptoUtils } from "./crypto";
type BackendConflictGroup = "s3" | "gcs" | "azure" | "rest" | "sftp" | null;

/**
 * Map a repository backend identifier to its credential "conflict group".
 *
 * Backends in the same group share restic environment variables for their
 * credentials, so two repositories from the same group cannot take part in a
 * single restic copy unless their credentials match. Backends with no
 * env-level credential conflict (local, rclone) and unknown backends map to
 * null, meaning "never conflicts".
 */
export const getBackendConflictGroup = (backend: string): BackendConflictGroup => {
  const groupByBackend: Record<string, Exclude<BackendConflictGroup, null>> = {
    s3: "s3",
    r2: "s3", // R2 is S3-compatible and uses the same credential env vars
    gcs: "gcs",
    azure: "azure",
    rest: "rest",
    sftp: "sftp",
  };
  return groupByBackend[backend] ?? null;
};
/**
 * Return true when two repository configs can coexist in a single restic
 * invocation without their credentials clashing.
 *
 * Different conflict groups (or backends with no env-level credentials) are
 * always compatible. Within the same group, the stored credentials are
 * decrypted and compared for equality. SFTP is conservatively reported as
 * incompatible with itself, since key/agent setups cannot be compared here.
 */
export const hasCompatibleCredentials = async (
  config1: RepositoryConfig,
  config2: RepositoryConfig,
): Promise<boolean> => {
  const group1 = getBackendConflictGroup(config1.backend);
  const group2 = getBackendConflictGroup(config2.backend);

  // No group on either side, or different groups: nothing can clash.
  if (!group1 || !group2 || group1 !== group2) {
    return true;
  }

  switch (group1) {
    case "s3": {
      if (
        (config1.backend === "s3" || config1.backend === "r2") &&
        (config2.backend === "s3" || config2.backend === "r2")
      ) {
        // The four decrypts are independent — run them in parallel.
        const [accessKey1, secretKey1, accessKey2, secretKey2] = await Promise.all([
          cryptoUtils.decrypt(config1.accessKeyId),
          cryptoUtils.decrypt(config1.secretAccessKey),
          cryptoUtils.decrypt(config2.accessKeyId),
          cryptoUtils.decrypt(config2.secretAccessKey),
        ]);
        return accessKey1 === accessKey2 && secretKey1 === secretKey2;
      }
      return false;
    }
    case "gcs": {
      if (config1.backend === "gcs" && config2.backend === "gcs") {
        const [credentials1, credentials2] = await Promise.all([
          cryptoUtils.decrypt(config1.credentialsJson),
          cryptoUtils.decrypt(config2.credentialsJson),
        ]);
        return credentials1 === credentials2 && config1.projectId === config2.projectId;
      }
      return false;
    }
    case "azure": {
      if (config1.backend === "azure" && config2.backend === "azure") {
        const [config1Accountkey, config2Accountkey] = await Promise.all([
          cryptoUtils.decrypt(config1.accountKey),
          cryptoUtils.decrypt(config2.accountKey),
        ]);
        return config1.accountName === config2.accountName && config1Accountkey === config2Accountkey;
      }
      return false;
    }
    case "rest": {
      if (config1.backend === "rest" && config2.backend === "rest") {
        // Two anonymous REST servers never clash on credentials.
        if (!config1.username && !config2.username && !config1.password && !config2.password) {
          return true;
        }
        // NOTE(review): when only one side has credentials this decrypts the
        // empty string for the other — assumes cryptoUtils.decrypt("") does
        // not throw; confirm.
        const [config1Username, config1Password, config2Username, config2Password] = await Promise.all([
          cryptoUtils.decrypt(config1.username || ""),
          cryptoUtils.decrypt(config1.password || ""),
          cryptoUtils.decrypt(config2.username || ""),
          cryptoUtils.decrypt(config2.password || ""),
        ]);
        return config1Username === config2Username && config1Password === config2Password;
      }
      return false;
    }
    case "sftp": {
      // Cannot compare key files / agent configs; assume incompatible.
      return false;
    }
    default:
      return false;
  }
};
/** Verdict for a single candidate mirror repository. */
export interface CompatibilityResult {
  repositoryId: string;
  compatible: boolean;
  /** Human-readable explanation, non-null only when incompatible. */
  reason: string | null;
}
/**
 * Decide whether `mirrorConfig` can act as a mirror target for
 * `primaryConfig` in a single restic copy operation.
 *
 * Compatible unless both configs fall into the same credential conflict
 * group AND their stored credentials differ.
 */
export const checkMirrorCompatibility = async (
  primaryConfig: RepositoryConfig,
  mirrorConfig: RepositoryConfig,
  mirrorRepositoryId: string,
): Promise<CompatibilityResult> => {
  const primaryGroup = getBackendConflictGroup(primaryConfig.backend);
  const mirrorGroup = getBackendConflictGroup(mirrorConfig.backend);

  const ok: CompatibilityResult = {
    repositoryId: mirrorRepositoryId,
    compatible: true,
    reason: null,
  };

  // No conflict group on either side, or different groups — never clashes.
  // (Both-null also lands here, matching "no group on either side".)
  if (!primaryGroup || !mirrorGroup || primaryGroup !== mirrorGroup) {
    return ok;
  }

  // Same group: compatible only when the decrypted credentials match.
  if (await hasCompatibleCredentials(primaryConfig, mirrorConfig)) {
    return ok;
  }

  return {
    repositoryId: mirrorRepositoryId,
    compatible: false,
    reason: `Both use ${primaryGroup.toUpperCase()} backends with different credentials`,
  };
};
/**
 * Build the user-facing error message for an incompatible mirror selection.
 */
export const getIncompatibleMirrorError = (mirrorRepoName: string, primaryBackend: string, mirrorBackend: string) => {
  const sentences = [
    `Cannot mirror to ${mirrorRepoName}: both repositories use the same backend type (${primaryBackend}/${mirrorBackend}) with different credentials.`,
    "Restic cannot use different credentials for the same backend in a copy operation.",
    "Consider creating a new backup scheduler with the desired destination instead.",
  ];
  return sentences.join(" ");
};

View File

@@ -40,6 +40,7 @@ const snapshotInfoSchema = type({
time: "string",
uid: "number?",
username: "string",
tags: "string[]?",
summary: type({
backup_end: "string",
backup_start: "string",
@@ -713,12 +714,65 @@ const repairIndex = async (config: RepositoryConfig) => {
};
};
const addCommonArgs = (args: string[], config: RepositoryConfig, env: Record<string, string>) => {
args.push("--retry-lock", "1m", "--json");
const copy = async (
sourceConfig: RepositoryConfig,
destConfig: RepositoryConfig,
options: {
tag?: string;
snapshotId?: string;
},
) => {
const sourceRepoUrl = buildRepoUrl(sourceConfig);
const destRepoUrl = buildRepoUrl(destConfig);
if (config.backend === "sftp" && env._SFTP_SSH_ARGS) {
args.push("-o", `sftp.args=${env._SFTP_SSH_ARGS}`);
const sourceEnv = await buildEnv(sourceConfig);
const destEnv = await buildEnv(destConfig);
const env: Record<string, string> = {
...sourceEnv,
...destEnv,
RESTIC_FROM_PASSWORD_FILE: sourceEnv.RESTIC_PASSWORD_FILE,
};
const args: string[] = ["--repo", destRepoUrl, "copy", "--from-repo", sourceRepoUrl];
if (options.tag) {
args.push("--tag", options.tag);
}
if (options.snapshotId) {
args.push(options.snapshotId);
} else {
args.push("latest");
}
addCommonArgs(args, destConfig, destEnv);
if (sourceConfig.backend === "sftp" && sourceEnv._SFTP_SSH_ARGS) {
args.push("-o", `sftp.args=${sourceEnv._SFTP_SSH_ARGS}`);
}
logger.info(`Copying snapshots from ${sourceRepoUrl} to ${destRepoUrl}...`);
logger.debug(`Executing: restic ${args.join(" ")}`);
const res = await $`restic ${args}`.env(env).nothrow();
await cleanupTemporaryKeys(sourceConfig, sourceEnv);
await cleanupTemporaryKeys(destConfig, destEnv);
const stdout = res.text();
const stderr = res.stderr.toString();
if (res.exitCode !== 0) {
logger.error(`Restic copy failed: ${stderr}`);
throw new ResticError(res.exitCode, stderr);
}
logger.info(`Restic copy completed from ${sourceRepoUrl} to ${destRepoUrl}`);
return {
success: true,
output: stdout,
};
};
const cleanupTemporaryKeys = async (config: RepositoryConfig, env: Record<string, string>) => {
@@ -731,6 +785,13 @@ const cleanupTemporaryKeys = async (config: RepositoryConfig, env: Record<string
}
};
// Append flags common to every restic invocation: retry lock acquisition for
// up to one minute, and emit machine-readable JSON output. Mutates `args`.
const addCommonArgs = (args: string[], config: RepositoryConfig, env: Record<string, string>) => {
  args.push("--retry-lock", "1m", "--json");
  // SFTP needs its ssh arguments forwarded through restic's -o option.
  if (config.backend === "sftp" && env._SFTP_SSH_ARGS) {
    args.push("-o", `sftp.args=${env._SFTP_SSH_ARGS}`);
  }
};
export const restic = {
ensurePassfile,
init,
@@ -743,4 +804,5 @@ export const restic = {
ls,
check,
repairIndex,
copy,
};