Sync and backup of volumes
parent e4330c781f
commit ecff804038
7 changed files with 501 additions and 83 deletions
5 .vscode/settings.json (vendored, new file)

@@ -0,0 +1,5 @@
{
    "cSpell.words": [
        "Restic"
    ]
}
17 README.md
@@ -21,6 +21,21 @@ Functions of Sentinel depend on user KV values configured on each instance. Here…

| user.sync-target-pool            | pool0 | Target's storage pool                   |
| user.sync-target-instance-suffix | -cold | Instance name suffix at the target side |

It can also back up and sync volumes; here is the list of KV fields for them:

| Key                            | Default   | Purpose                                                                       |
| ------------------------------ | --------- | ----------------------------------------------------------------------------- |
| user.backup                    | false     | true/false; if true, a regular backup job into Restic is performed            |
| user.backup-mode               | dir       | dir or native; dir backs up the volume's directory, native uses incus export  |
| user.sync                      | false     | true/false; if true, a regular sync job to the target remote is performed     |
| user.backup-notify-url         | ""        | Call this URL when a backup is done                                           |
| user.sync-notify-url           | ""        | Call this URL when a sync is done                                             |
| user.backup-schedule           | 0 6 * * * | Cron-like line for backup scheduling                                          |
| user.sync-schedule             | 0 6 * * * | Cron-like line for sync scheduling                                            |
| user.sync-target-remote        | ""        | Sync's target host (needs to be configured in Incus)                          |
| user.sync-target-pool          | pool0     | Target's storage pool                                                         |
| user.sync-target-volume-suffix | -cold     | Volume name suffix at the target side                                         |

### Examples

Enable sync and backup on an existing instance:
@@ -44,3 +59,5 @@ Also its binary needs to be available on the system where sentinel is running.

Sentinel uses Incus's CLI interface, not its API. Currently it can work only on the same machine where Incus is running.

Synced instances have their sync and backup flags disabled, so if the remote system runs Sentinel too, it won't interfere with the configuration of the main location.

Volumes can be backed up in two ways. The first is a snapshot plus a backup of the directory where the snapshot is located. The second is Incus's native export, where a binary blob or an archive is exported and stored in the Restic repo; in that case it can be imported back with the incus import feature.
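A minimal sketch of the restore flow for a native-mode volume backup, in Go. This is an illustration, not part of this commit: it assumes the `{pool}-{volume}.btrfs.volume` filename convention used by BackupVolumeNative below, a configured Restic repository, and illustrative pool/volume names.

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Dump the latest native volume export from the Restic repo into a file.
	dump := exec.Command("restic", "dump", "latest", "pool0-data.btrfs.volume")
	f, err := os.Create("/tmp/data.volume")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	dump.Stdout = f
	if err := dump.Run(); err != nil {
		log.Fatalf("restic dump failed: %v", err)
	}

	// Import the exported blob back as a custom volume.
	imp := exec.Command("incus", "storage", "volume", "import", "pool0", "/tmp/data.volume", "data-restored")
	if out, err := imp.CombinedOutput(); err != nil {
		log.Fatalf("incus storage volume import failed: %v (%s)", err, out)
	}
}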
277 incus/main.go
@@ -3,71 +3,28 @@ package incus

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
	"strings"
)

// Name of the snapshot used for backup
const backupSnapshot = "backup-snapshot"

type IncusDriver struct{}

func NewIncusDriver() *IncusDriver {
	return &IncusDriver{}
}

// pipeToRestic streams the output of the given incus command into
// `restic backup --stdin`, storing it under the given filename and tags.
func (d *IncusDriver) pipeToRestic(incusCmd *exec.Cmd, filename string, tags []string) error {
	// Create the restic backup command
	var resticCmd *exec.Cmd
	if len(tags) == 0 {
		resticCmd = exec.Command("restic", "backup", "--stdin", "--stdin-filename", filename)
	} else {
		resticCmd = exec.Command("restic", "backup", "--stdin", "--stdin-filename", filename, "--tag", strings.Join(tags, ","))
	}

	// Connect the output of incusCmd to the input of resticCmd
	pipe, err := incusCmd.StdoutPipe()
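	// The diff view truncates pipeToRestic here. A typical completion of such
	// an exec pipeline would look like this — an assumption for illustration,
	// not the actual remainder of this commit:
	if err != nil {
		return fmt.Errorf("failed to create stdout pipe: %w", err)
	}
	resticCmd.Stdin = pipe
	if err := resticCmd.Start(); err != nil {
		return fmt.Errorf("failed to start restic: %w", err)
	}
	if err := incusCmd.Run(); err != nil {
		return fmt.Errorf("failed to run incus command: %w", err)
	}
	if err := resticCmd.Wait(); err != nil {
		return fmt.Errorf("restic backup failed: %w", err)
	}
	// The function then returns nil, as the context lines of the next hunk show.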
@@ -98,3 +55,215 @@ func (d *IncusDriver) Backup(instance string, tags []string) error {

	return nil
}

func (d *IncusDriver) GetInstances(target string) ([]Instance, error) {
	// Command: incus list -f json
	var cmd *exec.Cmd
	if target == "" {
		cmd = exec.Command("incus", "list", "--format", "json", "--all-projects")
	} else {
		cmd = exec.Command("incus", "list", target+":", "--format", "json", "--all-projects")
	}

	output, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("failed to execute incus list: %w", err)
	}

	var instances []Instance
	err = json.Unmarshal(output, &instances)
	if err != nil {
		return nil, err
	}

	return instances, nil
}

func (d *IncusDriver) GetPools(target string) ([]Pool, error) {
	// Command: incus storage list -f json
	var cmd *exec.Cmd
	if target == "" {
		cmd = exec.Command("incus", "storage", "list", "--format", "json")
	} else {
		cmd = exec.Command("incus", "storage", "list", target+":", "--format", "json")
	}

	output, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("failed to execute incus storage list: %w", err)
	}

	var pools []Pool
	err = json.Unmarshal(output, &pools)
	if err != nil {
		return nil, err
	}

	return pools, nil
}

func (d *IncusDriver) GetVolumes(target string) ([]Volume, error) {
	volumes := []Volume{}
	var cmd *exec.Cmd

	pools, err := d.GetPools(target)
	if err != nil {
		return nil, err
	}

	for _, pool := range pools {
		if target == "" {
			cmd = exec.Command("incus", "storage", "volume", "list", pool.Name, "--format", "json", "--all-projects")
		} else {
			cmd = exec.Command("incus", "storage", "volume", "list", target+":"+pool.Name, "--format", "json", "--all-projects")
		}

		output, err := cmd.Output()
		if err != nil {
			return nil, fmt.Errorf("failed to execute incus storage volume list: %w", err)
		}

		poolVolumes := []Volume{}
		err = json.Unmarshal(output, &poolVolumes)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal volumes: %w", err)
		}

		for _, p := range poolVolumes {
			// Skip volumes with "/" in their name because they are snapshots
			if strings.Contains(p.Name, "/") {
				continue
			}

			// We skip everything except custom volumes
			if p.Type != "custom" {
				continue
			}

			p.Pool = pool.Name
			volumes = append(volumes, p)
		}
	}

	return volumes, nil
}

func (d *IncusDriver) Sync(project string, sourceInstance string, targetInstance string, targetHost string, targetPool string) error {
	// incus copy edge0 racker1:edge0-cold -s pool0 --mode push -p default -p net_edge0 --stateless --refresh
	// incus copy edge0 racker1:edge0-cold -s pool0 --mode push -p default -p net_edge0 --stateless

	instances, err := d.GetInstances(targetHost)
	if err != nil {
		return err
	}

	var cmd *exec.Cmd
	if len(instances) == 0 {
		cmd = exec.Command("incus", "copy", sourceInstance, targetHost+":"+targetInstance, "-s", targetPool, "--mode", "push", "--stateless", "-c", "user.backup=false", "-c", "user.sync=false", "--project", project)
	} else {
		cmd = exec.Command("incus", "copy", sourceInstance, targetHost+":"+targetInstance, "-s", targetPool, "--mode", "push", "--stateless", "-c", "user.backup=false", "-c", "user.sync=false", "--project", project, "--refresh")
	}
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to execute incus copy: %w (%s)", err, string(out))
	}

	return nil
}

func (d *IncusDriver) Backup(project string, instance string, tags []string) error {
	// incus export ups - -q --instance-only --optimized-storage | restic backup --stdin --stdin-filename ups.btrfs.instance --tag instance

	// Create the incus export command.
	// Note: --compression=zstd is not used here because restic compresses on its own.
	incusCmd := exec.Command("incus", "export", instance, "-", "-q", "--instance-only", "--optimized-storage", "--project", project)

	// Pipe the export into restic
	err := d.pipeToRestic(incusCmd, fmt.Sprintf("%s-%s.btrfs.instance", project, instance), tags)
	if err != nil {
		return err
	}

	return nil
}

func (d *IncusDriver) SyncVolume(project string, sourcePool string, sourceVolume string, targetHost string, targetPool string, targetVolume string) error {
	// incus storage volume copy pool0/custom/node-27-apps merkur:pool0/custom/node-27-apps-old --mode push --refresh

	vols, err := d.GetVolumes(targetHost)
	if err != nil {
		return err
	}

	found := false
	for _, vol := range vols {
		if vol.Project == project && vol.Pool == targetPool && vol.Name == targetVolume {
			found = true
			break
		}
	}

	var cmd *exec.Cmd
	if found {
		cmd = exec.Command("incus", "storage", "volume", "copy", sourcePool+"/custom/"+sourceVolume, targetHost+":"+targetPool+"/custom/"+targetVolume, "--mode", "push", "--refresh", "--project", project)
	} else {
		cmd = exec.Command("incus", "storage", "volume", "copy", sourcePool+"/custom/"+sourceVolume, targetHost+":"+targetPool+"/custom/"+targetVolume, "--mode", "push", "--project", project)
	}

	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to execute incus storage volume copy: %w (%s)", err, string(out))
	}

	return nil
}

// Runs backup of volume's directory
func (d *IncusDriver) BackupVolumeDir(project string, pool string, volume string, tags []string) error {
	// /var/lib/incus/storage-pools/{POOL}/custom/{PROJECT}_{VOLUME}
	// /var/lib/incus/storage-pools/pool0/custom-snapshots/default_node-27-apps/backup-snapshot/

	// TODO: check if volume is block device or filesystem and return error when it is a block device

	var cmd *exec.Cmd

	// Create snapshot
	cmd = exec.Command("incus", "storage", "volume", "snapshot", "create", pool, volume, backupSnapshot, "--project", project)
	out, err := cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to execute incus storage volume snapshot create: %w (%s)", err, string(out))
	}

	// Make sure the snapshot is removed again when the backup is done
	defer func() {
		cmd = exec.Command("incus", "storage", "volume", "snapshot", "delete", pool, volume, backupSnapshot, "--project", project)
		out, err := cmd.CombinedOutput()
		if err != nil {
			log.Printf("failed to delete snapshot: %s (%s)", err.Error(), string(out))
		}
	}()

	// Run restic backup from the snapshot's directory
	cmd = exec.Command("restic", "backup", "--tag", strings.Join(tags, ","), "./")
	cmd.Dir = fmt.Sprintf("/var/lib/incus/storage-pools/%s/custom-snapshots/%s_%s/%s", pool, project, volume, backupSnapshot)
	out, err = cmd.CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to execute restic backup: %w (%s)", err, string(out))
	}

	return nil
}
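// One way to implement the TODO above, as a sketch: GetVolumes already returns
// ContentType, and a block volume has no mountable directory to back up, so a
// caller could guard on it before choosing dir mode (an assumption for
// illustration, not part of this commit):
//
//	if vol.ContentType == "block" {
//		return fmt.Errorf("dir backup not supported for block volume %s/%s", pool, volume)
//	}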
// Backup volume with Incus's native volume export feature
func (d *IncusDriver) BackupVolumeNative(project string, pool string, volume string, tags []string) error {
	// Create the incus export command
	incusCmd := exec.Command("incus", "storage", "volume", "export", pool, volume, "--optimized-storage", "--volume-only", "--project", project)

	err := d.pipeToRestic(incusCmd, fmt.Sprintf("%s-%s.btrfs.volume", pool, volume), tags)
	if err != nil {
		return err
	}

	return nil
}
@@ -85,3 +85,98 @@ func (i *Instance) Sentinel() InstanceSentinel {

	return s
}

type Pool struct {
	Config      map[string]string `json:"config"`
	Description string            `json:"description"`
	Name        string            `json:"name"`
	Driver      string            `json:"driver"`
	UsedBy      []string          `json:"used_by"`
	Status      string            `json:"status"`
	Locations   []string          `json:"locations"`
}

type VolumeSentinel struct {
	Backup                 bool
	BackupMode             string
	Sync                   bool
	BackupSchedule         string
	BackupNotifyURL        string
	SyncNotifyURL          string
	SyncSchedule           string
	SyncTargetRemote       string
	SyncTargetPool         string
	SyncTargetVolumeSuffix string
}

type Volume struct {
	Config      map[string]string `json:"config"`
	Description string            `json:"description"`
	Name        string            `json:"name"`
	Pool        string            `json:"pool"`
	Type        string            `json:"type"`
	UsedBy      []string          `json:"used_by"`
	Location    string            `json:"location"`
	ContentType string            `json:"content_type"`
	Project     string            `json:"project"`
	CreatedAt   time.Time         `json:"created_at"`
}

// Sentinel parses the volume's user KV config into a VolumeSentinel,
// falling back to the defaults below for missing keys.
func (v *Volume) Sentinel() VolumeSentinel {
	s := VolumeSentinel{
		Backup:                 false,
		BackupMode:             "dir", // dir or native
		Sync:                   false,
		BackupNotifyURL:        "",
		SyncNotifyURL:          "",
		BackupSchedule:         "0 6 * * *",
		SyncSchedule:           "0 6 * * *",
		SyncTargetRemote:       "",
		SyncTargetPool:         "pool0",
		SyncTargetVolumeSuffix: "-cold",
	}

	if val, ok := v.Config["user.backup"]; ok {
		s.Backup = val == "true"
	}

	if val, ok := v.Config["user.backup-mode"]; ok {
		if val == "native" || val == "dir" {
			s.BackupMode = val
		}
	}

	if val, ok := v.Config["user.sync"]; ok {
		s.Sync = val == "true"
	}

	if val, ok := v.Config["user.backup-notify-url"]; ok {
		s.BackupNotifyURL = val
	}

	if val, ok := v.Config["user.sync-notify-url"]; ok {
		s.SyncNotifyURL = val
	}

	if val, ok := v.Config["user.backup-schedule"]; ok {
		s.BackupSchedule = val
	}

	if val, ok := v.Config["user.sync-schedule"]; ok {
		s.SyncSchedule = val
	}

	if val, ok := v.Config["user.sync-target-remote"]; ok {
		s.SyncTargetRemote = val
	}

	if val, ok := v.Config["user.sync-target-pool"]; ok {
		s.SyncTargetPool = val
	}

	if val, ok := v.Config["user.sync-target-volume-suffix"]; ok {
		s.SyncTargetVolumeSuffix = val
	}

	return s
}
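// Usage sketch for Volume.Sentinel(): defaults apply unless overridden by the
// volume's user KV config (the values here are hypothetical, for illustration):
//
//	v := Volume{Config: map[string]string{
//		"user.backup":      "true",
//		"user.backup-mode": "native",
//	}}
//	s := v.Sentinel()
//	// s.Backup == true, s.BackupMode == "native",
//	// s.BackupSchedule == "0 6 * * *" (default kept)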
95 main.go
@@ -41,6 +41,30 @@ func main() {
				return nil
			},
		},
		{
			Name:  "list-volumes",
			Usage: "List all volumes with their sync and backup settings",
			Flags: []cli.Flag{},
			Action: func(c context.Context, cmd *cli.Command) error {
				i := incus.NewIncusDriver()
				vols, err := i.GetVolumes("")
				if err != nil {
					return err
				}

				for _, vol := range vols {
					s := vol.Sentinel()
					fmt.Printf("%s/%s/%s\n", vol.Project, vol.Pool, vol.Name)
					fmt.Printf("  Backup: %t (%s, %s)\n", s.Backup, s.BackupMode, s.BackupSchedule)
					fmt.Printf("  Sync: %t (%s)\n", s.Sync, s.SyncSchedule)
					if s.Sync {
						fmt.Printf("  Sync Target: %s (pool: %s, suffix: %s)\n", s.SyncTargetRemote, s.SyncTargetPool, s.SyncTargetVolumeSuffix)
					}
				}

				return nil
			},
		},
		{
			Name:  "sync",
			Usage: "Syncs all instances where sync is enabled",
@@ -56,7 +80,38 @@ func main() {
					s := inst.Sentinel()
					if s.Sync {
						fmt.Println(".. syncing", inst.Name)
						err := i.Sync(inst.Project, inst.Name, fmt.Sprintf("%s%s", inst.Name, s.SyncTargetInstanceSuffix), s.SyncTargetRemote, s.SyncTargetPool)
						if err != nil {
							return err
						}
					}
				}

				return nil
			},
		},
		{
			Name:  "sync-volumes",
			Usage: "Syncs all volumes where sync is enabled",
			Flags: []cli.Flag{},
			Action: func(c context.Context, cmd *cli.Command) error {
				i := incus.NewIncusDriver()
				vols, err := i.GetVolumes("")
				if err != nil {
					return err
				}

				for _, vol := range vols {
					s := vol.Sentinel()
					if s.Sync {
						fmt.Printf(".. syncing %s/%s/%s\n", vol.Project, vol.Pool, vol.Name)

						targetPool := vol.Pool
						if s.SyncTargetPool != "" {
							targetPool = s.SyncTargetPool
						}

						err := i.SyncVolume(vol.Project, vol.Pool, vol.Name, s.SyncTargetRemote, targetPool, fmt.Sprintf("%s%s", vol.Name, s.SyncTargetVolumeSuffix))
						if err != nil {
							return err
						}
@@ -81,7 +136,7 @@ func main() {
					s := inst.Sentinel()
					if s.Backup {
						fmt.Println(".. backing up", inst.Name)
						err := i.Backup(inst.Project, inst.Name, []string{})
						if err != nil {
							return err
						}
@@ -91,6 +146,42 @@ func main() {
				return nil
			},
		},
		{
			Name:  "backup-volumes",
			Usage: "Backs up all volumes where backup is enabled",
			Flags: []cli.Flag{},
			Action: func(c context.Context, cmd *cli.Command) error {
				i := incus.NewIncusDriver()
				vols, err := i.GetVolumes("")
				if err != nil {
					return err
				}

				for _, vol := range vols {
					s := vol.Sentinel()
					if s.Backup {
						fmt.Printf(".. backing up %s/%s/%s\n", vol.Project, vol.Pool, vol.Name)

						if s.BackupMode == "dir" {
							err := i.BackupVolumeDir(vol.Project, vol.Pool, vol.Name, []string{})
							if err != nil {
								return err
							}
						} else if s.BackupMode == "native" {
							err := i.BackupVolumeNative(vol.Project, vol.Pool, vol.Name, []string{})
							if err != nil {
								return err
							}
						} else {
							return fmt.Errorf("invalid backup mode: %s", s.BackupMode)
						}
					}
				}

				return nil
			},
		},
		{
			Name:  "run",
			Usage: "Runs the sentinel that syncs and backs up instances based on their configuration",
@@ -11,10 +11,15 @@ import (

const queueLength = 100
const planReasonBackup = "backup"
const planReasonBackupVolume = "backup-volume"
const planReasonSync = "sync"
const planReasonSyncVolume = "sync-volume"

var defaultTags = []string{"sentinel"}

type schedulerPlan struct {
	Instance incus.Instance
	Volume   incus.Volume
	Reason   string // one of the planReason* constants
}
@@ -59,40 +64,76 @@ func (s *Scheduler) do(plan schedulerPlan, done chan schedulerPlan) {
	// Do the actual work
	sen := plan.Instance.Sentinel()
	var err error
	start := time.Now()

	notifyURL := ""

	switch plan.Reason {
	case planReasonBackup:
		err = s.driver.Backup(plan.Instance.Project, plan.Instance.Name, defaultTags)
		if err != nil {
			log.Printf("Failed to backup %s: %s", plan.Instance.Name, err.Error())
			return
		}

		log.Printf("Backup of %s took %s", plan.Instance.Name, time.Since(start).String())

		notifyURL = sen.BackupNotifyURL
	case planReasonSync:
		err = s.driver.Sync(plan.Instance.Project, plan.Instance.Name, plan.Instance.Name+sen.SyncTargetInstanceSuffix, sen.SyncTargetRemote, sen.SyncTargetPool)
		if err != nil {
			log.Printf("Failed to sync %s: %s", plan.Instance.Name, err.Error())
			return
		}

		log.Printf("Sync of %s took %s", plan.Instance.Name, time.Since(start).String())

		notifyURL = sen.SyncNotifyURL
	case planReasonBackupVolume:
		sen := plan.Volume.Sentinel()
		if sen.BackupMode == "dir" {
			err = s.driver.BackupVolumeDir(plan.Volume.Project, plan.Volume.Pool, plan.Volume.Name, defaultTags)
		} else if sen.BackupMode == "native" {
			err = s.driver.BackupVolumeNative(plan.Volume.Project, plan.Volume.Pool, plan.Volume.Name, defaultTags)
		} else {
			log.Printf("Invalid backup mode for volume %s/%s/%s: %s", plan.Volume.Project, plan.Volume.Pool, plan.Volume.Name, sen.BackupMode)
			return
		}

		if err != nil {
			log.Printf("Failed to backup volume %s/%s/%s: %s", plan.Volume.Project, plan.Volume.Pool, plan.Volume.Name, err.Error())
			return
		}

		log.Printf("Backup of volume %s/%s/%s took %s", plan.Volume.Project, plan.Volume.Pool, plan.Volume.Name, time.Since(start).String())

		notifyURL = sen.BackupNotifyURL
	case planReasonSyncVolume:
		sen := plan.Volume.Sentinel()
		targetPool := plan.Volume.Pool
		if sen.SyncTargetPool != "" {
			targetPool = sen.SyncTargetPool
		}

		err = s.driver.SyncVolume(plan.Volume.Project, plan.Volume.Pool, plan.Volume.Name, sen.SyncTargetRemote, targetPool, plan.Volume.Name+sen.SyncTargetVolumeSuffix)
		if err != nil {
			log.Printf("Failed to sync volume %s/%s/%s: %s", plan.Volume.Project, plan.Volume.Pool, plan.Volume.Name, err.Error())
			return
		}

		log.Printf("Sync of volume %s/%s/%s took %s", plan.Volume.Project, plan.Volume.Pool, plan.Volume.Name, time.Since(start).String())

		notifyURL = sen.SyncNotifyURL
	}

	if notifyURL != "" && s.notifier != nil {
		err = s.notifier.Notify(notifyURL)
		if err != nil {
			log.Printf("Failed to notify %s: %s", notifyURL, err.Error())
		}
	} else if notifyURL != "" && s.notifier == nil {
		log.Println("Warning: No notifier configured, skipping notification")
	}
}
@@ -3,14 +3,14 @@ package scheduler

import "gitea.ceperka.net/rosti/incus-sentinel/incus"

type Driver interface {
	GetInstances(target string) ([]incus.Instance, error)
	GetPools(target string) ([]incus.Pool, error)
	GetVolumes(target string) ([]incus.Volume, error)
	Sync(project string, sourceInstance string, targetInstance string, targetHost string, targetPool string) error
	Backup(project string, instance string, tags []string) error
	SyncVolume(project string, sourcePool string, sourceVolume string, targetHost string, targetPool string, targetVolume string) error
	BackupVolumeDir(project string, pool string, volume string, tags []string) error
	BackupVolumeNative(project string, pool string, volume string, tags []string) error
}
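// Because the scheduler depends only on this Driver interface rather than on
// the concrete IncusDriver, it can be exercised with a test double. A minimal
// sketch of such a fake (hypothetical, not part of this commit):
//
//	type fakeDriver struct{ backups []string }
//
//	func (f *fakeDriver) GetInstances(target string) ([]incus.Instance, error) { return nil, nil }
//	func (f *fakeDriver) GetPools(target string) ([]incus.Pool, error)         { return nil, nil }
//	func (f *fakeDriver) GetVolumes(target string) ([]incus.Volume, error)     { return nil, nil }
//	func (f *fakeDriver) Sync(project, sourceInstance, targetInstance, targetHost, targetPool string) error {
//		return nil
//	}
//	func (f *fakeDriver) Backup(project, instance string, tags []string) error {
//		f.backups = append(f.backups, project+"/"+instance)
//		return nil
//	}
//	func (f *fakeDriver) SyncVolume(project, sourcePool, sourceVolume, targetHost, targetPool, targetVolume string) error {
//		return nil
//	}
//	func (f *fakeDriver) BackupVolumeDir(project, pool, volume string, tags []string) error    { return nil }
//	func (f *fakeDriver) BackupVolumeNative(project, pool, volume string, tags []string) error { return nil }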
type Notifier interface {