Skip to content

Commit

Permalink
cleaned up planner
Browse files Browse the repository at this point in the history
  • Loading branch information
psy0rz committed Sep 24, 2024
1 parent 9a48851 commit 6b987b2
Showing 1 changed file with 52 additions and 49 deletions.
101 changes: 52 additions & 49 deletions zfs_autobackup/ZfsDataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -268,12 +268,11 @@ def parent(self):
# return self.snapshots[index]
# return None

def find_next_snapshot(self, snapshot, also_other_snapshots=False):
def find_next_snapshot(self, snapshot):
"""find next snapshot in this dataset. None if it doesn't exist
Args:
:type snapshot: ZfsDataset
:type also_other_snapshots: bool
"""

if self.is_snapshot:
Expand All @@ -282,8 +281,7 @@ def find_next_snapshot(self, snapshot, also_other_snapshots=False):
index = self.find_snapshot_index(snapshot)
while index is not None and index < len(self.snapshots) - 1:
index = index + 1
if also_other_snapshots or self.snapshots[index].is_ours():
return self.snapshots[index]
return self.snapshots[index]
return None

@property
Expand Down Expand Up @@ -962,31 +960,31 @@ def find_common_snapshot(self, target_dataset, guid_check):
# target_dataset.error("Cant find common snapshot with source.")
raise (Exception("Cant find common snapshot with target."))

def find_start_snapshot(self, common_snapshot, also_other_snapshots):
"""finds first snapshot to send :rtype: ZfsDataset or None if we cant
find it.
Args:
:rtype: ZfsDataset|None
:type common_snapshot: ZfsDataset
:type also_other_snapshots: bool
"""

if not common_snapshot:
if not self.snapshots:
start_snapshot = None
else:
# no common snapshot, start from beginning
start_snapshot = self.snapshots[0]

if not start_snapshot.is_ours() and not also_other_snapshots:
# try to start at a snapshot thats ours
start_snapshot = self.find_next_snapshot(start_snapshot, also_other_snapshots)
else:
# normal situation: start_snapshot is the one after the common snapshot
start_snapshot = self.find_next_snapshot(common_snapshot, also_other_snapshots)

return start_snapshot
# def find_start_snapshot(self, common_snapshot, also_other_snapshots):
# """finds first snapshot to send :rtype: ZfsDataset or None if we cant
# find it.
#
# Args:
# :rtype: ZfsDataset|None
# :type common_snapshot: ZfsDataset
# :type also_other_snapshots: bool
# """
#
# if not common_snapshot:
# if not self.snapshots:
# start_snapshot = None
# else:
# # no common snapshot, start from beginning
# start_snapshot = self.snapshots[0]
#
# if not start_snapshot.is_ours() and not also_other_snapshots:
# # try to start at a snapshot thats ours
# start_snapshot = self.find_next_snapshot(start_snapshot, also_other_snapshots)
# else:
# # normal situation: start_snapshot is the one after the common snapshot
# start_snapshot = self.find_next_snapshot(common_snapshot, also_other_snapshots)
#
# return start_snapshot

def find_incompatible_snapshots(self, common_snapshot, raw):
"""returns a list[snapshots] that is incompatible for a zfs recv onto
Expand Down Expand Up @@ -1088,7 +1086,7 @@ def _validate_resume_token(self, target_dataset, start_snapshot):
"""

if target_dataset.exists and 'receive_resume_token' in target_dataset.properties:
if start_snapshot == None:
if start_snapshot is None:
target_dataset.verbose("Aborting resume, its obsolete.")
target_dataset.abort_resume()
else:
Expand All @@ -1105,7 +1103,7 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw):
"""Determine at what snapshot to start syncing to target_dataset and what to sync and what to keep.
Args:
:rtype: ( ZfsDataset, ZfsDataset, list[ZfsDataset], list[ZfsDataset], list[ZfsDataset], list[ZfsDataset] )
:rtype: ( ZfsDataset, list[ZfsDataset], list[ZfsDataset], list[ZfsDataset], list[ZfsDataset] )
:type target_dataset: ZfsDataset
:type also_other_snapshots: bool
:type guid_check: bool
Expand All @@ -1114,7 +1112,6 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw):
Returns:
tuple: A tuple containing:
- ZfsDataset: The common snapshot
- ZfsDataset: The start snapshotplan_
- list[ZfsDataset]: Our obsolete source snapshots, after transfer is done. (will be thinned asap)
- list[ZfsDataset]: Our obsolete target snapshots, after transfer is done. (will be thinned asap)
- list[ZfsDataset]: Transfer target snapshots. These need to be transferred.
Expand All @@ -1125,31 +1122,33 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw):
# determine common and start snapshot
target_dataset.debug("Determining start snapshot")
source_common_snapshot = self.find_common_snapshot(target_dataset, guid_check=guid_check)
start_snapshot = self.find_start_snapshot(source_common_snapshot, also_other_snapshots)
# start_snapshot = self.find_start_snapshot(source_common_snapshot, also_other_snapshots)
incompatible_target_snapshots = target_dataset.find_incompatible_snapshots(source_common_snapshot, raw)

# let thinner decide what's obsolete on source after the transfer is done, keeping the last snapshot as common.
source_obsoletes = []
if self.our_snapshots:
source_obsoletes = self.thin_list(keeps=[self.our_snapshots[-1]])[1]

# A list of all our possible target snapshots ( existing - incompatible + transferrable from source )
# We will use this list to let the thinner decide what to transfer to the target, and which target snapshots to destroy.

# start with snapshots that already exist, minus incompatibles
if target_dataset.exists:
possible_target_snapshots = [snapshot for snapshot in target_dataset.snapshots if snapshot not in incompatible_target_snapshots]
else:
possible_target_snapshots = []

#Add all snapshots from the source to the target list, as a virtual snapshot that doesnt exist yet (force_exist=False)
source_snapshot = start_snapshot
# Add all snapshots from the source, starting after the common snapshot if it exists
if source_common_snapshot:
source_snapshot=self.find_next_snapshot(source_common_snapshot )
else:
source_snapshot=self.snapshots[0]

while source_snapshot:
# we want it?
if (also_other_snapshots or source_snapshot.is_ours()) and not source_snapshot.is_excluded:
# virtual target snapshot
# create virtual target snapshot
target_snapshot=target_dataset.zfs_node.get_dataset(target_dataset.filesystem_name + "@" + source_snapshot.snapshot_name, force_exists=False)
possible_target_snapshots.append(target_snapshot)
source_snapshot = self.find_next_snapshot(source_snapshot, False)
source_snapshot = self.find_next_snapshot(source_snapshot)

# Now the thinner can decide which snapshots we want on the target, by looking at the whole picture:
if possible_target_snapshots:
Expand All @@ -1164,7 +1163,7 @@ def _plan_sync(self, target_dataset, also_other_snapshots, guid_check, raw):
if not target_keep.exists:
target_transfers.append(target_keep)

return source_common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_transfers, incompatible_target_snapshots
return source_common_snapshot, source_obsoletes, target_obsoletes, target_transfers, incompatible_target_snapshots

def handle_incompatible_snapshots(self, incompatible_target_snapshots, destroy_incompatible):
"""destroy incompatbile snapshots on target before sync, or inform user
Expand Down Expand Up @@ -1229,7 +1228,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert
# keep data encrypted by sending it raw (including properties)
raw = True

(source_common_snapshot, start_snapshot, source_obsoletes, target_obsoletes, target_transfers,
(source_common_snapshot, source_obsoletes, target_obsoletes, target_transfers,
incompatible_target_snapshots) = \
self._plan_sync(target_dataset=target_dataset, also_other_snapshots=also_other_snapshots,
guid_check=guid_check, raw=raw)
Expand All @@ -1248,7 +1247,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert
return

# check if we can resume
resume_token = self._validate_resume_token(target_dataset, start_snapshot)
resume_token = self._validate_resume_token(target_dataset, target_transfers[0])

(active_filter_properties, active_set_properties) = self.get_allowed_properties(filter_properties,
set_properties)
Expand All @@ -1260,13 +1259,17 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert
write_embedded = False

# now actually transfer the snapshots
prev_source_snapshot = source_common_snapshot
prev_target_snapshot=target_dataset.find_snapshot(prev_source_snapshot)
# prev_source_snapshot = start_snapshot
# source_dataset=start_snapshot.parent
# prev_target_snapshot=target_dataset.find_snapshot(prev_source_snapshot)

source_snapshot = start_snapshot
do_rollback = rollback
while source_snapshot:
target_snapshot = source_snapshot.find_snapshot_in_list(target_transfers)
prev_source_snapshot=source_common_snapshot
prev_target_snapshot=target_dataset.find_snapshot(source_common_snapshot)
for target_snapshot in target_transfers:

source_snapshot=self.find_snapshot(target_snapshot)


# do the rollback, one time at first transfer
if do_rollback:
Expand Down Expand Up @@ -1307,7 +1310,7 @@ def sync_snapshots(self, target_dataset, features, show_progress, filter_propert
prev_source_snapshot = source_snapshot
prev_target_snapshot = target_snapshot

source_snapshot = self.find_next_snapshot(source_snapshot, also_other_snapshots)
# source_snapshot = self.find_next_snapshot(source_snapshot, also_other_snapshots)

def mount(self, mount_point):

Expand Down

0 comments on commit 6b987b2

Please sign in to comment.