#!/bin/bash
#set -x
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #      Script for watching a dataset and automatically converting its regular child folders into datasets      # #
# # (needs Unraid 6.12 or above) # #
# # by - SpaceInvaderOne # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# real run or dry run
dry_run="no" # Set to "yes" for a dry run. Change to "no" to run for real
#
# Main Variables
source_pool="cyberflux" #this is the zpool in which your source dataset resides (note this does NOT start with /mnt/)
source_dataset="appdata" #this is the dataset whose child directories you want to check and convert to datasets
should_stop_containers="yes" # Setting to "yes" will stop all containers except those listed below. This should be set to yes if watching the appdata share
containers_to_keep_running=("Emby" "container2") #Containers that you do not want to be stopped (see readme)
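# Note: names must exactly match the container names shown by 'docker ps' (matching is case-sensitive)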
should_stop_vms="no" #Setting to "yes" will stop all VMs except those listed below. This should be set to yes if watching the domains share
vms_to_keep_running=("Home Assistant" "vm2") #VMs that you do not want to be stopped (see readme)
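# Note: names must exactly match the names shown by 'virsh list', including any spaces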
cleanup="yes" #Setting to "yes" will clean up after running (see readme)
replace_spaces="no" # Set this to "no" to keep spaces in the dataset names
#
#
#Advanced variables. You do not need to change these.
source_path="${source_pool}/${source_dataset}"
mount_point="/mnt"
stopped_containers=()
stopped_vms=()
converted_folders=()
buffer_zone=11 # extra free-space headroom needed in the pool, as a percentage of the folder size (a value between 1 and 100).
# It should be set a little higher than the minimum free space floor configured for the zpool in the Unraid GUI.
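# Worked example: with buffer_zone=11, converting a 100 GiB folder needs roughly 111 GiB free, since the pool briefly holds both the _temp copy and the new dataset.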
#
# This function stops running Docker containers if required
stop_docker_containers() {
if [ "$should_stop_containers" = "yes" ]; then
echo "Checking Docker containers..."
for container in $(docker ps -q); do
container_name=$(docker container inspect --format '{{.Name}}' "$container" | cut -c 2-)
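# Membership test: the container name (padded with spaces) is matched against the space-joined keep list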
if ! [[ " ${containers_to_keep_running[@]} " =~ " ${container_name} " ]]; then
echo "Stopping Docker container ${container_name}..."
if [ "$dry_run" != "yes" ]; then
docker stop "$container"
stopped_containers+=("$container") # Save the ID of the stopped container
else
echo "Dry Run: Docker container ${container_name} would be stopped"
fi
fi
done
fi
}
#
# This function stops running VMs if required
stop_virtual_machines() {
if [ "$should_stop_vms" = "yes" ]; then
echo "Checking VMs..."
# Get the list of running vms
running_vms=$(virsh list --name | awk NF)
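# 'awk NF' drops blank lines; IFS is switched to newline so VM names containing spaces are not split apart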
oldIFS=$IFS
IFS=$'\n'
for vm in $running_vms; do
# restore the IFS
IFS=$oldIFS
# Check if VM is in the array of VMs to keep running
if ! [[ " ${vms_to_keep_running[@]} " =~ " ${vm} " ]]; then
echo "Stopping VM $vm..."
if [ "$dry_run" != "yes" ]; then
# Shutdown the VM then wait for it to stop
virsh shutdown "$vm"
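# Poll the domain state every 5 seconds for up to 18 attempts (~90 seconds), then force off with 'virsh destroy' if it still has not shut down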
for i in {1..18}; do
if virsh domstate "$vm" | grep -q 'shut off'; then
break
fi
if ((i == 18)); then
virsh destroy "$vm"
fi
sleep 5
done
stopped_vms+=("$vm") # Save the name of the stopped VM
else
echo "Dry Run: VM $vm would be stopped"
fi
fi
# change IFS back to newline-only for the next loop iteration
IFS=$'\n'
done
# restore the IFS
IFS=$oldIFS
fi
}
#
# Function to start Docker containers which had been stopped earlier
start_docker_containers() {
if [ "$should_stop_containers" = "yes" ]; then
for container in "${stopped_containers[@]}"; do
echo "Restarting Docker container $(docker container inspect --format '{{.Name}}' "$container")..."
if [ "$dry_run" != "yes" ]; then
docker start "$container"
else
echo "Dry Run: Docker container $(docker container inspect --format '{{.Name}}' "$container") would be restarted"
fi
done
fi
}
#
# Function to start VMs that were stopped earlier
start_virtual_machines() {
if [ "$should_stop_vms" = "yes" ]; then
for vm in "${stopped_vms[@]}"; do
echo "Restarting VM $vm..."
if [ "$dry_run" != "yes" ]; then
virsh start "$vm"
else
echo "Dry Run: VM $vm would be started"
fi
done
fi
}
# Function to normalize German umlauts in folder names
normalize_name() {
local original_name="$1"
# Replace German umlauts with ASCII approximations
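# e.g. "Büro" becomes "Buero" and "Straße" becomes "Strasse"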
local normalized_name=$(echo "$original_name" |
sed 's/ä/ae/g; s/ö/oe/g; s/ü/ue/g;
s/Ä/Ae/g; s/Ö/Oe/g; s/Ü/Ue/g;
s/ß/ss/g')
echo "$normalized_name"
}
#
# Main function: converts child folders into datasets and copies the data into them
create_datasets() {
for entry in "${mount_point}/${source_path}"/*; do
base_entry=$(basename "$entry")
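# Skip *_temp working copies left behind by a previous or interrupted run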
if [[ "$base_entry" != *_temp ]]; then
if [ "$replace_spaces" = "yes" ]; then
base_entry_no_spaces=$(echo "$base_entry" | tr ' ' '_')
else
base_entry_no_spaces="$base_entry"
fi
normalized_base_entry=$(normalize_name "$base_entry_no_spaces")
if zfs list -o name | grep -q "^${source_path}/${normalized_base_entry}$"; then
echo "Skipping dataset ${entry}..."
elif [ -d "$entry" ]; then
echo "Processing folder ${entry}..."
folder_size=$(du -sb "$entry" | cut -f1) # Size in bytes
folder_size_hr=$(du -sh "$entry" | cut -f1) # Human-readable size
echo "Folder size: $folder_size_hr"
required_space=$((folder_size + folder_size * buffer_zone / 100)) # the pool briefly holds two copies of the data, so require the full folder size plus the buffer
if zfs list | grep -q "$source_path" && (( $(zfs list -o avail -p -H "${source_path}") >= required_space )); then
echo "Creating and populating new dataset ${source_path}/${normalized_base_entry}..."
if [ "$dry_run" != "yes" ]; then
mv "$entry" "${mount_point}/${source_path}/${normalized_base_entry}_temp"
if zfs create "${source_path}/${normalized_base_entry}"; then
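# The trailing slash on the rsync source copies the folder's contents into the dataset rather than nesting the folder inside it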
rsync -a "${mount_point}/${source_path}/${normalized_base_entry}_temp/" "${mount_point}/${source_path}/${normalized_base_entry}/"
rsync_exit_status=$?
if [ "$cleanup" = "yes" ] && [ $rsync_exit_status -eq 0 ]; then
echo "Validating copy..."
source_file_count=$(find "${mount_point}/${source_path}/${normalized_base_entry}_temp" -type f | wc -l)
destination_file_count=$(find "${mount_point}/${source_path}/${normalized_base_entry}" -type f | wc -l)
source_total_size=$(du -sb "${mount_point}/${source_path}/${normalized_base_entry}_temp" | cut -f1)
destination_total_size=$(du -sb "${mount_point}/${source_path}/${normalized_base_entry}" | cut -f1)
if [ "$source_file_count" -eq "$destination_file_count" ] && [ "$source_total_size" -eq "$destination_total_size" ]; then
echo "Validation successful, cleanup can proceed."
rm -r "${mount_point}/${source_path}/${normalized_base_entry}_temp"
converted_folders+=("$entry") # Save the name of the converted folder
else
echo "Validation failed. Source and destination file count or total size do not match."
echo "Source files: $source_file_count, Destination files: $destination_file_count"
echo "Source total size: $source_total_size, Destination total size: $destination_total_size"
fi
elif [ "$cleanup" = "no" ]; then
echo "Cleanup is disabled.. Skipping cleanup for ${entry}"
else
echo "Rsync encountered an error. Skipping cleanup for ${entry}"
fi
else
echo "Failed to create new dataset ${source_path}/${normalized_base_entry}"
fi
fi
else
echo "Skipping folder ${entry} due to insufficient space"
fi
fi
fi
done
}
print_new_datasets() {
echo "The following folders were successfully converted to datasets:"
for folder in "${converted_folders[@]}"; do
echo "$folder"
done
}
#
#
# Run the functions
stop_docker_containers
stop_virtual_machines
create_datasets
start_docker_containers
start_virtual_machines
print_new_datasets
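#
# Usage note: for a first pass, set dry_run="yes" above; every action is only printed and nothing is stopped or converted.
# On Unraid a script like this is typically scheduled via the User Scripts plugin (for example on a daily cron).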