// Scripted by iMagic
// ZippySQL v1.0

// This is the folder backups are saved to.
// It should not be included with the other paths,
// since the backups folder itself should not be backed up.
//
// Make sure (npcserver) has rw rights to this!
// Additionally, add the following to your folder config:
// file backups/*.*
const BACKUP_FOLDER = "backups/";

// This is the SQLite database Zippy connects to.
// Make sure to add the following to your serverops:
//
// database=zippy,zippy
const DATABASE = "zippy";

// Whether to echo the progress of running backup processes
const DEBUG_PROGRESS = false;

// DBNPC properties have a max length (string length, not array size!)
// of 60,000 characters. Data can likely still be stored beyond that
// point without raising errors, but it would be corrupted, with
// unpredictable behavior.
//
// We'll use this value to spread data across different
// properties dynamically so that this isn't an issue.
// DO NOT CHANGE THIS VALUE
const PROPERTY_OVERLOAD_LENGTH = 60000;

// After a conversation with Merlin we found
// that the game server has a `scriptwrite` limit for
// file system operations:
// U32 TServerOptions::scriptwritelimit = 10000000;
// Going over this limit within an unknown timeframe
// (assumed to be 1 minute) seems to be what causes
// flood alerts.
// DO NOT CHANGE THIS VALUE
const SERVER_WRITE_LIMIT = 10000000;
// Feel free to change these at will
enum FLOOD_CONTROL {
  // How many files to attempt processing at once.
  //#
  // Greater values increase the risk of flood alerts.
  // Smaller values make backup processes last longer.
  CHUNK_SIZE = 50,
  // Filesystem read/write operations in this script trigger
  // a "tick". The write limit is the max amount of ticks
  // we can trigger before enforcing a pause on the script.
  // Surpassing the SERVER_WRITE_LIMIT value too quickly is
  // what causes a flood alert. Since this is a server-wide
  // value, we'll attempt to use no more than 0.1% of it.
  // If you own a smaller server you should be able to increase
  // the factor to 0.01 for 1%.
  //#
  // Larger values increase the risk of flood alerts.
  WRITE_LIMIT = SERVER_WRITE_LIMIT * /* 0.1% */ 0.001,
  // Another reason why the script may decide to take
  // a nap is because it's causing too much stress on
  // the server. Adjust the max CPU usage to match your
  // server's needs.
  //#
  // Larger values increase the risk of flood alerts.
  // Lower values make the entire backup process
  // last longer.
  CPU_STRESS_THRESHOLD = 0.45,
  // After "ticks" reaches its threshold, this determines
  // how many seconds the script will sleep for.
  // Ideally this value should remain 60, since the server
  // write limit appears to reset on a one-minute timer before
  // allowing more write operations.
  //#
  // Smaller values increase the risk of flood alerts.
  NAP_TIME = 60
};
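// Worked example of the defaults above: WRITE_LIMIT resolves to
// 10,000,000 * 0.001 = 10,000 ticks, so the script sleeps for
// NAP_TIME (60) seconds after every 10,000 file operations, while
// handling CHUNK_SIZE (50) files per scheduled pass.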
// Enums for paths
// Make sure the (npcserver) has AT LEAST read access to these folders!
enum PATHS {
  ACCOUNTS = "accounts/",
  DATABASES = "databases/",
  NPCS = "npcs/",
  LOGS = "logs/",
  LEVELS = "levels/",
  TEMP = "temp/",
  SCRIPT_FILES = "scriptfiles/",
  SCRIPTS = "scripts/",
  STORAGE = "storage/",
  WEAPONS = "weapons/"
};
enum SAVE_OPTIONS {
  ACCOUNTS = 1 << 0,
  DATABASES = 1 << 1,
  NPCS = 1 << 2,
  LOGS = 1 << 3,
  LEVELS = 1 << 4,
  TEMP = 1 << 5,
  SCRIPT_FILES = 1 << 6,
  SCRIPTS = 1 << 7,
  STORAGE = 1 << 8,
  WEAPONS = 1 << 9
};
// Computed from the highest bit in the SAVE_OPTIONS enum.
// Update the reference below if a higher bit is ever added!
const SAVE_OPTIONS_ALL = (SAVE_OPTIONS.WEAPONS << 1) - 1;
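// Illustrative mask compositions (not used anywhere in this script):
//   everything except levels:  SAVE_OPTIONS_ALL & ~SAVE_OPTIONS.LEVELS
//   only scripts and weapons:  SAVE_OPTIONS.SCRIPTS | SAVE_OPTIONS.WEAPONS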
// Handy shorthands for setting up
// backup lifespans
enum TIME {
  MINUTE = 60,
  HOUR = 3600,
  DAY = 86400,
  WEEK = 604800,
  AVG_MONTH = 2630016,
  // We use NEVER to say more explicitly that we don't
  // want a backup to be deleted. Moreover, we specifically
  // add NEVER at this position so that it inherits the value
  // of AVG_MONTH + 1, which helps differentiate it from NULL
  // in logic checks.
  NEVER
};
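// Illustrative lifespans built from these shorthands:
//   TIME.DAY * 3   -> 259,200 seconds (three days)
//   TIME.WEEK * 2  -> 1,209,600 seconds (two weeks)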
// This is the default lifetime for every backup.
// For backups to persist forever, or until manually deleted
// from the database (WHICH IS NOT RECOMMENDED), TIME.NEVER must be
// passed to onStartBackup.
const DEFAULT_BACKUP_LIFESPAN = TIME.AVG_MONTH;

function onInitialized() {
  this.scheduleEvent(5, "onCreated");
}

function onCreated() {
  // This is only allowed on specific servers.
  // Where available, this takes advantage of the raised loop limit.
  this.looplimit = 0xffffffff;
  // Stop any ongoing processes
  this.cancelevents("onZipPaths");
  // Run this function once to set up the SQL tables
  //onInitialSetup();
}
/************************************************************************
 *************************** EXAMPLES ******************************
 ************************************************************************/
// The example functions all have a function modifier of 'public'.
// This is just to show that these functions can be called
// externally by other scripts like, say, a cron scheduler.
public function onFullBackupExample() {
  // In this THEORETICAL scenario, we set configuration
  // bits that toggle most of the folders, leaving out
  // the levels folder because of how big it can
  // be (not that all of the others combined aren't!).
  //
  // Although this tool could theoretically
  // back up all of the files on the server,
  // it should never be used for that purpose.
  temp.config =
    SAVE_OPTIONS.NPCS |
    SAVE_OPTIONS.DATABASES |
    SAVE_OPTIONS.WEAPONS |
    SAVE_OPTIONS.LOGS |
    SAVE_OPTIONS.STORAGE |
    SAVE_OPTIONS.SCRIPT_FILES |
    SAVE_OPTIONS.SCRIPTS;
  // "Everything except levels" can also be written like this:
  //temp.config = SAVE_OPTIONS_ALL & ~SAVE_OPTIONS.LEVELS;
  // This initiates the backup process
  onStartBackup(null, temp.config, false, TIME.WEEK);
}
public function onLogsBackupExample() {
  /**
    In this example we create a backup of JUST the logs folder that
    expires after a week. It is also immediately written to the backups
    folder as a zip file.
    - The first argument is an identifier of sorts that is added
      to the zip file's filename so that you can tell backups apart
      more easily.
    - The second argument is the bit for the LOGS folder.
    - The third argument is a boolean that determines whether or not
      the backup is written as a zip file to the backups folder.
    With this configuration we can expect a filename similar to this
    one to be created for our backup:
    backups/2025_03_14_logs_backup_69d8e420.zip
  */
  // This initiates the backup process
  onStartBackup("logs_backup", SAVE_OPTIONS.LOGS, true, TIME.WEEK);
}
public function onCustomBackupExample() {
  // In this example we want to create a more complex
  // backup file that only lasts 3 days. The `additional_paths`
  // argument gives us a lot of flexibility when choosing what
  // is backed up.
  // The following paths exist exclusively for the purposes
  // of demonstration.
  temp.custom_backup_paths = {
    // Backup all of the files that may be
    // in the favorite books folder
    "books/favorites/",
    // Add a single file to the backup
    "books/scifi/snow_crash.pdf", // Neal Stephenson's cyberpunk novel that coined the term "Metaverse"
    // Backup just the '.png' files matching a pattern.
    // '*' here is a wildcard matching anything between
    // the 'cover_' prefix and the '.png' extension
    "books/covers/cover_*.png"
  };
  // With this configuration we can expect a filename similar
  // to this one to be created for our backup:
  // backups/2025_03_09_books-images_69d8e420.zip
  onStartBackup("books-images", null, true, TIME.DAY * 3, temp.custom_backup_paths);
}
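// A hypothetical extra example (not part of the original set): a sketch
// of a permanent full backup using SAVE_OPTIONS_ALL and TIME.NEVER.
// The identifier "full_backup" is illustrative.
public function onPermanentBackupExample() {
  // NEVER-lifespan backups stay in the database until manually
  // deleted (see the DEFAULT_BACKUP_LIFESPAN notes above), so
  // use this sparingly.
  onStartBackup("full_backup", SAVE_OPTIONS_ALL, true, TIME.NEVER);
}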
/************************************************************************
 ************************ CORE FUNCTIONS ***************************
 ************************************************************************/
function onInitialSetup() {
  onBackupEcho(format("Setting up ZippySQL..."));
  // Enable auto vacuuming after row deletion. This helps
  // reduce the filesize of the SQLite database when
  // large rows are deleted. It does come with some
  // overhead costs, but they are worth it.
  // This must be done while the database is new and
  // no tables exist yet.
  onExecuteSQL("PRAGMA auto_vacuum = 2");
  // Create the backups table.
  // This is where the final zips are stored.
  temp.sql = onExecuteSQL(
    "CREATE TABLE IF NOT EXISTS backups (" @
      "id TEXT NOT NULL UNIQUE," @
      "created_at INTEGER NOT NULL," @
      "remove_at INTEGER," @
      "zip BLOB NOT NULL" @
    ")", true
  );
  if (temp.sql.error) {
    onBackupEcho("Failed to create backups table:" SPC temp.sql.error);
    return;
  }
  temp.sql = onExecuteSQL(
    "CREATE UNIQUE INDEX IF NOT EXISTS idx_backups_id_unique ON backups(id)"
  );
  if (temp.sql.error) {
    onBackupEcho("Failed to create index for backups table:" SPC temp.sql.error);
    return;
  }
  // Create the process table
  temp.sql = onExecuteSQL(
    "CREATE TABLE IF NOT EXISTS process (" @
      "id TEXT NOT NULL UNIQUE," @
      "total_files INTEGER NOT NULL DEFAULT 0," @
      "processed_files INTEGER NOT NULL DEFAULT 0," @
      "processed_big_files INTEGER NOT NULL DEFAULT 0," @
      "last_logged_percent REAL NOT NULL DEFAULT 0," @
      "read_errors INTEGER NOT NULL DEFAULT 0," @
      "start INTEGER NOT NULL," @
      "paths TEXT" @
    ")", true
  );
  if (temp.sql.error) {
    onBackupEcho("Failed to create process table:" SPC temp.sql.error);
    return;
  }
  // Create the chunks table
  temp.sql = onExecuteSQL(
    "CREATE TABLE IF NOT EXISTS chunks (" @
      "id TEXT NOT NULL," @
      "filename TEXT NOT NULL," @
      "content BLOB NOT NULL" @
    ")", true
  );
  if (temp.sql.error) {
    onBackupEcho("Failed to create chunks table:" SPC temp.sql.error);
    return;
  }
  onBackupEcho("Setup complete!");
}
/**
 * Starts a backup process with the given parameters.
 * @param {string} temp.custom_identifier
    A custom identifier to include in the backup name.
 * @param {number} temp.options
    A bitfield representing the backup options. Each bit corresponds
    to a specific path to include in the backup.
 * @param {boolean} temp.output_file
    Whether to output the backup as a file in the backups folder.
    If `false`, the backup remains as a record in the SQLite database
    until its lifetime expires.
 * @param {number | null} temp.lifespan
    An amount of seconds to keep the backup in the database. If null,
    the backup is permanent unless manually removed.
 * @param {array} temp.additional_paths
    Additional paths to include in the backup.
 */
function onStartBackup(
  temp.custom_identifier = "",
  temp.options = 0,
  temp.output_file = false,
  temp.lifespan = 0,
  temp.additional_paths = {}
) {
  // Prune expired backups
  onPruneExpiredBackups();
  // Create a backup identifier name.
  // The format is: YYYY_MM_DD_identifier?_randomchars
  temp.id = format(
    "%s_%s%s",
    formattimestring("%Y_%m_%d", timevar2),
    temp.custom_identifier == NULL ? "" : temp.custom_identifier @ "_",
    // This will help identify our process if there are multiple backups happening at once
    generateRandomChars(8)
  );
  //#
  temp.paths = {};
  // Iterate over the options bitfield, adding enabled paths
  if (temp.options != NULL) {
    for (temp.bit = 1; temp.bit <= SAVE_OPTIONS_ALL; temp.bit <<= 1) {
      // Check whether or not the current bit is toggled
      temp.enabled = (temp.options & temp.bit) != 0;
      if (!temp.enabled) { continue; }
      // Get the name of the path and add it to the paths list,
      // assuming we haven't added it before
      temp.path = getPathForOption(temp.bit);
      if (temp.path && !(temp.path in temp.paths)) {
        temp.paths.add(temp.path);
      }
    }
  }
  // Check if we should add any additional paths
  // given as an argument
  if (temp.additional_paths.size() > 0) {
    // Add the additional paths to our paths list
    temp.paths.addarray(temp.additional_paths);
    temp.paths = getTopLevelPaths(temp.paths);
  }
  temp.query = onExecuteSQL(format(
    "INSERT INTO process (id, start) " @
    "VALUES ('%s', %d)",
    temp.id, timevar2
  ), true);
  // Resolve all of the paths into files
  expandPathsToFiles(temp.id, temp.paths);
  //#
  temp.query = onExecuteSQL(format(
    "SELECT total_files FROM process WHERE id = '%s' LIMIT 1",
    temp.id
  ));
  if (temp.query.error || !temp.query.total_files) {
    onBackupEcho(format(
      "Warning! No paths to backup for '%s'! Stopping...",
      temp.id
    ));
    // Delete any residual records that might've been created
    onDeleteBackupRecords(temp.id);
    return;
  }
  // Log the start of the backup process
  onBackupEcho(format(
    "Starting backup process with id: %s..." @
    "\n\tTotal files to backup: %d" @
    "\n\tEstimated duration: %s",
    temp.id,
    temp.query.total_files,
    estimateBackupDuration(temp.query.total_files)
  ));
  // Start the zipping
  this.scheduleEvent(5, "onZipPaths", temp.id, temp.output_file, temp.lifespan);
}
function onZipPaths(temp.id, temp.output_file, temp.lifespan) {
  if (!temp.id) { return; }
  // Get the data for the current process
  temp.process = onExecuteSQL(format(
    "SELECT * FROM process WHERE id = '%s' LIMIT 1",
    temp.id
  ), true);
  // The record of this process could not be found...
  if (temp.process.error) {
    onBackupEcho(format("Could not find process record for process: %s. Aborting...", temp.id));
    onDeleteBackupRecords(temp.id);
    return;
  }
  // Extract the paths from the process.
  // We resolve the file paths in chunks of at most
  // FLOOD_CONTROL.CHUNK_SIZE paths per pass.
  temp.paths = temp.process.paths.subarray(0, min(temp.process.paths.size(), FLOOD_CONTROL.CHUNK_SIZE));
  temp.paths_size = temp.paths.size();
  // If we have no more files to process...
  if (temp.paths_size <= 0) {
    this.scheduleEvent(5, "onFinishZip", temp.id, temp.output_file, temp.lifespan);
    return;
  }
  // This is used in the SQL query to remove the processed
  // paths from the database
  temp.paths_length = temp.paths.length();
  for (temp.i = 0; temp.i < temp.paths_size; temp.i++) {
    temp.filename = temp.paths[temp.i];
    // If this file doesn't exist, move on
    if (!fileexists(temp.filename)) {
      temp.process.read_errors++;
      onTick();
      continue;
    }
    // Load the contents of the file and get its length
    temp.content.loadstring(temp.filename);
    onTick();
    // Turn the content into a blob
    temp.content = bintohex(temp.content);
    temp.content_length = temp.content.length();
    if (temp.content_length <= 0) {
      temp.process.read_errors++;
      continue;
    }
    //#
    temp.process.processed_files++;
    temp.cost = temp.filename.length() + temp.content.length();
    temp.is_big_file = (temp.cost > PROPERTY_OVERLOAD_LENGTH);
    // Increase the count of big files processed
    if (temp.is_big_file) {
      temp.process.processed_big_files++;
    }
    onExecuteSQL(format("INSERT INTO chunks VALUES ('%s', '%s', x'%s')", temp.id, temp.filename, temp.content));
  }
  // Calculate current progress percentage
  temp.percent = temp.process.processed_files / temp.process.total_files;
  temp.closest_tenth = closestTenPercent(temp.percent);
  // Log progress at ~10% intervals, comparing against the
  // percentage we last logged at
  if (DEBUG_PROGRESS && temp.closest_tenth > temp.process.last_logged_percent) {
    temp.process.last_logged_percent = temp.closest_tenth;
    onBackupEcho(format(
      "Backup progress (%s): %d/%d files processed (%d%%) [%d big files]",
      temp.id,
      temp.process.processed_files,
      temp.process.total_files,
      temp.closest_tenth * 100,
      temp.process.processed_big_files
    ));
  }
  // Remove the paths we just iterated over from the database
  // and update the other progress variables for the next iteration
  onExecuteSQL(format(
    "UPDATE process " @
    "SET " @
      "processed_files = %d," @
      "processed_big_files = %d," @
      "last_logged_percent = %f," @
      "read_errors = %d," @
      "paths = CASE " @
        "WHEN LENGTH(paths) <= %d THEN NULL " @
        "ELSE SUBSTR(paths, %d + 2) " @
      "END " @
    "WHERE id = '%s'",
    temp.process.processed_files,
    temp.process.processed_big_files,
    temp.process.last_logged_percent,
    temp.process.read_errors,
    temp.paths_length, temp.paths_length,
    temp.id
  ));
  // Schedule the next zipping pass
  this.scheduleEvent(FLOOD_CONTROL.NAP_TIME * 0.1, "onZipPaths", temp.id, temp.output_file, temp.lifespan);
}
function onFinishZip(temp.id = "", temp.output_file = false, temp.lifespan = 0) {
  if (!temp.id) { return; }
  // Solidify the lifespan of the zip file
  if (temp.lifespan <= 0) {
    temp.lifespan = DEFAULT_BACKUP_LIFESPAN;
  } else if (temp.lifespan == TIME.NEVER) {
    temp.lifespan = null;
  }
  onBackupEcho("Building final zip file...");
  // Get the data for the current process
  temp.process = onExecuteSQL(format(
    "SELECT * FROM process WHERE id = '%s'",
    temp.id
  ));
  // The process record could not be loaded
  if (temp.process.error) {
    onBackupEcho(format(
      "Warning! Could not load process record '%s' in final step. Aborting...",
      temp.id
    ));
    onDeleteBackupRecords(temp.id);
    return;
  }
  // This loads as many as FLOOD_CONTROL.CHUNK_SIZE chunks
  // into the array and removes the returned rows
  temp.chunks = onFetchChunksDestructive(temp.id);
  temp.chunks_size = temp.chunks.rows.size();
  // If no chunks were found or there was an error...
  if (temp.chunks.error || temp.chunks_size <= 0) {
    onBackupEcho(format(
      "Warning! No stored chunks found for backup '%s' in final step. Aborting...",
      temp.id
    ));
    onDeleteBackupRecords(temp.id);
    return;
  }
  // Create an array to collect the contents of all
  // of the files to be backed up so that they can be
  // turned into a zip file
  temp.zip = {};
  do {
    // Keep track of the amount of chunks we
    // started with...
    temp.initial_chunks = temp.chunks_size;
    // Iterate over all of the chunks, adding the contents
    // to the final zip file
    for (temp.chunk : temp.chunks.rows) {
      temp.chunks_size--;
      temp.zip.addarray({temp.chunk.filename, hextobin(temp.chunk.content)});
    }
    // If we had the max amount of chunks, that means
    // there may still be chunks left to process...
    if (temp.initial_chunks >= FLOOD_CONTROL.CHUNK_SIZE) {
      temp.chunks = onFetchChunksDestructive(temp.id);
      temp.chunks_size = temp.chunks.rows.size();
    }
  } while (temp.chunks_size > 0);
  // Remove the process and chunks
  onExecuteSQL(format("DELETE FROM process WHERE id = '%s'", temp.id));
  onExecuteSQL(format("DELETE FROM chunks WHERE id = '%s'", temp.id));
  // Convert the collected files into a zip string and store it...
  temp.zip = generatezipstring(temp.zip);
  temp.backup = onExecuteSQL(format(
    "INSERT INTO backups VALUES('%s', %d, %d, x'%s')",
    temp.id,
    timevar2,
    temp.lifespan == null ? 0 : timevar2 + temp.lifespan,
    bintohex(temp.zip)
  ), true);
  if (temp.backup.error) {
    onBackupEcho(format("Aborted! Failed to add backup '%s' with error: %s", temp.id, temp.backup.error));
    onDeleteBackupRecords(temp.id);
    return;
  }
  if (temp.output_file) {
    temp.filename = format("%s%s.zip", BACKUP_FOLDER, temp.id);
    temp.zip.savestring(temp.filename, 0);
    onTick();
  }
  onBackupEcho(format(
    "Finished backup '%s':" @
    "\n\tFiles Processed: %d/%d (%d read errors)" @
    "\n\tBig Files Processed: %d" @
    "\n\tDuration: %s" @
    "%s",
    temp.id,
    temp.process.processed_files, temp.process.total_files, temp.process.read_errors,
    temp.process.processed_big_files,
    getMinutesAndSecondsFromTime(timevar2 - temp.process.start),
    temp.output_file ? format("\n\tBackup file: %s (%s)", temp.filename, getFormattedFileSize(filesize(temp.filename))) : ""
  ));
}
function onDeleteBackupRecords(temp.id = "") {
  onExecuteSQL(format("DELETE FROM backups WHERE id = '%s'", temp.id));
  onExecuteSQL(format("DELETE FROM process WHERE id = '%s'", temp.id));
  onExecuteSQL(format("DELETE FROM chunks WHERE id = '%s'", temp.id));
}

function onPruneExpiredBackups() {
  // Backups with remove_at = 0 never expire (TIME.NEVER)
  onExecuteSQL(format(
    "DELETE FROM backups WHERE remove_at > 0 AND remove_at <= %d; VACUUM;", timevar2
  ));
}
function onExportBackup(temp.id = "") {
  // Get the most recent backup
  temp.backup = onExecuteSQL(format(
    "SELECT HEX(zip) AS zip " @
    "FROM backups " @
    "WHERE id = '%s' " @
    "ORDER BY created_at DESC " @
    "LIMIT 1",
    temp.id
  ));
  if (temp.backup.error || temp.backup.rows.size() <= 0) {
    onBackupEcho(format("Error! Could not find backup file for id: '%s'", temp.id));
    return;
  }
  temp.content = hextobin(temp.backup.zip);
  temp.content.savestring(format("%s%s.zip", BACKUP_FOLDER, temp.id), 0);
}
function onUpdateFilePaths(temp.id = "", temp.files = {}) {
  onExecuteSQL(format(
    "UPDATE process " @
    "SET " @
      "total_files = total_files + %d, " @
      "paths = CASE " @
        "WHEN paths IS NULL THEN '%s' " @
        "ELSE paths || '%s' " @
      "END " @
    "WHERE id = '%s'",
    temp.files.size(), temp.files, temp.files, temp.id
  ));
}

function onFetchChunksDestructive(temp.id = "") {
  temp.chunks = onExecuteSQL(format(
    "SELECT filename, HEX(content) AS content FROM chunks " @
    "WHERE id = '%s' " @
    "ORDER BY filename " @
    "LIMIT %d; ",
    temp.id, FLOOD_CONTROL.CHUNK_SIZE
  ));
  onExecuteSQL(format(
    "DELETE FROM chunks " @
    "WHERE rowid IN ( " @
      "SELECT rowid FROM chunks " @
      "WHERE id = '%s' " @
      "ORDER BY filename " @
      "LIMIT %d " @
    ") ",
    temp.id, FLOOD_CONTROL.CHUNK_SIZE
  ));
  return temp.chunks;
}
/************************************************************************
 ************************ UTILITY FUNCTIONS ************************
 ************************************************************************/
function onBackupEcho(temp.msg) {
  echo(format("[ZippySQL]: %s", temp.msg));
}

function onBackupDebug(temp.msg) {
  echo(format("[ZippySQL Debug]: %s", temp.msg));
}

function onCPUTick() {
  if (this.last_cpu_check < timevar2) {
    this.last_cpu_check = timevar2 + FLOOD_CONTROL.NAP_TIME * 0.25;
    if (getprocesscpuusage() > FLOOD_CONTROL.CPU_STRESS_THRESHOLD) {
      onBackupDebug("Zippy is taking a CPU usage break");
      sleep(FLOOD_CONTROL.NAP_TIME);
    }
  }
}

function onTick() {
  // Trigger a CPU tick which checks CPU usage
  onCPUTick();
  // Increase the amount of ticks
  this.ticks++;
  // Take a little nap... zzz...
  if (this.ticks >= FLOOD_CONTROL.WRITE_LIMIT) {
    this.ticks = 0;
    sleep(FLOOD_CONTROL.NAP_TIME);
  }
}
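// Illustrative call pattern (how the rest of this script uses it):
// every filesystem touch is followed by onTick() so the write budget
// and CPU checks are enforced, e.g.:
//   temp.content.loadstring(temp.filename);
//   onTick();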
// Returns a folder path as a string given a path as a numeric bit
function getPathForOption(temp.bit) {
  switch (temp.bit) {
    case SAVE_OPTIONS.ACCOUNTS: return PATHS.ACCOUNTS;
    case SAVE_OPTIONS.DATABASES: return PATHS.DATABASES;
    case SAVE_OPTIONS.NPCS: return PATHS.NPCS;
    case SAVE_OPTIONS.LOGS: return PATHS.LOGS;
    case SAVE_OPTIONS.LEVELS: return PATHS.LEVELS;
    case SAVE_OPTIONS.TEMP: return PATHS.TEMP;
    case SAVE_OPTIONS.SCRIPT_FILES: return PATHS.SCRIPT_FILES;
    case SAVE_OPTIONS.SCRIPTS: return PATHS.SCRIPTS;
    case SAVE_OPTIONS.STORAGE: return PATHS.STORAGE;
    case SAVE_OPTIONS.WEAPONS: return PATHS.WEAPONS;
    default: return NULL;
  }
}
/**
 * Removes duplicate elements from an array by using a two-pointer approach.
 * This function sorts a copy of the array and iterates through it,
 * skipping duplicates, then returns a new array with only unique elements.
 *
 * @param {Array} temp.arr - The array to make unique.
 * @returns {Array} - A new array with duplicates removed.
 */
function makeUniqueTwoPointer(temp.arr = {}) {
  if (temp.arr.size() <= 1) { return temp.arr; }
  // Create a copy of the array
  temp.list = temp.arr;
  temp.list_size = temp.list.size();
  //#
  temp.list.sortascending();
  // Start at 1 since the first element is always unique
  temp.write_index = 1;
  for (temp.read_index = 1; temp.read_index < temp.list_size; temp.read_index++) {
    // If the current element is different from the previous, it's unique
    if (temp.list[temp.read_index] != temp.list[temp.read_index - 1]) {
      temp.list[temp.write_index] = temp.list[temp.read_index];
      temp.write_index++;
    }
  }
  // Everything prior to the write index is unique
  return temp.list.subarray(0, temp.write_index);
}
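// Example: makeUniqueTwoPointer({"b", "a", "c", "a"})
// sorts the copy to {"a", "a", "b", "c"} and returns {"a", "b", "c"}.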
/**
 * Given an array of paths, this function returns an array of top-level paths.
 * It separates directories and files, removes subdirectories from directories,
 * and ensures files are included only if they are not covered by a directory.
 *
 * @param {Array} temp.paths - An array containing the paths to be processed
 * @returns {Array} - An array of top-level directories and files.
 */
function getTopLevelPaths(temp.paths = {}) {
  // This sorts the paths alphabetically and removes possible duplicates
  temp.paths = makeUniqueTwoPointer(temp.paths);
  temp.total_paths = temp.paths.size();
  temp.top_level = {};
  if (temp.total_paths <= 0) {
    return temp.top_level;
  }
  for (temp.i = 0; temp.i < temp.total_paths; temp.i++) {
    temp.is_covered = false;
    for (temp.j = 0; temp.j < temp.total_paths; temp.j++) {
      if (temp.i == temp.j) { continue; }
      temp.a = temp.paths[temp.i];
      temp.b = temp.paths[temp.j];
      // This means temp.a is covered by a higher level folder
      if (isPathCovered(temp.a, temp.b)) {
        temp.is_covered = true;
        break;
      }
    }
    // Only add uncovered paths to the final array
    if (!temp.is_covered) {
      temp.top_level.add(temp.paths[temp.i]);
    }
  }
  // Return all of the top-level (uncovered) files
  return temp.top_level;
}
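// Example: getTopLevelPaths({"books/", "books/a.txt", "logs/x.log"})
// returns {"books/", "logs/x.log"}, since "books/a.txt" is already
// covered by the "books/" folder.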
/**
 * This function checks whether or not a path is covered by a potential parent
 *
 * @returns {Boolean} - Whether or not the path is covered by the parent.
 */
function isPathCovered(temp.path = "", temp.parent = "") {
  // Identical paths are treated as covered (duplicates)
  if (temp.path == temp.parent) { return true; }
  //#
  temp.parent_is_folder = (temp.parent.ends("/") && temp.parent == extractfilepath(temp.parent));
  temp.parent_has_wildcard = temp.parent.pos("*") > -1;
  if (temp.parent_is_folder) {
    // Return whether or not the path starts with the parent folder
    return temp.path.starts(temp.parent) && (temp.path != temp.parent);
  }
  if (temp.parent_has_wildcard) {
    // Split the parent into prefix and suffix based on the wildcard
    temp.tokens = temp.parent.tokenize("*");
    temp.prefix = temp.tokens[0];
    temp.suffix = temp.tokens[1];
    // If the path starts with the prefix and ends with the suffix
    if (temp.path.starts(temp.prefix) && temp.path.ends(temp.suffix)) {
      return true;
    }
    // If there's no suffix, just check if the path starts with the prefix
    if (!temp.suffix) {
      return temp.path.starts(temp.prefix);
    }
  }
  // Otherwise, the path is not covered
  return false;
}
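// Examples:
//   isPathCovered("books/scifi/snow_crash.pdf", "books/") -> true
//   isPathCovered("books/covers/cover_1.png", "books/covers/cover_*.png") -> true
//   isPathCovered("logs/serverlog.txt", "books/") -> false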
/**
  This function takes an array of paths and resolves them
  into files.
  This step is mostly for paths which are folders or may
  have a wildcard. Otherwise the path would already point
  to a file.
  @param {String} temp.id - The process id for these paths
  @param {Array} temp.paths - Array of top-level paths
*/
function expandPathsToFiles(temp.id = "", temp.paths = {}) {
  //#
  temp.paths_size = temp.paths.size();
  temp.files = {};
  for (temp.path : temp.paths) {
    temp.is_folder = (temp.path.ends("/") && temp.path == extractfilepath(temp.path));
    temp.has_wildcard = temp.path.pos("*") > -1;
    // If this is not a folder nor a path with a wildcard,
    // that must mean this is a direct path to a file
    if (!temp.is_folder && !temp.has_wildcard) {
      // Check for overflow before adding
      temp.will_overflow = temp.files.length() + temp.path.length() > PROPERTY_OVERLOAD_LENGTH;
      // If this would cause a property overflow,
      // flush the files array before adding
      if (temp.will_overflow) {
        onUpdateFilePaths(temp.id, temp.files);
        temp.files = {};
      }
      temp.files.add(temp.path);
      continue;
    }
    // At this point, we need to use loadfolder
    // because the path is either a folder
    // or has a wildcard.
    temp.found_files = {};
    // We use loadfolder here instead,
    // since findfiles can only find files
    // inside of levels/
    // We also append a wildcard to folder paths in this next line
    // so that we get all of the files inside them
    temp.found_files.loadfolder(temp.path @ (temp.is_folder && !temp.path.ends("*") ? "*" : ""), true);
    temp.found_files_size = temp.found_files.size();
    onTick();
    // No files were found
    if (temp.found_files_size <= 0) { continue; }
    // Iterate over all of the found files to add them to our
    // final object
    for (temp.j = 0; temp.j < temp.found_files_size; temp.j++) {
      // TGraalVar.loadfolder returns just filenames, not full paths.
      // Re-attach the folder portion of the search path to each found
      // file (this also strips any wildcard filename from the path)
      temp.found_file = extractfilepath(temp.path) @ temp.found_files[temp.j];
      // Check for overflow before adding
      temp.will_overflow = temp.files.length() + temp.found_file.length() > PROPERTY_OVERLOAD_LENGTH;
      // If this would cause a property overflow,
      // flush the files array before adding
      if (temp.will_overflow) {
        onUpdateFilePaths(temp.id, temp.files);
        temp.files = {};
      }
      temp.files.add(temp.found_file);
    }
  }
  // Add leftover files to the paths
  if (temp.files.size() > 0) {
    onUpdateFilePaths(temp.id, temp.files);
  }
}
function generateRandomChars(temp.length = 0) {
  temp.chars = "abcdef1234567890";
  temp.chars_length = temp.chars.length();
  temp.result = "";
  for (temp.i = 0; temp.i < temp.length; temp.i++) {
    temp.index = int(random(0, 1) * temp.chars_length);
    temp.result @= temp.chars.charat(temp.index);
  }
  return temp.result;
}
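// Example: generateRandomChars(8) yields something like "69d8e420"
// (hex-style characters only, matching the filename examples above).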
function getFormattedFileSize(temp.bytes = 0) {
  temp.units = {"b", "kb", "mb", "gb"};
  temp.index = 0;
  while (temp.bytes >= 1024 && temp.index < temp.units.size() - 1) {
    temp.bytes /= 1024;
    temp.index++;
  }
  return format("%.2f %s", temp.bytes, temp.units[temp.index]);
}
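// Example: getFormattedFileSize(1536000) -> "1.46 mb"
// (1,536,000 / 1024 = 1500 kb; 1500 / 1024 ~= 1.46 mb)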
function estimateBackupDuration(temp.total_files = 0) {
  const TIME_PER_FILE = 0.1;
  temp.iterations = int(temp.total_files / FLOOD_CONTROL.CHUNK_SIZE);
  temp.processing_time = temp.total_files * TIME_PER_FILE;
  temp.pause_time = temp.iterations * (FLOOD_CONTROL.NAP_TIME * 0.1);
  // The 20 is a rough estimate of how much time is spent in waitfor calls
  temp.total_time = 20 + temp.processing_time + temp.pause_time;
  // Convert total time to minutes and seconds for readability
  return getMinutesAndSecondsFromTime(temp.total_time);
}
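// Worked example with the default FLOOD_CONTROL values and 500 files:
//   iterations      = int(500 / 50)   = 10
//   processing_time = 500 * 0.1       = 50 seconds
//   pause_time      = 10 * (60 * 0.1) = 60 seconds
//   total           = 20 + 50 + 60    = 130 -> "2 minute(s) and 10 second(s)"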
function getMinutesAndSecondsFromTime(temp.time = 0) {
  temp.minutes = int(temp.time / 60);
  temp.seconds = int(temp.time % 60);
  return format("%d minute(s) and %d second(s)", temp.minutes, temp.seconds);
}
function closestTenPercent(temp.n) {
  if (temp.n > 1 || temp.n < 0) { return temp.n; }
  temp.scaled = int(temp.n * 100);
  // Extract the tenths and hundredths
  temp.tenths = int(temp.scaled / 10);
  temp.hundredths = temp.scaled % 10;
  if (temp.hundredths >= 5) {
    temp.tenths += 1;
  }
  // Handle carryover (e.g. 0.95 becomes 1.0)
  if (temp.tenths >= 10) {
    return 1.0;
  }
  return temp.tenths / 10;
}
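// Examples: closestTenPercent(0.42) -> 0.4, closestTenPercent(0.67) -> 0.7,
// closestTenPercent(0.96) -> 1.0 (carryover case).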
function onExecuteSQL(temp.query = "", temp.expect_return = false) {
  // Query debugging: an "echo " prefix flags the query for logging
  if (temp.query.lower().starts("echo")) {
    temp.do_echo = true;
    temp.query = temp.query.substring(5);
  }
  if (temp.query.lower().starts("select")) { temp.expect_return = true; }
  temp.t = timevar2;
  temp.request = requestsql2(DATABASE, temp.query, temp.expect_return);
  onCPUTick();
  // If we don't expect a response...
  if (!temp.expect_return) {
    if (temp.do_echo) {
      onBackupDebug(format("SQL Query Debug: %s", temp.query));
    }
    return temp.request;
  }
  if (!temp.request.completed && !temp.request.error) {
    waitfor(temp.request, "onReceiveData", 60);
  }
  // Just a way to keep track of the amount of SQL requests per minute
  temp.min_before = this.last_minute;
  this.last_minute = int(timevar2 / 60);
  if (temp.min_before == this.last_minute) {
    this.sql_reqs_last_minute++;
  } else {
    this.sql_reqs_last_minute = 1;
  }
  if (temp.request.error != NULL) {
    onBackupDebug(format("SQL Error!\n\tError: %s\n\tQuery: %s", temp.request.error, temp.query));
    savelog2("zippy_sqlerrors.txt", temp.request.error NL temp.query);
  } else {
    if (temp.do_echo) {
      onBackupDebug(format("SQL\n\tQuery: %s\n\tResponse: %s", temp.query, temp.request.savejsontostring(2)));
    }
  }
  return temp.request;
}
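// Example of the debug prefix handled above: prepending "echo " to a
// query logs it (and its response, for SELECTs) via onBackupDebug:
//   onExecuteSQL("echo SELECT COUNT(*) AS total FROM backups");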