From 576b46ea7086d0e8230f63fd3ac5cc69429f408a Mon Sep 17 00:00:00 2001 From: abraunegg Date: Tue, 8 Oct 2024 05:47:50 +1100 Subject: [PATCH] Improve performance with reduced execution time and lower CPU/system resource usage (#2880) * Use global variables for logging verbose|debug logging state * Only perform debug logging steps including calculations and/or output if debugging is actually enabled * Only perform verbose logging steps including calculations and/or output if verbose is actually enabled * Optimise code execution and reduce resource usage by ~12% in real time, ~65% in CPU time, and ~87% in system time. --- src/clientSideFiltering.d | 158 ++--- src/config.d | 155 ++--- src/curlEngine.d | 78 ++- src/itemdb.d | 36 +- src/log.d | 8 +- src/main.d | 180 +++--- src/monitor.d | 102 +-- src/onedrive.d | 195 +++--- src/sqlite.d | 8 +- src/sync.d | 1264 ++++++++++++++++++++----------------- src/util.d | 104 +-- 11 files changed, 1239 insertions(+), 1049 deletions(-) diff --git a/src/clientSideFiltering.d b/src/clientSideFiltering.d index 6d925c37a..07dfa7d0d 100644 --- a/src/clientSideFiltering.d +++ b/src/clientSideFiltering.d @@ -33,7 +33,7 @@ class ClientSideFiltering { // Initialise the required items bool initialise() { // Log what is being done - addLogEntry("Configuring Client Side Filtering (Selective Sync)", ["debug"]); + if (debugLogging) {addLogEntry("Configuring Client Side Filtering (Selective Sync)", ["debug"]);} // Load the sync_list file if it exists if (exists(appConfig.syncListFilePath)){ @@ -42,26 +42,32 @@ class ClientSideFiltering { // Configure skip_dir, skip_file, skip-dir-strict-match & skip_dotfiles from config entries // Handle skip_dir configuration in config file - addLogEntry("Configuring skip_dir ...", ["debug"]); - addLogEntry("skip_dir: " ~ to!string(appConfig.getValueString("skip_dir")), ["debug"]); + if (debugLogging) { + addLogEntry("Configuring skip_dir ...", ["debug"]); + addLogEntry("skip_dir: " ~ 
to!string(appConfig.getValueString("skip_dir")), ["debug"]); + } setDirMask(appConfig.getValueString("skip_dir")); // Was --skip-dir-strict-match configured? - addLogEntry("Configuring skip_dir_strict_match ...", ["debug"]); - addLogEntry("skip_dir_strict_match: " ~ to!string(appConfig.getValueBool("skip_dir_strict_match")), ["debug"]); + if (debugLogging) { + addLogEntry("Configuring skip_dir_strict_match ...", ["debug"]); + addLogEntry("skip_dir_strict_match: " ~ to!string(appConfig.getValueBool("skip_dir_strict_match")), ["debug"]); + } if (appConfig.getValueBool("skip_dir_strict_match")) { setSkipDirStrictMatch(); } // Was --skip-dot-files configured? - addLogEntry("Configuring skip_dotfiles ...", ["debug"]); - addLogEntry("skip_dotfiles: " ~ to!string(appConfig.getValueBool("skip_dotfiles")), ["debug"]); + if (debugLogging) { + addLogEntry("Configuring skip_dotfiles ...", ["debug"]); + addLogEntry("skip_dotfiles: " ~ to!string(appConfig.getValueBool("skip_dotfiles")), ["debug"]); + } if (appConfig.getValueBool("skip_dotfiles")) { setSkipDotfiles(); } // Handle skip_file configuration in config file - addLogEntry("Configuring skip_file ...", ["debug"]); + if (debugLogging) {addLogEntry("Configuring skip_file ...", ["debug"]);} // Validate skip_file to ensure that this does not contain an invalid configuration // Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process. 
@@ -74,7 +80,7 @@ class ClientSideFiltering { } // All skip_file entries are valid - addLogEntry("skip_file: " ~ appConfig.getValueString("skip_file"), ["debug"]); + if (debugLogging) {addLogEntry("skip_file: " ~ appConfig.getValueString("skip_file"), ["debug"]);} setFileMask(appConfig.getValueString("skip_file")); // All configured OK @@ -114,13 +120,13 @@ class ClientSideFiltering { // Configure the regex that will be used for 'skip_file' void setFileMask(const(char)[] mask) { fileMask = wild2regex(mask); - addLogEntry("Selective Sync File Mask: " ~ to!string(fileMask), ["debug"]); + if (debugLogging) {addLogEntry("Selective Sync File Mask: " ~ to!string(fileMask), ["debug"]);} } // Configure the regex that will be used for 'skip_dir' void setDirMask(const(char)[] dirmask) { directoryMask = wild2regex(dirmask); - addLogEntry("Selective Sync Directory Mask: " ~ to!string(directoryMask), ["debug"]); + if (debugLogging) {addLogEntry("Selective Sync Directory Mask: " ~ to!string(directoryMask), ["debug"]);} } // Configure skipDirStrictMatch if function is called @@ -151,16 +157,16 @@ class ClientSideFiltering { // Does the directory name match skip_dir config entry? // Returns true if the name matches a skip_dir config entry // Returns false if no match - addLogEntry("skip_dir evaluation for: " ~ name, ["debug"]); + if (debugLogging) {addLogEntry("skip_dir evaluation for: " ~ name, ["debug"]);} // Try full path match first if (!name.matchFirst(directoryMask).empty) { - addLogEntry("'!name.matchFirst(directoryMask).empty' returned true = matched", ["debug"]); + if (debugLogging) {addLogEntry("'!name.matchFirst(directoryMask).empty' returned true = matched", ["debug"]);} return true; } else { // Do we check the base name as well? 
if (!skipDirStrictMatch) { - addLogEntry("No Strict Matching Enforced", ["debug"]); + if (debugLogging) {addLogEntry("No Strict Matching Enforced", ["debug"]);} // Test the entire path working backwards from child string path = buildNormalizedPath(name); @@ -170,14 +176,14 @@ class ClientSideFiltering { // This will add a leading '/' but that needs to be stripped to check checkPath = "/" ~ directory ~ checkPath; if(!checkPath.strip('/').matchFirst(directoryMask).empty) { - addLogEntry("'!checkPath.matchFirst(directoryMask).empty' returned true = matched", ["debug"]); + if (debugLogging) {addLogEntry("'!checkPath.matchFirst(directoryMask).empty' returned true = matched", ["debug"]);} return true; } } } } else { // No match - addLogEntry("Strict Matching Enforced - No Match", ["debug"]); + if (debugLogging) {addLogEntry("Strict Matching Enforced - No Match", ["debug"]);} } } // no match @@ -189,7 +195,7 @@ class ClientSideFiltering { // Does the file name match skip_file config entry? // Returns true if the name matches a skip_file config entry // Returns false if no match - addLogEntry("skip_file evaluation for: " ~ name, ["debug"]); + if (debugLogging) {addLogEntry("skip_file evaluation for: " ~ name, ["debug"]);} // Try full path match first if (!name.matchFirst(fileMask).empty) { @@ -230,13 +236,14 @@ class ClientSideFiltering { path = buildPath("/", buildNormalizedPath(path)); // Evaluation start point, in order of what is checked as well - addLogEntry("******************* SYNC LIST RULES EVALUATION START *******************", ["debug"]); - addLogEntry("Evaluation against 'sync_list' rules for this input path: " ~ path, ["debug"]); - addLogEntry("[S]exludeExactMatch = " ~ to!string(exludeExactMatch), ["debug"]); - addLogEntry("[S]excludeParentMatched = " ~ to!string(excludeParentMatched), ["debug"]); - addLogEntry("[S]excludeAnywhereMatched = " ~ to!string(excludeAnywhereMatched), ["debug"]); - addLogEntry("[S]excludeWildcardMatched = " ~ 
to!string(excludeWildcardMatched), ["debug"]); - + if (debugLogging) { + addLogEntry("******************* SYNC LIST RULES EVALUATION START *******************", ["debug"]); + addLogEntry("Evaluation against 'sync_list' rules for this input path: " ~ path, ["debug"]); + addLogEntry("[S]exludeExactMatch = " ~ to!string(exludeExactMatch), ["debug"]); + addLogEntry("[S]excludeParentMatched = " ~ to!string(excludeParentMatched), ["debug"]); + addLogEntry("[S]excludeAnywhereMatched = " ~ to!string(excludeAnywhereMatched), ["debug"]); + addLogEntry("[S]excludeWildcardMatched = " ~ to!string(excludeWildcardMatched), ["debug"]); + } // Unless path is an exact match, entire sync_list entries need to be processed to ensure negative matches are also correctly detected foreach (syncListRuleEntry; syncListRules) { @@ -256,7 +263,7 @@ class ClientSideFiltering { // /path/to/foldername/*.extention = As there IS a preceding '/' .. this is a rule that should INCLUDE any item that has the specified extention in this path ONLY // /path/to/foldername/*/specific_target/* = As there IS a preceding '/' .. this INCLUDES 'specific_target' in any subfolder of '/path/to/foldername/' - addLogEntry("------------------------------ NEW RULE --------------------------------", ["debug"]); + if (debugLogging) {addLogEntry("------------------------------ NEW RULE --------------------------------", ["debug"]);} // Is this rule an 'exclude' or 'include' rule? bool thisIsAnExcludeRule = false; @@ -294,9 +301,9 @@ class ClientSideFiltering { // What 'sync_list' rule are we comparing against? if (thisIsAnExcludeRule) { - addLogEntry("Evaluation against EXCLUSION 'sync_list' rule: !" ~ syncListRuleEntry, ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against EXCLUSION 'sync_list' rule: !" 
~ syncListRuleEntry, ["debug"]);} } else { - addLogEntry("Evaluation against INCLUSION 'sync_list' rule: " ~ syncListRuleEntry, ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against INCLUSION 'sync_list' rule: " ~ syncListRuleEntry, ["debug"]);} } // Is path is an exact match of the 'sync_list' rule, or do the input path segments (directories) match the 'sync_list' rule? @@ -305,18 +312,18 @@ class ClientSideFiltering { // attempt to perform an exact segment match if (exactMatchRuleSegmentsToPathSegments(syncListRuleEntry, path)) { // EXACT PATH MATCH - addLogEntry("Exact path match with 'sync_list' rule entry", ["debug"]); + if (debugLogging) {addLogEntry("Exact path match with 'sync_list' rule entry", ["debug"]);} if (!thisIsAnExcludeRule) { // Include Rule - addLogEntry("Evaluation against 'sync_list' rule result: direct match", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: direct match", ["debug"]);} // final result finalResult = false; // direct match, break and search rules no more given include rule match break; } else { // Exclude rule - addLogEntry("Evaluation against 'sync_list' rule result: exclusion direct match - path to be excluded", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: exclusion direct match - path to be excluded", ["debug"]);} // flag exludeExactMatch so that a 'wildcard match' will not override this exclude exludeExactMatch = true; exclude = true; @@ -329,7 +336,7 @@ class ClientSideFiltering { // - This is so that paths in 'sync_list' as specified as /some path/another path/ actually get included|excluded correctly if (matchFirstSegmentToPathFirstSegment(syncListRuleEntry, path)) { // PARENT ROOT MATCH - addLogEntry("Parent root path match with 'sync_list' rule entry", ["debug"]); + if (debugLogging) {addLogEntry("Parent root path match with 'sync_list' rule entry", ["debug"]);} // Does the 'rest' of the input path match? 
// We only need to do this step if the input path has more and 1 segment (the parent folder) @@ -338,18 +345,18 @@ // More segments to check, so do a parental path match if (matchRuleSegmentsToPathSegments(syncListRuleEntry, path)) { // PARENTAL PATH MATCH - addLogEntry("Parental path match with 'sync_list' rule entry", ["debug"]); + if (debugLogging) {addLogEntry("Parental path match with 'sync_list' rule entry", ["debug"]);} if (!thisIsAnExcludeRule) { // Include Rule - addLogEntry("Evaluation against 'sync_list' rule result: parental path match", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: parental path match", ["debug"]);} // final result finalResult = false; // parental path match, break and search rules no more given include rule match break; } else { // Exclude rule - addLogEntry("Evaluation against 'sync_list' rule result: exclusion parental path match - path to be excluded", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: exclusion parental path match - path to be excluded", ["debug"]);} excludeParentMatched = true; exclude = true; // final result @@ -361,14 +368,14 @@ // No more segments to check if (!thisIsAnExcludeRule) { // Include Rule - addLogEntry("Evaluation against 'sync_list' rule result: parent root path match to rule", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: parent root path match to rule", ["debug"]);} // final result finalResult = false; // parental path match, break and search rules no more given include rule match break; } else { // Exclude rule - addLogEntry("Evaluation against 'sync_list' rule result: exclusion parent root path match to rul - path to be excluded", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: exclusion parent root path match to rul - path to be excluded", ["debug"]);} excludeParentMatched = true; exclude = 
true; // final result @@ -393,9 +400,9 @@ class ClientSideFiltering { // what sort of rule if (thisIsAnExcludeRule) { - addLogEntry("anywhere 'sync_list' exclusion rule: !" ~ syncListRuleEntry, ["debug"]); + if (debugLogging) {addLogEntry("anywhere 'sync_list' exclusion rule: !" ~ syncListRuleEntry, ["debug"]);} } else { - addLogEntry("anywhere 'sync_list' inclusion rule: " ~ syncListRuleEntry, ["debug"]); + if (debugLogging) {addLogEntry("anywhere 'sync_list' inclusion rule: " ~ syncListRuleEntry, ["debug"]);} } // this is an 'anywhere' rule @@ -411,11 +418,11 @@ class ClientSideFiltering { if (canFind(path, anywhereRuleStripped)) { // we matched the path to the rule - addLogEntry("anywhere rule 'canFind' MATCH", ["debug"]); + if (debugLogging) {addLogEntry("anywhere rule 'canFind' MATCH", ["debug"]);} anywhereRuleMatched = true; } else { // no 'canFind' match, try via regex - addLogEntry("No anywhere rule 'canFind' MATCH .. trying a regex match", ["debug"]); + if (debugLogging) {addLogEntry("No anywhere rule 'canFind' MATCH .. trying a regex match", ["debug"]);} // create regex from 'syncListRuleEntry' auto allowedMask = regex(createRegexCompatiblePath(syncListRuleEntry)); @@ -423,7 +430,7 @@ class ClientSideFiltering { // perform regex match attempt if (matchAll(path, allowedMask)) { // we regex matched the path to the rule - addLogEntry("anywhere rule 'matchAll via regex' MATCH", ["debug"]); + if (debugLogging) {addLogEntry("anywhere rule 'matchAll via regex' MATCH", ["debug"]);} anywhereRuleMatched = true; } } @@ -432,14 +439,14 @@ class ClientSideFiltering { if (anywhereRuleMatched) { // Is this an exclude rule? 
if (thisIsAnExcludeRule) { - addLogEntry("Evaluation against 'sync_list' rule result: anywhere rule matched and must be excluded", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: anywhere rule matched and must be excluded", ["debug"]);} excludeAnywhereMatched = true; exclude = true; finalResult = true; // anywhere match, break and search rules no more break; } else { - addLogEntry("Evaluation against 'sync_list' rule result: anywhere rule matched and must be included", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: anywhere rule matched and must be included", ["debug"]);} finalResult = false; excludeAnywhereMatched = false; // anywhere match, break and search rules no more @@ -455,9 +462,9 @@ class ClientSideFiltering { // sync_list rule contains some sort of wildcard sequence if (thisIsAnExcludeRule) { - addLogEntry("wildcard (* or **) exclusion rule: !" ~ syncListRuleEntry, ["debug"]); + if (debugLogging) {addLogEntry("wildcard (* or **) exclusion rule: !" 
~ syncListRuleEntry, ["debug"]);} } else { - addLogEntry("wildcard (* or **) inclusion rule: " ~ syncListRuleEntry, ["debug"]); + if (debugLogging) {addLogEntry("wildcard (* or **) inclusion rule: " ~ syncListRuleEntry, ["debug"]);} } // Is this a globbing rule (**) or just a single wildcard (*) entries @@ -466,7 +473,7 @@ class ClientSideFiltering { if (matchPathAgainstRule(path, syncListRuleEntry)) { // set the applicable flag wildcardRuleMatched = true; - addLogEntry("Evaluation against 'sync_list' rule result: globbing pattern match", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: globbing pattern match", ["debug"]);} } } else { // wildcard (*) rule processing @@ -475,13 +482,13 @@ class ClientSideFiltering { if (matchAll(path, allowedMask)) { // set the applicable flag wildcardRuleMatched = true; - addLogEntry("Evaluation against 'sync_list' rule result: wildcard pattern match", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: wildcard pattern match", ["debug"]);} } else { // matchAll no match ... try another way just to be sure if (matchPathAgainstRule(path, syncListRuleEntry)) { // set the applicable flag wildcardRuleMatched = true; - addLogEntry("Evaluation against 'sync_list' rule result: wildcard pattern match using segment matching", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: wildcard pattern match using segment matching", ["debug"]);} } } } @@ -491,13 +498,13 @@ class ClientSideFiltering { // Is this an exclude rule? 
if (thisIsAnExcludeRule) { // Yes exclude rule - addLogEntry("Evaluation against 'sync_list' rule result: wildcard|globbing rule matched and must be excluded", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: wildcard|globbing rule matched and must be excluded", ["debug"]);} excludeWildcardMatched = true; exclude = true; finalResult = true; } else { // include rule - addLogEntry("Evaluation against 'sync_list' rule result: wildcard|globbing pattern matched and must be included", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' rule result: wildcard|globbing pattern matched and must be included", ["debug"]);} finalResult = false; excludeWildcardMatched = false; } @@ -505,14 +512,17 @@ class ClientSideFiltering { } } - // Rule evaluation complete - addLogEntry("------------------------------------------------------------------------", ["debug"]); - // Interim results after checking each 'sync_list' rule against the input path - addLogEntry("[F]exludeExactMatch = " ~ to!string(exludeExactMatch), ["debug"]); - addLogEntry("[F]excludeParentMatched = " ~ to!string(excludeParentMatched), ["debug"]); - addLogEntry("[F]excludeAnywhereMatched = " ~ to!string(excludeAnywhereMatched), ["debug"]); - addLogEntry("[F]excludeWildcardMatched = " ~ to!string(excludeWildcardMatched), ["debug"]); + if (debugLogging) { + // Rule evaluation complete + addLogEntry("------------------------------------------------------------------------", ["debug"]); + + // Interim results after checking each 'sync_list' rule against the input path + addLogEntry("[F]exludeExactMatch = " ~ to!string(exludeExactMatch), ["debug"]); + addLogEntry("[F]excludeParentMatched = " ~ to!string(excludeParentMatched), ["debug"]); + addLogEntry("[F]excludeAnywhereMatched = " ~ to!string(excludeAnywhereMatched), ["debug"]); + addLogEntry("[F]excludeWildcardMatched = " ~ to!string(excludeWildcardMatched), ["debug"]); + } // If any of these exclude 
match items is true, then finalResult has to be flagged as true if ((exclude) || (exludeExactMatch) || (excludeParentMatched) || (excludeAnywhereMatched) || (excludeWildcardMatched)) { @@ -521,11 +531,11 @@ class ClientSideFiltering { // Final Result if (finalResult) { - addLogEntry("Evaluation against 'sync_list' final result: EXCLUDED as no rule included path", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' final result: EXCLUDED as no rule included path", ["debug"]);} } else { - addLogEntry("Evaluation against 'sync_list' final result: included for sync", ["debug"]); + if (debugLogging) {addLogEntry("Evaluation against 'sync_list' final result: included for sync", ["debug"]);} } - addLogEntry("******************* SYNC LIST RULES EVALUATION END *********************", ["debug"]); + if (debugLogging) {addLogEntry("******************* SYNC LIST RULES EVALUATION END *********************", ["debug"]);} return finalResult; } @@ -591,14 +601,16 @@ class ClientSideFiltering { } bool exactMatchRuleSegmentsToPathSegments(string rulePath, string inputPath) { - addLogEntry("Running exactMatchRuleSegmentsToPathSegments()", ["debug"]); + if (debugLogging) {addLogEntry("Running exactMatchRuleSegmentsToPathSegments()", ["debug"]);} // Split both paths by '/' auto ruleSegments = rulePath.strip.split("/").filter!(s => !s.empty).array; auto inputSegments = inputPath.strip.split("/").filter!(s => !s.empty).array; // Print rule and input segments for validation - addLogEntry("Rule Segments: " ~ to!string(ruleSegments), ["debug"]); - addLogEntry("Input Segments: " ~ to!string(inputSegments), ["debug"]); + if (debugLogging) { + addLogEntry("Rule Segments: " ~ to!string(ruleSegments), ["debug"]); + addLogEntry("Input Segments: " ~ to!string(inputSegments), ["debug"]); + } // If rule has more segments than input, or input has more segments than rule, no match is possible if ((ruleSegments.length > inputSegments.length) || ( inputSegments.length > 
ruleSegments.length)) { @@ -608,25 +620,27 @@ class ClientSideFiltering { // Iterate over each segment and compare for (size_t i = 0; i < ruleSegments.length; ++i) { if (ruleSegments[i] != inputSegments[i]) { - addLogEntry("Mismatch at segment " ~ to!string(i) ~ ": Rule Segment = " ~ ruleSegments[i] ~ ", Input Segment = " ~ inputSegments[i], ["debug"]); + if (debugLogging) {addLogEntry("Mismatch at segment " ~ to!string(i) ~ ": Rule Segment = " ~ ruleSegments[i] ~ ", Input Segment = " ~ inputSegments[i], ["debug"]);} return false; // Return false if any segment doesn't match } } // If all segments match, return true - addLogEntry("All segments matched: Rule Segments = " ~ to!string(ruleSegments) ~ ", Input Segments = " ~ to!string(inputSegments), ["debug"]); + if (debugLogging) {addLogEntry("All segments matched: Rule Segments = " ~ to!string(ruleSegments) ~ ", Input Segments = " ~ to!string(inputSegments), ["debug"]);} return true; } bool matchRuleSegmentsToPathSegments(string rulePath, string inputPath) { - addLogEntry("Running matchRuleSegmentsToPathSegments()", ["debug"]); + if (debugLogging) {addLogEntry("Running matchRuleSegmentsToPathSegments()", ["debug"]);} // Split both paths by '/' auto ruleSegments = rulePath.strip.split("/").filter!(s => !s.empty).array; auto inputSegments = inputPath.strip.split("/").filter!(s => !s.empty).array; // Print rule and input segments for validation - addLogEntry("Rule Segments: " ~ to!string(ruleSegments), ["debug"]); - addLogEntry("Input Segments: " ~ to!string(inputSegments), ["debug"]); + if (debugLogging) { + addLogEntry("Rule Segments: " ~ to!string(ruleSegments), ["debug"]); + addLogEntry("Input Segments: " ~ to!string(inputSegments), ["debug"]); + } // If rule has more segments than input, no match is possible if (ruleSegments.length > inputSegments.length) { @@ -638,14 +652,16 @@ class ClientSideFiltering { } bool matchFirstSegmentToPathFirstSegment(string rulePath, string inputPath) { - addLogEntry("Running 
matchFirstSegmentToPathFirstSegment()", ["debug"]); + if (debugLogging) {addLogEntry("Running matchFirstSegmentToPathFirstSegment()", ["debug"]);} // Split both paths by '/' auto ruleSegments = rulePath.strip.split("/").filter!(s => !s.empty).array; auto inputSegments = inputPath.strip.split("/").filter!(s => !s.empty).array; // Print rule and input segments for validation - addLogEntry("Rule Segments: " ~ to!string(ruleSegments), ["debug"]); - addLogEntry("Input Segments: " ~ to!string(inputSegments), ["debug"]); + if (debugLogging) { + addLogEntry("Rule Segments: " ~ to!string(ruleSegments), ["debug"]); + addLogEntry("Input Segments: " ~ to!string(inputSegments), ["debug"]); + } // Compare the first segments only return equal(ruleSegments[0], inputSegments[0]); diff --git a/src/config.d b/src/config.d index 68cddd5bf..dbc3069d3 100644 --- a/src/config.d +++ b/src/config.d @@ -101,9 +101,7 @@ class ApplicationConfig { // Application items that depend on application run-time environment, thus cannot be immutable // Public variables - // Logging output - bool verboseLogging = false; - bool debugLogging = false; + // Logging verbosity count long verbosityCount = 0; // Was the application just authorised - paste of response uri @@ -351,25 +349,25 @@ class ApplicationConfig { // Check for HOME environment variable if (environment.get("HOME") != ""){ // Use HOME environment variable - addLogEntry("runtime_environment: HOME environment variable detected, expansion of '~' should be possible", ["debug"]); + if (debugLogging) {addLogEntry("runtime_environment: HOME environment variable detected, expansion of '~' should be possible", ["debug"]);} defaultHomePath = environment.get("HOME"); shellEnvironmentSet = true; } else { if ((environment.get("SHELL") == "") && (environment.get("USER") == "")){ // No shell is set or username - observed case when running as systemd service under CentOS 7.x - addLogEntry("runtime_environment: No HOME, SHELL or USER environment variable 
configuration detected. Expansion of '~' not possible", ["debug"]); + if (debugLogging) {addLogEntry("runtime_environment: No HOME, SHELL or USER environment variable configuration detected. Expansion of '~' not possible", ["debug"]);} defaultHomePath = "/root"; shellEnvironmentSet = false; } else { // A shell & valid user is set, but no HOME is set, use ~ which can be expanded - addLogEntry("runtime_environment: SHELL and USER environment variable detected, expansion of '~' should be possible", ["debug"]); + if (debugLogging) {addLogEntry("runtime_environment: SHELL and USER environment variable detected, expansion of '~' should be possible", ["debug"]);} defaultHomePath = "~"; shellEnvironmentSet = true; } } // outcome of setting defaultHomePath - addLogEntry("runtime_environment: Calculated defaultHomePath: " ~ defaultHomePath, ["debug"]); + if (debugLogging) {addLogEntry("runtime_environment: Calculated defaultHomePath: " ~ defaultHomePath, ["debug"]);} // DEVELOPER OPTIONS // display_memory = true | false @@ -402,12 +400,12 @@ class ApplicationConfig { // A CLI 'confdir' was passed in // Clean up any stray " .. these should not be there for correct process handling of the configuration option confdirOption = strip(confdirOption,"\""); - addLogEntry("configDirName: CLI override to set configDirName to: " ~ confdirOption, ["debug"]); + if (debugLogging) {addLogEntry("configDirName: CLI override to set configDirName to: " ~ confdirOption, ["debug"]);} // For the passed in --confdir option .. 
if (canFind(confdirOption,"~")) { // A ~ was found - addLogEntry("configDirName: A '~' was found in configDirName, using the calculated 'defaultHomePath' to replace '~'", ["debug"]); + if (debugLogging) {addLogEntry("configDirName: A '~' was found in configDirName, using the calculated 'defaultHomePath' to replace '~'", ["debug"]);} configDirName = defaultHomePath ~ strip(confdirOption,"~","~"); } else { configDirName = confdirOption; @@ -415,18 +413,20 @@ class ApplicationConfig { } else { // Determine the base directory relative to which user specific configuration files should be stored if (environment.get("XDG_CONFIG_HOME") != ""){ - addLogEntry("configDirBase: XDG_CONFIG_HOME environment variable set", ["debug"]); + if (debugLogging) {addLogEntry("configDirBase: XDG_CONFIG_HOME environment variable set", ["debug"]);} configDirBase = environment.get("XDG_CONFIG_HOME"); } else { // XDG_CONFIG_HOME does not exist on systems where X11 is not present - ie - headless systems / servers - addLogEntry("configDirBase: WARNING - no XDG_CONFIG_HOME environment variable set", ["debug"]); + if (debugLogging) {addLogEntry("configDirBase: WARNING - no XDG_CONFIG_HOME environment variable set", ["debug"]);} configDirBase = buildNormalizedPath(buildPath(defaultHomePath, ".config")); } // Output configDirBase calculation - addLogEntry("configDirBase: " ~ configDirBase, ["debug"]); - // Set the calculated application configuration directory - addLogEntry("configDirName: Configuring application to use calculated config path", ["debug"]); + if (debugLogging) { + addLogEntry("configDirBase: " ~ configDirBase, ["debug"]); + // Set the calculated application configuration directory + addLogEntry("configDirName: Configuring application to use calculated config path", ["debug"]); + } // configDirBase contains the correct path so we do not need to check for presence of '~' configDirName = buildNormalizedPath(buildPath(configDirBase, "onedrive")); } @@ -484,17 +484,19 @@ class 
ApplicationConfig { syncListHashFile = buildNormalizedPath(buildPath(configDirName, ".sync_list.hash")); // Debug Output for application set variables based on configDirName - addLogEntry("refreshTokenFilePath = " ~ refreshTokenFilePath, ["debug"]); - addLogEntry("deltaLinkFilePath = " ~ deltaLinkFilePath, ["debug"]); - addLogEntry("databaseFilePath = " ~ databaseFilePath, ["debug"]); - addLogEntry("databaseFilePathDryRun = " ~ databaseFilePathDryRun, ["debug"]); - addLogEntry("uploadSessionFilePath = " ~ uploadSessionFilePath, ["debug"]); - addLogEntry("userConfigFilePath = " ~ userConfigFilePath, ["debug"]); - addLogEntry("syncListFilePath = " ~ syncListFilePath, ["debug"]); - addLogEntry("systemConfigFilePath = " ~ systemConfigFilePath, ["debug"]); - addLogEntry("configBackupFile = " ~ configBackupFile, ["debug"]); - addLogEntry("configHashFile = " ~ configHashFile, ["debug"]); - addLogEntry("syncListHashFile = " ~ syncListHashFile, ["debug"]); + if (debugLogging) { + addLogEntry("refreshTokenFilePath = " ~ refreshTokenFilePath, ["debug"]); + addLogEntry("deltaLinkFilePath = " ~ deltaLinkFilePath, ["debug"]); + addLogEntry("databaseFilePath = " ~ databaseFilePath, ["debug"]); + addLogEntry("databaseFilePathDryRun = " ~ databaseFilePathDryRun, ["debug"]); + addLogEntry("uploadSessionFilePath = " ~ uploadSessionFilePath, ["debug"]); + addLogEntry("userConfigFilePath = " ~ userConfigFilePath, ["debug"]); + addLogEntry("syncListFilePath = " ~ syncListFilePath, ["debug"]); + addLogEntry("systemConfigFilePath = " ~ systemConfigFilePath, ["debug"]); + addLogEntry("configBackupFile = " ~ configBackupFile, ["debug"]); + addLogEntry("configHashFile = " ~ configHashFile, ["debug"]); + addLogEntry("syncListHashFile = " ~ syncListHashFile, ["debug"]); + } // Configure the Hash and Backup File Permission Value string valueToConvert = to!string(defaultFilePermissionMode); @@ -513,7 +515,7 @@ class ApplicationConfig { // Is there a system configuration file? 
if (!exists(systemConfigFilePath)) { // 'system' configuration file does not exist - addLogEntry("No user or system config file found, using application defaults", ["verbose"]); + if (verboseLogging) {addLogEntry("No user or system config file found, using application defaults", ["verbose"]);} applicableConfigFilePath = userConfigFilePath; configurationInitialised = true; } else { @@ -558,11 +560,13 @@ class ApplicationConfig { // Advise the user path that we will use for the application state data if (canFind(applicableConfigFilePath, configDirName)) { - addLogEntry("Using 'user' configuration path for application config and state data: " ~ configDirName, ["verbose"]); + if (verboseLogging) {addLogEntry("Using 'user' configuration path for application config and state data: " ~ configDirName, ["verbose"]);} } else { if (canFind(applicableConfigFilePath, systemConfigDirName)) { - addLogEntry("Using 'system' configuration path for application config data: " ~ systemConfigDirName, ["verbose"]); - addLogEntry("Using 'user' configuration path for application state data: " ~ configDirName, ["verbose"]); + if (verboseLogging) { + addLogEntry("Using 'system' configuration path for application config data: " ~ systemConfigDirName, ["verbose"]); + addLogEntry("Using 'user' configuration path for application state data: " ~ configDirName, ["verbose"]); + } } } } @@ -576,7 +580,7 @@ class ApplicationConfig { if (!getValueBool("dry_run")) { // Is there a backup of the config file if the config file exists? 
if (exists(applicableConfigFilePath)) { - addLogEntry("Creating a backup of the applicable config file", ["debug"]); + if (debugLogging) {addLogEntry("Creating a backup of the applicable config file", ["debug"]);} // create backup copy of current config file try { std.file.copy(applicableConfigFilePath, configBackupFile); @@ -818,7 +822,7 @@ class ApplicationConfig { string tempApplicationId = strip(value); if (tempApplicationId.empty) { addLogEntry("Invalid value for key in config file - using default value: " ~ key); - addLogEntry("application_id in config file cannot be empty - using default application_id", ["debug"]); + if (debugLogging) {addLogEntry("application_id in config file cannot be empty - using default application_id", ["debug"]);} setValueString("application_id", defaultApplicationId); } } else if (key == "drive_id") { @@ -826,7 +830,7 @@ class ApplicationConfig { if (tempDriveId.empty) { addLogEntry(); addLogEntry("Invalid value for key in config file: " ~ key); - addLogEntry("drive_id in config file cannot be empty - this is a fatal error and must be corrected by removing this entry from your config file.", ["debug"]); + if (debugLogging) {addLogEntry("drive_id in config file cannot be empty - this is a fatal error and must be corrected by removing this entry from your config file.", ["debug"]);} addLogEntry(); forceExit(); } else { @@ -836,7 +840,7 @@ class ApplicationConfig { string tempLogDir = strip(value); if (tempLogDir.empty) { addLogEntry("Invalid value for key in config file - using default value: " ~ key); - addLogEntry("log_dir in config file cannot be empty - using default log_dir", ["debug"]); + if (debugLogging) {addLogEntry("log_dir in config file cannot be empty - using default log_dir", ["debug"]);} setValueString("log_dir", defaultLogFileDir); } } @@ -1228,14 +1232,14 @@ class ApplicationConfig { // Does the 'currently configured' tempAuthUrl include a ~ if (canFind(tempAuthUrl, "~")) { // A ~ was found in auth_files(authURL) - 
addLogEntry("auth_files: A '~' was found in 'auth_files(authURL)', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]); + if (debugLogging) {addLogEntry("auth_files: A '~' was found in 'auth_files(authURL)', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]);} tempAuthUrl = buildNormalizedPath(buildPath(defaultHomePath, strip(tempAuthUrl, "~"))); } // Does the 'currently configured' tempAuthUrl include a ~ if (canFind(tempResponseUrl, "~")) { // A ~ was found in auth_files(authURL) - addLogEntry("auth_files: A '~' was found in 'auth_files(tempResponseUrl)', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]); + if (debugLogging) {addLogEntry("auth_files: A '~' was found in 'auth_files(tempResponseUrl)', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]);} tempResponseUrl = buildNormalizedPath(buildPath(defaultHomePath, strip(tempResponseUrl, "~"))); } } else { @@ -1243,21 +1247,21 @@ class ApplicationConfig { // Does the 'currently configured' tempAuthUrl include a ~ if (canFind(tempAuthUrl, "~")) { // A ~ was found in auth_files(authURL) - addLogEntry("auth_files: A '~' was found in the configured 'auth_files(authURL)', automatically expanding as SHELL and USER environment variable is set", ["debug"]); + if (debugLogging) {addLogEntry("auth_files: A '~' was found in the configured 'auth_files(authURL)', automatically expanding as SHELL and USER environment variable is set", ["debug"]);} tempAuthUrl = expandTilde(tempAuthUrl); } // Does the 'currently configured' tempAuthUrl include a ~ if (canFind(tempResponseUrl, "~")) { // A ~ was found in auth_files(authURL) - addLogEntry("auth_files: A '~' was found in the configured 'auth_files(tempResponseUrl)', automatically expanding as SHELL and USER environment variable is set", ["debug"]); + if (debugLogging) 
{addLogEntry("auth_files: A '~' was found in the configured 'auth_files(tempResponseUrl)', automatically expanding as SHELL and USER environment variable is set", ["debug"]);} tempResponseUrl = expandTilde(tempResponseUrl); } } // Build new string newAuthFilesString = tempAuthUrl ~ ":" ~ tempResponseUrl; - addLogEntry("auth_files - updated value: " ~ newAuthFilesString, ["debug"]); + if (debugLogging) {addLogEntry("auth_files - updated value: " ~ newAuthFilesString, ["debug"]);} setValueString("auth_files", newAuthFilesString); } @@ -1499,7 +1503,7 @@ class ApplicationConfig { } // What did the user enter? - addLogEntry("--resync warning User Response Entered: " ~ to!string(response), ["debug"]); + if (debugLogging) {addLogEntry("--resync warning User Response Entered: " ~ to!string(response), ["debug"]);} // Evaluate user response if ((to!string(response) == "y") || (to!string(response) == "Y")) { @@ -1545,7 +1549,7 @@ class ApplicationConfig { } // What did the user enter? - addLogEntry("--force-sync warning User Response Entered: " ~ to!string(response), ["debug"]); + if (debugLogging) {addLogEntry("--force-sync warning User Response Entered: " ~ to!string(response), ["debug"]);} // Evaluate user response if ((to!string(response) == "y") || (to!string(response) == "Y")) { @@ -1585,7 +1589,7 @@ class ApplicationConfig { // Helper lambda for logging and setting the difference flag auto logAndSetDifference = (string message, size_t index) { - addLogEntry(message, ["debug"]); + if (debugLogging) {addLogEntry(message, ["debug"]);} configOptionsDifferent[index] = true; }; @@ -1596,7 +1600,7 @@ class ApplicationConfig { // Check for updates in the config file if (currentConfigHash != previousConfigHash) { addLogEntry("Application configuration file has been updated, checking if --resync needed"); - addLogEntry("Using this configBackupFile: " ~ configBackupFile, ["debug"]); + if (debugLogging) {addLogEntry("Using this configBackupFile: " ~ configBackupFile, ["debug"]);} 
if (exists(configBackupFile)) { string[string] backupConfigStringValues; @@ -1648,7 +1652,7 @@ class ApplicationConfig { if (!c.empty) { c.popFront(); // skip the whole match string key = c.front.dup; - addLogEntry("Backup Config Key: " ~ key, ["debug"]); + if (debugLogging) {addLogEntry("Backup Config Key: " ~ key, ["debug"]);} auto p = key in backupConfigStringValues; if (p) { @@ -1772,7 +1776,7 @@ class ApplicationConfig { void cleanupHashFilesDueToResync() { if (!getValueBool("dry_run")) { // cleanup hash files - addLogEntry("Cleaning up configuration hash files", ["debug"]); + if (debugLogging) {addLogEntry("Cleaning up configuration hash files", ["debug"]);} safeRemove(configHashFile); safeRemove(syncListHashFile); } else { @@ -1789,7 +1793,7 @@ class ApplicationConfig { // Update applicable 'config' files if (exists(applicableConfigFilePath)) { // Update the hash of the applicable config file - addLogEntry("Updating applicable config file hash", ["debug"]); + if (debugLogging) {addLogEntry("Updating applicable config file hash", ["debug"]);} try { std.file.write(configHashFile, computeQuickXorHash(applicableConfigFilePath)); // Hash file should only be readable by the user who created it - 0600 permissions needed @@ -1802,7 +1806,7 @@ class ApplicationConfig { // Update 'sync_list' files if (exists(syncListFilePath)) { // update sync_list hash - addLogEntry("Updating sync_list hash", ["debug"]); + if (debugLogging) {addLogEntry("Updating sync_list hash", ["debug"]);} try { std.file.write(syncListHashFile, computeQuickXorHash(syncListFilePath)); // Hash file should only be readable by the user who created it - 0600 permissions needed @@ -1914,9 +1918,9 @@ class ApplicationConfig { operationalConflictDetected = true; } else { // Debug log output what permissions are being set to - addLogEntry("Configuring default new folder permissions as: " ~ to!string(getValueLong("sync_dir_permissions")), ["debug"]); + if (debugLogging) {addLogEntry("Configuring default new 
folder permissions as: " ~ to!string(getValueLong("sync_dir_permissions")), ["debug"]);} configureRequiredDirectoryPermisions(); - addLogEntry("Configuring default new file permissions as: " ~ to!string(getValueLong("sync_file_permissions")), ["debug"]); + if (debugLogging) {addLogEntry("Configuring default new file permissions as: " ~ to!string(getValueLong("sync_file_permissions")), ["debug"]);} configureRequiredFilePermisions(); } @@ -2178,16 +2182,20 @@ class ApplicationConfig { // Reset skip_file and skip_dir to application defaults when --force-sync is used void resetSkipToDefaults() { // skip_file - addLogEntry("original skip_file: " ~ getValueString("skip_file"), ["debug"]); - addLogEntry("resetting skip_file to application defaults", ["debug"]); + if (debugLogging) { + addLogEntry("original skip_file: " ~ getValueString("skip_file"), ["debug"]); + addLogEntry("resetting skip_file to application defaults", ["debug"]); + } setValueString("skip_file", defaultSkipFile); - addLogEntry("reset skip_file: " ~ getValueString("skip_file"), ["debug"]); + if (debugLogging) {addLogEntry("reset skip_file: " ~ getValueString("skip_file"), ["debug"]);} // skip_dir - addLogEntry("original skip_dir: " ~ getValueString("skip_dir"), ["debug"]); - addLogEntry("resetting skip_dir to application defaults", ["debug"]); + if (debugLogging) { + addLogEntry("original skip_dir: " ~ getValueString("skip_dir"), ["debug"]); + addLogEntry("resetting skip_dir to application defaults", ["debug"]); + } setValueString("skip_dir", defaultSkipDir); - addLogEntry("reset skip_dir: " ~ getValueString("skip_dir"), ["debug"]); + if (debugLogging) {addLogEntry("reset skip_dir: " ~ getValueString("skip_dir"), ["debug"]);} } // Initialise the correct 'sync_dir' expanding any '~' if present @@ -2195,43 +2203,43 @@ class ApplicationConfig { string runtimeSyncDirectory; - addLogEntry("sync_dir: Setting runtimeSyncDirectory from config value 'sync_dir'", ["debug"]); + if (debugLogging) 
{addLogEntry("sync_dir: Setting runtimeSyncDirectory from config value 'sync_dir'", ["debug"]);} if (!shellEnvironmentSet){ - addLogEntry("sync_dir: No SHELL or USER environment variable configuration detected", ["debug"]); + if (debugLogging) {addLogEntry("sync_dir: No SHELL or USER environment variable configuration detected", ["debug"]);} // No shell or user set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker // Does the 'currently configured' sync_dir include a ~ if (canFind(getValueString("sync_dir"), "~")) { // A ~ was found in sync_dir - addLogEntry("sync_dir: A '~' was found in 'sync_dir', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]); + if (debugLogging) {addLogEntry("sync_dir: A '~' was found in 'sync_dir', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]);} runtimeSyncDirectory = buildNormalizedPath(buildPath(defaultHomePath, strip(getValueString("sync_dir"), "~"))); } else { // No ~ found in sync_dir, use as is - addLogEntry("sync_dir: Using configured 'sync_dir' path as-is as no SHELL or USER environment variable configuration detected", ["debug"]); + if (debugLogging) {addLogEntry("sync_dir: Using configured 'sync_dir' path as-is as no SHELL or USER environment variable configuration detected", ["debug"]);} runtimeSyncDirectory = getValueString("sync_dir"); } } else { // A shell and user environment variable is set, expand any ~ as this will be expanded correctly if present if (canFind(getValueString("sync_dir"), "~")) { - addLogEntry("sync_dir: A '~' was found in the configured 'sync_dir', automatically expanding as SHELL and USER environment variable is set", ["debug"]); + if (debugLogging) {addLogEntry("sync_dir: A '~' was found in the configured 'sync_dir', automatically expanding as SHELL and USER environment variable is set", ["debug"]);} runtimeSyncDirectory = 
expandTilde(getValueString("sync_dir")); } else { // No ~ found in sync_dir, does the path begin with a '/' ? - addLogEntry("sync_dir: Using configured 'sync_dir' path as-is as however SHELL or USER environment variable configuration detected - should be placed in USER home directory", ["debug"]); + if (debugLogging) {addLogEntry("sync_dir: Using configured 'sync_dir' path as-is as however SHELL or USER environment variable configuration detected - should be placed in USER home directory", ["debug"]);} if (!startsWith(getValueString("sync_dir"), "/")) { - addLogEntry("Configured 'sync_dir' does not start with a '/' or '~/' - adjusting configured 'sync_dir' to use User Home Directory as base for 'sync_dir' path", ["debug"]); + if (debugLogging) {addLogEntry("Configured 'sync_dir' does not start with a '/' or '~/' - adjusting configured 'sync_dir' to use User Home Directory as base for 'sync_dir' path", ["debug"]);} string updatedPathWithHome = "~/" ~ getValueString("sync_dir"); runtimeSyncDirectory = expandTilde(updatedPathWithHome); } else { - addLogEntry("use 'sync_dir' as is - no touch", ["debug"]); + if (debugLogging) {addLogEntry("use 'sync_dir' as is - no touch", ["debug"]);} runtimeSyncDirectory = getValueString("sync_dir"); } } } // What will runtimeSyncDirectory be actually set to? 
- addLogEntry("sync_dir: runtimeSyncDirectory set to: " ~ runtimeSyncDirectory, ["debug"]); + if (debugLogging) {addLogEntry("sync_dir: runtimeSyncDirectory set to: " ~ runtimeSyncDirectory, ["debug"]);} // Configure configuredBusinessSharedFilesDirectoryName configuredBusinessSharedFilesDirectoryName = buildNormalizedPath(buildPath(runtimeSyncDirectory, defaultBusinessSharedFilesDirectoryName)); @@ -2244,7 +2252,7 @@ class ApplicationConfig { string configuredLogDirPath; - addLogEntry("log_dir: Setting runtime application log from config value 'log_dir'", ["debug"]); + if (debugLogging) {addLogEntry("log_dir: Setting runtime application log from config value 'log_dir'", ["debug"]);} if (getValueString("log_dir") != defaultLogFileDir) { // User modified 'log_dir' to be used with 'enable_logging' @@ -2253,11 +2261,11 @@ class ApplicationConfig { // ~ needs to be expanded correctly if (!shellEnvironmentSet) { // No shell or user environment variable set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker - addLogEntry("log_dir: A '~' was found in log_dir, using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]); + if (debugLogging) {addLogEntry("log_dir: A '~' was found in log_dir, using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]);} configuredLogDirPath = buildNormalizedPath(buildPath(defaultHomePath, strip(getValueString("log_dir"), "~"))); } else { // A shell and user environment variable is set, expand any ~ as this will be expanded correctly if present - addLogEntry("log_dir: A '~' was found in the configured 'log_dir', automatically expanding as SHELL and USER environment variable is set", ["debug"]); + if (debugLogging) {addLogEntry("log_dir: A '~' was found in the configured 'log_dir', automatically expanding as SHELL and USER environment variable is set", ["debug"]);} configuredLogDirPath = 
expandTilde(getValueString("log_dir")); } } else { @@ -2291,13 +2299,6 @@ class ApplicationConfig { return configuredLogDirPath; } - void setConfigLoggingLevels(bool verboseLoggingInput, bool debugLoggingInput, long verbosityCountInput) { - // set the appConfig logging values - verboseLogging = verboseLoggingInput; - debugLogging = debugLoggingInput; - verbosityCount = verbosityCountInput; - } - // What IP protocol is going to be used to access Microsoft OneDrive void displayIPProtocol() { if (getValueLong("ip_protocol_version") == 0) addLogEntry("Using IPv4 and IPv6 (if configured) for all network operations"); @@ -2418,15 +2419,15 @@ class ApplicationConfig { // Output the result if (xdg_exists) { - addLogEntry("runtime_environment: XDG_RUNTIME_DIR exists with value: " ~ xdg_value , ["debug"]); + if (debugLogging) {addLogEntry("runtime_environment: XDG_RUNTIME_DIR exists with value: " ~ xdg_value , ["debug"]);} } else { - addLogEntry("runtime_environment: XDG_RUNTIME_DIR missing from runtime user environment", ["debug"]); + if (debugLogging) {addLogEntry("runtime_environment: XDG_RUNTIME_DIR missing from runtime user environment", ["debug"]);} } if (dbus_exists) { - addLogEntry("runtime_environment: DBUS_SESSION_BUS_ADDRESS exists with value: " ~ dbus_value, ["debug"]); + if (debugLogging) {addLogEntry("runtime_environment: DBUS_SESSION_BUS_ADDRESS exists with value: " ~ dbus_value, ["debug"]);} } else { - addLogEntry("runtime_environment: DBUS_SESSION_BUS_ADDRESS missing from runtime user environment", ["debug"]); + if (debugLogging) {addLogEntry("runtime_environment: DBUS_SESSION_BUS_ADDRESS missing from runtime user environment", ["debug"]);} } // Determine result diff --git a/src/curlEngine.d b/src/curlEngine.d index a07e5faad..ca9ee00f4 100644 --- a/src/curlEngine.d +++ b/src/curlEngine.d @@ -64,7 +64,7 @@ class CurlResponse { json = content.parseJSON(); } catch (JSONException e) { // Log that a JSON Exception was caught, dont output the HTML response from 
OneDrive - addLogEntry("JSON Exception caught when performing HTTP operations - use --debug-https to diagnose further", ["debug"]); + if (debugLogging) {addLogEntry("JSON Exception caught when performing HTTP operations - use --debug-https to diagnose further", ["debug"]);} } return json; }; @@ -73,8 +73,10 @@ class CurlResponse { hasResponse = true; this.responseHeaders = http.responseHeaders(); this.statusLine = http.statusLine; - addLogEntry("HTTP Response Headers: " ~ to!string(this.responseHeaders), ["debug"]); - addLogEntry("HTTP Status Line: " ~ to!string(this.statusLine), ["debug"]); + if (debugLogging) { + addLogEntry("HTTP Response Headers: " ~ to!string(this.responseHeaders), ["debug"]); + addLogEntry("HTTP Status Line: " ~ to!string(this.statusLine), ["debug"]); + } } @safe pure HTTP.StatusLine getStatus() { @@ -87,15 +89,17 @@ class CurlResponse { // Is 'retry-after' in the response headers if ("retry-after" in responseHeaders) { // Set the retry-after value - addLogEntry("curlEngine.http.perform() => Received a 'Retry-After' Header Response with the following value: " ~ to!string(responseHeaders["retry-after"]), ["debug"]); - addLogEntry("curlEngine.http.perform() => Setting retryAfterValue to: " ~ responseHeaders["retry-after"], ["debug"]); + if (debugLogging) { + addLogEntry("curlEngine.http.perform() => Received a 'Retry-After' Header Response with the following value: " ~ to!string(responseHeaders["retry-after"]), ["debug"]); + addLogEntry("curlEngine.http.perform() => Setting retryAfterValue to: " ~ responseHeaders["retry-after"], ["debug"]); + } delayBeforeRetry = to!int(responseHeaders["retry-after"]); } else { // Use a 120 second delay as a default given header value was zero // This value is based on log files and data when determining correct process for 429 response handling delayBeforeRetry = 120; // Update that we are over-riding the provided value with a default - addLogEntry("HTTP Response Header retry-after value was missing - Using a 
preconfigured default of: " ~ to!string(delayBeforeRetry), ["debug"]); + if (debugLogging) {addLogEntry("HTTP Response Header retry-after value was missing - Using a preconfigured default of: " ~ to!string(delayBeforeRetry), ["debug"]);} } return delayBeforeRetry; } @@ -209,14 +213,16 @@ class CurlEngine { // We are releasing a curl instance back to the pool void releaseEngine() { // Log that we are releasing this engine back to the pool - addLogEntry("CurlEngine releaseEngine() called on instance id: " ~ to!string(internalThreadId), ["debug"]); - addLogEntry("CurlEngine curlEnginePool size before release: " ~ to!string(curlEnginePool.length), ["debug"]); + if (debugLogging) { + addLogEntry("CurlEngine releaseEngine() called on instance id: " ~ to!string(internalThreadId), ["debug"]); + addLogEntry("CurlEngine curlEnginePool size before release: " ~ to!string(curlEnginePool.length), ["debug"]); + } // cleanup this curl instance before putting it back in the pool cleanup(true); // Cleanup instance by resetting values and flushing cookie cache synchronized (CurlEngine.classinfo) { curlEnginePool ~= this; - addLogEntry("CurlEngine curlEnginePool size after release: " ~ to!string(curlEnginePool.length), ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine curlEnginePool size after release: " ~ to!string(curlEnginePool.length), ["debug"]);} } // Perform Garbage Collection GC.collect(); @@ -299,13 +305,15 @@ class CurlEngine { if (httpsDebug) { // Output what options we are using so that in the debug log this can be tracked - addLogEntry("http.dnsTimeout = " ~ to!string(dnsTimeout), ["debug"]); - addLogEntry("http.connectTimeout = " ~ to!string(connectTimeout), ["debug"]); - addLogEntry("http.dataTimeout = " ~ to!string(dataTimeout), ["debug"]); - addLogEntry("http.operationTimeout = " ~ to!string(operationTimeout), ["debug"]); - addLogEntry("http.maxRedirects = " ~ to!string(maxRedirects), ["debug"]); - addLogEntry("http.CurlOption.ipresolve = " ~ 
to!string(protocolVersion), ["debug"]); - addLogEntry("http.header.Connection.keepAlive = " ~ to!string(keepAlive), ["debug"]); + if (debugLogging) { + addLogEntry("http.dnsTimeout = " ~ to!string(dnsTimeout), ["debug"]); + addLogEntry("http.connectTimeout = " ~ to!string(connectTimeout), ["debug"]); + addLogEntry("http.dataTimeout = " ~ to!string(dataTimeout), ["debug"]); + addLogEntry("http.operationTimeout = " ~ to!string(operationTimeout), ["debug"]); + addLogEntry("http.maxRedirects = " ~ to!string(maxRedirects), ["debug"]); + addLogEntry("http.CurlOption.ipresolve = " ~ to!string(protocolVersion), ["debug"]); + addLogEntry("http.header.Connection.keepAlive = " ~ to!string(keepAlive), ["debug"]); + } } } @@ -419,7 +427,7 @@ class CurlEngine { // Cleanup this instance internal variables that may have been set void cleanup(bool flushCookies = false) { // Reset any values to defaults, freeing any set objects - addLogEntry("CurlEngine cleanup() called on instance id: " ~ to!string(internalThreadId), ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine cleanup() called on instance id: " ~ to!string(internalThreadId), ["debug"]);} // Is the instance is stopped? if (!http.isStopped) { @@ -456,21 +464,23 @@ class CurlEngine { // Shut down the curl instance & close any open sockets void shutdownCurlHTTPInstance() { // Log that we are attempting to shutdown this curl instance - addLogEntry("CurlEngine shutdownCurlHTTPInstance() called on instance id: " ~ to!string(internalThreadId), ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine shutdownCurlHTTPInstance() called on instance id: " ~ to!string(internalThreadId), ["debug"]);} // Is this curl instance is stopped? 
if (!http.isStopped) { - addLogEntry("HTTP instance still active: " ~ to!string(internalThreadId), ["debug"]); - addLogEntry("HTTP instance isStopped state before http.shutdown(): " ~ to!string(http.isStopped), ["debug"]); + if (debugLogging) { + addLogEntry("HTTP instance still active: " ~ to!string(internalThreadId), ["debug"]); + addLogEntry("HTTP instance isStopped state before http.shutdown(): " ~ to!string(http.isStopped), ["debug"]); + } http.shutdown(); - addLogEntry("HTTP instance isStopped state post http.shutdown(): " ~ to!string(http.isStopped), ["debug"]); + if (debugLogging) {addLogEntry("HTTP instance isStopped state post http.shutdown(): " ~ to!string(http.isStopped), ["debug"]);} object.destroy(http); // Destroy, however we cant set to null - addLogEntry("HTTP instance shutdown and destroyed: " ~ to!string(internalThreadId), ["debug"]); + if (debugLogging) {addLogEntry("HTTP instance shutdown and destroyed: " ~ to!string(internalThreadId), ["debug"]);} } else { // Already stopped .. 
destroy it object.destroy(http); // Destroy, however we cant set to null - addLogEntry("Stopped HTTP instance shutdown and destroyed: " ~ to!string(internalThreadId), ["debug"]); + if (debugLogging) {addLogEntry("Stopped HTTP instance shutdown and destroyed: " ~ to!string(internalThreadId), ["debug"]);} } // Perform Garbage Collection GC.collect(); @@ -483,14 +493,14 @@ class CurlEngine { // Get a curl instance for the OneDrive API to use CurlEngine getCurlInstance() { - addLogEntry("CurlEngine getCurlInstance() called", ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine getCurlInstance() called", ["debug"]);} synchronized (CurlEngine.classinfo) { // What is the current pool size - addLogEntry("CurlEngine curlEnginePool current size: " ~ to!string(curlEnginePool.length), ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine curlEnginePool current size: " ~ to!string(curlEnginePool.length), ["debug"]);} if (curlEnginePool.empty) { - addLogEntry("CurlEngine curlEnginePool is empty - constructing a new CurlEngine instance", ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine curlEnginePool is empty - constructing a new CurlEngine instance", ["debug"]);} return new CurlEngine; // Constructs a new CurlEngine with a fresh HTTP instance } else { CurlEngine curlEngine = curlEnginePool[$ - 1]; @@ -499,12 +509,14 @@ CurlEngine getCurlInstance() { // Is this engine stopped? 
if (curlEngine.http.isStopped) { // return a new curl engine as a stopped one cannot be used - addLogEntry("CurlEngine was in a stopped state (not usable) - constructing a new CurlEngine instance", ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine was in a stopped state (not usable) - constructing a new CurlEngine instance", ["debug"]);} return new CurlEngine; // Constructs a new CurlEngine with a fresh HTTP instance } else { // return an existing curl engine - addLogEntry("CurlEngine was in a valid state - returning existing CurlEngine instance", ["debug"]); - addLogEntry("CurlEngine instance ID: " ~ curlEngine.internalThreadId, ["debug"]); + if (debugLogging) { + addLogEntry("CurlEngine was in a valid state - returning existing CurlEngine instance", ["debug"]); + addLogEntry("CurlEngine instance ID: " ~ curlEngine.internalThreadId, ["debug"]); + } return curlEngine; } } @@ -513,10 +525,10 @@ CurlEngine getCurlInstance() { // Release all CurlEngine instances void releaseAllCurlInstances() { - addLogEntry("CurlEngine releaseAllCurlInstances() called", ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine releaseAllCurlInstances() called", ["debug"]);} synchronized (CurlEngine.classinfo) { // What is the current pool size - addLogEntry("CurlEngine curlEnginePool size to release: " ~ to!string(curlEnginePool.length), ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine curlEnginePool size to release: " ~ to!string(curlEnginePool.length), ["debug"]);} if (curlEnginePool.length > 0) { // Safely iterate and clean up each CurlEngine instance foreach (curlEngineInstance; curlEnginePool) { @@ -534,7 +546,7 @@ void releaseAllCurlInstances() { // Perform Garbage Collection on this destroyed curl engine GC.collect(); // Log release - addLogEntry("CurlEngine released", ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine released", ["debug"]);} } // Clear the array after all instances have been handled @@ -546,7 +558,7 @@ void releaseAllCurlInstances() { // 
Return free memory to the OS GC.minimize(); // Log that all curl engines have been released - addLogEntry("CurlEngine releaseAllCurlInstances() completed", ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine releaseAllCurlInstances() completed", ["debug"]);} } // Return how many curl engines there are diff --git a/src/itemdb.d b/src/itemdb.d index 1e77eb9fe..3b2f89acf 100644 --- a/src/itemdb.d +++ b/src/itemdb.d @@ -127,21 +127,21 @@ Item makeDatabaseItem(JSONValue driveItem) { bool typeSet = false; if (isItemFile(driveItem)) { // 'file' object exists in the JSON - addLogEntry("Flagging object as a file", ["debug"]); + if (debugLogging) {addLogEntry("Flagging object as a file", ["debug"]);} typeSet = true; item.type = ItemType.file; } if (isItemFolder(driveItem)) { // 'folder' object exists in the JSON - addLogEntry("Flagging object as a directory", ["debug"]); + if (debugLogging) {addLogEntry("Flagging object as a directory", ["debug"]);} typeSet = true; item.type = ItemType.dir; } if (isItemRemote(driveItem)) { // 'remote' object exists in the JSON - addLogEntry("Flagging object as a remote", ["debug"]); + if (debugLogging) {addLogEntry("Flagging object as a remote", ["debug"]);} typeSet = true; item.type = ItemType.remote; } @@ -163,7 +163,7 @@ Item makeDatabaseItem(JSONValue driveItem) { if ("quickXorHash" in driveItem["file"]["hashes"]) { item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str; } else { - addLogEntry("quickXorHash is missing from " ~ driveItem["id"].str, ["debug"]); + if (debugLogging) {addLogEntry("quickXorHash is missing from " ~ driveItem["id"].str, ["debug"]);} } // If quickXorHash is empty .. 
@@ -172,7 +172,7 @@ Item makeDatabaseItem(JSONValue driveItem) { if ("sha256Hash" in driveItem["file"]["hashes"]) { item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str; } else { - addLogEntry("sha256Hash is missing from " ~ driveItem["id"].str, ["debug"]); + if (debugLogging) {addLogEntry("sha256Hash is missing from " ~ driveItem["id"].str, ["debug"]);} } } } else { @@ -296,7 +296,7 @@ final class ItemDatabase { // What is the threadsafe value auto threadsafeValue = db.getThreadsafeValue(); - addLogEntry("SQLite Threadsafe database value: " ~ to!string(threadsafeValue), ["debug"]); + if (debugLogging) {addLogEntry("SQLite Threadsafe database value: " ~ to!string(threadsafeValue), ["debug"]);} try { // Set the enforcement of foreign key constraints. @@ -613,12 +613,12 @@ final class ItemDatabase { // If the item is of type remote, substitute it with the child if (currItem.type == ItemType.remote) { - addLogEntry("Record is a Remote Object: " ~ to!string(currItem), ["debug"]); + if (debugLogging) {addLogEntry("Record is a Remote Object: " ~ to!string(currItem), ["debug"]);} Item child; if (selectById(currItem.remoteDriveId, currItem.remoteId, child)) { assert(child.type != ItemType.remote, "The type of the child cannot be remote"); currItem = child; - addLogEntry("Selecting Record that is NOT Remote Object: " ~ to!string(currItem), ["debug"]); + if (debugLogging) {addLogEntry("Selecting Record that is NOT Remote Object: " ~ to!string(currItem), ["debug"]);} } } } @@ -654,7 +654,7 @@ final class ItemDatabase { } if (currItem.type == ItemType.remote) { - addLogEntry("Record selected is a Remote Object: " ~ to!string(currItem), ["debug"]); + if (debugLogging) {addLogEntry("Record selected is a Remote Object: " ~ to!string(currItem), ["debug"]);} } item = currItem; @@ -845,10 +845,10 @@ final class ItemDatabase { id = r2.front[1].dup; } } else { - // broken tree - addLogEntry("The following generated a broken tree query:", ["debug"]); - addLogEntry("Drive ID: 
" ~ to!string(driveId), ["debug"]); - addLogEntry("Item ID: " ~ to!string(id), ["debug"]); + // broken database tree + addLogEntry("The following generated a broken database tree query:"); + addLogEntry("Drive ID: " ~ to!string(driveId)); + addLogEntry("Item ID: " ~ to!string(id)); assert(0); } } @@ -885,8 +885,10 @@ final class ItemDatabase { string getDeltaLink(const(char)[] driveId, const(char)[] id) { synchronized(databaseLock) { // Log what we received - addLogEntry("DeltaLink Query (driveId): " ~ to!string(driveId), ["debug"]); - addLogEntry("DeltaLink Query (id): " ~ to!string(id), ["debug"]); + if (debugLogging) { + addLogEntry("DeltaLink Query (driveId): " ~ to!string(driveId), ["debug"]); + addLogEntry("DeltaLink Query (id): " ~ to!string(id), ["debug"]); + } // assert if these are null assert(driveId && id); @@ -1054,7 +1056,7 @@ final class ItemDatabase { void performCheckpoint() { synchronized(databaseLock) { // Log what we are attempting to do - addLogEntry("Attempting to perform a database checkpoint to merge temporary data", ["debug"]); + if (debugLogging) {addLogEntry("Attempting to perform a database checkpoint to merge temporary data", ["debug"]);} try { // Check the current DB Status - we have to be in a clean state here @@ -1077,7 +1079,7 @@ final class ItemDatabase { // Ensure there are no pending operations by performing a checkpoint db.exec("PRAGMA wal_checkpoint(TRUNCATE);"); - addLogEntry("Database checkpoint is complete", ["debug"]); + if (debugLogging) {addLogEntry("Database checkpoint is complete", ["debug"]);} } catch (SqliteException exception) { addLogEntry(); diff --git a/src/log.d b/src/log.d index 4dd352859..56556ec59 100644 --- a/src/log.d +++ b/src/log.d @@ -21,7 +21,11 @@ version(Notifications) { import dnotify; } -// Shared module object +// Shared Application Logging Level Variables +__gshared bool verboseLogging = false; +__gshared bool debugLogging = false; + +// Private Shared Module Objects private __gshared LogBuffer 
logBuffer; // Timer for logging private __gshared MonoTime lastInsertedTime; @@ -310,7 +314,7 @@ void validateDBUSServerAvailability() { logBuffer.sendGUINotification = false; } else { addLogEntry("D-Bus message bus daemon is available; GUI notifications are now enabled"); - addLogEntry("D-Bus message bus daemon server details: " ~ to!string(dnotify.get_server_info()), ["debug"]); + if (debugLogging) {addLogEntry("D-Bus message bus daemon server details: " ~ to!string(dnotify.get_server_info()), ["debug"]);} logBuffer.sendGUINotification = true; } } else { diff --git a/src/main.d b/src/main.d index 16fb0bbf3..5b77c86d4 100644 --- a/src/main.d +++ b/src/main.d @@ -78,12 +78,8 @@ int main(string[] cliArgs) { // What is the runtime synchronisation directory that will be used // Typically this will be '~/OneDrive' .. however tilde expansion is unreliable string runtimeSyncDirectory = ""; - // Verbosity Logging Count - this defines if verbose or debug logging is being used long verbosityCount = 0; - // Application Logging Level - bool verboseLogging = false; - bool debugLogging = false; // Monitor loop failures bool monitorFailures = false; // Help requested @@ -99,7 +95,7 @@ int main(string[] cliArgs) { // Define 'exit' and 'failure' scopes scope(exit) { // Detail what scope was called - addLogEntry("Exit scope was called", ["debug"]); + if (debugLogging) {addLogEntry("Exit scope was called", ["debug"]);} // Perform synchronised exit performSynchronisedExitProcess("exitScope"); // Setup signal handling for the exit scope @@ -108,7 +104,7 @@ int main(string[] cliArgs) { scope(failure) { // Detail what scope was called - addLogEntry("Failure scope was called", ["debug"]); + if (debugLogging) {addLogEntry("Failure scope was called", ["debug"]);} // Perform synchronised exit performSynchronisedExitProcess("failureScope"); // Setup signal handling for the exit scope @@ -151,33 +147,35 @@ int main(string[] cliArgs) { } // Determine the application logging verbosity - if 
(verbosityCount == 1) { verboseLogging = true;} - if (verbosityCount >= 2) { debugLogging = true;} + if (verbosityCount == 1) { verboseLogging = true;} // set __gshared bool verboseLogging in log.d + if (verbosityCount >= 2) { debugLogging = true;} // set __gshared bool debugLogging in log.d // Initialize the application logging class, as we know the application verbosity level // If we need to enable logging to a file, we can only do this once we know the application configuration which is done slightly later on initialiseLogging(verboseLogging, debugLogging); // Log application start time, log line has start time - addLogEntry("Application started", ["debug"]); + if (debugLogging) {addLogEntry("Application started", ["debug"]);} // Who are we running as? This will print the ProcessID, UID, GID and username the application is running as runtimeUserName = getUserName(); // Print the application version and how this was compiled as soon as possible - addLogEntry("Application Version: " ~ applicationVersion, ["debug"]); - addLogEntry("Application Compiled With: " ~ compilerDetails(), ["debug"]); + if (debugLogging) { + addLogEntry("Application Version: " ~ applicationVersion, ["debug"]); + addLogEntry("Application Compiled With: " ~ compilerDetails(), ["debug"]); - // How was this application started - what options were passed in - addLogEntry("Passed in 'cliArgs': " ~ to!string(cliArgs), ["debug"]); - addLogEntry("Note: --confdir and --verbose are not listed in 'cliArgs' array", ["debug"]); - addLogEntry("Passed in --confdir if present: " ~ confdirOption, ["debug"]); - addLogEntry("Passed in --verbose count if present: " ~ to!string(verbosityCount), ["debug"]); + // How was this application started - what options were passed in + addLogEntry("Passed in 'cliArgs': " ~ to!string(cliArgs), ["debug"]); + addLogEntry("Note: --confdir and --verbose are not listed in 'cliArgs' array", ["debug"]); + addLogEntry("Passed in --confdir if present: " ~ confdirOption, ["debug"]); 
+ addLogEntry("Passed in --verbose count if present: " ~ to!string(verbosityCount), ["debug"]); + } // Create a new AppConfig object with default values, appConfig = new ApplicationConfig(); - // Update the default application configuration with the logging level so these can be used as a config option throughout the application - appConfig.setConfigLoggingLevels(verboseLogging, debugLogging, verbosityCount); + // Update the default application configuration with the verbosity count so this can be used throughout the application as needed + appConfig.verbosityCount = verbosityCount; // Initialise the application configuration, utilising --confdir if it was passed in // Otherwise application defaults will be used to configure the application @@ -219,7 +217,7 @@ int main(string[] cliArgs) { string calculatedLogDirPath = appConfig.calculateLogDirectory(); string calculatedLogFilePath; // Initialise using the configured logging directory - addLogEntry("Using the following path to store the runtime application log: " ~ calculatedLogDirPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Using the following path to store the runtime application log: " ~ calculatedLogDirPath, ["verbose"]);} // Calculate the logfile name if (calculatedLogDirPath != appConfig.defaultHomePath) { // Log file is not going to the home directory @@ -317,7 +315,7 @@ int main(string[] cliArgs) { // Handle --logout as separate item, do not 'resync' on a --logout if (appConfig.getValueBool("logout")) { - addLogEntry("--logout requested", ["debug"]); + if (debugLogging) {addLogEntry("--logout requested", ["debug"]);} addLogEntry("Deleting the saved authentication status ..."); if (!dryRun) { safeRemove(appConfig.refreshTokenFilePath); @@ -331,7 +329,7 @@ int main(string[] cliArgs) { // Handle --reauth to re-authenticate the client if (appConfig.getValueBool("reauth")) { - addLogEntry("--reauth requested", ["debug"]); + if (debugLogging) {addLogEntry("--reauth requested", ["debug"]);} 
addLogEntry("Deleting the saved authentication status ... re-authentication requested"); if (!dryRun) { safeRemove(appConfig.refreshTokenFilePath); @@ -346,14 +344,14 @@ int main(string[] cliArgs) { if (appConfig.getValueBool("resync")) { // what is the risk acceptance for --resync? bool resyncRiskAcceptance = appConfig.displayResyncRiskForAcceptance(); - addLogEntry("Returned --resync risk acceptance: " ~ to!string(resyncRiskAcceptance), ["debug"]); + if (debugLogging) {addLogEntry("Returned --resync risk acceptance: " ~ to!string(resyncRiskAcceptance), ["debug"]);} // Action based on user response if (!resyncRiskAcceptance){ // --resync risk not accepted return EXIT_FAILURE; } else { - addLogEntry("--resync issued and risk accepted", ["debug"]); + if (debugLogging) {addLogEntry("--resync issued and risk accepted", ["debug"]);} // --resync risk accepted, perform a cleanup of items that require a cleanup appConfig.cleanupHashFilesDueToResync(); // Make a backup of the applicable configuration file @@ -394,7 +392,7 @@ int main(string[] cliArgs) { addLogEntry("WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used"); addLogEntry(); bool forceSyncRiskAcceptance = appConfig.displayForceSyncRiskForAcceptance(); - addLogEntry("Returned --force-sync risk acceptance: " ~ forceSyncRiskAcceptance, ["debug"]); + if (debugLogging) {addLogEntry("Returned --force-sync risk acceptance: " ~ forceSyncRiskAcceptance, ["debug"]);} // Action based on user response if (!forceSyncRiskAcceptance){ @@ -414,7 +412,7 @@ int main(string[] cliArgs) { appConfig.displayIPProtocol(); // Test if OneDrive service can be reached, exit if it cant be reached - addLogEntry("Testing network to ensure network connectivity to Microsoft OneDrive Service", ["debug"]); + if (debugLogging) {addLogEntry("Testing network to ensure network connectivity to Microsoft OneDrive Service", ["debug"]);} online = 
testInternetReachability(appConfig); // If we are not 'online' - how do we handle this situation? @@ -440,17 +438,17 @@ int main(string[] cliArgs) { // This needs to be a separate 'if' statement, as, if this was an 'if-else' from above, if we were originally offline and using --monitor, we would never get to this point if (online) { // Check Application Version - addLogEntry("Checking Application Version ...", ["verbose"]); + if (verboseLogging) {addLogEntry("Checking Application Version ...", ["verbose"]);} checkApplicationVersion(); // Initialise the OneDrive API - addLogEntry("Attempting to initialise the OneDrive API ...", ["verbose"]); + if (verboseLogging) {addLogEntry("Attempting to initialise the OneDrive API ...", ["verbose"]);} OneDriveApi oneDriveApiInstance = new OneDriveApi(appConfig); appConfig.apiWasInitialised = oneDriveApiInstance.initialise(); // Did the API initialise successfully? if (appConfig.apiWasInitialised) { - addLogEntry("The OneDrive API was initialised successfully", ["verbose"]); + if (verboseLogging) {addLogEntry("The OneDrive API was initialised successfully", ["verbose"]);} // Flag that we were able to initialise the API in the application config oneDriveApiInstance.debugOutputConfiguredAPIItems(); @@ -459,7 +457,7 @@ int main(string[] cliArgs) { oneDriveApiInstance = null; // Need to configure the itemDB and syncEngineInstance for 'sync' and 'non-sync' operations - addLogEntry("Opening the item database ...", ["verbose"]); + if (verboseLogging) {addLogEntry("Opening the item database ...", ["verbose"]);} // Configure the Item Database itemDB = new ItemDatabase(runtimeDatabaseFile); @@ -644,11 +642,11 @@ int main(string[] cliArgs) { } // Configure the sync directory based on the runtimeSyncDirectory configured directory - addLogEntry("All application operations will be performed in the configured local 'sync_dir' directory: " ~ runtimeSyncDirectory, ["verbose"]); + if (verboseLogging) {addLogEntry("All application operations will 
be performed in the configured local 'sync_dir' directory: " ~ runtimeSyncDirectory, ["verbose"]);} // Try and set the 'sync_dir', attempt to create if it does not exist try { if (!exists(runtimeSyncDirectory)) { - addLogEntry("runtimeSyncDirectory: Configured 'sync_dir' is missing locally. Creating: " ~ runtimeSyncDirectory, ["debug"]); + if (debugLogging) {addLogEntry("runtimeSyncDirectory: Configured 'sync_dir' is missing locally. Creating: " ~ runtimeSyncDirectory, ["debug"]);} // At this point 'sync_dir' is missing and we have requested to create it // However ... 'itemDB' is pointing to a valid database file @@ -663,7 +661,7 @@ int main(string[] cliArgs) { // Attempt to create the sync dir we have been configured with mkdirRecurse(runtimeSyncDirectory); // Configure the applicable permissions for the folder - addLogEntry("Setting directory permissions for: " ~ runtimeSyncDirectory, ["debug"]); + if (debugLogging) {addLogEntry("Setting directory permissions for: " ~ runtimeSyncDirectory, ["debug"]);} runtimeSyncDirectory.setAttributes(appConfig.returnRequiredDirectoryPermisions()); } catch (std.file.FileException e) { // Creating the sync directory failed @@ -739,7 +737,7 @@ int main(string[] cliArgs) { // Make the required --single-directory path locally mkdirRecurse(singleDirectoryPath); // Configure the applicable permissions for the folder - addLogEntry("Setting directory permissions for: " ~ singleDirectoryPath, ["debug"]); + if (debugLogging) {addLogEntry("Setting directory permissions for: " ~ singleDirectoryPath, ["debug"]);} singleDirectoryPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); } @@ -748,7 +746,7 @@ int main(string[] cliArgs) { remotePath = singleDirectoryPath; // Display that we are syncing from a specific path due to --single-directory - addLogEntry("Syncing changes from this selected path: " ~ singleDirectoryPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Syncing changes from this selected path: " ~ 
singleDirectoryPath, ["verbose"]);} } // Handle SIGINT and SIGTERM @@ -810,8 +808,10 @@ int main(string[] cliArgs) { // If we are in a --download-only method of operation, the output of these is not required if (!appConfig.getValueBool("download_only")) { - addLogEntry("Maximum allowed open files: " ~ maxOpenFiles, ["verbose"]); - addLogEntry("Maximum allowed inotify user watches: " ~ maxInotifyWatches, ["verbose"]); + if (verboseLogging) { + addLogEntry("Maximum allowed open files: " ~ maxOpenFiles, ["verbose"]); + addLogEntry("Maximum allowed inotify user watches: " ~ maxInotifyWatches, ["verbose"]); + } } // Configure the monitor class @@ -821,13 +821,13 @@ int main(string[] cliArgs) { filesystemMonitor.onDirCreated = delegate(string path) { // Handle .folder creation if skip_dotfiles is enabled if ((appConfig.getValueBool("skip_dotfiles")) && (isDotFile(path))) { - addLogEntry("[M] Skipping watching local path - .folder found & --skip-dot-files enabled: " ~ path, ["verbose"]); + if (verboseLogging) {addLogEntry("[M] Skipping watching local path - .folder found & --skip-dot-files enabled: " ~ path, ["verbose"]);} } else { - addLogEntry("[M] Local directory created: " ~ path, ["verbose"]); + if (verboseLogging) {addLogEntry("[M] Local directory created: " ~ path, ["verbose"]);} try { syncEngineInstance.scanLocalFilesystemPathForNewData(path); } catch (CurlException e) { - addLogEntry("Offline, cannot create remote dir: " ~ path, ["verbose"]); + if (verboseLogging) {addLogEntry("Offline, cannot create remote dir: " ~ path, ["verbose"]);} } catch (Exception e) { addLogEntry("Cannot create remote directory: " ~ e.msg, ["info", "notify"]); } @@ -839,20 +839,20 @@ int main(string[] cliArgs) { // Handle a potentially locally changed file // Logging for this event moved to handleLocalFileTrigger() due to threading and false triggers from scanLocalFilesystemPathForNewData() above syncEngineInstance.handleLocalFileTrigger(changedLocalFilesToUploadToOneDrive); - 
addLogEntry("[M] Total number of local file(s) added or changed: " ~ to!string(changedLocalFilesToUploadToOneDrive.length), ["verbose"]); + if (verboseLogging) {addLogEntry("[M] Total number of local file(s) added or changed: " ~ to!string(changedLocalFilesToUploadToOneDrive.length), ["verbose"]);} }; // Delegated function for when inotify detects a delete event filesystemMonitor.onDelete = delegate(string path) { - addLogEntry("[M] Local item deleted: " ~ path, ["verbose"]); + if (verboseLogging) {addLogEntry("[M] Local item deleted: " ~ path, ["verbose"]);} try { addLogEntry("The operating system sent a deletion notification. Trying to delete the item as requested"); syncEngineInstance.deleteByPath(path); } catch (CurlException e) { - addLogEntry("Offline, cannot delete item: " ~ path, ["verbose"]); + if (verboseLogging) {addLogEntry("Offline, cannot delete item: " ~ path, ["verbose"]);} } catch (SyncException e) { if (e.msg == "The item to delete is not in the local database") { - addLogEntry("Item cannot be deleted from Microsoft OneDrive because it was not found in the local database", ["verbose"]); + if (verboseLogging) {addLogEntry("Item cannot be deleted from Microsoft OneDrive because it was not found in the local database", ["verbose"]);} } else { addLogEntry("Cannot delete remote item: " ~ e.msg, ["info", "notify"]); } @@ -863,7 +863,7 @@ int main(string[] cliArgs) { // Delegated function for when inotify detects a move event filesystemMonitor.onMove = delegate(string from, string to) { - addLogEntry("[M] Local item moved: " ~ from ~ " -> " ~ to, ["verbose"]); + if (verboseLogging) {addLogEntry("[M] Local item moved: " ~ from ~ " -> " ~ to, ["verbose"]);} try { // Handle .folder -> folder if skip_dotfiles is enabled if ((appConfig.getValueBool("skip_dotfiles")) && (isDotFile(from))) { @@ -873,7 +873,7 @@ int main(string[] cliArgs) { syncEngineInstance.uploadMoveItem(from, to); } } catch (CurlException e) { - addLogEntry("Offline, cannot move item !", 
["verbose"]); + if (verboseLogging) {addLogEntry("Offline, cannot move item !", ["verbose"]);} } catch (Exception e) { addLogEntry("Cannot move item: " ~ e.msg, ["info", "notify"]); } @@ -951,12 +951,12 @@ int main(string[] cliArgs) { // Full Scan set for some 'frequency' - do we flag to perform a full scan of the online data? if (fullScanFrequencyLoopCount > fullScanFrequency) { // set full scan trigger for true up - addLogEntry("Enabling Full Scan True Up (fullScanFrequencyLoopCount > fullScanFrequency), resetting fullScanFrequencyLoopCount = 1", ["debug"]); + if (debugLogging) {addLogEntry("Enabling Full Scan True Up (fullScanFrequencyLoopCount > fullScanFrequency), resetting fullScanFrequencyLoopCount = 1", ["debug"]);} fullScanFrequencyLoopCount = 1; appConfig.fullScanTrueUpRequired = true; } else { // unset full scan trigger for true up - addLogEntry("Disabling Full Scan True Up", ["debug"]); + if (debugLogging) {addLogEntry("Disabling Full Scan True Up", ["debug"]);} appConfig.fullScanTrueUpRequired = false; } } else { @@ -965,11 +965,13 @@ int main(string[] cliArgs) { } // Loop Start - addLogEntry(loopStartOutputMessage, ["debug"]); - addLogEntry("Total Run-Time Loop Number: " ~ to!string(monitorLoopFullCount), ["debug"]); - addLogEntry("Full Scan Frequency Loop Number: " ~ to!string(fullScanFrequencyLoopCount), ["debug"]); + if (debugLogging) { + addLogEntry(loopStartOutputMessage, ["debug"]); + addLogEntry("Total Run-Time Loop Number: " ~ to!string(monitorLoopFullCount), ["debug"]); + addLogEntry("Full Scan Frequency Loop Number: " ~ to!string(fullScanFrequencyLoopCount), ["debug"]); + } SysTime startFunctionProcessingTime = Clock.currTime(); - addLogEntry("Start Monitor Loop Time: " ~ to!string(startFunctionProcessingTime), ["debug"]); + if (debugLogging) {addLogEntry("Start Monitor Loop Time: " ~ to!string(startFunctionProcessingTime), ["debug"]);} // Do we perform any monitor console logging output suppression? 
// 'monitor_log_frequency' controls how often, in a non-verbose application output mode, how often @@ -978,21 +980,21 @@ int main(string[] cliArgs) { if (monitorLogOutputLoopCount > logOutputSupressionInterval) { // unsurpress the logging output monitorLogOutputLoopCount = 1; - addLogEntry("Unsuppressing initial sync log output", ["debug"]); + if (debugLogging) {addLogEntry("Unsuppressing initial sync log output", ["debug"]);} appConfig.suppressLoggingOutput = false; } else { // do we suppress the logging output to absolute minimal if (monitorLoopFullCount == 1) { // application startup with --monitor - addLogEntry("Unsuppressing initial sync log output", ["debug"]); + if (debugLogging) {addLogEntry("Unsuppressing initial sync log output", ["debug"]);} appConfig.suppressLoggingOutput = false; } else { // only suppress if we are not doing --verbose or higher if (appConfig.verbosityCount == 0) { - addLogEntry("Suppressing --monitor log output", ["debug"]); + if (debugLogging) {addLogEntry("Suppressing --monitor log output", ["debug"]);} appConfig.suppressLoggingOutput = true; } else { - addLogEntry("Unsuppressing log output", ["debug"]); + if (debugLogging) {addLogEntry("Unsuppressing log output", ["debug"]);} appConfig.suppressLoggingOutput = false; } } @@ -1000,7 +1002,7 @@ int main(string[] cliArgs) { // How long has the application been running for? 
auto elapsedTime = Clock.currTime() - applicationStartTime; - addLogEntry("Application run-time thus far: " ~ to!string(elapsedTime), ["debug"]); + if (debugLogging) {addLogEntry("Application run-time thus far: " ~ to!string(elapsedTime), ["debug"]);} // Need to re-validate that the client is still online for this loop if (testInternetReachability(appConfig)) { @@ -1032,7 +1034,7 @@ int main(string[] cliArgs) { syncEngineInstance.cleanupArrays(); // Write WAL and SHM data to file for this loop and release memory used by in-memory processing - addLogEntry("Merge contents of WAL and SHM files into main database file", ["debug"]); + if (debugLogging) {addLogEntry("Merge contents of WAL and SHM files into main database file", ["debug"]);} itemDB.performCheckpoint(); } else { // Not online @@ -1041,14 +1043,16 @@ int main(string[] cliArgs) { // Output end of loop processing times SysTime endFunctionProcessingTime = Clock.currTime(); - addLogEntry("End Monitor Loop Time: " ~ to!string(endFunctionProcessingTime), ["debug"]); - addLogEntry("Elapsed Monitor Loop Processing Time: " ~ to!string((endFunctionProcessingTime - startFunctionProcessingTime)), ["debug"]); + if (debugLogging) { + addLogEntry("End Monitor Loop Time: " ~ to!string(endFunctionProcessingTime), ["debug"]); + addLogEntry("Elapsed Monitor Loop Processing Time: " ~ to!string((endFunctionProcessingTime - startFunctionProcessingTime)), ["debug"]); + } // Release all the curl instances used during this loop // New curl instances will be established on next loop - addLogEntry("CurlEngine Pool Size PRE Cleanup: " ~ to!string(curlEnginePoolLength()), ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine Pool Size PRE Cleanup: " ~ to!string(curlEnginePoolLength()), ["debug"]);} releaseAllCurlInstances(); // Release all CurlEngine instances - addLogEntry("CurlEngine Pool Size POST Cleanup: " ~ to!string(curlEnginePoolLength()) , ["debug"]); + if (debugLogging) {addLogEntry("CurlEngine Pool Size POST Cleanup: " ~ 
to!string(curlEnginePoolLength()) , ["debug"]);} // Display memory details before garbage collection if (displayMemoryUsage) displayMemoryUsagePreGC(); @@ -1061,7 +1065,7 @@ int main(string[] cliArgs) { if (displayMemoryUsage) displayMemoryUsagePostGC(); // Log that this loop is complete - addLogEntry(loopStopOutputMessage, ["debug"]); + if (debugLogging) {addLogEntry(loopStopOutputMessage, ["debug"]);} // performSync complete, set lastCheckTime to current time lastCheckTime = MonoTime.currTime(); @@ -1080,7 +1084,7 @@ int main(string[] cliArgs) { auto nextCheckTime = lastCheckTime + checkOnlineInterval; currentTime = MonoTime.currTime(); auto sleepTime = nextCheckTime - currentTime; - addLogEntry("Sleep for " ~ to!string(sleepTime), ["debug"]); + if (debugLogging) {addLogEntry("Sleep for " ~ to!string(sleepTime), ["debug"]);} if(filesystemMonitor.initialised || webhookEnabled) { @@ -1098,7 +1102,7 @@ int main(string[] cliArgs) { Duration nextWebhookCheckDuration = oneDriveWebhook.getNextExpirationCheckDuration(); if (nextWebhookCheckDuration < sleepTime) { sleepTime = nextWebhookCheckDuration; - addLogEntry("Update sleeping time to " ~ to!string(sleepTime), ["debug"]); + if (debugLogging) {addLogEntry("Update sleeping time to " ~ to!string(sleepTime), ["debug"]);} } // Webhook Notification reset to false for this loop notificationReceived = false; @@ -1109,9 +1113,11 @@ int main(string[] cliArgs) { auto signalExists = receiveTimeout(sleepTime, (int msg) {res = msg;},(ulong _) {notificationReceived = true;}); // Debug values - addLogEntry("signalExists = " ~ to!string(signalExists), ["debug"]); - addLogEntry("worker status = " ~ to!string(res), ["debug"]); - addLogEntry("notificationReceived = " ~ to!string(notificationReceived), ["debug"]); + if (debugLogging) { + addLogEntry("signalExists = " ~ to!string(signalExists), ["debug"]); + addLogEntry("worker status = " ~ to!string(res), ["debug"]); + addLogEntry("notificationReceived = " ~ 
to!string(notificationReceived), ["debug"]); + } // Empirical evidence shows that Microsoft often sends multiple // notifications for one single change, so we need a loop to exhaust @@ -1314,7 +1320,7 @@ void displaySyncOutcome() { } void processResyncDatabaseRemoval(string databaseFilePathToRemove) { - addLogEntry("Testing if we have exclusive access to local database file", ["debug"]); + if (debugLogging) {addLogEntry("Testing if we have exclusive access to local database file", ["debug"]);} // Are we the only running instance? Test that we can open the database file path itemDB = new ItemDatabase(databaseFilePathToRemove); @@ -1350,13 +1356,13 @@ void cleanupDatabaseFiles(string activeDatabaseFileName) { // If the dry run database exists, clean this up if (exists(activeDatabaseFileName)) { // remove the dry run database file - addLogEntry("DRY-RUN: Removing items-dryrun.sqlite3 as it still exists for some reason", ["debug"]); + if (debugLogging) {addLogEntry("DRY-RUN: Removing items-dryrun.sqlite3 as it still exists for some reason", ["debug"]);} safeRemove(activeDatabaseFileName); } } else { // we may have not been using --dry-run, however we may have been running some operations that use a dry-run database, and this needs to be explicitly cleaned up if (exists(appConfig.databaseFilePathDryRun)) { - addLogEntry("Removing items-dryrun.sqlite3 as it still exists for some reason post being used for non-dryrun operations", ["debug"]); + if (debugLogging) {addLogEntry("Removing items-dryrun.sqlite3 as it still exists for some reason post being used for non-dryrun operations", ["debug"]);} safeRemove(appConfig.databaseFilePathDryRun); } } @@ -1371,7 +1377,7 @@ void cleanupDatabaseFiles(string activeDatabaseFileName) { } // Remove -shm file - addLogEntry(logMessage, ["debug"]); + if (debugLogging) {addLogEntry(logMessage, ["debug"]);} safeRemove(databaseShmFile); } @@ -1385,7 +1391,7 @@ void cleanupDatabaseFiles(string activeDatabaseFileName) { } // Remove -wal file - 
addLogEntry(logMessage, ["debug"]); + if (debugLogging) {addLogEntry(logMessage, ["debug"]);} safeRemove(databaseWalFile); } } @@ -1472,7 +1478,7 @@ void performSynchronisedExitProcess(string scopeCaller = null) { // Perform cleanup and shutdown of various services and resources try { // Log who called this function - addLogEntry("performSynchronisedExitProcess called by: " ~ scopeCaller, ["debug"]); + if (debugLogging) {addLogEntry("performSynchronisedExitProcess called by: " ~ scopeCaller, ["debug"]);} // Shutdown the OneDrive Webhook instance shutdownOneDriveWebhook(); // Shutdown any local filesystem monitoring @@ -1501,47 +1507,47 @@ void performSynchronisedExitProcess(string scopeCaller = null) { void shutdownOneDriveWebhook() { if (oneDriveWebhook !is null) { - addLogEntry("Shutting down OneDrive Webhook instance", ["debug"]); + if (debugLogging) {addLogEntry("Shutting down OneDrive Webhook instance", ["debug"]);} oneDriveWebhook.stop(); object.destroy(oneDriveWebhook); oneDriveWebhook = null; - addLogEntry("Shutdown of OneDrive Webhook instance is complete", ["debug"]); + if (debugLogging) {addLogEntry("Shutdown of OneDrive Webhook instance is complete", ["debug"]);} } } void shutdownFilesystemMonitor() { if (filesystemMonitor !is null) { - addLogEntry("Shutting down Filesystem Monitoring instance", ["debug"]); + if (debugLogging) {addLogEntry("Shutting down Filesystem Monitoring instance", ["debug"]);} filesystemMonitor.shutdown(); object.destroy(filesystemMonitor); filesystemMonitor = null; - addLogEntry("Shutdown of Filesystem Monitoring instance is complete", ["debug"]); + if (debugLogging) {addLogEntry("Shutdown of Filesystem Monitoring instance is complete", ["debug"]);} } } void shutdownSelectiveSync() { if (selectiveSync !is null) { - addLogEntry("Shutting down Client Side Filtering instance", ["debug"]); + if (debugLogging) {addLogEntry("Shutting down Client Side Filtering instance", ["debug"]);} selectiveSync.shutdown(); 
object.destroy(selectiveSync); selectiveSync = null; - addLogEntry("Shutdown of Client Side Filtering instance is complete", ["debug"]); + if (debugLogging) {addLogEntry("Shutdown of Client Side Filtering instance is complete", ["debug"]);} } } void shutdownSyncEngine() { if (syncEngineInstance !is null) { - addLogEntry("Shutting down Sync Engine instance", ["debug"]); + if (debugLogging) {addLogEntry("Shutting down Sync Engine instance", ["debug"]);} syncEngineInstance.shutdown(); // Make sure any running thread completes first object.destroy(syncEngineInstance); syncEngineInstance = null; - addLogEntry("Shutdown Sync Engine instance is complete", ["debug"]); + if (debugLogging) {addLogEntry("Shutdown Sync Engine instance is complete", ["debug"]);} } } void shutdownDatabase() { if (itemDB !is null && itemDB.isDatabaseInitialised()) { - addLogEntry("Shutting down Database instance", ["debug"]); + if (debugLogging) {addLogEntry("Shutting down Database instance", ["debug"]);} if (performDatabaseVacuum) { // Logging to attempt this is dentoed from performVacuum() - so no need to confirm here itemDB.performVacuum(); @@ -1551,16 +1557,16 @@ void shutdownDatabase() { object.destroy(itemDB); cleanupDatabaseFiles(runtimeDatabaseFile); itemDB = null; - addLogEntry("Shutdown of Database instance is complete", ["debug"]); + if (debugLogging) {addLogEntry("Shutdown of Database instance is complete", ["debug"]);} } } void shutdownAppConfig() { if (appConfig !is null) { - addLogEntry("Shutting down Application Configuration instance", ["debug"]); + if (debugLogging) {addLogEntry("Shutting down Application Configuration instance", ["debug"]);} object.destroy(appConfig); appConfig = null; - addLogEntry("Shutdown of Application Configuration instance is complete", ["debug"]); + if (debugLogging) {addLogEntry("Shutdown of Application Configuration instance is complete", ["debug"]);} } } @@ -1570,10 +1576,10 @@ void shutdownApplicationLogging() { if (loggingActive()) { // join all 
threads thread_joinAll(); - addLogEntry("Application is exiting", ["debug"]); + if (debugLogging) {addLogEntry("Application is exiting", ["debug"]);} addLogEntry("#######################################################################################################################################", ["logFileOnly"]); // Destroy the shared logging buffer which flushes any remaing logs - addLogEntry("Shutting down Application Logging instance", ["debug"]); + if (debugLogging) {addLogEntry("Shutting down Application Logging instance", ["debug"]);} // Allow any logging complete before we exit Thread.sleep(dur!("msecs")(500)); // Shutdown Logging which also sets logBuffer to null diff --git a/src/monitor.d b/src/monitor.d index b6830e665..349562c7f 100644 --- a/src/monitor.d +++ b/src/monitor.d @@ -66,7 +66,7 @@ class MonitorBackgroundWorker { addLogEntry("EXAMPLE: sudo sysctl fs.inotify.max_user_watches=" ~ to!string((maxInotifyWatches * 2))); } if (errno() == 13) { - addLogEntry("WARNING: inotify_add_watch failed - permission denied: " ~ pathname, ["verbose"]); + if (verboseLogging) {addLogEntry("WARNING: inotify_add_watch failed - permission denied: " ~ pathname, ["verbose"]);} } // Flag any other errors addLogEntry("ERROR: inotify_add_watch failed: " ~ pathname); @@ -74,12 +74,12 @@ class MonitorBackgroundWorker { } // Add path to inotify watch - required regardless if a '.folder' or 'folder' - addLogEntry("inotify_add_watch successfully added for: " ~ pathname, ["debug"]); + if (debugLogging) {addLogEntry("inotify_add_watch successfully added for: " ~ pathname, ["debug"]);} // Do we log that we are monitoring this directory? 
if (isDir(pathname)) { // Log that this is directory is being monitored - addLogEntry("Monitoring directory: " ~ pathname, ["verbose"]); + if (verboseLogging) {addLogEntry("Monitoring directory: " ~ pathname, ["verbose"]);} } return wd; } @@ -339,7 +339,7 @@ final class Monitor { private void addRecursive(string dirname) { // skip non existing/disappeared items if (!exists(dirname)) { - addLogEntry("Not adding non-existing/disappeared directory: " ~ dirname, ["verbose"]); + if (verboseLogging) {addLogEntry("Not adding non-existing/disappeared directory: " ~ dirname, ["verbose"]);} return; } @@ -351,7 +351,7 @@ final class Monitor { if (isDir(dirname)) { if (selectiveSync.isDirNameExcluded(dirname.strip('.'))) { // dont add a watch for this item - addLogEntry("Skipping monitoring due to skip_dir match: " ~ dirname, ["debug"]); + if (debugLogging) {addLogEntry("Skipping monitoring due to skip_dir match: " ~ dirname, ["debug"]);} return; } } @@ -361,14 +361,14 @@ final class Monitor { // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched if (selectiveSync.isFileNameExcluded(dirname.strip('.'))) { // dont add a watch for this item - addLogEntry("Skipping monitoring due to skip_file match: " ~ dirname, ["debug"]); + if (debugLogging) {addLogEntry("Skipping monitoring due to skip_file match: " ~ dirname, ["debug"]);} return; } } // is the path excluded by sync_list? if (selectiveSync.isPathExcludedViaSyncList(buildNormalizedPath(dirname))) { // dont add a watch for this item - addLogEntry("Skipping monitoring due to sync_list match: " ~ dirname, ["debug"]); + if (debugLogging) {addLogEntry("Skipping monitoring due to sync_list match: " ~ dirname, ["debug"]);} return; } } @@ -385,7 +385,7 @@ final class Monitor { // Do we need to check for .nosync? 
Only if check_nosync is true if (check_nosync) { if (exists(buildNormalizedPath(dirname) ~ "/.nosync")) { - addLogEntry("Skipping watching path - .nosync found & --check-for-nosync enabled: " ~ buildNormalizedPath(dirname), ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping watching path - .nosync found & --check-for-nosync enabled: " ~ buildNormalizedPath(dirname), ["verbose"]);} return; } } @@ -401,7 +401,7 @@ final class Monitor { // passed all potential exclusions // add inotify watch for this path / directory / file - addLogEntry("Calling worker.addInotifyWatch() for this dirname: " ~ dirname, ["debug"]); + if (debugLogging) {addLogEntry("Calling worker.addInotifyWatch() for this dirname: " ~ dirname, ["debug"]);} int wd = worker.addInotifyWatch(dirname); if (wd > 0) { wdToDirName[wd] = buildNormalizedPath(dirname) ~ "/"; @@ -414,7 +414,7 @@ final class Monitor { auto pathList = dirEntries(dirname, SpanMode.shallow, false); foreach(DirEntry entry; pathList) { if (entry.isDir) { - addLogEntry("Calling addRecursive() for this directory: " ~ entry.name, ["debug"]); + if (debugLogging) {addLogEntry("Calling addRecursive() for this directory: " ~ entry.name, ["debug"]);} addRecursive(entry.name); } } @@ -455,7 +455,7 @@ final class Monitor { assert(wd in wdToDirName); int ret = worker.removeInotifyWatch(wd); if (ret < 0) throw new MonitorException("inotify_rm_watch failed"); - addLogEntry("Monitored directory removed: " ~ to!string(wdToDirName[wd]), ["verbose"]); + if (verboseLogging) {addLogEntry("Monitored directory removed: " ~ to!string(wdToDirName[wd]), ["verbose"]);} wdToDirName.remove(wd); } @@ -467,7 +467,7 @@ final class Monitor { int ret = worker.removeInotifyWatch(wd); if (ret < 0) throw new MonitorException("inotify_rm_watch failed"); wdToDirName.remove(wd); - addLogEntry("Monitored directory removed: " ~ dirname, ["verbose"]); + if (verboseLogging) {addLogEntry("Monitored directory removed: " ~ dirname, ["verbose"]);} } } } @@ -476,7 +476,7 @@ 
final class Monitor { private string getPath(const(inotify_event)* event) { string path = wdToDirName[event.wd]; if (event.len > 0) path ~= fromStringz(event.name.ptr); - addLogEntry("inotify path event for: " ~ path, ["debug"]); + if (debugLogging) {addLogEntry("inotify path event for: " ~ path, ["debug"]);} return path; } @@ -509,36 +509,40 @@ final class Monitor { string evalPath; // inotify event debug - addLogEntry("inotify event wd: " ~ to!string(event.wd), ["debug"]); - addLogEntry("inotify event mask: " ~ to!string(event.mask), ["debug"]); - addLogEntry("inotify event cookie: " ~ to!string(event.cookie), ["debug"]); - addLogEntry("inotify event len: " ~ to!string(event.len), ["debug"]); - addLogEntry("inotify event name: " ~ to!string(event.name), ["debug"]); + if (debugLogging) { + addLogEntry("inotify event wd: " ~ to!string(event.wd), ["debug"]); + addLogEntry("inotify event mask: " ~ to!string(event.mask), ["debug"]); + addLogEntry("inotify event cookie: " ~ to!string(event.cookie), ["debug"]); + addLogEntry("inotify event len: " ~ to!string(event.len), ["debug"]); + addLogEntry("inotify event name: " ~ to!string(event.name), ["debug"]); + } // inotify event handling - if (event.mask & IN_ACCESS) addLogEntry("inotify event flag: IN_ACCESS", ["debug"]); - if (event.mask & IN_MODIFY) addLogEntry("inotify event flag: IN_MODIFY", ["debug"]); - if (event.mask & IN_ATTRIB) addLogEntry("inotify event flag: IN_ATTRIB", ["debug"]); - if (event.mask & IN_CLOSE_WRITE) addLogEntry("inotify event flag: IN_CLOSE_WRITE", ["debug"]); - if (event.mask & IN_CLOSE_NOWRITE) addLogEntry("inotify event flag: IN_CLOSE_NOWRITE", ["debug"]); - if (event.mask & IN_MOVED_FROM) addLogEntry("inotify event flag: IN_MOVED_FROM", ["debug"]); - if (event.mask & IN_MOVED_TO) addLogEntry("inotify event flag: IN_MOVED_TO", ["debug"]); - if (event.mask & IN_CREATE) addLogEntry("inotify event flag: IN_CREATE", ["debug"]); - if (event.mask & IN_DELETE) addLogEntry("inotify event flag: 
IN_DELETE", ["debug"]); - if (event.mask & IN_DELETE_SELF) addLogEntry("inotify event flag: IN_DELETE_SELF", ["debug"]); - if (event.mask & IN_MOVE_SELF) addLogEntry("inotify event flag: IN_MOVE_SELF", ["debug"]); - if (event.mask & IN_UNMOUNT) addLogEntry("inotify event flag: IN_UNMOUNT", ["debug"]); - if (event.mask & IN_Q_OVERFLOW) addLogEntry("inotify event flag: IN_Q_OVERFLOW", ["debug"]); - if (event.mask & IN_IGNORED) addLogEntry("inotify event flag: IN_IGNORED", ["debug"]); - if (event.mask & IN_CLOSE) addLogEntry("inotify event flag: IN_CLOSE", ["debug"]); - if (event.mask & IN_MOVE) addLogEntry("inotify event flag: IN_MOVE", ["debug"]); - if (event.mask & IN_ONLYDIR) addLogEntry("inotify event flag: IN_ONLYDIR", ["debug"]); - if (event.mask & IN_DONT_FOLLOW) addLogEntry("inotify event flag: IN_DONT_FOLLOW", ["debug"]); - if (event.mask & IN_EXCL_UNLINK) addLogEntry("inotify event flag: IN_EXCL_UNLINK", ["debug"]); - if (event.mask & IN_MASK_ADD) addLogEntry("inotify event flag: IN_MASK_ADD", ["debug"]); - if (event.mask & IN_ISDIR) addLogEntry("inotify event flag: IN_ISDIR", ["debug"]); - if (event.mask & IN_ONESHOT) addLogEntry("inotify event flag: IN_ONESHOT", ["debug"]); - if (event.mask & IN_ALL_EVENTS) addLogEntry("inotify event flag: IN_ALL_EVENTS", ["debug"]); + if (debugLogging) { + if (event.mask & IN_ACCESS) addLogEntry("inotify event flag: IN_ACCESS", ["debug"]); + if (event.mask & IN_MODIFY) addLogEntry("inotify event flag: IN_MODIFY", ["debug"]); + if (event.mask & IN_ATTRIB) addLogEntry("inotify event flag: IN_ATTRIB", ["debug"]); + if (event.mask & IN_CLOSE_WRITE) addLogEntry("inotify event flag: IN_CLOSE_WRITE", ["debug"]); + if (event.mask & IN_CLOSE_NOWRITE) addLogEntry("inotify event flag: IN_CLOSE_NOWRITE", ["debug"]); + if (event.mask & IN_MOVED_FROM) addLogEntry("inotify event flag: IN_MOVED_FROM", ["debug"]); + if (event.mask & IN_MOVED_TO) addLogEntry("inotify event flag: IN_MOVED_TO", ["debug"]); + if (event.mask & IN_CREATE) 
addLogEntry("inotify event flag: IN_CREATE", ["debug"]); + if (event.mask & IN_DELETE) addLogEntry("inotify event flag: IN_DELETE", ["debug"]); + if (event.mask & IN_DELETE_SELF) addLogEntry("inotify event flag: IN_DELETE_SELF", ["debug"]); + if (event.mask & IN_MOVE_SELF) addLogEntry("inotify event flag: IN_MOVE_SELF", ["debug"]); + if (event.mask & IN_UNMOUNT) addLogEntry("inotify event flag: IN_UNMOUNT", ["debug"]); + if (event.mask & IN_Q_OVERFLOW) addLogEntry("inotify event flag: IN_Q_OVERFLOW", ["debug"]); + if (event.mask & IN_IGNORED) addLogEntry("inotify event flag: IN_IGNORED", ["debug"]); + if (event.mask & IN_CLOSE) addLogEntry("inotify event flag: IN_CLOSE", ["debug"]); + if (event.mask & IN_MOVE) addLogEntry("inotify event flag: IN_MOVE", ["debug"]); + if (event.mask & IN_ONLYDIR) addLogEntry("inotify event flag: IN_ONLYDIR", ["debug"]); + if (event.mask & IN_DONT_FOLLOW) addLogEntry("inotify event flag: IN_DONT_FOLLOW", ["debug"]); + if (event.mask & IN_EXCL_UNLINK) addLogEntry("inotify event flag: IN_EXCL_UNLINK", ["debug"]); + if (event.mask & IN_MASK_ADD) addLogEntry("inotify event flag: IN_MASK_ADD", ["debug"]); + if (event.mask & IN_ISDIR) addLogEntry("inotify event flag: IN_ISDIR", ["debug"]); + if (event.mask & IN_ONESHOT) addLogEntry("inotify event flag: IN_ONESHOT", ["debug"]); + if (event.mask & IN_ALL_EVENTS) addLogEntry("inotify event flag: IN_ALL_EVENTS", ["debug"]); + } // skip events that need to be ignored if (event.mask & IN_IGNORED) { @@ -584,11 +588,11 @@ final class Monitor { // handle the inotify events if (event.mask & IN_MOVED_FROM) { - addLogEntry("event IN_MOVED_FROM: " ~ path, ["debug"]); + if (debugLogging) {addLogEntry("event IN_MOVED_FROM: " ~ path, ["debug"]);} cookieToPath[event.cookie] = path; movedNotDeleted[path] = true; // Mark as moved, not deleted } else if (event.mask & IN_MOVED_TO) { - addLogEntry("event IN_MOVED_TO: " ~ path, ["debug"]); + if (debugLogging) {addLogEntry("event IN_MOVED_TO: " ~ path, 
["debug"]);} if (event.mask & IN_ISDIR) addRecursive(path); auto from = event.cookie in cookieToPath; if (from) { @@ -604,7 +608,7 @@ final class Monitor { } } } else if (event.mask & IN_CREATE) { - addLogEntry("event IN_CREATE: " ~ path, ["debug"]); + if (debugLogging) {addLogEntry("event IN_CREATE: " ~ path, ["debug"]);} if (event.mask & IN_ISDIR) { addRecursive(path); if (useCallbacks) actionHolder.append(ActionType.createDir, path); @@ -613,14 +617,14 @@ final class Monitor { if (path in movedNotDeleted) { movedNotDeleted.remove(path); // Ignore delete for moved files } else { - addLogEntry("event IN_DELETE: " ~ path, ["debug"]); + if (debugLogging) {addLogEntry("event IN_DELETE: " ~ path, ["debug"]);} if (useCallbacks) actionHolder.append(ActionType.deleted, path); } } else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) { - addLogEntry("event IN_CLOSE_WRITE and not IN_ISDIR: " ~ path, ["debug"]); + if (debugLogging) {addLogEntry("event IN_CLOSE_WRITE and not IN_ISDIR: " ~ path, ["debug"]);} if (useCallbacks) actionHolder.append(ActionType.changed, path); } else { - addLogEntry("event unhandled: " ~ path, ["debug"]); + addLogEntry("inotify event unhandled: " ~ path); assert(0); } @@ -639,13 +643,13 @@ final class Monitor { // Assume that the items moved outside the watched directory have been deleted foreach (cookie, path; cookieToPath) { - addLogEntry("Deleting cookie|watch (post loop): " ~ path, ["debug"]); + if (debugLogging) {addLogEntry("Deleting cookie|watch (post loop): " ~ path, ["debug"]);} if (useCallbacks) onDelete(path); remove(path); cookieToPath.remove(cookie); } // Debug Log that all inotify events are flushed - addLogEntry("inotify events flushed", ["debug"]); + if (debugLogging) {addLogEntry("inotify events flushed", ["debug"]);} } } diff --git a/src/onedrive.d b/src/onedrive.d index 37b018d4b..df7543d7f 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -208,7 +208,7 @@ class OneDriveApi { tokenUrl = appConfig.usl4AuthEndpoint 
~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; if (clientId == appConfig.defaultApplicationId) { // application_id == default - addLogEntry("USL4 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]); + if (debugLogging) {addLogEntry("USL4 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]);} redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id @@ -236,7 +236,7 @@ class OneDriveApi { tokenUrl = appConfig.usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; if (clientId == appConfig.defaultApplicationId) { // application_id == default - addLogEntry("USL5 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]); + if (debugLogging) {addLogEntry("USL5 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]);} redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id @@ -264,7 +264,7 @@ class OneDriveApi { tokenUrl = appConfig.deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; if (clientId == appConfig.defaultApplicationId) { // application_id == default - addLogEntry("DE AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]); + if (debugLogging) {addLogEntry("DE AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]);} redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id @@ -292,7 +292,7 @@ class OneDriveApi { tokenUrl = appConfig.cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; if (clientId == appConfig.defaultApplicationId) { // application_id == default - addLogEntry("CN AD Endpoint but default application_id, redirectUrl needs to be aligned to 
globalAuthEndpoint", ["debug"]); + if (debugLogging) {addLogEntry("CN AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]);} redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id @@ -320,12 +320,12 @@ class OneDriveApi { // Has the application been authenticated? if (!exists(appConfig.refreshTokenFilePath)) { - addLogEntry("Application has no 'refresh_token' thus needs to be authenticated", ["debug"]); + if (debugLogging) {addLogEntry("Application has no 'refresh_token' thus needs to be authenticated", ["debug"]);} authorised = authorise(); } else { // Try and read the value from the appConfig if it is set, rather than trying to read the value from disk if (!appConfig.refreshToken.empty) { - addLogEntry("Read token from appConfig", ["debug"]); + if (debugLogging) {addLogEntry("Read token from appConfig", ["debug"]);} refreshToken = strip(appConfig.refreshToken); authorised = true; } else { @@ -354,12 +354,12 @@ class OneDriveApi { if (refreshToken.empty) { // PROBLEM ... CODING TO DO ?????????? 
- addLogEntry("DEBUG: refreshToken is empty !!!!!!!!!!", ["debug"]); + if (debugLogging) {addLogEntry("DEBUG: refreshToken is empty !!!!!!!!!!", ["debug"]);} } } // Return if we are authorised - addLogEntry("Authorised State: " ~ to!string(authorised), ["debug"]); + if (debugLogging) {addLogEntry("Authorised State: " ~ to!string(authorised), ["debug"]);} return authorised; } @@ -367,30 +367,32 @@ class OneDriveApi { void debugOutputConfiguredAPIItems() { // Debug output of configured URL's // Application Identification - addLogEntry("Configured clientId " ~ clientId, ["debug"]); - addLogEntry("Configured userAgent " ~ appConfig.getValueString("user_agent"), ["debug"]); - // Authentication - addLogEntry("Configured authScope: " ~ authScope, ["debug"]); - addLogEntry("Configured authUrl: " ~ authUrl, ["debug"]); - addLogEntry("Configured redirectUrl: " ~ redirectUrl, ["debug"]); - addLogEntry("Configured tokenUrl: " ~ tokenUrl, ["debug"]); - // Drive Queries - addLogEntry("Configured driveUrl: " ~ driveUrl, ["debug"]); - addLogEntry("Configured driveByIdUrl: " ~ driveByIdUrl, ["debug"]); - // Shared With Me - addLogEntry("Configured sharedWithMeUrl: " ~ sharedWithMeUrl, ["debug"]); - // Item Queries - addLogEntry("Configured itemByIdUrl: " ~ itemByIdUrl, ["debug"]); - addLogEntry("Configured itemByPathUrl: " ~ itemByPathUrl, ["debug"]); - // SharePoint Queries - addLogEntry("Configured siteSearchUrl: " ~ siteSearchUrl, ["debug"]); - addLogEntry("Configured siteDriveUrl: " ~ siteDriveUrl, ["debug"]); + if (debugLogging) { + addLogEntry("Configured clientId " ~ clientId, ["debug"]); + addLogEntry("Configured userAgent " ~ appConfig.getValueString("user_agent"), ["debug"]); + // Authentication + addLogEntry("Configured authScope: " ~ authScope, ["debug"]); + addLogEntry("Configured authUrl: " ~ authUrl, ["debug"]); + addLogEntry("Configured redirectUrl: " ~ redirectUrl, ["debug"]); + addLogEntry("Configured tokenUrl: " ~ tokenUrl, ["debug"]); + // Drive Queries + 
addLogEntry("Configured driveUrl: " ~ driveUrl, ["debug"]); + addLogEntry("Configured driveByIdUrl: " ~ driveByIdUrl, ["debug"]); + // Shared With Me + addLogEntry("Configured sharedWithMeUrl: " ~ sharedWithMeUrl, ["debug"]); + // Item Queries + addLogEntry("Configured itemByIdUrl: " ~ itemByIdUrl, ["debug"]); + addLogEntry("Configured itemByPathUrl: " ~ itemByPathUrl, ["debug"]); + // SharePoint Queries + addLogEntry("Configured siteSearchUrl: " ~ siteSearchUrl, ["debug"]); + addLogEntry("Configured siteDriveUrl: " ~ siteDriveUrl, ["debug"]); + } } // Release CurlEngine bask to the Curl Engine Pool void releaseCurlEngine() { // Log that this was called - addLogEntry("OneDrive API releaseCurlEngine() Called", ["debug"]); + if (debugLogging) {addLogEntry("OneDrive API releaseCurlEngine() Called", ["debug"]);} // Release curl instance back to the pool if (curlEngine !is null) { @@ -677,8 +679,10 @@ class OneDriveApi { // For the moment, comment out adding the If-Match header in createUploadSession, which then avoids this issue string contentRange = "bytes " ~ to!string(offset) ~ "-" ~ to!string(offset + offsetSize - 1) ~ "/" ~ to!string(fileSize); - addLogEntry("", ["debug"]); // Add an empty newline before log output - addLogEntry("contentRange: " ~ contentRange, ["debug"]); + if (debugLogging) { + addLogEntry("", ["debug"]); // Add an empty newline before log output + addLogEntry("contentRange: " ~ contentRange, ["debug"]); + } return put(uploadUrl, filepath, true, contentRange, offset, offsetSize); } @@ -770,10 +774,10 @@ class OneDriveApi { // Does the path exist locally? 
if (!exists(newPath)) { try { - addLogEntry("Requested local path does not exist, creating directory structure: " ~ newPath, ["debug"]); + if (debugLogging) {addLogEntry("Requested local path does not exist, creating directory structure: " ~ newPath, ["debug"]);} mkdirRecurse(newPath); // Configure the applicable permissions for the folder - addLogEntry("Setting directory permissions for: " ~ newPath, ["debug"]); + if (debugLogging) {addLogEntry("Setting directory permissions for: " ~ newPath, ["debug"]);} newPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); } catch (FileException exception) { // display the error message @@ -787,7 +791,7 @@ class OneDriveApi { // Does path exist? if (exists(saveToPath)) { // File was downloaded successfully - configure the applicable permissions for the file - addLogEntry("Setting file permissions for: " ~ saveToPath, ["debug"]); + if (debugLogging) {addLogEntry("Setting file permissions for: " ~ saveToPath, ["debug"]);} saveToPath.setAttributes(appConfig.returnRequiredFilePermisions()); } } @@ -799,7 +803,7 @@ class OneDriveApi { // Private OneDrive API Functions private void addIncludeFeatureRequestHeader(string[string]* headers) { - addLogEntry("Adding 'Include-Feature=AddToOneDrive' API request header as 'sync_business_shared_items' config option is enabled", ["debug"]); + if (debugLogging) {addLogEntry("Adding 'Include-Feature=AddToOneDrive' API request header as 'sync_business_shared_items' config option is enabled", ["debug"]);} (*headers)["Prefer"] = "Include-Feature=AddToOneDrive"; } @@ -844,7 +848,7 @@ class OneDriveApi { string effectiveScopes = response["scope"].str(); // Display the effective authentication scopes addLogEntry(); - addLogEntry("Effective API Authentication Scopes: " ~ effectiveScopes, ["verbose"]); + if (verboseLogging) {addLogEntry("Effective API Authentication Scopes: " ~ effectiveScopes, ["verbose"]);} // if we have any write scopes, we need to tell the user to update an remove 
online prior authentication and exit application if (canFind(effectiveScopes, "Write")) { @@ -870,7 +874,7 @@ class OneDriveApi { if (appConfig.getValueBool("debug_https")) { if (appConfig.getValueBool("print_token")) { // This needs to be highly restricted in output .... - addLogEntry("CAUTION - KEEP THIS SAFE: Current access token: " ~ to!string(appConfig.accessToken), ["debug"]); + if (debugLogging) {addLogEntry("CAUTION - KEEP THIS SAFE: Current access token: " ~ to!string(appConfig.accessToken), ["debug"]);} } } } @@ -881,22 +885,22 @@ class OneDriveApi { // Update the refreshToken in appConfig so that we can reuse it if (appConfig.refreshToken.empty) { // The access token is empty - addLogEntry("Updating appConfig.refreshToken with new refreshToken as appConfig.refreshToken is empty", ["debug"]); + if (debugLogging) {addLogEntry("Updating appConfig.refreshToken with new refreshToken as appConfig.refreshToken is empty", ["debug"]);} appConfig.refreshToken = refreshToken; } else { // Is the access token different? 
if (appConfig.refreshToken != refreshToken) { // Update the memory version - addLogEntry("Updating appConfig.refreshToken with updated refreshToken", ["debug"]); + if (debugLogging) {addLogEntry("Updating appConfig.refreshToken with updated refreshToken", ["debug"]);} appConfig.refreshToken = refreshToken; } } // try and update the refresh_token file on disk try { - addLogEntry("Updating refreshToken on disk", ["debug"]); + if (debugLogging) {addLogEntry("Updating refreshToken on disk", ["debug"]);} std.file.write(appConfig.refreshTokenFilePath, refreshToken); - addLogEntry("Setting file permissions for: " ~ appConfig.refreshTokenFilePath, ["debug"]); + if (debugLogging) {addLogEntry("Setting file permissions for: " ~ appConfig.refreshTokenFilePath, ["debug"]);} appConfig.refreshTokenFilePath.setAttributes(appConfig.returnRequiredFilePermisions()); } catch (FileException exception) { // display the error message @@ -921,7 +925,7 @@ class OneDriveApi { } private void newToken() { - addLogEntry("Need to generate a new access token for Microsoft OneDrive", ["debug"]); + if (debugLogging) {addLogEntry("Need to generate a new access token for Microsoft OneDrive", ["debug"]);} auto postData = appender!(string)(); postData ~= "client_id=" ~ clientId; postData ~= "&redirect_uri=" ~ redirectUrl; @@ -932,10 +936,10 @@ class OneDriveApi { private void checkAccessTokenExpired() { if (Clock.currTime() >= appConfig.accessTokenExpiration) { - addLogEntry("Microsoft OneDrive Access Token has expired. Must generate a new Microsoft OneDrive Access Token", ["debug"]); + if (debugLogging) {addLogEntry("Microsoft OneDrive Access Token has expired. 
Must generate a new Microsoft OneDrive Access Token", ["debug"]);} newToken(); } else { - addLogEntry("Existing Microsoft OneDrive Access Token Expires: " ~ to!string(appConfig.accessTokenExpiration), ["debug"]); + if (debugLogging) {addLogEntry("Existing Microsoft OneDrive Access Token Expires: " ~ to!string(appConfig.accessTokenExpiration), ["debug"]);} } } @@ -949,7 +953,7 @@ class OneDriveApi { } private void connect(HTTP.Method method, const(char)[] url, bool skipToken, CurlResponse response, string[string] requestHeaders=null) { - addLogEntry("Request URL = " ~ to!string(url), ["debug"]); + if (debugLogging) {addLogEntry("Request URL = " ~ to!string(url), ["debug"]);} // Check access token first in case the request is overridden if (!skipToken) addAccessTokenHeader(&requestHeaders); curlEngine.setResponseHolder(response); @@ -999,11 +1003,12 @@ class OneDriveApi { // Have we started downloading? if (currentDLPercent > 0){ // We have started downloading - addLogEntry("", ["debug"]); // Debug new line only - addLogEntry("Data Received = " ~ to!string(dlnow), ["debug"]); - addLogEntry("Expected Total = " ~ to!string(dltotal), ["debug"]); - addLogEntry("Percent Complete = " ~ to!string(currentDLPercent), ["debug"]); - + if (debugLogging) { + addLogEntry("", ["debug"]); // Debug new line only + addLogEntry("Data Received = " ~ to!string(dlnow), ["debug"]); + addLogEntry("Expected Total = " ~ to!string(dltotal), ["debug"]); + addLogEntry("Percent Complete = " ~ to!string(currentDLPercent), ["debug"]); + } // Every 5% download we need to increment the download bar // Has the user set a data rate limit? 
@@ -1030,7 +1035,7 @@ class OneDriveApi { // Has the data that has been received in a 5% window that we need to increment the progress bar at if ((dlnow > thisSegmentData) && (dlnow < nextSegmentData) && (previousProgressPercent != currentDLPercent) || (dlnow == dltotal)) { // Downloaded data equals approx 5% - addLogEntry("Incrementing Progress Bar using calculated 5% of data received", ["debug"]); + if (debugLogging) {addLogEntry("Incrementing Progress Bar using calculated 5% of data received", ["debug"]);} // 100% check if (currentDLPercent != 100) { @@ -1053,16 +1058,16 @@ class OneDriveApi { } // update values - addLogEntry("Setting previousProgressPercent to " ~ to!string(currentDLPercent), ["debug"]); + if (debugLogging) {addLogEntry("Setting previousProgressPercent to " ~ to!string(currentDLPercent), ["debug"]);} previousProgressPercent = currentDLPercent; - addLogEntry("Incrementing segmentCount", ["debug"]); + if (debugLogging) {addLogEntry("Incrementing segmentCount", ["debug"]);} segmentCount++; } } else { // Is currentDLPercent divisible by 5 leaving remainder 0 and does previousProgressPercent not equal currentDLPercent if ((isIdentical(fmod(currentDLPercent, percentCheck), 0.0)) && (previousProgressPercent != currentDLPercent)) { // currentDLPercent matches a new increment - addLogEntry("Incrementing Progress Bar using fmod match", ["debug"]); + if (debugLogging) {addLogEntry("Incrementing Progress Bar using fmod match", ["debug"]);} // 100% check if (currentDLPercent != 100) { @@ -1184,7 +1189,7 @@ class OneDriveApi { result = response.json(); // Print response if 'debugResponse' is flagged if (debugResponse){ - addLogEntry("Microsoft Graph API Response: " ~ response.dumpResponse(), ["debug"]); + if (debugLogging) {addLogEntry("Microsoft Graph API Response: " ~ response.dumpResponse(), ["debug"]);} } // Check http response code, raise a OneDriveException if the operation was not successfully performed @@ -1198,9 +1203,11 @@ class OneDriveApi { // 
Why are throwing a OneDriveException - do not do this for a 404 error as this is not required as we use a 404 if things are not online, to create them if (response.statusLine.code != 404) { - addLogEntry("response.statusLine.code: " ~ to!string(response.statusLine.code), ["debug"]); - addLogEntry("response.statusLine.reason: " ~ to!string(response.statusLine.reason), ["debug"]); - addLogEntry("actual curl response: " ~ to!string(response), ["debug"]); + if (debugLogging) { + addLogEntry("response.statusLine.code: " ~ to!string(response.statusLine.code), ["debug"]); + addLogEntry("response.statusLine.reason: " ~ to!string(response.statusLine.reason), ["debug"]); + addLogEntry("actual curl response: " ~ to!string(response), ["debug"]); + } } // For every HTTP error status code, including those from 3xx (other Redirection codes excluding 302), 4xx (Client Error), and 5xx (Server Error) series, will trigger the following line of code. @@ -1222,7 +1229,7 @@ class OneDriveApi { addLogEntry("Internet connectivity to Microsoft OneDrive service has been restored"); } // unset the fresh connect option as this then creates performance issues if left enabled - addLogEntry("Unsetting libcurl to use a fresh connection as this causes a performance impact if left enabled", ["debug"]); + if (debugLogging) {addLogEntry("Unsetting libcurl to use a fresh connection as this causes a performance impact if left enabled", ["debug"]);} curlEngine.http.handle.set(CurlOption.fresh_connect,0); } @@ -1239,11 +1246,13 @@ class OneDriveApi { // Handle 'curl' exception errors // Detail the curl exception, debug output only - addLogEntry("Handling a specific Curl exception:", ["debug"]); - addLogEntry(to!string(response), ["debug"]); + if (debugLogging) { + addLogEntry("Handling a specific Curl exception:", ["debug"]); + addLogEntry(to!string(response), ["debug"]); + } // Parse and display error message received from OneDrive - addLogEntry(callingFunction ~ "() - Generated a OneDrive 
CurlException", ["debug"]); + if (debugLogging) {addLogEntry(callingFunction ~ "() - Generated a OneDrive CurlException", ["debug"]);} auto errorArray = splitLines(exception.msg); string errorMessage = errorArray[0]; @@ -1256,18 +1265,24 @@ class OneDriveApi { addLogEntry("Internet connectivity to Microsoft OneDrive service has been interrupted .. re-trying in the background"); // What caused the initial curl exception? - if (canFind(errorMessage, "Couldn't resolve host name on handle")) addLogEntry("Unable to resolve server - DNS access blocked?", ["debug"]); - if (canFind(errorMessage, "Couldn't connect to server on handle")) addLogEntry("Unable to connect to server - HTTPS access blocked?", ["debug"]); + if (canFind(errorMessage, "Couldn't resolve host name on handle")) { + if (debugLogging) {addLogEntry("Unable to resolve server - DNS access blocked?", ["debug"]);} + } + if (canFind(errorMessage, "Couldn't connect to server on handle")) { + if (debugLogging) {addLogEntry("Unable to connect to server - HTTPS access blocked?", ["debug"]);} + } if (canFind(errorMessage, "Timeout was reached on handle")) { // Common cause is libcurl trying IPv6 DNS resolution when there are only IPv4 DNS servers available - addLogEntry("A libcurl timeout has been triggered - data transfer too slow, no DNS resolution response, no server response", ["verbose"]); - // There are 3 common causes for this issue: - // 1. Usually poor DNS resolution where libcurl flip/flops to use IPv6 and is unable to resolve - // 2. A device between the user and Microsoft OneDrive is unable to correctly handle HTTP/2 communication - // 3. No Internet access from this system at this point in time - addLogEntry(" - IPv6 DNS resolution issues may be causing timeouts. Consider setting 'ip_protocol_version' to IPv4 to potentially avoid this", ["verbose"]); - addLogEntry(" - HTTP/2 compatibility issues might also be interfering with your system. 
Use 'force_http_11' to switch to HTTP/1.1 to potentially avoid this", ["verbose"]); - addLogEntry(" - If these options do not resolve this timeout issue, please use --debug-https to diagnose this issue further.", ["verbose"]); + if (verboseLogging) { + addLogEntry("A libcurl timeout has been triggered - data transfer too slow, no DNS resolution response, no server response", ["verbose"]); + // There are 3 common causes for this issue: + // 1. Usually poor DNS resolution where libcurl flip/flops to use IPv6 and is unable to resolve + // 2. A device between the user and Microsoft OneDrive is unable to correctly handle HTTP/2 communication + // 3. No Internet access from this system at this point in time + addLogEntry(" - IPv6 DNS resolution issues may be causing timeouts. Consider setting 'ip_protocol_version' to IPv4 to potentially avoid this", ["verbose"]); + addLogEntry(" - HTTP/2 compatibility issues might also be interfering with your system. Use 'force_http_11' to switch to HTTP/1.1 to potentially avoid this", ["verbose"]); + addLogEntry(" - If these options do not resolve this timeout issue, please use --debug-https to diagnose this issue further.", ["verbose"]); + } } } else { // Some other 'libcurl' error was returned @@ -1350,11 +1365,13 @@ class OneDriveApi { **/ // Detail the OneDriveAPI exception, debug output only - addLogEntry("Handling a OneDrive API exception:", ["debug"]); - addLogEntry(to!string(response), ["debug"]); - - // Parse and display error message received from OneDrive - addLogEntry(callingFunction ~ "() - Generated a OneDriveException", ["debug"]); + if (debugLogging) { + addLogEntry("Handling a OneDrive API exception:", ["debug"]); + addLogEntry(to!string(response), ["debug"]); + + // Parse and display error message received from OneDrive + addLogEntry(callingFunction ~ "() - Generated a OneDriveException", ["debug"]); + } // Perform action based on the HTTP Status Code switch(exception.httpStatusCode) { @@ -1377,7 +1394,7 @@ class 
OneDriveApi { } // Read in the Retry-After HTTP header as set and delay as per this value before retrying the request thisBackOffInterval = response.getRetryAfterValue(); - addLogEntry("Using Retry-After Value = " ~ to!string(thisBackOffInterval), ["debug"]); + if (debugLogging) {addLogEntry("Using Retry-After Value = " ~ to!string(thisBackOffInterval), ["debug"]);} transientError = true; break; // Transient errors @@ -1387,7 +1404,7 @@ class OneDriveApi { // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request auto errorArray = splitLines(exception.msg); addLogEntry(to!string(errorArray[0]) ~ " when attempting to query the Microsoft Graph API Service - retrying applicable request in 30 seconds - Internal Thread ID: " ~ to!string(curlEngine.internalThreadId)); - addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + if (debugLogging) {addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]);} // Transient error - try again in 30 seconds thisBackOffInterval = 30; transientError = true; @@ -1435,17 +1452,21 @@ class OneDriveApi { } } - // When are we re-trying the API call? 
+ // set the current time for this thread currentTime = Clock.currTime(); currentTime.fracSecs = Duration.zero; - auto timeString = currentTime.toString(); - addLogEntry("Retry attempt: " ~ to!string(retryAttempts) ~ " - Internal Thread ID: " ~ to!string(curlEngine.internalThreadId), ["verbose"]); - addLogEntry(" This attempt timestamp: " ~ timeString, ["verbose"]); - // Detail when the next attempt will be tried - // Factor in the delay for curl to generate the exception - otherwise the next timestamp appears to be 'out' even though technically correct - auto nextRetry = currentTime + dur!"seconds"(thisBackOffInterval) + dur!"seconds"(timestampAlign); - addLogEntry(" Next retry in approx: " ~ to!string((thisBackOffInterval + timestampAlign)) ~ " seconds"); - addLogEntry(" Next retry approx: " ~ to!string(nextRetry), ["verbose"]); + + // If verbose logging, detail when we are re-trying the call + if (verboseLogging) { + auto timeString = currentTime.toString(); + addLogEntry("Retry attempt: " ~ to!string(retryAttempts) ~ " - Internal Thread ID: " ~ to!string(curlEngine.internalThreadId), ["verbose"]); + addLogEntry(" This attempt timestamp: " ~ timeString, ["verbose"]); + // Detail when the next attempt will be tried + // Factor in the delay for curl to generate the exception - otherwise the next timestamp appears to be 'out' even though technically correct + auto nextRetry = currentTime + dur!"seconds"(thisBackOffInterval) + dur!"seconds"(timestampAlign); + addLogEntry(" Next retry in approx: " ~ to!string((thisBackOffInterval + timestampAlign)) ~ " seconds"); + addLogEntry(" Next retry approx: " ~ to!string(nextRetry), ["verbose"]); + } // Thread sleep Thread.sleep(dur!"seconds"(thisBackOffInterval)); @@ -1496,7 +1517,7 @@ class OneDriveApi { // Configure libcurl to perform a fresh connection private void setFreshConnectOption() { - addLogEntry("Configuring libcurl to use a fresh connection for re-try", ["debug"]); + if (debugLogging) {addLogEntry("Configuring 
libcurl to use a fresh connection for re-try", ["debug"]);} curlEngine.http.handle.set(CurlOption.fresh_connect,1); } diff --git a/src/sqlite.d b/src/sqlite.d index 78430b26d..a9bf64080 100644 --- a/src/sqlite.d +++ b/src/sqlite.d @@ -54,17 +54,17 @@ struct Database { // Dump open statements void dump_open_statements() { - addLogEntry("Dumping open SQL statements:", ["debug"]); + if (debugLogging) {addLogEntry("Dumping open SQL statements:", ["debug"]);} auto p = sqlite3_next_stmt(pDb, null); while (p != null) { - addLogEntry(" Still Open: " ~ to!string(ifromStringz(sqlite3_sql(p))), ["debug"]); + if (debugLogging) {addLogEntry(" Still Open: " ~ to!string(ifromStringz(sqlite3_sql(p))), ["debug"]);} p = sqlite3_next_stmt(pDb, p); } } // Close open statements void close_open_statements() { - addLogEntry("Closing open SQL statements:", ["debug"]); + if (debugLogging) {addLogEntry("Closing open SQL statements:", ["debug"]);} auto p = sqlite3_next_stmt(pDb, null); while (p != null) { // The sqlite3_finalize() function is called to delete a prepared statement @@ -76,7 +76,7 @@ struct Database { // Count open statements int count_open_statements() { - addLogEntry("Counting open SQL statements", ["debug"]); + if (debugLogging) {addLogEntry("Counting open SQL statements", ["debug"]);} int openStatementCount = 0; auto p = sqlite3_next_stmt(pDb, null); while (p != null) { diff --git a/src/sync.d b/src/sync.d index 48db1510b..16a544429 100644 --- a/src/sync.d +++ b/src/sync.d @@ -206,7 +206,7 @@ class SyncEngine { // Create the specific task pool to process items in parallel processPool = new TaskPool(to!int(appConfig.getValueLong("threads"))); - addLogEntry("Initialised TaskPool worker with threads: " ~ to!string(processPool.size), ["debug"]); + if (debugLogging) {addLogEntry("Initialised TaskPool worker with threads: " ~ to!string(processPool.size), ["debug"]);} // Configure the class variable to consume the application configuration this.appConfig = appConfig; @@ -230,25 
+230,25 @@ class SyncEngine { // Configure the uploadOnly flag to capture if --upload-only was used if (appConfig.getValueBool("upload_only")) { - addLogEntry("Configuring uploadOnly flag to TRUE as --upload-only passed in or configured", ["debug"]); + if (debugLogging) {addLogEntry("Configuring uploadOnly flag to TRUE as --upload-only passed in or configured", ["debug"]);} this.uploadOnly = true; } // Configure the localDeleteAfterUpload flag if (appConfig.getValueBool("remove_source_files")) { - addLogEntry("Configuring localDeleteAfterUpload flag to TRUE as --remove-source-files passed in or configured", ["debug"]); + if (debugLogging) {addLogEntry("Configuring localDeleteAfterUpload flag to TRUE as --remove-source-files passed in or configured", ["debug"]);} this.localDeleteAfterUpload = true; } // Configure the disableDownloadValidation flag if (appConfig.getValueBool("disable_download_validation")) { - addLogEntry("Configuring disableDownloadValidation flag to TRUE as --disable-download-validation passed in or configured", ["debug"]); + if (debugLogging) {addLogEntry("Configuring disableDownloadValidation flag to TRUE as --disable-download-validation passed in or configured", ["debug"]);} this.disableDownloadValidation = true; } // Configure the disableUploadValidation flag if (appConfig.getValueBool("disable_upload_validation")) { - addLogEntry("Configuring disableUploadValidation flag to TRUE as --disable-upload-validation passed in or configured", ["debug"]); + if (debugLogging) {addLogEntry("Configuring disableUploadValidation flag to TRUE as --disable-upload-validation passed in or configured", ["debug"]);} this.disableUploadValidation = true; } @@ -315,10 +315,10 @@ class SyncEngine { // Did the user downgrade all HTTP operations to force HTTP 1.1 if (appConfig.getValueBool("force_http_11")) { // User is forcing downgrade to curl to use HTTP 1.1 for all operations - addLogEntry("Downgrading all HTTP operations to HTTP/1.1 due to user configuration", 
["verbose"]); + if (verboseLogging) {addLogEntry("Downgrading all HTTP operations to HTTP/1.1 due to user configuration", ["verbose"]);} } else { // Use curl defaults - addLogEntry("Using Curl defaults for HTTP operational protocol version (potentially HTTP/2)", ["debug"]); + if (debugLogging) {addLogEntry("Using Curl defaults for HTTP operational protocol version (potentially HTTP/2)", ["debug"]);} } } @@ -376,13 +376,13 @@ class SyncEngine { } // API was initialised - addLogEntry("Sync Engine Initialised with new Onedrive API instance", ["verbose"]); + if (verboseLogging) {addLogEntry("Sync Engine Initialised with new Onedrive API instance", ["verbose"]);} return true; } // Shutdown the sync engine, wait for anything in processPool to complete void shutdown() { - addLogEntry("SyncEngine: Waiting for all internal threads to complete", ["debug"]); + if (debugLogging) {addLogEntry("SyncEngine: Waiting for all internal threads to complete", ["debug"]);} shutdownProcessPool(); } @@ -392,7 +392,7 @@ class SyncEngine { if (processPool.size > 0) { // TaskPool is still configured for 'thread' size // Normal TaskPool shutdown process - addLogEntry("Shutting down processPool in a thread blocking manner", ["debug"]); + if (debugLogging) {addLogEntry("Shutting down processPool in a thread blocking manner", ["debug"]);} // All worker threads are daemon threads which are automatically terminated when all non-daemon threads have terminated. processPool.finish(true); // If blocking argument is true, wait for all worker threads to terminate before returning. 
} @@ -411,10 +411,10 @@ class SyncEngine { // Get Default Drive Details for this Account try { - addLogEntry("Getting Account Default Drive Details", ["debug"]); + if (debugLogging) {addLogEntry("Getting Account Default Drive Details", ["debug"]);} defaultOneDriveDriveDetails = getDefaultDriveApiInstance.getDefaultDriveDetails(); } catch (OneDriveException exception) { - addLogEntry("defaultOneDriveDriveDetails = getDefaultDriveApiInstance.getDefaultDriveDetails() generated a OneDriveException", ["debug"]); + if (debugLogging) {addLogEntry("defaultOneDriveDriveDetails = getDefaultDriveApiInstance.getDefaultDriveDetails() generated a OneDriveException", ["debug"]);} string thisFunctionName = getFunctionName!({}); if ((exception.httpStatusCode == 400) || (exception.httpStatusCode == 401)) { @@ -430,7 +430,7 @@ class SyncEngine { // If the JSON response is a correct JSON object, and has an 'id' we can set these details if ((defaultOneDriveDriveDetails.type() == JSONType.object) && (hasId(defaultOneDriveDriveDetails))) { - addLogEntry("OneDrive Account Default Drive Details: " ~ to!string(defaultOneDriveDriveDetails), ["debug"]); + if (debugLogging) {addLogEntry("OneDrive Account Default Drive Details: " ~ to!string(defaultOneDriveDriveDetails), ["debug"]);} appConfig.accountType = defaultOneDriveDriveDetails["driveType"].str; appConfig.defaultDriveId = defaultOneDriveDriveDetails["id"].str; @@ -455,11 +455,13 @@ class SyncEngine { // - cachedOnlineDriveData.quotaRemaining; // What did we set based on the data from the JSON and cached drive data - addLogEntry("appConfig.accountType = " ~ appConfig.accountType, ["debug"]); - addLogEntry("appConfig.defaultDriveId = " ~ appConfig.defaultDriveId, ["debug"]); - addLogEntry("cachedOnlineDriveData.quotaRemaining = " ~ to!string(cachedOnlineDriveData.quotaRemaining), ["debug"]); - addLogEntry("cachedOnlineDriveData.quotaAvailable = " ~ to!string(cachedOnlineDriveData.quotaAvailable), ["debug"]); - 
addLogEntry("cachedOnlineDriveData.quotaRestricted = " ~ to!string(cachedOnlineDriveData.quotaRestricted), ["debug"]); + if (debugLogging) { + addLogEntry("appConfig.accountType = " ~ appConfig.accountType, ["debug"]); + addLogEntry("appConfig.defaultDriveId = " ~ appConfig.defaultDriveId, ["debug"]); + addLogEntry("cachedOnlineDriveData.quotaRemaining = " ~ to!string(cachedOnlineDriveData.quotaRemaining), ["debug"]); + addLogEntry("cachedOnlineDriveData.quotaAvailable = " ~ to!string(cachedOnlineDriveData.quotaAvailable), ["debug"]); + addLogEntry("cachedOnlineDriveData.quotaRestricted = " ~ to!string(cachedOnlineDriveData.quotaRestricted), ["debug"]); + } } else { // Handle the invalid JSON response throw new AccountDetailsException(); @@ -485,10 +487,10 @@ class SyncEngine { // Get Default Root Details for this Account try { - addLogEntry("Getting Account Default Root Details", ["debug"]); + if (debugLogging) {addLogEntry("Getting Account Default Root Details", ["debug"]);} defaultOneDriveRootDetails = getDefaultRootApiInstance.getDefaultRootDetails(); } catch (OneDriveException exception) { - addLogEntry("defaultOneDriveRootDetails = getDefaultRootApiInstance.getDefaultRootDetails() generated a OneDriveException", ["debug"]); + if (debugLogging) {addLogEntry("defaultOneDriveRootDetails = getDefaultRootApiInstance.getDefaultRootDetails() generated a OneDriveException", ["debug"]);} string thisFunctionName = getFunctionName!({}); if ((exception.httpStatusCode == 400) || (exception.httpStatusCode == 401)) { @@ -504,9 +506,9 @@ class SyncEngine { // If the JSON response is a correct JSON object, and has an 'id' we can set these details if ((defaultOneDriveRootDetails.type() == JSONType.object) && (hasId(defaultOneDriveRootDetails))) { - addLogEntry("OneDrive Account Default Root Details: " ~ to!string(defaultOneDriveRootDetails), ["debug"]); + if (debugLogging) {addLogEntry("OneDrive Account Default Root Details: " ~ to!string(defaultOneDriveRootDetails), 
["debug"]);} appConfig.defaultRootId = defaultOneDriveRootDetails["id"].str; - addLogEntry("appConfig.defaultRootId = " ~ appConfig.defaultRootId, ["debug"]); + if (debugLogging) {addLogEntry("appConfig.defaultRootId = " ~ appConfig.defaultRootId, ["debug"]);} // Save the item to the database, so the account root drive is is always going to be present in the DB saveItem(defaultOneDriveRootDetails); @@ -547,7 +549,7 @@ class SyncEngine { } // Log the final decision and conditions - addLogEntry(logMessage, ["debug"]); + if (debugLogging) {addLogEntry(logMessage, ["debug"]);} } // Perform a sync of the OneDrive Account @@ -560,7 +562,7 @@ class SyncEngine { void syncOneDriveAccountToLocalDisk() { // performFullScanTrueUp value - addLogEntry("Perform a Full Scan True-Up: " ~ to!string(appConfig.fullScanTrueUpRequired), ["debug"]); + if (debugLogging) {addLogEntry("Perform a Full Scan True-Up: " ~ to!string(appConfig.fullScanTrueUpRequired), ["debug"]);} // Fetch the API response of /delta to track changes that were performed online fetchOneDriveDeltaAPIResponse(); @@ -585,7 +587,7 @@ class SyncEngine { // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched if (selectiveSync.isDirNameExcluded(remoteItem.name)) { // This directory name is excluded - addLogEntry("Skipping path - excluded by skip_dir config: " ~ remoteItem.name, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping path - excluded by skip_dir config: " ~ remoteItem.name, ["verbose"]);} continue; } } @@ -623,7 +625,7 @@ class SyncEngine { // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched if (selectiveSync.isDirNameExcluded(remoteItem.name)) { // This directory name is excluded - addLogEntry("Skipping path - excluded by skip_dir config: " ~ remoteItem.name, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping path - excluded by skip_dir config: " ~ remoteItem.name, ["verbose"]);} 
continue; } } @@ -634,9 +636,11 @@ class SyncEngine { } // Debug log output - addLogEntry("Fetching /delta API response for:", ["debug"]); - addLogEntry(" remoteItem.remoteDriveId: " ~ remoteItem.remoteDriveId, ["debug"]); - addLogEntry(" remoteItem.remoteId: " ~ remoteItem.remoteId, ["debug"]); + if (debugLogging) { + addLogEntry("Fetching /delta API response for:", ["debug"]); + addLogEntry(" remoteItem.remoteDriveId: " ~ remoteItem.remoteDriveId, ["debug"]); + addLogEntry(" remoteItem.remoteId: " ~ remoteItem.remoteId, ["debug"]); + } // Check this OneDrive Business Shared Folder for changes fetchOneDriveDeltaAPIResponse(remoteItem.remoteDriveId, remoteItem.remoteId, remoteItem.name); @@ -673,7 +677,7 @@ class SyncEngine { Item sharedFilesPath = makeItem(createFakeResponse(baseName(appConfig.configuredBusinessSharedFilesDirectoryName))); // Add DB record to the local database - addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]); + if (debugLogging) {addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]);} itemDB.upsert(sharedFilesPath); } else { // Folder exists locally, is the folder in the database? 
@@ -684,13 +688,13 @@ class SyncEngine { Item sharedFilesPath = makeItem(createFakeResponse(baseName(appConfig.configuredBusinessSharedFilesDirectoryName))); // Add DB record to the local database - addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]); + if (debugLogging) {addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]);} itemDB.upsert(sharedFilesPath); } } // Query for OneDrive Business Shared Files - addLogEntry("Checking for any applicable OneDrive Business Shared Files which need to be synced locally", ["verbose"]); + if (verboseLogging) {addLogEntry("Checking for any applicable OneDrive Business Shared Files which need to be synced locally", ["verbose"]);} queryBusinessSharedObjects(); // Download any OneDrive Business Shared Files @@ -703,7 +707,7 @@ class SyncEngine { // Cleanup arrays when used in --monitor loops void cleanupArrays() { - addLogEntry("Cleaning up all internal arrays used when processing data", ["debug"]); + if (debugLogging) {addLogEntry("Cleaning up all internal arrays used when processing data", ["debug"]);} // Multi Dimensional Arrays idsToDelete.length = 0; @@ -729,7 +733,7 @@ class SyncEngine { // Perform Garbage Collection on this destroyed curl engine GC.collect(); - addLogEntry("Cleaning of internal arrays complete", ["debug"]); + if (debugLogging) {addLogEntry("Cleaning of internal arrays complete", ["debug"]);} } // Configure singleDirectoryScope = true if this function is called @@ -764,7 +768,7 @@ class SyncEngine { if (onlinePathData.type() == JSONType.object) { // Valid JSON item was returned searchItem = makeItem(onlinePathData); - addLogEntry("searchItem: " ~ to!string(searchItem), ["debug"]); + if (debugLogging) {addLogEntry("searchItem: " ~ to!string(searchItem), ["debug"]);} // Is this item a potential Shared Folder? 
// Is this JSON a remote object @@ -809,17 +813,21 @@ class SyncEngine { // Was a driveId provided as an input if (strip(driveIdToQuery).empty) { // No provided driveId to query, use the account default - addLogEntry("driveIdToQuery was empty, setting to appConfig.defaultDriveId", ["debug"]); driveIdToQuery = appConfig.defaultDriveId; - addLogEntry("driveIdToQuery: " ~ driveIdToQuery, ["debug"]); + if (debugLogging) { + addLogEntry("driveIdToQuery was empty, setting to appConfig.defaultDriveId", ["debug"]); + addLogEntry("driveIdToQuery: " ~ driveIdToQuery, ["debug"]); + } } // Was an itemId provided as an input if (strip(itemIdToQuery).empty) { // No provided itemId to query, use the account default - addLogEntry("itemIdToQuery was empty, setting to appConfig.defaultRootId", ["debug"]); itemIdToQuery = appConfig.defaultRootId; - addLogEntry("itemIdToQuery: " ~ itemIdToQuery, ["debug"]); + if (debugLogging) { + addLogEntry("itemIdToQuery was empty, setting to appConfig.defaultRootId", ["debug"]); + addLogEntry("itemIdToQuery: " ~ itemIdToQuery, ["debug"]); + } } // What OneDrive API query do we use? @@ -869,7 +877,7 @@ class SyncEngine { // Do we need to perform a Full Scan True Up? Is 'appConfig.fullScanTrueUpRequired' set to 'true'? 
if (appConfig.fullScanTrueUpRequired) { addLogEntry("Performing a full scan of online data to ensure consistent local state"); - addLogEntry("Setting currentDeltaLink = null", ["debug"]); + if (debugLogging) {addLogEntry("Setting currentDeltaLink = null", ["debug"]);} currentDeltaLink = null; } else { // Try and get the current Delta Link from the internal cache, this saves a DB I/O call @@ -880,15 +888,15 @@ class SyncEngine { // Try and get the current delta link from the database for this DriveID and RootID databaseDeltaLink = itemDB.getDeltaLink(driveIdToQuery, itemIdToQuery); if (!databaseDeltaLink.empty) { - addLogEntry("Using database stored deltaLink", ["debug"]); + if (debugLogging) {addLogEntry("Using database stored deltaLink", ["debug"]);} currentDeltaLink = databaseDeltaLink; } else { - addLogEntry("Zero deltaLink available for use, we will be performing a full online scan", ["debug"]); + if (debugLogging) {addLogEntry("Zero deltaLink available for use, we will be performing a full online scan", ["debug"]);} currentDeltaLink = null; } } else { // Log that we are using the deltaLink for cache - addLogEntry("Using cached deltaLink", ["debug"]); + if (debugLogging) {addLogEntry("Using cached deltaLink", ["debug"]);} } } @@ -898,7 +906,7 @@ class SyncEngine { addProcessingLogHeaderEntry("Fetching items from the OneDrive API for Drive ID: " ~ driveIdToQuery, appConfig.verbosityCount); } } else { - addLogEntry("Fetching /delta response from the OneDrive API for Drive ID: " ~ driveIdToQuery, ["verbose"]); + if (verboseLogging) {addLogEntry("Fetching /delta response from the OneDrive API for Drive ID: " ~ driveIdToQuery, ["verbose"]);} } // Create a new API Instance for querying the actual /delta and initialise it @@ -936,7 +944,7 @@ class SyncEngine { } // Handle the invalid JSON response and retry - addLogEntry("ERROR: Query of the OneDrive API via deltaChanges = getDeltaChangesByItemId() returned an invalid JSON response", ["debug"]); + if (debugLogging) 
{addLogEntry("ERROR: Query of the OneDrive API via deltaChanges = getDeltaChangesByItemId() returned an invalid JSON response", ["debug"]);} deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, currentDeltaLink, getDeltaDataOneDriveApiInstance); } } @@ -950,7 +958,7 @@ class SyncEngine { addProcessingDotEntry(); } } else { - addLogEntry("Processing API Response Bundle: " ~ to!string(responseBundleCount) ~ " - Quantity of 'changes|items' in this bundle to process: " ~ to!string(nrChanges), ["verbose"]); + if (verboseLogging) {addLogEntry("Processing API Response Bundle: " ~ to!string(responseBundleCount) ~ " - Quantity of 'changes|items' in this bundle to process: " ~ to!string(nrChanges), ["verbose"]);} } // Update the count of items received @@ -962,7 +970,7 @@ class SyncEngine { // @odata.nextLink is the pointer within the API to the next '200+' JSON bundle - this is the checkpoint link for this bundle // This URL changes between JSON bundle sets // Log the action of setting currentDeltaLink to @odata.nextLink - addLogEntry("Setting currentDeltaLink to @odata.nextLink: " ~ deltaChanges["@odata.nextLink"].str, ["debug"]); + if (debugLogging) {addLogEntry("Setting currentDeltaLink to @odata.nextLink: " ~ deltaChanges["@odata.nextLink"].str, ["debug"]);} // Update currentDeltaLink to @odata.nextLink for the next '200+' JSON bundle - this is the checkpoint link for this bundle currentDeltaLink = deltaChanges["@odata.nextLink"].str; @@ -988,7 +996,7 @@ class SyncEngine { // Finished processing /delta JSON response from the OneDrive API // Log the action of setting currentDeltaLink to @odata.deltaLink - addLogEntry("Setting currentDeltaLink to (@odata.deltaLink): " ~ deltaChanges["@odata.deltaLink"].str, ["debug"]); + if (debugLogging) {addLogEntry("Setting currentDeltaLink to (@odata.deltaLink): " ~ deltaChanges["@odata.deltaLink"].str, ["debug"]);} // Update currentDeltaLink to @odata.deltaLink as the final checkpoint URL for this entire JSON 
response set currentDeltaLink = deltaChanges["@odata.deltaLink"].str; @@ -1042,7 +1050,7 @@ class SyncEngine { GC.collect(); // To finish off the JSON processing items, this is needed to reflect this in the log - addLogEntry("------------------------------------------------------------------", ["debug"]); + if (debugLogging) {addLogEntry("------------------------------------------------------------------", ["debug"]);} // Log that we have finished querying the /delta API if (appConfig.verbosityCount == 0) { @@ -1051,12 +1059,12 @@ class SyncEngine { completeProcessingDots(); } } else { - addLogEntry("Finished processing /delta JSON response from the OneDrive API", ["verbose"]); + if (verboseLogging) {addLogEntry("Finished processing /delta JSON response from the OneDrive API", ["verbose"]);} } // If this was set, now unset it, as this will have been completed, so that for a true up, we dont do a double full scan if (appConfig.fullScanTrueUpRequired) { - addLogEntry("Unsetting fullScanTrueUpRequired as this has been performed", ["debug"]); + if (debugLogging) {addLogEntry("Unsetting fullScanTrueUpRequired as this has been performed", ["debug"]);} appConfig.fullScanTrueUpRequired = false; } @@ -1066,11 +1074,13 @@ class SyncEngine { GC.collect(); } else { // Why are are generating a /delta response - addLogEntry("Why are we generating a /delta response:", ["debug"]); - addLogEntry(" singleDirectoryScope: " ~ to!string(singleDirectoryScope), ["debug"]); - addLogEntry(" nationalCloudDeployment: " ~ to!string(nationalCloudDeployment), ["debug"]); - addLogEntry(" cleanupLocalFiles: " ~ to!string(cleanupLocalFiles), ["debug"]); - addLogEntry(" sharedFolderName: " ~ sharedFolderName, ["debug"]); + if (debugLogging) { + addLogEntry("Why are we generating a /delta response:", ["debug"]); + addLogEntry(" singleDirectoryScope: " ~ to!string(singleDirectoryScope), ["debug"]); + addLogEntry(" nationalCloudDeployment: " ~ to!string(nationalCloudDeployment), ["debug"]); + 
addLogEntry(" cleanupLocalFiles: " ~ to!string(cleanupLocalFiles), ["debug"]); + addLogEntry(" sharedFolderName: " ~ sharedFolderName, ["debug"]); + } // What 'path' are we going to start generating the response for string pathToQuery; @@ -1100,7 +1110,7 @@ class SyncEngine { // How many changes were returned? ulong nrChanges = count(deltaChanges["value"].array); int changeCount = 0; - addLogEntry("API Response Bundle: " ~ to!string(responseBundleCount) ~ " - Quantity of 'changes|items' in this bundle to process: " ~ to!string(nrChanges), ["debug"]); + if (debugLogging) {addLogEntry("API Response Bundle: " ~ to!string(responseBundleCount) ~ " - Quantity of 'changes|items' in this bundle to process: " ~ to!string(nrChanges), ["debug"]);} // Update the count of items received jsonItemsReceived = jsonItemsReceived + nrChanges; @@ -1120,7 +1130,7 @@ class SyncEngine { GC.collect(); // To finish off the JSON processing items, this is needed to reflect this in the log - addLogEntry("------------------------------------------------------------------", ["debug"]); + if (debugLogging) {addLogEntry("------------------------------------------------------------------", ["debug"]);} // Log that we have finished generating our self generated /delta response if (!appConfig.suppressLoggingOutput) { @@ -1137,12 +1147,13 @@ class SyncEngine { GC.collect(); // We have JSON items received from the OneDrive API - addLogEntry("Number of JSON Objects received from OneDrive API: " ~ to!string(jsonItemsReceived), ["debug"]); - addLogEntry("Number of JSON Objects already processed (root and deleted items): " ~ to!string((jsonItemsReceived - jsonItemsToProcess.length)), ["debug"]); - - // We should have now at least processed all the JSON items as returned by the /delta call - // Additionally, we should have a new array, that now contains all the JSON items we need to process that are non 'root' or deleted items - addLogEntry("Number of JSON items to process is: " ~ 
to!string(jsonItemsToProcess.length), ["debug"]); + if (debugLogging) { + addLogEntry("Number of JSON Objects received from OneDrive API: " ~ to!string(jsonItemsReceived), ["debug"]); + addLogEntry("Number of JSON Objects already processed (root and deleted items): " ~ to!string((jsonItemsReceived - jsonItemsToProcess.length)), ["debug"]); + // We should have now at least processed all the JSON items as returned by the /delta call + // Additionally, we should have a new array, that now contains all the JSON items we need to process that are non 'root' or deleted items + addLogEntry("Number of JSON items to process is: " ~ to!string(jsonItemsToProcess.length), ["debug"]); + } // Are there items to process? if (jsonItemsToProcess.length > 0) { @@ -1167,14 +1178,14 @@ class SyncEngine { addProcessingDotEntry(); } } else { - addLogEntry("Processing OneDrive JSON item batch [" ~ to!string(batchesProcessed) ~ "/" ~ to!string(batchCount) ~ "] to ensure consistent local state", ["verbose"]); + if (verboseLogging) {addLogEntry("Processing OneDrive JSON item batch [" ~ to!string(batchesProcessed) ~ "/" ~ to!string(batchCount) ~ "] to ensure consistent local state", ["verbose"]);} } // Process the batch processJSONItemsInBatch(batchOfJSONItems, batchesProcessed, batchCount); // To finish off the JSON processing items, this is needed to reflect this in the log - addLogEntry("------------------------------------------------------------------", ["debug"]); + if (debugLogging) {addLogEntry("------------------------------------------------------------------", ["debug"]);} } if (appConfig.verbosityCount == 0) { @@ -1186,8 +1197,10 @@ class SyncEngine { } // Debug output - what was processed - addLogEntry("Number of JSON items to process is: " ~ to!string(jsonItemsToProcess.length), ["debug"]); - addLogEntry("Number of JSON items processed was: " ~ to!string(processedCount), ["debug"]); + if (debugLogging) { + addLogEntry("Number of JSON items to process is: " ~ 
to!string(jsonItemsToProcess.length), ["debug"]); + addLogEntry("Number of JSON items processed was: " ~ to!string(processedCount), ["debug"]); + } // Free up memory and items processed as it is pointless now having this data around jsonItemsToProcess = []; @@ -1221,11 +1234,15 @@ class SyncEngine { bool itemIdMatchesDefaultRootId = false; bool itemNameExplicitMatchRoot = false; string objectParentDriveId; - auto jsonProcessingStartTime = Clock.currTime(); + MonoTime jsonProcessingStartTime; - addLogEntry("------------------------------------------------------------------", ["debug"]); - addLogEntry("Processing OneDrive Item " ~ to!string(changeCount) ~ " of " ~ to!string(nrChanges) ~ " from API Response Bundle " ~ to!string(responseBundleCount), ["debug"]); - addLogEntry("Raw JSON OneDrive Item: " ~ to!string(onedriveJSONItem), ["debug"]); + // Debugging the processing start of the JSON item + if (debugLogging) { + addLogEntry("------------------------------------------------------------------", ["debug"]); + jsonProcessingStartTime = MonoTime.currTime(); + addLogEntry("Processing OneDrive Item " ~ to!string(changeCount) ~ " of " ~ to!string(nrChanges) ~ " from API Response Bundle " ~ to!string(responseBundleCount), ["debug"]); + addLogEntry("Raw JSON OneDrive Item: " ~ to!string(onedriveJSONItem), ["debug"]); + } // What is this item's id thisItemId = onedriveJSONItem["id"].str; @@ -1234,7 +1251,7 @@ class SyncEngine { itemIsDeletedOnline = isItemDeleted(onedriveJSONItem); if (!itemIsDeletedOnline) { // This is not a deleted item - addLogEntry("This item is not a OneDrive deletion change", ["debug"]); + if (debugLogging) {addLogEntry("This item is not a OneDrive deletion change", ["debug"]);} // Only calculate this once itemIsRoot = isItemRoot(onedriveJSONItem); @@ -1245,16 +1262,18 @@ class SyncEngine { // Test is this is the OneDrive Users Root? 
// Debug output of change evaluation items - addLogEntry("defaultRootId = " ~ appConfig.defaultRootId, ["debug"]); - addLogEntry("'search id' = " ~ thisItemId, ["debug"]); - addLogEntry("id == defaultRootId = " ~ to!string(itemIdMatchesDefaultRootId), ["debug"]); - addLogEntry("isItemRoot(onedriveJSONItem) = " ~ to!string(itemIsRoot), ["debug"]); - addLogEntry("onedriveJSONItem['name'].str == 'root' = " ~ to!string(itemNameExplicitMatchRoot), ["debug"]); - addLogEntry("itemHasParentReferenceId = " ~ to!string(itemHasParentReferenceId), ["debug"]); + if (debugLogging) { + addLogEntry("defaultRootId = " ~ appConfig.defaultRootId, ["debug"]); + addLogEntry("'search id' = " ~ thisItemId, ["debug"]); + addLogEntry("id == defaultRootId = " ~ to!string(itemIdMatchesDefaultRootId), ["debug"]); + addLogEntry("isItemRoot(onedriveJSONItem) = " ~ to!string(itemIsRoot), ["debug"]); + addLogEntry("onedriveJSONItem['name'].str == 'root' = " ~ to!string(itemNameExplicitMatchRoot), ["debug"]); + addLogEntry("itemHasParentReferenceId = " ~ to!string(itemHasParentReferenceId), ["debug"]); + } if ( (itemIdMatchesDefaultRootId || singleDirectoryScope) && itemIsRoot && itemNameExplicitMatchRoot) { // This IS a OneDrive Root item or should be classified as such in the case of 'singleDirectoryScope' - addLogEntry("JSON item will flagged as a 'root' item", ["debug"]); + if (debugLogging) {addLogEntry("JSON item will flagged as a 'root' item", ["debug"]);} handleItemAsRootObject = true; } } @@ -1263,11 +1282,13 @@ class SyncEngine { // Is this a confirmed 'root' item, has no Parent ID, or is a Deleted Item if (handleItemAsRootObject || !itemHasParentReferenceId || itemIsDeletedOnline){ // Is a root item, has no id in parentReference or is a OneDrive deleted item - addLogEntry("objectParentDriveId = " ~ objectParentDriveId, ["debug"]); - addLogEntry("handleItemAsRootObject = " ~ to!string(handleItemAsRootObject), ["debug"]); - addLogEntry("itemHasParentReferenceId = " ~ 
to!string(itemHasParentReferenceId), ["debug"]); - addLogEntry("itemIsDeletedOnline = " ~ to!string(itemIsDeletedOnline), ["debug"]); - addLogEntry("Handling change immediately as 'root item', or has no parent reference id or is a deleted item", ["debug"]); + if (debugLogging) { + addLogEntry("objectParentDriveId = " ~ objectParentDriveId, ["debug"]); + addLogEntry("handleItemAsRootObject = " ~ to!string(handleItemAsRootObject), ["debug"]); + addLogEntry("itemHasParentReferenceId = " ~ to!string(itemHasParentReferenceId), ["debug"]); + addLogEntry("itemIsDeletedOnline = " ~ to!string(itemIsDeletedOnline), ["debug"]); + addLogEntry("Handling change immediately as 'root item', or has no parent reference id or is a deleted item", ["debug"]); + } // OK ... do something with this JSON post here .... processRootAndDeletedJSONItems(onedriveJSONItem, objectParentDriveId, handleItemAsRootObject, itemIsDeletedOnline, itemHasParentReferenceId); @@ -1275,7 +1296,7 @@ class SyncEngine { // Do we need to update this RAW JSON from OneDrive? if ( (objectParentDriveId != appConfig.defaultDriveId) && (appConfig.accountType == "business") && (appConfig.getValueBool("sync_business_shared_items")) ) { // Potentially need to update this JSON data - addLogEntry("Potentially need to update this source JSON .... need to check the database", ["debug"]); + if (debugLogging) {addLogEntry("Potentially need to update this source JSON .... need to check the database", ["debug"]);} // Check the DB for 'remote' objects, searching 'remoteDriveId' and 'remoteId' items for this remoteItem.driveId and remoteItem.id Item remoteDBItem; @@ -1284,17 +1305,19 @@ class SyncEngine { // Is the data that was returned from the database what we are looking for? 
if ((remoteDBItem.remoteDriveId == objectParentDriveId) && (remoteDBItem.remoteId == thisItemId)) { // Yes, this is the record we are looking for - addLogEntry("DB Item response for remoteDBItem: " ~ to!string(remoteDBItem), ["debug"]); + if (debugLogging) {addLogEntry("DB Item response for remoteDBItem: " ~ to!string(remoteDBItem), ["debug"]);} // Must compare remoteDBItem.name with remoteItem.name if (remoteDBItem.name != onedriveJSONItem["name"].str) { // Update JSON Item string actualOnlineName = onedriveJSONItem["name"].str; - addLogEntry("Updating source JSON 'name' to that which is the actual local directory", ["debug"]); - addLogEntry("onedriveJSONItem['name'] was: " ~ onedriveJSONItem["name"].str, ["debug"]); - addLogEntry("Updating onedriveJSONItem['name'] to: " ~ remoteDBItem.name, ["debug"]); + if (debugLogging) { + addLogEntry("Updating source JSON 'name' to that which is the actual local directory", ["debug"]); + addLogEntry("onedriveJSONItem['name'] was: " ~ onedriveJSONItem["name"].str, ["debug"]); + addLogEntry("Updating onedriveJSONItem['name'] to: " ~ remoteDBItem.name, ["debug"]); + } onedriveJSONItem["name"] = remoteDBItem.name; - addLogEntry("onedriveJSONItem['name'] now: " ~ onedriveJSONItem["name"].str, ["debug"]); + if (debugLogging) {addLogEntry("onedriveJSONItem['name'] now: " ~ onedriveJSONItem["name"].str, ["debug"]);} // Add the original name to the JSON onedriveJSONItem["actualOnlineName"] = actualOnlineName; } @@ -1315,14 +1338,16 @@ class SyncEngine { // Add this JSON item for further processing if this is not being discarded if (!discardDeltaJSONItem) { // Add onedriveJSONItem to jsonItemsToProcess - addLogEntry("Adding this Raw JSON OneDrive Item to jsonItemsToProcess array for further processing", ["debug"]); + if (debugLogging) {addLogEntry("Adding this Raw JSON OneDrive Item to jsonItemsToProcess array for further processing", ["debug"]);} jsonItemsToProcess ~= onedriveJSONItem; } } // How long to initially process this JSON 
item - auto jsonProcessingElapsedTime = Clock.currTime() - jsonProcessingStartTime; - addLogEntry("Initial JSON item processing time: " ~ to!string(jsonProcessingElapsedTime), ["debug"]); + if (debugLogging) { + Duration jsonProcessingElapsedTime = MonoTime.currTime() - jsonProcessingStartTime; + addLogEntry("Initial JSON item processing time: " ~ to!string(jsonProcessingElapsedTime), ["debug"]); + } } // Process 'root' and 'deleted' OneDrive JSON items @@ -1348,7 +1373,7 @@ class SyncEngine { // 3. Was detected by an input flag as to be handled as a root item regardless of actual status if ((handleItemAsRootObject) || (!itemHasParentReferenceId)) { - addLogEntry("Handing JSON object as OneDrive 'root' object", ["debug"]); + if (debugLogging) {addLogEntry("Handing JSON object as OneDrive 'root' object", ["debug"]);} if (!existingDBEntry) { // we have not seen this item before saveItem(onedriveJSONItem); @@ -1356,7 +1381,7 @@ class SyncEngine { } } else { // Change is to delete an item - addLogEntry("Handing a OneDrive Deleted Item", ["debug"]); + if (debugLogging) {addLogEntry("Handing a OneDrive Deleted Item", ["debug"]);} if (existingDBEntry) { // Is the item to delete locally actually in sync with OneDrive currently? // What is the source of this item data? 
@@ -1366,7 +1391,7 @@ class SyncEngine { string localPathToDelete = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.parentId) ~ "/" ~ existingDatabaseItem.name; if (isItemSynced(existingDatabaseItem, localPathToDelete, itemSource)) { // Flag to delete - addLogEntry("Flagging to delete item locally: " ~ to!string(onedriveJSONItem), ["debug"]); + if (debugLogging) {addLogEntry("Flagging to delete item locally: " ~ to!string(onedriveJSONItem), ["debug"]);} idsToDelete ~= [thisItemDriveId, thisItemId]; } else { // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not @@ -1376,7 +1401,7 @@ class SyncEngine { } } else { // Flag to ignore - addLogEntry("Flagging item to skip: " ~ to!string(onedriveJSONItem), ["debug"]); + if (debugLogging) {addLogEntry("Flagging item to skip: " ~ to!string(onedriveJSONItem), ["debug"]);} skippedItems.insert(thisItemId); } } @@ -1386,16 +1411,19 @@ class SyncEngine { void processJSONItemsInBatch(JSONValue[] array, ulong batchGroup, ulong batchCount) { ulong batchElementCount = array.length; + MonoTime jsonProcessingStartTime; foreach (i, onedriveJSONItem; array.enumerate) { // Use the JSON elements rather can computing a DB struct via makeItem() ulong elementCount = i +1; - auto jsonProcessingStartTime = Clock.currTime(); + jsonProcessingStartTime = MonoTime.currTime(); // To show this is the processing for this particular item, start off with this breaker line - addLogEntry("------------------------------------------------------------------", ["debug"]); - addLogEntry("Processing OneDrive JSON item " ~ to!string(elementCount) ~ " of " ~ to!string(batchElementCount) ~ " as part of JSON Item Batch " ~ to!string(batchGroup) ~ " of " ~ to!string(batchCount), ["debug"]); - addLogEntry("Raw JSON OneDrive Item (Batched Item): " ~ to!string(onedriveJSONItem), ["debug"]); + if (debugLogging) { + 
addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Processing OneDrive JSON item " ~ to!string(elementCount) ~ " of " ~ to!string(batchElementCount) ~ " as part of JSON Item Batch " ~ to!string(batchGroup) ~ " of " ~ to!string(batchCount), ["debug"]); + addLogEntry("Raw JSON OneDrive Item (Batched Item): " ~ to!string(onedriveJSONItem), ["debug"]); + } // Configure required items from the JSON elements string thisItemId = onedriveJSONItem["id"].str; @@ -1426,7 +1454,7 @@ class SyncEngine { if (parentInDatabase) { // Calculate this items path newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; - addLogEntry("JSON Item calculated full path is: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("JSON Item calculated full path is: " ~ newItemPath, ["debug"]);} } else { // Parent not in the database // Is the parent a 'folder' from another user? ie - is this a 'shared folder' that has been shared with us? @@ -1434,53 +1462,55 @@ class SyncEngine { // Lets determine why? if (thisItemDriveId == appConfig.defaultDriveId) { // Parent path does not exist - flagging as unwanted - addLogEntry("Flagging as unwanted: thisItemDriveId (" ~ thisItemDriveId ~ "), thisItemParentId (" ~ thisItemParentId ~ ") not in local database", ["debug"]); + if (debugLogging) {addLogEntry("Flagging as unwanted: thisItemDriveId (" ~ thisItemDriveId ~ "), thisItemParentId (" ~ thisItemParentId ~ ") not in local database", ["debug"]);} // Was this a skipped item? if (thisItemParentId in skippedItems) { // Parent is a skipped item - addLogEntry("Reason: thisItemParentId listed within skippedItems", ["debug"]); + if (debugLogging) {addLogEntry("Reason: thisItemParentId listed within skippedItems", ["debug"]);} } else { // Parent is not in the database, as we are not creating it - addLogEntry("Reason: Parent ID is not in the DB .. 
", ["debug"]); + if (debugLogging) {addLogEntry("Reason: Parent ID is not in the DB .. ", ["debug"]);} } // Flag as unwanted unwanted = true; } else { // Edge case as the parent (from another users OneDrive account) will never be in the database - potentially a shared object? - addLogEntry("The reported parentId is not in the database. This potentially is a shared folder as 'remoteItem.driveId' != 'appConfig.defaultDriveId'. Relevant Details: remoteItem.driveId (" ~ remoteItem.driveId ~ "), remoteItem.parentId (" ~ remoteItem.parentId ~ ")", ["debug"]); - addLogEntry("Potential Shared Object JSON: " ~ to!string(onedriveJSONItem), ["debug"]); + if (debugLogging) { + addLogEntry("The reported parentId is not in the database. This potentially is a shared folder as 'remoteItem.driveId' != 'appConfig.defaultDriveId'. Relevant Details: remoteItem.driveId (" ~ remoteItem.driveId ~ "), remoteItem.parentId (" ~ remoteItem.parentId ~ ")", ["debug"]); + addLogEntry("Potential Shared Object JSON: " ~ to!string(onedriveJSONItem), ["debug"]); + } // Format the OneDrive change into a consumable object for the database remoteItem = makeItem(onedriveJSONItem); if (appConfig.accountType == "personal") { // Personal Account Handling - addLogEntry("Handling a Personal Shared Item JSON object", ["debug"]); + if (debugLogging) {addLogEntry("Handling a Personal Shared Item JSON object", ["debug"]);} if (hasSharedElement(onedriveJSONItem)) { // Has the Shared JSON structure - addLogEntry("Personal Shared Item JSON object has the 'shared' JSON structure", ["debug"]); + if (debugLogging) {addLogEntry("Personal Shared Item JSON object has the 'shared' JSON structure", ["debug"]);} // Create a 'root' DB Tie Record for this JSON object createDatabaseRootTieRecordForOnlineSharedFolder(onedriveJSONItem); } // Ensure that this item has no parent - addLogEntry("Setting remoteItem.parentId to be null", ["debug"]); + if (debugLogging) {addLogEntry("Setting remoteItem.parentId to be null", 
["debug"]);} remoteItem.parentId = null; // Add this record to the local database - addLogEntry("Update/Insert local database with remoteItem details with remoteItem.parentId as null: " ~ to!string(remoteItem), ["debug"]); + if (debugLogging) {addLogEntry("Update/Insert local database with remoteItem details with remoteItem.parentId as null: " ~ to!string(remoteItem), ["debug"]);} itemDB.upsert(remoteItem); } else { // Business or SharePoint Account Handling - addLogEntry("Handling a Business or SharePoint Shared Item JSON object", ["debug"]); + if (debugLogging) {addLogEntry("Handling a Business or SharePoint Shared Item JSON object", ["debug"]);} if (appConfig.accountType == "business") { // Create a 'root' DB Tie Record for this JSON object createDatabaseRootTieRecordForOnlineSharedFolder(onedriveJSONItem); // Ensure that this item has no parent - addLogEntry("Setting remoteItem.parentId to be null", ["debug"]); + if (debugLogging) {addLogEntry("Setting remoteItem.parentId to be null", ["debug"]);} remoteItem.parentId = null; // Check the DB for 'remote' objects, searching 'remoteDriveId' and 'remoteId' items for this remoteItem.driveId and remoteItem.id @@ -1490,32 +1520,36 @@ class SyncEngine { // Must compare remoteDBItem.name with remoteItem.name if ((!remoteDBItem.name.empty) && (remoteDBItem.name != remoteItem.name)) { // Update DB Item - addLogEntry("The shared item stored in OneDrive, has a different name to the actual name on the remote drive", ["debug"]); - addLogEntry("Updating remoteItem.name JSON data with the actual name being used on account drive and local folder", ["debug"]); - addLogEntry("remoteItem.name was: " ~ remoteItem.name, ["debug"]); - addLogEntry("Updating remoteItem.name to: " ~ remoteDBItem.name, ["debug"]); + if (debugLogging) { + addLogEntry("The shared item stored in OneDrive, has a different name to the actual name on the remote drive", ["debug"]); + addLogEntry("Updating remoteItem.name JSON data with the actual name being used 
on account drive and local folder", ["debug"]); + addLogEntry("remoteItem.name was: " ~ remoteItem.name, ["debug"]); + addLogEntry("Updating remoteItem.name to: " ~ remoteDBItem.name, ["debug"]); + } remoteItem.name = remoteDBItem.name; - addLogEntry("Setting remoteItem.remoteName to: " ~ onedriveJSONItem["name"].str, ["debug"]); + if (debugLogging) {addLogEntry("Setting remoteItem.remoteName to: " ~ onedriveJSONItem["name"].str, ["debug"]);} // Update JSON Item remoteItem.remoteName = onedriveJSONItem["name"].str; - addLogEntry("Updating source JSON 'name' to that which is the actual local directory", ["debug"]); - addLogEntry("onedriveJSONItem['name'] was: " ~ onedriveJSONItem["name"].str, ["debug"]); - addLogEntry("Updating onedriveJSONItem['name'] to: " ~ remoteDBItem.name, ["debug"]); + if (debugLogging) { + addLogEntry("Updating source JSON 'name' to that which is the actual local directory", ["debug"]); + addLogEntry("onedriveJSONItem['name'] was: " ~ onedriveJSONItem["name"].str, ["debug"]); + addLogEntry("Updating onedriveJSONItem['name'] to: " ~ remoteDBItem.name, ["debug"]); + } onedriveJSONItem["name"] = remoteDBItem.name; - addLogEntry("onedriveJSONItem['name'] now: " ~ onedriveJSONItem["name"].str, ["debug"]); + if (debugLogging) {addLogEntry("onedriveJSONItem['name'] now: " ~ onedriveJSONItem["name"].str, ["debug"]);} // Update newItemPath value newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ remoteDBItem.name; - addLogEntry("New Item updated calculated full path is: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("New Item updated calculated full path is: " ~ newItemPath, ["debug"]);} } // Add this record to the local database - addLogEntry("Update/Insert local database with remoteItem details: " ~ to!string(remoteItem), ["debug"]); + if (debugLogging) {addLogEntry("Update/Insert local database with remoteItem details: " ~ to!string(remoteItem), ["debug"]);} itemDB.upsert(remoteItem); } else { // Sharepoint 
account type - addLogEntry("Handling a SharePoint Shared Item JSON object - NOT IMPLEMENTED YET ........ ", ["info"]); + addLogEntry("Handling a SharePoint Shared Item JSON object - NOT IMPLEMENTED YET ........ RAISE A BUG PLEASE", ["info"]); } } } @@ -1525,13 +1559,13 @@ class SyncEngine { if (!unwanted) { if (thisItemParentId in skippedItems) { // Flag this JSON item as unwanted - addLogEntry("Flagging as unwanted: find(thisItemParentId).length != 0", ["debug"]); + if (debugLogging) {addLogEntry("Flagging as unwanted: find(thisItemParentId).length != 0", ["debug"]);} unwanted = true; // Is this item id in the database? if (existingDBEntry) { // item exists in database, most likely moved out of scope for current client configuration - addLogEntry("This item was previously synced / seen by the client", ["debug"]); + if (debugLogging) {addLogEntry("This item was previously synced / seen by the client", ["debug"]);} if (("name" in onedriveJSONItem["parentReference"]) != null) { @@ -1541,11 +1575,11 @@ class SyncEngine { // sync_list configured and in use if (selectiveSync.isPathExcludedViaSyncList(onedriveJSONItem["parentReference"]["name"].str)) { // Previously synced item is now out of scope as it has been moved out of what is included in sync_list - addLogEntry("This previously synced item is now excluded from being synced due to sync_list exclusion", ["debug"]); + if (debugLogging) {addLogEntry("This previously synced item is now excluded from being synced due to sync_list exclusion", ["debug"]);} } } // flag to delete local file as it now is no longer in sync with OneDrive - addLogEntry("Flagging to delete item locally: ", ["debug"]); + if (debugLogging) {addLogEntry("Flagging to delete item locally: ", ["debug"]);} idsToDelete ~= [thisItemDriveId, thisItemId]; } } @@ -1555,28 +1589,28 @@ class SyncEngine { // Check the item type - if it not an item type that we support, we cant process the JSON item if (!unwanted) { if (isItemFile(onedriveJSONItem)) { - 
addLogEntry("The item we are syncing is a file", ["debug"]); + if (debugLogging) {addLogEntry("The item we are syncing is a file", ["debug"]);} } else if (isItemFolder(onedriveJSONItem)) { - addLogEntry("The item we are syncing is a folder", ["debug"]); + if (debugLogging) {addLogEntry("The item we are syncing is a folder", ["debug"]);} } else if (isItemRemote(onedriveJSONItem)) { - addLogEntry("The item we are syncing is a remote item", ["debug"]); + if (debugLogging) {addLogEntry("The item we are syncing is a remote item", ["debug"]);} } else { // Why was this unwanted? if (newItemPath.empty) { // Compute this item path & need the full path for this file newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; - addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]);} } // Microsoft OneNote container objects present as neither folder or file but has file size if ((!isItemFile(onedriveJSONItem)) && (!isItemFolder(onedriveJSONItem)) && (hasFileSize(onedriveJSONItem))) { // Log that this was skipped as this was a Microsoft OneNote item and unsupported - addLogEntry("The Microsoft OneNote Notebook '" ~ newItemPath ~ "' is not supported by this client", ["verbose"]); + if (verboseLogging) {addLogEntry("The Microsoft OneNote Notebook '" ~ newItemPath ~ "' is not supported by this client", ["verbose"]);} } else { // Log that this item was skipped as unsupported - addLogEntry("The OneDrive item '" ~ newItemPath ~ "' is not supported by this client", ["verbose"]); + if (verboseLogging) {addLogEntry("The OneDrive item '" ~ newItemPath ~ "' is not supported by this client", ["verbose"]);} } unwanted = true; - addLogEntry("Flagging as unwanted: item type is not supported", ["debug"]); + if (debugLogging) {addLogEntry("Flagging as unwanted: item type is not supported", ["debug"]);} } } @@ -1599,17 +1633,17 @@ class SyncEngine { } 
else { simplePathToCheck = onedriveJSONItem["name"].str; } - addLogEntry("skip_dir path to check (simple): " ~ simplePathToCheck, ["debug"]); + if (debugLogging) {addLogEntry("skip_dir path to check (simple): " ~ simplePathToCheck, ["debug"]);} // complex path if (parentInDatabase) { // build up complexPathToCheck complexPathToCheck = buildNormalizedPath(newItemPath); } else { - addLogEntry("Parent details not in database - unable to compute complex path to check", ["debug"]); + if (debugLogging) {addLogEntry("Parent details not in database - unable to compute complex path to check", ["debug"]);} } if (!complexPathToCheck.empty) { - addLogEntry("skip_dir path to check (complex): " ~ complexPathToCheck, ["debug"]); + if (debugLogging) {addLogEntry("skip_dir path to check (complex): " ~ complexPathToCheck, ["debug"]);} } } else { simplePathToCheck = onedriveJSONItem["name"].str; @@ -1618,39 +1652,39 @@ class SyncEngine { // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder // then isDirNameExcluded matching will not work if (simplePathToCheck.canFind(":")) { - addLogEntry("Updating simplePathToCheck to remove 'root:'", ["debug"]); + if (debugLogging) {addLogEntry("Updating simplePathToCheck to remove 'root:'", ["debug"]);} simplePathToCheck = processPathToRemoveRootReference(simplePathToCheck); } if (complexPathToCheck.canFind(":")) { - addLogEntry("Updating complexPathToCheck to remove 'root:'", ["debug"]); + if (debugLogging) {addLogEntry("Updating complexPathToCheck to remove 'root:'", ["debug"]);} complexPathToCheck = processPathToRemoveRootReference(complexPathToCheck); } // OK .. what checks are we doing? 
if ((!simplePathToCheck.empty) && (complexPathToCheck.empty)) { // just a simple check - addLogEntry("Performing a simple check only", ["debug"]); + if (debugLogging) {addLogEntry("Performing a simple check only", ["debug"]);} unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); } else { // simple and complex - addLogEntry("Performing a simple then complex path match if required", ["debug"]); + if (debugLogging) {addLogEntry("Performing a simple then complex path match if required", ["debug"]);} // simple first - addLogEntry("Performing a simple check first", ["debug"]); + if (debugLogging) {addLogEntry("Performing a simple check first", ["debug"]);} unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); matchDisplay = simplePathToCheck; if (!unwanted) { // simple didnt match, perform a complex check - addLogEntry("Simple match was false, attempting complex match", ["debug"]); + if (debugLogging) {addLogEntry("Simple match was false, attempting complex match", ["debug"]);} unwanted = selectiveSync.isDirNameExcluded(complexPathToCheck); matchDisplay = complexPathToCheck; } } // result - addLogEntry("skip_dir exclude result (directory based): " ~ to!string(unwanted), ["debug"]); + if (debugLogging) {addLogEntry("skip_dir exclude result (directory based): " ~ to!string(unwanted), ["debug"]);} if (unwanted) { // This path should be skipped - addLogEntry("Skipping path - excluded by skip_dir config: " ~ matchDisplay, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping path - excluded by skip_dir config: " ~ matchDisplay, ["verbose"]);} } } // Is the item a file? 
@@ -1670,10 +1704,10 @@ class SyncEngine { // perform the check unwanted = selectiveSync.isDirNameExcluded(pathToCheck); // result - addLogEntry("skip_dir exclude result (file based): " ~ to!string(unwanted), ["debug"]); + if (debugLogging) {addLogEntry("skip_dir exclude result (file based): " ~ to!string(unwanted), ["debug"]);} if (unwanted) { // this files path should be skipped - addLogEntry("Skipping file - file path is excluded by skip_dir config: " ~ newItemPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping file - file path is excluded by skip_dir config: " ~ newItemPath, ["verbose"]);} } } } @@ -1694,7 +1728,7 @@ class SyncEngine { // Compute this item path & need the full path for this file if (newItemPath.empty) { newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; - addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]);} } // The path that needs to be checked needs to include the '/' @@ -1706,14 +1740,16 @@ class SyncEngine { exclusionTestPath = '/' ~ newItemPath; } - addLogEntry("skip_file item to check: " ~ exclusionTestPath, ["debug"]); + if (debugLogging) {addLogEntry("skip_file item to check: " ~ exclusionTestPath, ["debug"]);} unwanted = selectiveSync.isFileNameExcluded(exclusionTestPath); - addLogEntry("Result: " ~ to!string(unwanted), ["debug"]); - if (unwanted) addLogEntry("Skipping file - excluded by skip_dir config: " ~ thisItemName, ["verbose"]); + if (debugLogging) {addLogEntry("Result: " ~ to!string(unwanted), ["debug"]);} + if (unwanted) { + if (verboseLogging) {addLogEntry("Skipping file - excluded by skip_dir config: " ~ thisItemName, ["verbose"]);} + } } else { // parent id is not in the database unwanted = true; - addLogEntry("Skipping file - parent path not present in local database", ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping file - parent path not 
present in local database", ["verbose"]);} } } } @@ -1726,11 +1762,11 @@ class SyncEngine { if (newItemPath.empty) { // Calculate this items path newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; - addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]);} } // What path are we checking? - addLogEntry("Path to check against 'sync_list' entries: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("Path to check against 'sync_list' entries: " ~ newItemPath, ["debug"]);} // Unfortunately there is no avoiding this call to check if the path is excluded|included via sync_list if (selectiveSync.isPathExcludedViaSyncList(newItemPath)) { @@ -1743,11 +1779,11 @@ class SyncEngine { } else { // path is unwanted unwanted = true; - addLogEntry("Skipping path - excluded by sync_list config: " ~ newItemPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping path - excluded by sync_list config: " ~ newItemPath, ["verbose"]);} // flagging to skip this item now, but does this exist in the DB thus needs to be removed / deleted? 
if (existingDBEntry) { // flag to delete - addLogEntry("Flagging item for local delete as item exists in database: " ~ newItemPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Flagging item for local delete as item exists in database: " ~ newItemPath, ["verbose"]);} idsToDelete ~= [thisItemDriveId, thisItemId]; } } @@ -1759,7 +1795,7 @@ class SyncEngine { if (!unwanted) { if (appConfig.getValueBool("skip_dotfiles")) { if (isDotFile(newItemPath)) { - addLogEntry("Skipping item - .file or .folder: " ~ newItemPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping item - .file or .folder: " ~ newItemPath, ["verbose"]);} unwanted = true; } } @@ -1772,7 +1808,7 @@ class SyncEngine { string parentPath = dirName(newItemPath); // Check for the presence of a .nosync in the parent path if (exists(parentPath ~ "/.nosync")) { - addLogEntry("Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: " ~ newItemPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: " ~ newItemPath, ["verbose"]);} unwanted = true; } } @@ -1783,7 +1819,7 @@ class SyncEngine { if (isItemFile(onedriveJSONItem)) { if (fileSizeLimit != 0) { if (onedriveJSONItem["size"].integer >= fileSizeLimit) { - addLogEntry("Skipping file - excluded by skip_size config: " ~ thisItemName ~ " (" ~ to!string(onedriveJSONItem["size"].integer/2^^20) ~ " MB)", ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping file - excluded by skip_size config: " ~ thisItemName ~ " (" ~ to!string(onedriveJSONItem["size"].integer/2^^20) ~ " MB)", ["verbose"]);} unwanted = true; } } @@ -1802,7 +1838,7 @@ class SyncEngine { // We know if this JSON item is unwanted or not if (unwanted) { // This JSON item is NOT wanted - it is excluded - addLogEntry("Skipping OneDrive change as this is determined to be unwanted", ["debug"]); + if (debugLogging) {addLogEntry("Skipping OneDrive change as this is 
determined to be unwanted", ["debug"]);} // Add to the skippedItems array, but only if it is a directory ... pointless adding 'files' here, as it is the 'id' we check as the parent path which can only be a directory if (!isItemFile(onedriveJSONItem)) { @@ -1816,7 +1852,7 @@ class SyncEngine { if (existingDBEntry) { // The details of this JSON item are already in the DB // Is the item in the DB the same as the JSON data provided - or is the JSON data advising this is an updated file? - addLogEntry("OneDrive change is an update to an existing local item", ["debug"]); + if (debugLogging) {addLogEntry("OneDrive change is an update to an existing local item", ["debug"]);} // Compute the existing item path // NOTE: @@ -1854,7 +1890,7 @@ class SyncEngine { // Calculate the existing path string existingItemPath = computeItemPath(queryDriveID, queryParentID) ~ "/" ~ existingDatabaseItem.name; - addLogEntry("existingItemPath calculated full path is: " ~ existingItemPath, ["debug"]); + if (debugLogging) {addLogEntry("existingItemPath calculated full path is: " ~ existingItemPath, ["debug"]);} // Attempt to apply this changed item applyPotentiallyChangedItem(existingDatabaseItem, existingItemPath, newDatabaseItem, newItemPath, onedriveJSONItem); @@ -1862,7 +1898,7 @@ class SyncEngine { // Action this JSON item as a new item as we have no DB record of it // The actual item may actually exist locally already, meaning that just the database is out-of-date or missing the data due to --resync // But we also cannot compute the newItemPath as the parental objects may not exist as well - addLogEntry("OneDrive change is potentially a new local item", ["debug"]); + if (debugLogging) {addLogEntry("OneDrive change is potentially a new local item", ["debug"]);} // Attempt to apply this potentially new item applyPotentiallyNewLocalItem(newDatabaseItem, onedriveJSONItem, newItemPath); @@ -1870,8 +1906,10 @@ class SyncEngine { } // How long to process this JSON item in batch - auto 
jsonProcessingElapsedTime = Clock.currTime() - jsonProcessingStartTime; - addLogEntry("Batched JSON item processing time: " ~ to!string(jsonProcessingElapsedTime), ["debug"]); + if (debugLogging) { + Duration jsonProcessingElapsedTime = MonoTime.currTime() - jsonProcessingStartTime; + addLogEntry("Batched JSON item processing time: " ~ to!string(jsonProcessingElapsedTime), ["debug"]); + } // Tracking as to if this item was processed processedCount++; @@ -1884,17 +1922,17 @@ class SyncEngine { // Are there any items to delete locally? Cleanup space locally first if (!idsToDelete.empty) { // There are elements that potentially need to be deleted locally - addLogEntry("Items to potentially delete locally: " ~ to!string(idsToDelete.length), ["verbose"]); + if (verboseLogging) {addLogEntry("Items to potentially delete locally: " ~ to!string(idsToDelete.length), ["verbose"]);} if (appConfig.getValueBool("download_only")) { // Download only has been configured if (cleanupLocalFiles) { // Process online deleted items - addLogEntry("Processing local deletion activity as --download-only & --cleanup-local-files configured", ["verbose"]); + if (verboseLogging) {addLogEntry("Processing local deletion activity as --download-only & --cleanup-local-files configured", ["verbose"]);} processDeleteItems(); } else { // Not cleaning up local files - addLogEntry("Skipping local deletion activity as --download-only has been used", ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping local deletion activity as --download-only has been used", ["verbose"]);} // List files and directories we are not deleting locally listDeletedItems(); } @@ -1923,7 +1961,7 @@ class SyncEngine { // If deltaLinkCache.latestDeltaLink is not empty, update the deltaLink in the database for this driveId so that we can reuse this now that jsonItemsToProcess has been fully processed if (!deltaLinkCache.latestDeltaLink.empty) { - addLogEntry("Updating completed deltaLink for driveID " ~ deltaLinkCache.driveId ~ 
" in DB to: " ~ deltaLinkCache.latestDeltaLink, ["debug"]); + if (debugLogging) {addLogEntry("Updating completed deltaLink for driveID " ~ deltaLinkCache.driveId ~ " in DB to: " ~ deltaLinkCache.latestDeltaLink, ["debug"]);} itemDB.setDeltaLink(deltaLinkCache.driveId, deltaLinkCache.itemId, deltaLinkCache.latestDeltaLink); // Now that the DB is updated, when we perform the last examination of the most recent online data, cache this so this can be obtained this from memory @@ -1934,9 +1972,9 @@ class SyncEngine { // Function to add or update a key pair in the deltaLinkInfo array void cacheLatestDeltaLink(ref DeltaLinkInfo deltaLinkInfo, string driveId, string latestDeltaLink) { if (driveId !in deltaLinkInfo) { - addLogEntry("Added new latestDeltaLink entry: " ~ driveId ~ " -> " ~ latestDeltaLink, ["debug"]); + if (debugLogging) {addLogEntry("Added new latestDeltaLink entry: " ~ driveId ~ " -> " ~ latestDeltaLink, ["debug"]);} } else { - addLogEntry("Updated latestDeltaLink entry for " ~ driveId ~ " from " ~ deltaLinkInfo[driveId] ~ " to " ~ latestDeltaLink, ["debug"]); + if (debugLogging) {addLogEntry("Updated latestDeltaLink entry for " ~ driveId ~ " from " ~ deltaLinkInfo[driveId] ~ " to " ~ latestDeltaLink, ["debug"]);} } deltaLinkInfo[driveId] = latestDeltaLink; } @@ -1964,13 +2002,13 @@ class SyncEngine { // As such, we should not be doing any other checks here to determine if the JSON item is wanted .. 
it is if (exists(newItemPath)) { - addLogEntry("Path on local disk already exists", ["debug"]); + if (debugLogging) {addLogEntry("Path on local disk already exists", ["debug"]);} // Issue #2209 fix - test if path is a bad symbolic link if (isSymlink(newItemPath)) { - addLogEntry("Path on local disk is a symbolic link ........", ["debug"]); + if (debugLogging) {addLogEntry("Path on local disk is a symbolic link ........", ["debug"]);} if (!exists(readLink(newItemPath))) { // reading the symbolic link failed - addLogEntry("Reading the symbolic link target failed ........ ", ["debug"]); + if (debugLogging) {addLogEntry("Reading the symbolic link target failed ........ ", ["debug"]);} addLogEntry("Skipping item - invalid symbolic link: " ~ newItemPath, ["info", "notify"]); return; } @@ -1982,13 +2020,15 @@ class SyncEngine { string itemSource = "remote"; if (isItemSynced(newDatabaseItem, newItemPath, itemSource)) { // Item details from OneDrive and local item details in database are in-sync - addLogEntry("The item to sync is already present on the local filesystem and is in-sync with what is reported online", ["debug"]); - addLogEntry("Update/Insert local database with item details: " ~ to!string(newDatabaseItem), ["debug"]); + if (debugLogging) { + addLogEntry("The item to sync is already present on the local filesystem and is in-sync with what is reported online", ["debug"]); + addLogEntry("Update/Insert local database with item details: " ~ to!string(newDatabaseItem), ["debug"]); + } itemDB.upsert(newDatabaseItem); return; } else { // Item details from OneDrive and local item details in database are NOT in-sync - addLogEntry("The item to sync exists locally but is potentially not in the local database - otherwise this would be handled as changed item", ["debug"]); + if (debugLogging) {addLogEntry("The item to sync exists locally but is potentially not in the local database - otherwise this would be handled as changed item", ["debug"]);} // Which object is newer? 
The local file or the remote file? SysTime localModifiedTime = timeLastModified(newItemPath).toUTC(); @@ -2009,7 +2049,7 @@ class SyncEngine { // Fetch the latest DB record - as this could have been updated by the isItemSynced if the date online was being corrected, then the DB updated as a result Item latestDatabaseItem; itemDB.selectById(newDatabaseItem.driveId, newDatabaseItem.id, latestDatabaseItem); - addLogEntry("latestDatabaseItem: " ~ to!string(latestDatabaseItem), ["debug"]); + if (debugLogging) {addLogEntry("latestDatabaseItem: " ~ to!string(latestDatabaseItem), ["debug"]);} SysTime latestItemModifiedTime = latestDatabaseItem.mtime; // Reduce time resolution to seconds before comparing @@ -2017,19 +2057,19 @@ class SyncEngine { if (localModifiedTime == latestItemModifiedTime) { // Log action - addLogEntry("Local file modified time matches existing database record - keeping local file", ["verbose"]); - addLogEntry("Skipping OneDrive change as this is determined to be unwanted due to local file modified time matching database data", ["debug"]); + if (verboseLogging) {addLogEntry("Local file modified time matches existing database record - keeping local file", ["verbose"]);} + if (debugLogging) {addLogEntry("Skipping OneDrive change as this is determined to be unwanted due to local file modified time matching database data", ["debug"]);} } else { // Log action - addLogEntry("Local file modified time is newer based on UTC time conversion - keeping local file as this exists in the local database", ["verbose"]); - addLogEntry("Skipping OneDrive change as this is determined to be unwanted due to local file modified time being newer than OneDrive file and present in the sqlite database", ["debug"]); + if (verboseLogging) {addLogEntry("Local file modified time is newer based on UTC time conversion - keeping local file as this exists in the local database", ["verbose"]);} + if (debugLogging) {addLogEntry("Skipping OneDrive change as this is determined to be 
unwanted due to local file modified time being newer than OneDrive file and present in the sqlite database", ["debug"]);} } // Return as no further action needed return; } else { // item id is not in the database .. maybe a --resync ? // file exists locally but is not in the sqlite database - maybe a failed download? - addLogEntry("Local item does not exist in local database - replacing with file from OneDrive - failed download?", ["verbose"]); + if (verboseLogging) {addLogEntry("Local item does not exist in local database - replacing with file from OneDrive - failed download?", ["verbose"]);} // In a --resync scenario or if items.sqlite3 was deleted before startup we have zero way of knowing IF the local file is meant to be the right file // To this pint we have passed the following checks: @@ -2053,9 +2093,11 @@ class SyncEngine { // Is the remote newer? if (localModifiedTime < itemModifiedTime) { // Remote file is newer than the existing local item - addLogEntry("Remote item modified time is newer based on UTC time conversion", ["verbose"]); // correct message, remote item is newer - addLogEntry("localModifiedTime (local file): " ~ to!string(localModifiedTime), ["debug"]); - addLogEntry("itemModifiedTime (OneDrive item): " ~ to!string(itemModifiedTime), ["debug"]); + if (verboseLogging) {addLogEntry("Remote item modified time is newer based on UTC time conversion", ["verbose"]);} // correct message, remote item is newer + if (debugLogging) { + addLogEntry("localModifiedTime (local file): " ~ to!string(localModifiedTime), ["debug"]); + addLogEntry("itemModifiedTime (OneDrive item): " ~ to!string(itemModifiedTime), ["debug"]); + } // Has the user configured to IGNORE local data protection rules? if (bypassDataPreservation) { @@ -2072,8 +2114,10 @@ class SyncEngine { // Are the timestamps equal? 
if (localModifiedTime == itemModifiedTime) { // yes they are equal - addLogEntry("File timestamps are equal, no further action required", ["debug"]); // correct message as timestamps are equal - addLogEntry("Update/Insert local database with item details: " ~ to!string(newDatabaseItem), ["debug"]); + if (debugLogging) { + addLogEntry("File timestamps are equal, no further action required", ["debug"]); // correct message as timestamps are equal + addLogEntry("Update/Insert local database with item details: " ~ to!string(newDatabaseItem), ["debug"]); + } itemDB.upsert(newDatabaseItem); return; } @@ -2113,21 +2157,21 @@ class SyncEngine { // To create a path, 'newItemPath' must not be empty if (!newItemPath.empty) { // Update the logging output to be consistent - addLogEntry("Creating local directory: " ~ "./" ~ buildNormalizedPath(newItemPath), ["verbose"]); + if (verboseLogging) {addLogEntry("Creating local directory: " ~ "./" ~ buildNormalizedPath(newItemPath), ["verbose"]);} if (!dryRun) { try { // Create the new directory - addLogEntry("Requested path does not exist, creating directory structure: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("Requested path does not exist, creating directory structure: " ~ newItemPath, ["debug"]);} mkdirRecurse(newItemPath); // Configure the applicable permissions for the folder - addLogEntry("Setting directory permissions for: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("Setting directory permissions for: " ~ newItemPath, ["debug"]);} newItemPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); // Update the time of the folder to match the last modified time as is provided by OneDrive // If there are any files then downloaded into this folder, the last modified time will get // updated by the local Operating System with the latest timestamp - as this is normal operation // as the directory has been modified - addLogEntry("Setting directory lastModifiedDateTime for: " ~ newItemPath ~ 
" to " ~ to!string(newDatabaseItem.mtime), ["debug"]); - addLogEntry("Calling setTimes() for this directory: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("Setting directory lastModifiedDateTime for: " ~ newItemPath ~ " to " ~ to!string(newDatabaseItem.mtime), ["debug"]);} + if (debugLogging) {addLogEntry("Calling setTimes() for this directory: " ~ newItemPath, ["debug"]);} setTimes(newItemPath, newDatabaseItem.mtime, newDatabaseItem.mtime); // Save the item to the database saveItem(onedriveJSONItem); @@ -2173,10 +2217,10 @@ class SyncEngine { string itemSource = "database"; if (isItemSynced(changedLocalItem, changedItemPath, itemSource)) { // The destination item is in-sync - addLogEntry("Destination is in sync and will be overwritten", ["verbose"]); + if (verboseLogging) {addLogEntry("Destination is in sync and will be overwritten", ["verbose"]);} } else { // The destination item is different - addLogEntry("The destination is occupied with a different item, renaming the conflicting file...", ["verbose"]); + if (verboseLogging) {addLogEntry("The destination is occupied with a different item, renaming the conflicting file...", ["verbose"]);} // Backup this item, passing in if we are performing a --dry-run or not // In case the renamed path is needed string renamedPath; @@ -2184,7 +2228,7 @@ class SyncEngine { } } else { // The to be overwritten item is not already in the itemdb, so it should saved to avoid data loss - addLogEntry("The destination is occupied by an existing un-synced file, renaming the conflicting file...", ["verbose"]); + if (verboseLogging) {addLogEntry("The destination is occupied by an existing un-synced file, renaming the conflicting file...", ["verbose"]);} // Backup this item, passing in if we are performing a --dry-run or not // In case the renamed path is needed string renamedPath; @@ -2207,7 +2251,7 @@ class SyncEngine { // which is 'correct' .. 
but we need to report locally the online timestamp here as the move was made online if (changedOneDriveItem.type == ItemType.file) { // Set the timestamp - addLogEntry("Calling setTimes() for this file: " ~ changedItemPath, ["debug"]); + if (debugLogging) {addLogEntry("Calling setTimes() for this file: " ~ changedItemPath, ["debug"]);} setTimes(changedItemPath, changedOneDriveItem.mtime, changedOneDriveItem.mtime); } } else { @@ -2243,7 +2287,7 @@ class SyncEngine { if ((existingItemModifiedTime != changedOneDriveItemModifiedTime) || (generateSimulatedDeltaResponse)) { // Save this item in the database // Add to the local database - addLogEntry("Adding changed OneDrive Item to database: " ~ to!string(changedOneDriveItem), ["debug"]); + if (debugLogging) {addLogEntry("Adding changed OneDrive Item to database: " ~ to!string(changedOneDriveItem), ["debug"]);} itemDB.upsert(changedOneDriveItem); } } @@ -2277,7 +2321,7 @@ class SyncEngine { if ((existingItemModifiedTime != changedOneDriveItemModifiedTime) || (generateSimulatedDeltaResponse)) { // Database update needed for this item because our local record is out-of-date // Add to the local database - addLogEntry("Adding changed OneDrive Item to database: " ~ to!string(changedOneDriveItem), ["debug"]); + if (debugLogging) {addLogEntry("Adding changed OneDrive Item to database: " ~ to!string(changedOneDriveItem), ["debug"]);} itemDB.upsert(changedOneDriveItem); } } @@ -2323,7 +2367,7 @@ class SyncEngine { // Calculate this items path string newItemPath = computeItemPath(downloadDriveId, downloadParentId) ~ "/" ~ downloadItemName; - addLogEntry("JSON Item calculated full path for download is: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("JSON Item calculated full path for download is: " ~ newItemPath, ["debug"]);} // Is the item reported as Malware ? 
if (isMalware(onedriveJSONItem)){ @@ -2337,7 +2381,7 @@ class SyncEngine { jsonFileSize = onedriveJSONItem["size"].integer; } else { // filesize missing - addLogEntry("ERROR: onedriveJSONItem['size'] is missing", ["debug"]); + if (debugLogging) {addLogEntry("ERROR: onedriveJSONItem['size'] is missing", ["debug"]);} } // Configure the hashes for comparison post download @@ -2360,7 +2404,7 @@ class SyncEngine { } } else { // file hash data missing - addLogEntry("ERROR: onedriveJSONItem['file']['hashes'] is missing - unable to compare file hash after download", ["debug"]); + if (debugLogging) {addLogEntry("ERROR: onedriveJSONItem['file']['hashes'] is missing - unable to compare file hash after download", ["debug"]);} } // Does the file already exist in the path locally? @@ -2374,7 +2418,7 @@ class SyncEngine { } // Log the DB details - addLogEntry("File to download exists locally and this is the DB record: " ~ to!string(databaseItem), ["debug"]); + if (debugLogging) {addLogEntry("File to download exists locally and this is the DB record: " ~ to!string(databaseItem), ["debug"]);} // Does the DB (what we think is in sync) hash match the existing local file hash? 
if (!testFileHash(newItemPath, databaseItem)) { @@ -2395,9 +2439,11 @@ class SyncEngine { // The reservation value is user configurable in the config file, 50MB by default ulong freeSpaceReservation = appConfig.getValueLong("space_reservation"); // debug output - addLogEntry("Local Disk Space Actual: " ~ to!string(localActualFreeSpace), ["debug"]); - addLogEntry("Free Space Reservation: " ~ to!string(freeSpaceReservation), ["debug"]); - addLogEntry("File Size to Download: " ~ to!string(jsonFileSize), ["debug"]); + if (debugLogging) { + addLogEntry("Local Disk Space Actual: " ~ to!string(localActualFreeSpace), ["debug"]); + addLogEntry("Free Space Reservation: " ~ to!string(freeSpaceReservation), ["debug"]); + addLogEntry("File Size to Download: " ~ to!string(jsonFileSize), ["debug"]); + } // Calculate if we can actually download file - is there enough free space? if ((localActualFreeSpace < freeSpaceReservation) || (jsonFileSize > localActualFreeSpace)) { @@ -2432,7 +2478,7 @@ class SyncEngine { GC.collect(); } catch (OneDriveException exception) { - addLogEntry("downloadFileOneDriveApiInstance.downloadById(downloadDriveId, downloadItemId, newItemPath, jsonFileSize); generated a OneDriveException", ["debug"]); + if (debugLogging) {addLogEntry("downloadFileOneDriveApiInstance.downloadById(downloadDriveId, downloadItemId, newItemPath, jsonFileSize); generated a OneDriveException", ["debug"]);} string thisFunctionName = getFunctionName!({}); // HTTP request returned status code 403 @@ -2483,7 +2529,7 @@ class SyncEngine { if ((downloadFileSize == jsonFileSize) && (downloadedFileHash == onlineFileHash)) { // Downloaded file matches size and hash - addLogEntry("Downloaded file matches reported size and reported file hash", ["debug"]); + if (debugLogging) {addLogEntry("Downloaded file matches reported size and reported file hash", ["debug"]);} try { // get the mtime from the JSON data @@ -2519,7 +2565,7 @@ class SyncEngine { // set the correct time on the downloaded file 
if (!dryRun) { - addLogEntry("Calling setTimes() for this file: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("Calling setTimes() for this file: " ~ newItemPath, ["debug"]);} setTimes(newItemPath, itemModifiedTime, itemModifiedTime); } } catch (FileException e) { @@ -2534,8 +2580,10 @@ class SyncEngine { if (downloadFileSize != jsonFileSize) { // downloaded file size does not match downloadValueMismatch = true; - addLogEntry("Actual file size on disk: " ~ to!string(downloadFileSize), ["debug"]); - addLogEntry("OneDrive API reported size: " ~ to!string(jsonFileSize), ["debug"]); + if (debugLogging) { + addLogEntry("Actual file size on disk: " ~ to!string(downloadFileSize), ["debug"]); + addLogEntry("OneDrive API reported size: " ~ to!string(jsonFileSize), ["debug"]); + } addLogEntry("ERROR: File download size mismatch. Increase logging verbosity to determine why."); } @@ -2543,8 +2591,10 @@ class SyncEngine { if (downloadedFileHash != onlineFileHash) { // downloaded file hash does not match downloadValueMismatch = true; - addLogEntry("Actual local file hash: " ~ downloadedFileHash, ["debug"]); - addLogEntry("OneDrive API reported hash: " ~ onlineFileHash, ["debug"]); + if (debugLogging) { + addLogEntry("Actual local file hash: " ~ downloadedFileHash, ["debug"]); + addLogEntry("OneDrive API reported hash: " ~ onlineFileHash, ["debug"]); + } addLogEntry("ERROR: File download hash mismatch. Increase logging verbosity to determine why."); } @@ -2555,7 +2605,7 @@ class SyncEngine { if (downloadValueMismatch && (toLower(extension(newItemPath)) == ".heic")) { // Need to display a message to the user that they have experienced data loss addLogEntry("DATA-LOSS: File downloaded has experienced data loss due to a Microsoft OneDrive API bug. 
DO NOT DELETE THIS FILE ONLINE: " ~ newItemPath, ["info", "notify"]); - addLogEntry(" Please read https://github.com/OneDrive/onedrive-api-docs/issues/1723 for more details.", ["verbose"]); + if (verboseLogging) {addLogEntry(" Please read https://github.com/OneDrive/onedrive-api-docs/issues/1723 for more details.", ["verbose"]);} } // Add some workaround messaging for SharePoint @@ -2594,8 +2644,8 @@ class SyncEngine { } } else { // Download validation checks were disabled - addLogEntry("Downloaded file validation disabled due to --disable-download-validation", ["debug"]); - addLogEntry("WARNING: Skipping download integrity check for: " ~ newItemPath, ["verbose"]); + if (debugLogging) {addLogEntry("Downloaded file validation disabled due to --disable-download-validation", ["debug"]);} + if (verboseLogging) {addLogEntry("WARNING: Skipping download integrity check for: " ~ newItemPath, ["verbose"]);} } // end of (!disableDownloadValidation) } else { addLogEntry("ERROR: File failed to download. Increase logging verbosity to determine why."); @@ -2658,15 +2708,17 @@ class SyncEngine { return true; } else { // The file has a different timestamp ... is the hash the same meaning no file modification? - addLogEntry("Local file time discrepancy detected: " ~ path, ["verbose"]); - addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " (UTC) when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime) ~ " (UTC)", ["verbose"]); + if (verboseLogging) { + addLogEntry("Local file time discrepancy detected: " ~ path, ["verbose"]); + addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " (UTC) when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime) ~ " (UTC)", ["verbose"]); + } // The file has a different timestamp ... is the hash the same meaning no file modification? 
// Test the file hash as the date / time stamp is different // Generating a hash is computationally expensive - we only generate the hash if timestamp was different if (testFileHash(path, item)) { // The hash is the same .. so we need to fix-up the timestamp depending on where it is wrong - addLogEntry("Local item has the same hash value as the item online - correcting the applicable file timestamp", ["verbose"]); + if (verboseLogging) {addLogEntry("Local item has the same hash value as the item online - correcting the applicable file timestamp", ["verbose"]);} // Correction logic based on the configuration and the comparison of timestamps if (localModifiedTime > itemModifiedTime) { // Local file is newer timestamp wise, but has the same hash .. are we in a --download-only situation? @@ -2675,13 +2727,13 @@ class SyncEngine { if (appConfig.getValueBool("resync")) { // --resync was used // The source of the out-of-date timestamp was the local item and needs to be corrected ... but why is it newer - indexing application potentially changing the timestamp ? 
- addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally due to --resync", ["verbose"]); + if (verboseLogging) {addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally due to --resync", ["verbose"]);} // Fix the local file timestamp - addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); + if (debugLogging) {addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]);} setTimes(path, item.mtime, item.mtime); } else { // The source of the out-of-date timestamp was OneDrive and this needs to be corrected to avoid always generating a hash test if timestamp is different - addLogEntry("The source of the incorrect timestamp was OneDrive online - correcting timestamp online", ["verbose"]); + if (verboseLogging) {addLogEntry("The source of the incorrect timestamp was OneDrive online - correcting timestamp online", ["verbose"]);} // Attempt to update the online date time stamp // We need to use the correct driveId and itemId, especially if we are updating a OneDrive Business Shared File timestamp if (item.type == ItemType.file) { @@ -2694,22 +2746,22 @@ class SyncEngine { } } else if (!dryRun) { // --download-only is being used ... local file needs to be corrected ... but why is it newer - indexing application potentially changing the timestamp ? 
- addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally due to --download-only", ["verbose"]); + if (verboseLogging) {addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally due to --download-only", ["verbose"]);} // Fix the local file timestamp - addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); + if (debugLogging) {addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]);} setTimes(path, item.mtime, item.mtime); } } else if (!dryRun) { // The source of the out-of-date timestamp was the local file and this needs to be corrected to avoid always generating a hash test if timestamp is different - addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally", ["verbose"]); + if (verboseLogging) {addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally", ["verbose"]);} // Fix the local file timestamp - addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); + if (debugLogging) {addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]);} setTimes(path, item.mtime, item.mtime); } return false; } else { // The hash is different so the content of the file has to be different as to what is stored online - addLogEntry("The local file has a different hash when compared to " ~ itemSource ~ " file hash", ["verbose"]); + if (verboseLogging) {addLogEntry("The local file has a different hash when compared to " ~ itemSource ~ " file hash", ["verbose"]);} return false; } } @@ -2730,17 +2782,19 @@ class SyncEngine { JSONValue deltaChangesBundle; // Get the /delta data for this account | driveId | deltaLink combination - addLogEntry("------------------------------------------------------------------", ["debug"]); - addLogEntry("selectedDriveId: " ~ selectedDriveId, ["debug"]); - addLogEntry("selectedItemId: " ~ selectedItemId, ["debug"]); - 
addLogEntry("providedDeltaLink: " ~ providedDeltaLink, ["debug"]); - addLogEntry("------------------------------------------------------------------", ["debug"]); + if (debugLogging) { + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("selectedDriveId: " ~ selectedDriveId, ["debug"]); + addLogEntry("selectedItemId: " ~ selectedItemId, ["debug"]); + addLogEntry("providedDeltaLink: " ~ providedDeltaLink, ["debug"]); + addLogEntry("------------------------------------------------------------------", ["debug"]); + } try { deltaChangesBundle = getDeltaQueryOneDriveApiInstance.getChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink); } catch (OneDriveException exception) { // caught an exception - addLogEntry("getDeltaQueryOneDriveApiInstance.getChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink) generated a OneDriveException", ["debug"]); + if (debugLogging) {addLogEntry("getDeltaQueryOneDriveApiInstance.getChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink) generated a OneDriveException", ["debug"]);} auto errorArray = splitLines(exception.msg); string thisFunctionName = getFunctionName!({}); @@ -2753,7 +2807,7 @@ class SyncEngine { // Essentially the 'providedDeltaLink' that we have stored is no longer available ... 
re-try without the stored deltaLink addLogEntry("WARNING: Retrying OneDrive API call without using the locally stored deltaLink value"); // Configure an empty deltaLink - addLogEntry("Delta link expired for 'getDeltaQueryOneDriveApiInstance.getChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink)', setting 'deltaLink = null'", ["debug"]); + if (debugLogging) {addLogEntry("Delta link expired for 'getDeltaQueryOneDriveApiInstance.getChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink)', setting 'deltaLink = null'", ["debug"]);} string emptyDeltaLink = ""; // retry with empty deltaLink deltaChangesBundle = getDeltaQueryOneDriveApiInstance.getChangesByItemId(selectedDriveId, selectedItemId, emptyDeltaLink); @@ -2789,10 +2843,12 @@ class SyncEngine { // Display the pertinant details of the sync engine void displaySyncEngineDetails() { // Display accountType, defaultDriveId, defaultRootId & remainingFreeSpace for verbose logging purposes - addLogEntry("Application Version: " ~ appConfig.applicationVersion, ["verbose"]); - addLogEntry("Account Type: " ~ appConfig.accountType, ["verbose"]); - addLogEntry("Default Drive ID: " ~ appConfig.defaultDriveId, ["verbose"]); - addLogEntry("Default Root ID: " ~ appConfig.defaultRootId, ["verbose"]); + if (verboseLogging) { + addLogEntry("Application Version: " ~ appConfig.applicationVersion, ["verbose"]); + addLogEntry("Account Type: " ~ appConfig.accountType, ["verbose"]); + addLogEntry("Default Drive ID: " ~ appConfig.defaultDriveId, ["verbose"]); + addLogEntry("Default Root ID: " ~ appConfig.defaultRootId, ["verbose"]); + } // Fetch the details from cachedOnlineDriveData DriveDetailsCache cachedOnlineDriveData; @@ -2801,13 +2857,13 @@ class SyncEngine { // What do we display here for space remaining if (cachedOnlineDriveData.quotaRemaining > 0) { // Display the actual value - addLogEntry("Remaining Free Space: " ~ to!string(byteToGibiByte(cachedOnlineDriveData.quotaRemaining)) ~ " GB (" ~ 
to!string(cachedOnlineDriveData.quotaRemaining) ~ " bytes)", ["verbose"]); + if (verboseLogging) {addLogEntry("Remaining Free Space: " ~ to!string(byteToGibiByte(cachedOnlineDriveData.quotaRemaining)) ~ " GB (" ~ to!string(cachedOnlineDriveData.quotaRemaining) ~ " bytes)", ["verbose"]);} } else { // zero or non-zero value or restricted if (!cachedOnlineDriveData.quotaRestricted){ - addLogEntry("Remaining Free Space: 0 KB", ["verbose"]); + if (verboseLogging) {addLogEntry("Remaining Free Space: 0 KB", ["verbose"]);} } else { - addLogEntry("Remaining Free Space: Not Available", ["verbose"]); + if (verboseLogging) {addLogEntry("Remaining Free Space: Not Available", ["verbose"]);} } } } @@ -2819,7 +2875,7 @@ class SyncEngine { string calculatedPath; // What driveID and itemID we trying to calculate the path for - addLogEntry("Attempting to calculate local filesystem path for " ~ thisDriveId ~ " and " ~ thisItemId, ["debug"]); + if (debugLogging) {addLogEntry("Attempting to calculate local filesystem path for " ~ thisDriveId ~ " and " ~ thisItemId, ["debug"]);} try { calculatedPath = itemDB.computePath(thisDriveId, thisItemId); @@ -2945,9 +3001,9 @@ class SyncEngine { // Log the action if the path exists .. 
it may of already been removed and this is a legacy array item if (exists(path)) { if (item.type == ItemType.file) { - addLogEntry("Skipping local deletion for file " ~ path, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping local deletion for file " ~ path, ["verbose"]);} } else { - addLogEntry("Skipping local deletion for directory " ~ path, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping local deletion for directory " ~ path, ["verbose"]);} } } } @@ -3012,16 +3068,20 @@ class SyncEngine { // Handle the 409 if (exception.httpStatusCode == 409) { // OneDrive threw a 412 error - addLogEntry("OneDrive returned a 'HTTP 409 - ETag does not match current item's value' when attempting file time stamp update - gracefully handling error", ["verbose"]); - addLogEntry("File Metadata Update Failed - OneDrive eTag / cTag match issue", ["debug"]); - addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + if (verboseLogging) {addLogEntry("OneDrive returned a 'HTTP 409 - ETag does not match current item's value' when attempting file time stamp update - gracefully handling error", ["verbose"]);} + if (debugLogging) { + addLogEntry("File Metadata Update Failed - OneDrive eTag / cTag match issue", ["debug"]); + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + } } // Handle the 412 if (exception.httpStatusCode == 412) { // OneDrive threw a 412 error - addLogEntry("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting file time stamp update - gracefully handling error", ["verbose"]); - addLogEntry("File Metadata Update Failed - OneDrive eTag / cTag match issue", ["debug"]); - addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + if (verboseLogging) {addLogEntry("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting file time stamp update - gracefully handling error", ["verbose"]);} + if (debugLogging) { + addLogEntry("File Metadata Update Failed - OneDrive eTag / cTag match issue", 
["debug"]); + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + } } // Retry without eTag @@ -3063,7 +3123,7 @@ class SyncEngine { // Use the array we populate, rather than selecting all distinct driveId's from the database foreach (driveId; consistencyCheckDriveIdsArray) { // Make the logging more accurate - we cant update driveId as this then breaks the below queries - addLogEntry("Processing DB entries for this Drive ID: " ~ driveId, ["verbose"]); + if (verboseLogging) {addLogEntry("Processing DB entries for this Drive ID: " ~ driveId, ["verbose"]);} // Initialise the array Item[] driveItems = []; @@ -3097,8 +3157,10 @@ class SyncEngine { idsToDelete.length = 0; assumeSafeAppend(idsToDelete); // flag to delete local file as it now is no longer in sync with OneDrive - addLogEntry("Flagging to delete local item as it now is no longer in sync with OneDrive", ["debug"]); - addLogEntry("outOfSyncItem: " ~ to!string(outOfSyncItem), ["debug"]); + if (debugLogging) { + addLogEntry("Flagging to delete local item as it now is no longer in sync with OneDrive", ["debug"]); + addLogEntry("outOfSyncItem: " ~ to!string(outOfSyncItem), ["debug"]); + } idsToDelete ~= [outOfSyncItem.driveId, outOfSyncItem.id]; // delete items in idsToDelete if (idsToDelete.length > 0) processDeleteItems(); @@ -3115,13 +3177,13 @@ class SyncEngine { driveItems = getChildren(singleDirectoryScopeDriveId, singleDirectoryScopeItemId); } else { // Check everything associated with each driveId we know about - addLogEntry("Selecting DB items via itemDB.selectByDriveId(driveId)", ["debug"]); + if (debugLogging) {addLogEntry("Selecting DB items via itemDB.selectByDriveId(driveId)", ["debug"]);} // Query database driveItems = itemDB.selectByDriveId(driveId); } // Log DB items to process - addLogEntry("Database items to process for this driveId: " ~ to!string(driveItems.count), ["debug"]); + if (debugLogging) {addLogEntry("Database items to process for this driveId: " ~ 
to!string(driveItems.count), ["debug"]);} // Process each database database item associated with the driveId foreach(dbItem; driveItems) { @@ -3130,11 +3192,11 @@ class SyncEngine { } } else { // Check everything associated with each driveId we know about - addLogEntry("Selecting DB items via itemDB.selectByDriveId(driveId)", ["debug"]); + if (debugLogging) {addLogEntry("Selecting DB items via itemDB.selectByDriveId(driveId)", ["debug"]);} // Query database driveItems = itemDB.selectByDriveId(driveId); - addLogEntry("Database items to process for this driveId: " ~ to!string(driveItems.count), ["debug"]); + if (debugLogging) {addLogEntry("Database items to process for this driveId: " ~ to!string(driveItems.count), ["debug"]);} // Process each database database item associated with the driveId foreach(dbItem; driveItems) { @@ -3208,7 +3270,7 @@ class SyncEngine { } // Log what we are doing - addLogEntry("Processing: " ~ logOutputPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Processing: " ~ logOutputPath, ["verbose"]);} // Add a processing '.' 
if (!appConfig.suppressLoggingOutput) { if (appConfig.verbosityCount == 0) { @@ -3259,8 +3321,8 @@ class SyncEngine { if (localModifiedTime != itemModifiedTime) { // The modified dates are different - addLogEntry("Local file time discrepancy detected: " ~ localFilePath, ["verbose"]); - addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " (UTC) when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime) ~ " (UTC)", ["debug"]); + if (verboseLogging) {addLogEntry("Local file time discrepancy detected: " ~ localFilePath, ["verbose"]);} + if (debugLogging) {addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " (UTC) when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime) ~ " (UTC)", ["debug"]);} // Test the file hash if (!testFileHash(localFilePath, dbItem)) { @@ -3268,25 +3330,25 @@ class SyncEngine { if (localModifiedTime >= itemModifiedTime) { // Local file is newer if (!appConfig.getValueBool("download_only")) { - addLogEntry("The file content has changed locally and has a newer timestamp, thus needs to be uploaded to OneDrive", ["verbose"]); + if (verboseLogging) {addLogEntry("The file content has changed locally and has a newer timestamp, thus needs to be uploaded to OneDrive", ["verbose"]);} // Add to an array of files we need to upload as this file has changed locally in-between doing the /delta check and performing this check databaseItemsWhereContentHasChanged ~= [dbItem.driveId, dbItem.id, localFilePath]; } else { - addLogEntry("The file content has changed locally and has a newer timestamp. The file will remain different to online file due to --download-only being used", ["verbose"]); + if (verboseLogging) {addLogEntry("The file content has changed locally and has a newer timestamp. 
The file will remain different to online file due to --download-only being used", ["verbose"]);} } } else { // Local file is older - data recovery process? something else? if (!appConfig.getValueBool("download_only")) { - addLogEntry("The file content has changed locally and file now has a older timestamp. Uploading this file to OneDrive may potentially cause data-loss online", ["verbose"]); + if (verboseLogging) {addLogEntry("The file content has changed locally and file now has a older timestamp. Uploading this file to OneDrive may potentially cause data-loss online", ["verbose"]);} // Add to an array of files we need to upload as this file has changed locally in-between doing the /delta check and performing this check databaseItemsWhereContentHasChanged ~= [dbItem.driveId, dbItem.id, localFilePath]; } else { - addLogEntry("The file content has changed locally and file now has a older timestamp. The file will remain different to online file due to --download-only being used", ["verbose"]); + if (verboseLogging) {addLogEntry("The file content has changed locally and file now has a older timestamp. The file will remain different to online file due to --download-only being used", ["verbose"]);} } } } else { // The file contents have not changed, but the modified timestamp has - addLogEntry("The last modified timestamp has changed however the file content has not changed", ["verbose"]); + if (verboseLogging) {addLogEntry("The last modified timestamp has changed however the file content has not changed", ["verbose"]);} // Local file is newer .. are we in a --download-only situation? 
if (!appConfig.getValueBool("download_only")) { @@ -3297,28 +3359,28 @@ class SyncEngine { if (dbItem.type == ItemType.file) { // Not a remote file // Log what is being done - addLogEntry("The local item has the same hash value as the item online - correcting timestamp online", ["verbose"]); + if (verboseLogging) {addLogEntry("The local item has the same hash value as the item online - correcting timestamp online", ["verbose"]);} // Correct timestamp uploadLastModifiedTime(dbItem, dbItem.driveId, dbItem.id, localModifiedTime.toUTC(), dbItem.eTag); } else { // Remote file, remote values need to be used, we may not even have permission to change timestamp, update local file - addLogEntry("The local item has the same hash value as the item online, however file is a OneDrive Business Shared File - correcting local timestamp", ["verbose"]); - addLogEntry("Calling setTimes() for this file: " ~ localFilePath, ["debug"]); + if (verboseLogging) {addLogEntry("The local item has the same hash value as the item online, however file is a OneDrive Business Shared File - correcting local timestamp", ["verbose"]);} + if (debugLogging) {addLogEntry("Calling setTimes() for this file: " ~ localFilePath, ["debug"]);} setTimes(localFilePath, dbItem.mtime, dbItem.mtime); } } } else { // --download-only being used - addLogEntry("The local item has the same hash value as the item online - correcting local timestamp due to --download-only being used to ensure local file matches timestamp online", ["verbose"]); + if (verboseLogging) {addLogEntry("The local item has the same hash value as the item online - correcting local timestamp due to --download-only being used to ensure local file matches timestamp online", ["verbose"]);} if (!dryRun) { - addLogEntry("Calling setTimes() for this file: " ~ localFilePath, ["debug"]); + if (debugLogging) {addLogEntry("Calling setTimes() for this file: " ~ localFilePath, ["debug"]);} setTimes(localFilePath, dbItem.mtime, dbItem.mtime); } } } } else { // 
The file has not changed - addLogEntry("The file has not changed", ["verbose"]); + if (verboseLogging) {addLogEntry("The file has not changed", ["verbose"]);} } } else { //The file is not readable - skipped @@ -3326,14 +3388,14 @@ class SyncEngine { } } else { // The item was a file but now is a directory - addLogEntry("The item was a file but now is a directory", ["verbose"]); + if (verboseLogging) {addLogEntry("The item was a file but now is a directory", ["verbose"]);} } } else { // File does not exist locally, but is in our database as a dbItem containing all the data was passed into this function // If we are in a --dry-run situation - this file may never have existed as we never downloaded it if (!dryRun) { // Not --dry-run situation - addLogEntry("The file has been deleted locally", ["verbose"]); + if (verboseLogging) {addLogEntry("The file has been deleted locally", ["verbose"]);} // Add this to the array to handle post checking all database items databaseItemsToDeleteOnline ~= [DatabaseItemsToDeleteOnline(dbItem, localFilePath)]; } else { @@ -3342,14 +3404,14 @@ class SyncEngine { bool idsFakedMatch = false; foreach (i; idsFaked) { if (i[1] == dbItem.id) { - addLogEntry("Matched faked file which is 'supposed' to exist but not created due to --dry-run use", ["debug"]); - addLogEntry("The file has not changed", ["verbose"]); + if (debugLogging) {addLogEntry("Matched faked file which is 'supposed' to exist but not created due to --dry-run use", ["debug"]);} + if (verboseLogging) {addLogEntry("The file has not changed", ["verbose"]);} idsFakedMatch = true; } } if (!idsFakedMatch) { // dbItem.id did not match a 'faked' download new file creation - so this in-sync object was actually deleted locally, but we are in a --dry-run situation - addLogEntry("The file has been deleted locally", ["verbose"]); + if (verboseLogging) {addLogEntry("The file has been deleted locally", ["verbose"]);} // Add this to the array to handle post checking all database items 
databaseItemsToDeleteOnline ~= [DatabaseItemsToDeleteOnline(dbItem, localFilePath)]; } @@ -3368,12 +3430,12 @@ class SyncEngine { // Fix https://github.com/abraunegg/onedrive/issues/1915 try { if (!isDir(localFilePath)) { - addLogEntry("The item was a directory but now it is a file", ["verbose"]); + if (verboseLogging) {addLogEntry("The item was a directory but now it is a file", ["verbose"]);} uploadDeletedItem(dbItem, localFilePath); uploadNewFile(localFilePath); } else { // Directory still exists locally - addLogEntry("The directory has not changed", ["verbose"]); + if (verboseLogging) {addLogEntry("The directory has not changed", ["verbose"]);} // When we are using --single-directory, we use a the getChildren() call to get all children of a path, meaning all children are already traversed // Thus, if we traverse the path of this directory .. we end up with double processing & log output .. which is not ideal if (!singleDirectoryScope) { @@ -3397,11 +3459,11 @@ class SyncEngine { // Not --dry-run situation if (!appConfig.getValueBool("monitor")) { // Not in --monitor mode - addLogEntry("The directory has been deleted locally", ["verbose"]); + if (verboseLogging) {addLogEntry("The directory has been deleted locally", ["verbose"]);} } else { // Appropriate message as we are in --monitor mode - addLogEntry("The directory appears to have been deleted locally .. but we are running in --monitor mode. This may have been 'moved' on the local filesystem rather than being 'deleted'", ["verbose"]); - addLogEntry("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped", ["debug"]); + if (verboseLogging) {addLogEntry("The directory appears to have been deleted locally .. but we are running in --monitor mode. 
This may have been 'moved' on the local filesystem rather than being 'deleted'", ["verbose"]);} + if (debugLogging) {addLogEntry("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped", ["debug"]);} } // A moved directory will be uploaded as 'new', delete the old directory and database reference // Add this to the array to handle post checking all database items @@ -3412,14 +3474,14 @@ class SyncEngine { bool idsFakedMatch = false; foreach (i; idsFaked) { if (i[1] == dbItem.id) { - addLogEntry("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use", ["debug"]); - addLogEntry("The directory has not changed", ["verbose"]); + if (debugLogging) {addLogEntry("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use", ["debug"]);} + if (verboseLogging) {addLogEntry("The directory has not changed", ["verbose"]);} idsFakedMatch = true; } } if (!idsFakedMatch) { // dbItem.id did not match a 'faked' download new directory creation - so this in-sync object was actually deleted locally, but we are in a --dry-run situation - addLogEntry("The directory has been deleted locally", ["verbose"]); + if (verboseLogging) {addLogEntry("The directory has been deleted locally", ["verbose"]);} // Add this to the array to handle post checking all database items databaseItemsToDeleteOnline ~= [DatabaseItemsToDeleteOnline(dbItem, localFilePath)]; } else { @@ -3516,7 +3578,7 @@ class SyncEngine { // Do we need to check for .nosync? 
Only if --check-for-nosync was passed in if (appConfig.getValueBool("check_nosync")) { if (exists(localFilePath ~ "/.nosync")) { - addLogEntry("Skipping item - .nosync found & --check-for-nosync enabled: " ~ localFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping item - .nosync found & --check-for-nosync enabled: " ~ localFilePath, ["verbose"]);} clientSideRuleExcludesPath = true; } } @@ -3527,7 +3589,7 @@ class SyncEngine { // Do we need to check skip dot files if configured if (appConfig.getValueBool("skip_dotfiles")) { if (isDotFile(localFilePath)) { - addLogEntry("Skipping item - .file or .folder: " ~ localFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping item - .file or .folder: " ~ localFilePath, ["verbose"]);} clientSideRuleExcludesPath = true; } } @@ -3539,7 +3601,7 @@ class SyncEngine { if (isSymlink(localFilePath)) { // if config says so we skip all symlinked items if (appConfig.getValueBool("skip_symlinks")) { - addLogEntry("Skipping item - skip symbolic links configured: " ~ localFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping item - skip symbolic links configured: " ~ localFilePath, ["verbose"]);} clientSideRuleExcludesPath = true; } // skip unexisting symbolic links @@ -3563,7 +3625,7 @@ class SyncEngine { chdir(currentSyncDir); // results if (relativeLinkTest) { - addLogEntry("Not skipping item - symbolic link is a 'relative link' to target ('" ~ relativeLink ~ "') which can be supported: " ~ localFilePath, ["debug"]); + if (debugLogging) {addLogEntry("Not skipping item - symbolic link is a 'relative link' to target ('" ~ relativeLink ~ "') which can be supported: " ~ localFilePath, ["debug"]);} } else { addLogEntry("Skipping item - invalid symbolic link: "~ localFilePath, ["info", "notify"]); clientSideRuleExcludesPath = true; @@ -3577,14 +3639,14 @@ class SyncEngine { if (localFilePath != ".") { // skip_dir handling if (isDir(localFilePath)) { - addLogEntry("Checking local path: " ~ 
localFilePath, ["debug"]); + if (debugLogging) {addLogEntry("Checking local path: " ~ localFilePath, ["debug"]);} // Only check path if config is != "" if (appConfig.getValueString("skip_dir") != "") { // The path that needs to be checked needs to include the '/' // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched if (selectiveSync.isDirNameExcluded(localFilePath.strip('.'))) { - addLogEntry("Skipping path - excluded by skip_dir config: " ~ localFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping path - excluded by skip_dir config: " ~ localFilePath, ["verbose"]);} clientSideRuleExcludesPath = true; } } @@ -3592,12 +3654,12 @@ class SyncEngine { // skip_file handling if (isFile(localFilePath)) { - addLogEntry("Checking file: " ~ localFilePath, ["debug"]); + if (debugLogging) {addLogEntry("Checking file: " ~ localFilePath, ["debug"]);} // The path that needs to be checked needs to include the '/' // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched if (selectiveSync.isFileNameExcluded(localFilePath.strip('.'))) { - addLogEntry("Skipping file - excluded by skip_dir config: " ~ localFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping file - excluded by skip_file config: " ~ localFilePath, ["verbose"]);} clientSideRuleExcludesPath = true; } } @@ -3611,7 +3673,7 @@ class SyncEngine { // sync_list configured and in use if (selectiveSync.isPathExcludedViaSyncList(localFilePath)) { if ((isFile(localFilePath)) && (appConfig.getValueBool("sync_root_files")) && (rootName(localFilePath.strip('.').strip('/')) == "")) { - addLogEntry("Not skipping path due to sync_root_files inclusion: " ~ localFilePath, ["debug"]); + if (debugLogging) {addLogEntry("Not skipping path due to sync_root_files inclusion: " ~ localFilePath, ["debug"]);} } else { if (exists(appConfig.syncListFilePath)){ // skipped most likely due to inclusion in
sync_list @@ -3619,17 +3681,17 @@ class SyncEngine { // is this path a file or directory? if (isFile(localFilePath)) { // file - addLogEntry("Skipping file - excluded by sync_list config: " ~ localFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping file - excluded by sync_list config: " ~ localFilePath, ["verbose"]);} } else { // directory - addLogEntry("Skipping path - excluded by sync_list config: " ~ localFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping path - excluded by sync_list config: " ~ localFilePath, ["verbose"]);} } // flag as excluded clientSideRuleExcludesPath = true; } else { // skipped for some other reason - addLogEntry("Skipping path - excluded by user config: " ~ localFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping path - excluded by user config: " ~ localFilePath, ["verbose"]);} clientSideRuleExcludesPath = true; } } @@ -3645,7 +3707,7 @@ class SyncEngine { // Get the file size ulong thisFileSize = getSize(localFilePath); if (thisFileSize >= fileSizeLimit) { - addLogEntry("Skipping file - excluded by skip_size config: " ~ localFilePath ~ " (" ~ to!string(thisFileSize/2^^20) ~ " MB)", ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping file - excluded by skip_size config: " ~ localFilePath ~ " (" ~ to!string(thisFileSize/2^^20) ~ " MB)", ["verbose"]);} } } } @@ -3701,7 +3763,7 @@ class SyncEngine { } else { simplePathToCheck = onedriveJSONItem["name"].str; } - addLogEntry("skip_dir path to check (simple): " ~ simplePathToCheck, ["debug"]); + if (debugLogging) {addLogEntry("skip_dir path to check (simple): " ~ simplePathToCheck, ["debug"]);} // complex path if (parentInDatabase) { @@ -3709,10 +3771,10 @@ class SyncEngine { //complexPathToCheck = buildNormalizedPath(newItemPath); complexPathToCheck = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; } else { - addLogEntry("Parent details not in database - unable to compute complex path to check", ["debug"]); + 
if (debugLogging) {addLogEntry("Parent details not in database - unable to compute complex path to check", ["debug"]);} } if (!complexPathToCheck.empty) { - addLogEntry("skip_dir path to check (complex): " ~ complexPathToCheck, ["debug"]); + if (debugLogging) {addLogEntry("skip_dir path to check (complex): " ~ complexPathToCheck, ["debug"]);} } } else { simplePathToCheck = onedriveJSONItem["name"].str; @@ -3721,39 +3783,39 @@ class SyncEngine { // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder // then isDirNameExcluded matching will not work if (simplePathToCheck.canFind(":")) { - addLogEntry("Updating simplePathToCheck to remove 'root:'", ["debug"]); + if (debugLogging) {addLogEntry("Updating simplePathToCheck to remove 'root:'", ["debug"]);} simplePathToCheck = processPathToRemoveRootReference(simplePathToCheck); } if (complexPathToCheck.canFind(":")) { - addLogEntry("Updating complexPathToCheck to remove 'root:'", ["debug"]); + if (debugLogging) {addLogEntry("Updating complexPathToCheck to remove 'root:'", ["debug"]);} complexPathToCheck = processPathToRemoveRootReference(complexPathToCheck); } // OK .. what checks are we doing? 
if ((!simplePathToCheck.empty) && (complexPathToCheck.empty)) { // just a simple check - addLogEntry("Performing a simple check only", ["debug"]); + if (debugLogging) {addLogEntry("Performing a simple check only", ["debug"]);} clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(simplePathToCheck); } else { // simple and complex - addLogEntry("Performing a simple then complex path match if required", ["debug"]); + if (debugLogging) {addLogEntry("Performing a simple then complex path match if required", ["debug"]);} // simple first - addLogEntry("Performing a simple check first", ["debug"]); + if (debugLogging) {addLogEntry("Performing a simple check first", ["debug"]);} clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(simplePathToCheck); matchDisplay = simplePathToCheck; if (!clientSideRuleExcludesPath) { - addLogEntry("Simple match was false, attempting complex match", ["debug"]); + if (debugLogging) {addLogEntry("Simple match was false, attempting complex match", ["debug"]);} // simple didnt match, perform a complex check clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(complexPathToCheck); matchDisplay = complexPathToCheck; } } // End Result - addLogEntry("skip_dir exclude result (directory based): " ~ to!string(clientSideRuleExcludesPath), ["debug"]); + if (debugLogging) {addLogEntry("skip_dir exclude result (directory based): " ~ to!string(clientSideRuleExcludesPath), ["debug"]);} if (clientSideRuleExcludesPath) { // This path should be skipped - addLogEntry("Skipping path - excluded by skip_dir config: " ~ matchDisplay, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping path - excluded by skip_dir config: " ~ matchDisplay, ["verbose"]);} } } } @@ -3781,7 +3843,7 @@ class SyncEngine { // Compute this item path & need the full path for this file jsonItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; // Log the calculation - addLogEntry("New Item calculated full path is: " ~ jsonItemPath, 
["debug"]); + if (debugLogging) {addLogEntry("New Item calculated full path is: " ~ jsonItemPath, ["debug"]);} // The path that needs to be checked needs to include the '/' // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched @@ -3792,7 +3854,7 @@ class SyncEngine { } // what are we checking - addLogEntry("skip_file item to check (full calculated path): " ~ exclusionTestPath, ["debug"]); + if (debugLogging) {addLogEntry("skip_file item to check (full calculated path): " ~ exclusionTestPath, ["debug"]);} } else { // parent not in database, we can only check using this JSON item's name if (!startsWith(thisItemName, "/")){ @@ -3801,17 +3863,17 @@ class SyncEngine { } // what are we checking - addLogEntry("skip_file item to check (file name only - parent path not in database): " ~ exclusionTestPath, ["debug"]); + if (debugLogging) {addLogEntry("skip_file item to check (file name only - parent path not in database): " ~ exclusionTestPath, ["debug"]);} clientSideRuleExcludesPath = selectiveSync.isFileNameExcluded(exclusionTestPath); } // Perform the 'skip_file' evaluation clientSideRuleExcludesPath = selectiveSync.isFileNameExcluded(exclusionTestPath); - addLogEntry("Result: " ~ to!string(clientSideRuleExcludesPath), ["debug"]); + if (debugLogging) {addLogEntry("Result: " ~ to!string(clientSideRuleExcludesPath), ["debug"]);} if (clientSideRuleExcludesPath) { // This path should be skipped - addLogEntry("Skipping file - excluded by skip_dir config: " ~ exclusionTestPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping file - excluded by skip_dir config: " ~ exclusionTestPath, ["verbose"]);} } } } @@ -3865,7 +3927,7 @@ class SyncEngine { // Does the path contain HTML encoding? 
if (containsURLEncodedItems(selfBuiltPath)) { // decode it - addLogEntry("selfBuiltPath for sync_list check needs decoding: " ~ selfBuiltPath, ["debug"]); + if (debugLogging) {addLogEntry("selfBuiltPath for sync_list check needs decoding: " ~ selfBuiltPath, ["debug"]);} try { // try and decode selfBuiltPath @@ -3890,8 +3952,10 @@ class SyncEngine { // Check for HTML entities (e.g., '%20' for space) in newItemPath if (containsURLEncodedItems(newItemPath)) { addLogEntry("CAUTION: The JSON element transmitted by the Microsoft OneDrive API includes HTML URL encoded items, which may complicate pattern matching and potentially lead to synchronisation problems for this item."); - addLogEntry("WORKAROUND: An alternative solution could be to change the name of this item through the online platform: " ~ newItemPath, ["verbose"]); - addLogEntry("See: https://github.com/OneDrive/onedrive-api-docs/issues/1765 for further details", ["verbose"]); + if (verboseLogging) { + addLogEntry("WORKAROUND: An alternative solution could be to change the name of this item through the online platform: " ~ newItemPath, ["verbose"]); + addLogEntry("See: https://github.com/OneDrive/onedrive-api-docs/issues/1765 for further details", ["verbose"]); + } } // If this is a Shared Folder, we need to 'trim' the resulting path to that of the 'folder' that is actually shared with us so that this can be appropriatly checked against 'sync_list' entries @@ -3913,7 +3977,7 @@ class SyncEngine { } // What path are we checking against sync_list? 
- addLogEntry("Path to check against 'sync_list' entries: " ~ newItemPath, ["debug"]); + if (debugLogging) {addLogEntry("Path to check against 'sync_list' entries: " ~ newItemPath, ["debug"]);} // Unfortunately there is no avoiding this call to check if the path is excluded|included via sync_list if (selectiveSync.isPathExcludedViaSyncList(newItemPath)) { @@ -3931,7 +3995,7 @@ class SyncEngine { if (!syncListSkippedParentIds.canFind(thisItemId)) { if (isItemFolder(onedriveJSONItem)) { // Detail we are skipping this JSON data from online - addLogEntry("Skipping path - excluded by sync_list config: " ~ newItemPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping path - excluded by sync_list config: " ~ newItemPath, ["verbose"]);} // Add this folder id to the elements we have already detailed we are skipping, so we do no output this again syncListSkippedParentIds ~= thisItemId; } @@ -3939,19 +4003,19 @@ class SyncEngine { // If this is a 'add shortcut to onedrive' link, we need to actually scan this path, so add this we need to pass this JSON if (isItemRemote(onedriveJSONItem)) { - addLogEntry("Skipping shared folder shortcut - excluded by sync_list config: " ~ newItemPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping shared folder shortcut - excluded by sync_list config: " ~ newItemPath, ["verbose"]);} } } } else { // Is this a file or directory? if (isItemFile(onedriveJSONItem)) { // File included due to 'sync_list' match - addLogEntry("Including file - included by sync_list config: " ~ newItemPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Including file - included by sync_list config: " ~ newItemPath, ["verbose"]);} // Is the parent item in the database? 
if (!parentInDatabase) { // Parental database structure needs to be created - addLogEntry("Parental Path structure needs to be created to support included file: " ~ dirName(newItemPath), ["verbose"]); + if (verboseLogging) {addLogEntry("Parental Path structure needs to be created to support included file: " ~ dirName(newItemPath), ["verbose"]);} // Recursivly, stepping backward from 'thisItemParentId', query online, save entry to DB createLocalPathStructure(onedriveJSONItem); @@ -3963,7 +4027,7 @@ class SyncEngine { } } else { // Directory included due to 'sync_list' match - addLogEntry("Including path - included by sync_list config: " ~ newItemPath, ["verbose"]); + if (verboseLogging) {addLogEntry("Including path - included by sync_list config: " ~ newItemPath, ["verbose"]);} } } } @@ -3974,7 +4038,7 @@ class SyncEngine { if (isItemFile(onedriveJSONItem)) { if (fileSizeLimit != 0) { if (onedriveJSONItem["size"].integer >= fileSizeLimit) { - addLogEntry("Skipping file - excluded by skip_size config: " ~ thisItemName ~ " (" ~ to!string(onedriveJSONItem["size"].integer/2^^20) ~ " MB)", ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping file - excluded by skip_size config: " ~ thisItemName ~ " (" ~ to!string(onedriveJSONItem["size"].integer/2^^20) ~ " MB)", ["verbose"]);} clientSideRuleExcludesPath = true; } } @@ -3998,7 +4062,7 @@ class SyncEngine { string thisItemParentId; // Log what we recieved to analyse - addLogEntry("createLocalPathStructure input onedriveJSONItem: " ~ to!string(onedriveJSONItem), ["debug"]); + if (debugLogging) {addLogEntry("createLocalPathStructure input onedriveJSONItem: " ~ to!string(onedriveJSONItem), ["debug"]);} // Configure these variables based on the JSON input thisItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str; @@ -4089,9 +4153,9 @@ class SyncEngine { void processChangedLocalItemsToUploadInParallel(string[3][] array) { // This function received an array of string items to upload, the number of elements based 
on appConfig.getValueLong("threads") foreach (i, localItemDetails; processPool.parallel(array)) { - addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]); + if (debugLogging) {addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]);} uploadChangedLocalFileToOneDrive(localItemDetails); - addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]); + if (debugLogging) {addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]);} } } @@ -4104,7 +4168,7 @@ class SyncEngine { string localFilePath = localItemDetails[2]; // Log the path that was modified - addLogEntry("uploadChangedLocalFileToOneDrive: " ~ localFilePath, ["debug"]); + if (debugLogging) {addLogEntry("uploadChangedLocalFileToOneDrive: " ~ localFilePath, ["debug"]);} // How much space is remaining on OneDrive ulong remainingFreeSpace; @@ -4162,8 +4226,10 @@ class SyncEngine { ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal; // Based on what we know, for this thread - can we safely upload this modified local file? - addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpace), ["debug"]); - addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]); + if (debugLogging) { + addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpace), ["debug"]); + addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]); + } JSONValue uploadResponse; // Is there quota available for the given drive where we are uploading to? 
@@ -4324,12 +4390,12 @@ class SyncEngine { currentETag = currentOnlineData["eTag"].str; } else { // Use the database value - greater potential for a 412 error to occur if we are creating a session upload - addLogEntry("Online data for file returned zero eTag - using database eTag value", ["debug"]); + if (debugLogging) {addLogEntry("Online data for file returned zero eTag - using database eTag value", ["debug"]);} currentETag = dbItem.eTag; } } else { // no valid JSON response - greater potential for a 412 error to occur if we are creating a session upload - addLogEntry("Online data returned was invalid - using database eTag value", ["debug"]); + if (debugLogging) {addLogEntry("Online data returned was invalid - using database eTag value", ["debug"]);} currentETag = dbItem.eTag; } @@ -4355,9 +4421,11 @@ class SyncEngine { // Which file is newer? If local is newer, it will be uploaded as a modified file in the correct manner if (localModifiedTime < onlineModifiedTime) { // Online File is actually newer than the locally modified file - addLogEntry("currentOnlineData: " ~ to!string(currentOnlineData), ["debug"]); - addLogEntry("onlineFile: " ~ to!string(onlineFile), ["debug"]); - addLogEntry("database item: " ~ to!string(dbItem), ["debug"]); + if (debugLogging) { + addLogEntry("currentOnlineData: " ~ to!string(currentOnlineData), ["debug"]); + addLogEntry("onlineFile: " ~ to!string(onlineFile), ["debug"]); + addLogEntry("database item: " ~ to!string(dbItem), ["debug"]); + } addLogEntry("Skipping uploading this item as a locally modified file, will upload as a new file (online file already exists and is newer): " ~ localFilePath); // Online is newer, rename local, then upload the renamed file @@ -4466,7 +4534,7 @@ class SyncEngine { } } else { // Create session Upload URL failed - addLogEntry("Unable to upload modified file as the creation of the upload session URL failed", ["debug"]); + if (debugLogging) {addLogEntry("Unable to upload modified file as the creation of 
the upload session URL failed", ["debug"]);} } } } else { @@ -4475,7 +4543,7 @@ class SyncEngine { } // Debug Log the modified upload response - addLogEntry("Modified File Upload Response: " ~ to!string(uploadResponse), ["debug"]); + if (debugLogging) {addLogEntry("Modified File Upload Response: " ~ to!string(uploadResponse), ["debug"]);} // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory uploadFileOneDriveApiInstance.releaseCurlEngine(); @@ -4509,7 +4577,7 @@ class SyncEngine { // Create a new OneDrive API instance getCurrentDriveQuotaApiInstance = new OneDriveApi(appConfig); getCurrentDriveQuotaApiInstance.initialise(); - addLogEntry("Seeking available quota for this drive id: " ~ driveId, ["debug"]); + if (debugLogging) {addLogEntry("Seeking available quota for this drive id: " ~ driveId, ["debug"]);} currentDriveQuota = getCurrentDriveQuotaApiInstance.getDriveQuota(driveId); // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory @@ -4519,7 +4587,7 @@ class SyncEngine { GC.collect(); } catch (OneDriveException e) { - addLogEntry("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException", ["debug"]); + if (debugLogging) {addLogEntry("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException", ["debug"]);} // If an exception occurs, it's unclear if quota is restricted, but quota details are not available quotaRestricted = true; // Considering restricted due to failure to access // Return result @@ -4540,7 +4608,7 @@ class SyncEngine { // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data // If 'business' accounts, if driveId == defaultDriveId, then we will have data // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - addLogEntry("Quota Details: " ~ to!string(currentDriveQuota), ["debug"]); + if (debugLogging) {addLogEntry("Quota Details: " ~ to!string(currentDriveQuota), 
["debug"]);} JSONValue quota = currentDriveQuota["quota"]; if ("remaining" in quota) { @@ -4560,7 +4628,7 @@ class SyncEngine { if (appConfig.accountType == "personal") { addLogEntry("ERROR: OneDrive account currently has zero space available. Please free up some space online or purchase additional capacity."); } else { // Assuming 'business' or 'sharedLibrary' - addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator." , ["verbose"]); + if (verboseLogging) {addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator." , ["verbose"]);} } } } else { @@ -4569,20 +4637,20 @@ class SyncEngine { // what sort of account type is this? if (appConfig.accountType == "personal") { - addLogEntry("ERROR: OneDrive quota information is missing. Your OneDrive account potentially has zero space available. Please free up some space online.", ["verbose"]); + if (verboseLogging) {addLogEntry("ERROR: OneDrive quota information is missing. Your OneDrive account potentially has zero space available. Please free up some space online.", ["verbose"]);} } else { // quota details not available - addLogEntry("WARNING: OneDrive quota information is being restricted. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); + if (verboseLogging) {addLogEntry("WARNING: OneDrive quota information is being restricted. 
Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]);} } } } else { // When valid quota details are not fetched - addLogEntry("Failed to fetch or query quota details for OneDrive Drive ID: " ~ driveId, ["verbose"]); + if (verboseLogging) {addLogEntry("Failed to fetch or query quota details for OneDrive Drive ID: " ~ driveId, ["verbose"]);} quotaRestricted = true; // Considering restricted due to failure to interpret } // What was the determined available quota? - addLogEntry("Reported Available Online Quota for driveID '" ~ driveId ~ "': " ~ to!string(quotaRemainingOnline), ["debug"]); + if (debugLogging) {addLogEntry("Reported Available Online Quota for driveID '" ~ driveId ~ "': " ~ to!string(quotaRemainingOnline), ["debug"]);} // Return result result ~= [to!string(quotaRestricted), to!string(quotaAvailable), to!string(quotaRemainingOnline)]; @@ -4636,8 +4704,11 @@ class SyncEngine { } } - auto startTime = Clock.currTime(); - addLogEntry("Starting Filesystem Walk: " ~ to!string(startTime), ["debug"]); + SysTime startTime; + if (debugLogging) { + startTime = Clock.currTime(); + addLogEntry("Starting Filesystem Walk (Local Time): " ~ to!string(startTime), ["debug"]); + } // Add a processing '.' 
if this is a directory we are scanning if (exists(path)) { @@ -4668,13 +4739,15 @@ class SyncEngine { } // To finish off the processing items, this is needed to reflect this in the log - addLogEntry("------------------------------------------------------------------", ["debug"]); - - auto finishTime = Clock.currTime(); - addLogEntry("Finished Filesystem Walk: " ~ to!string(finishTime), ["debug"]); - - auto elapsedTime = finishTime - startTime; - addLogEntry("Elapsed Time Filesystem Walk: " ~ to!string(elapsedTime), ["debug"]); + if (debugLogging) { + addLogEntry("------------------------------------------------------------------", ["debug"]); + // finish filesystem walk time + SysTime finishTime = Clock.currTime(); + addLogEntry("Finished Filesystem Walk (Local Time): " ~ to!string(finishTime), ["debug"]); + // duration + Duration elapsedTime = finishTime - startTime; + addLogEntry("Elapsed Time Filesystem Walk: " ~ to!string(elapsedTime), ["debug"]); + } } void processNewDirectoriesToCreateOnline() { @@ -4708,25 +4781,29 @@ class SyncEngine { } // How much data is there to upload - if (totalDataToUpload < 1024) { - // Display as Bytes to upload - addLogEntry("Total New Data to Upload: " ~ to!string(totalDataToUpload) ~ " Bytes", ["verbose"]); - } else { - if ((totalDataToUpload > 1024) && (totalDataToUpload < 1048576)) { - // Display as KB to upload - addLogEntry("Total New Data to Upload: " ~ to!string((totalDataToUpload / 1024)) ~ " KB", ["verbose"]); + if (verboseLogging) { + if (totalDataToUpload < 1024) { + // Display as Bytes to upload + addLogEntry("Total New Data to Upload: " ~ to!string(totalDataToUpload) ~ " Bytes", ["verbose"]); } else { - // Display as MB to upload - addLogEntry("Total New Data to Upload: " ~ to!string((totalDataToUpload / 1024 / 1024)) ~ " MB", ["verbose"]); + if ((totalDataToUpload > 1024) && (totalDataToUpload < 1048576)) { + // Display as KB to upload + addLogEntry("Total New Data to Upload: " ~ to!string((totalDataToUpload / 
1024)) ~ " KB", ["verbose"]); + } else { + // Display as MB to upload + addLogEntry("Total New Data to Upload: " ~ to!string((totalDataToUpload / 1024 / 1024)) ~ " MB", ["verbose"]); + } } } // How much space is available // The file, could be uploaded to a shared folder, which, we are not tracking how much free space is available there ... // Iterate through all the drives we have cached thus far, that we know about - foreach (driveId, driveDetails; onlineDriveDetails) { - // Log how much space is available for each driveId - addLogEntry("Current Available Space Online (" ~ driveId ~ "): " ~ to!string((driveDetails.quotaRemaining / 1024 / 1024)) ~ " MB", ["debug"]); + if (debugLogging) { + foreach (driveId, driveDetails; onlineDriveDetails) { + // Log how much space is available for each driveId + addLogEntry("Current Available Space Online (" ~ driveId ~ "): " ~ to!string((driveDetails.quotaRemaining / 1024 / 1024)) ~ " MB", ["debug"]); + } } // Perform the upload @@ -4756,7 +4833,7 @@ class SyncEngine { // Add this logging break to assist with what was checked for each path if (path != ".") { - addLogEntry("------------------------------------------------------------------", ["debug"]); + if (debugLogging) {addLogEntry("------------------------------------------------------------------", ["debug"]);} } // https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders @@ -4801,7 +4878,7 @@ class SyncEngine { } catch (std.utf.UTFException e) { // Path contains characters which generate a UTF exception addLogEntry("Skipping item - invalid UTF sequence: " ~ path, ["info", "notify"]); - addLogEntry(" Error Reason:" ~ e.msg, ["debug"]); + if (debugLogging) {addLogEntry(" Error Reason:" ~ e.msg, ["debug"]);} return; } @@ -4965,7 +5042,7 @@ class SyncEngine { if (isFile(path)) { // Is the file a '.nosync' file? 
if (canFind(path, ".nosync")) { - addLogEntry("Skipping .nosync file", ["debug"]); + if (debugLogging) {addLogEntry("Skipping .nosync file", ["debug"]);} return; } @@ -4976,7 +5053,7 @@ class SyncEngine { if (!cleanupLocalFiles) { // --download-only --cleanup-local-files not used // Add this path as a file we need to upload - addLogEntry("OneDrive Client flagging to upload this file to Microsoft OneDrive: " ~ path, ["debug"]); + if (debugLogging) {addLogEntry("OneDrive Client flagging to upload this file to Microsoft OneDrive: " ~ path, ["debug"]);} newLocalFilesToUploadToOneDrive ~= path; } else { // we need to clean up this file @@ -5021,14 +5098,14 @@ class SyncEngine { if (!fileFoundInDB) { // This is a new file as it is not in the database // Log that the file has been added locally - addLogEntry("[M] New local file added: " ~ localFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("[M] New local file added: " ~ localFilePath, ["verbose"]);} scanLocalFilesystemPathForNewDataToUpload(localFilePath); } else { // This is a potentially modified file, needs to be handled as such. Is the item truly modified? 
if (!testFileHash(localFilePath, databaseItem)) { // The local file failed the hash comparison test - there is a data difference // Log that the file has changed locally - addLogEntry("[M] Local file changed: " ~ localFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("[M] Local file changed: " ~ localFilePath, ["verbose"]);} // Add the modified item to the array to upload uploadChangedLocalFileToOneDrive([databaseItem.driveId, databaseItem.id, localFilePath]); } @@ -5045,11 +5122,11 @@ class SyncEngine { // Check if this path in the database Item databaseItem; - addLogEntry("Search DB for this path: " ~ searchPath, ["debug"]); + if (debugLogging) {addLogEntry("Search DB for this path: " ~ searchPath, ["debug"]);} foreach (driveId; onlineDriveDetails.keys) { if (itemDB.selectByPath(searchPath, driveId, databaseItem)) { - addLogEntry("DB Record for search path: " ~ to!string(databaseItem), ["debug"]); + if (debugLogging) {addLogEntry("DB Record for search path: " ~ to!string(databaseItem), ["debug"]);} return true; // Early exit on finding the path in the DB } } @@ -5061,7 +5138,7 @@ class SyncEngine { // for the path flow and create the folder that way void createDirectoryOnline(string thisNewPathToCreate) { // Log what we are doing - addLogEntry("OneDrive Client requested to create this directory online: " ~ thisNewPathToCreate, ["verbose"]); + if (verboseLogging) {addLogEntry("OneDrive Client requested to create this directory online: " ~ thisNewPathToCreate, ["verbose"]);} // Function variables Item parentItem; @@ -5092,7 +5169,7 @@ class SyncEngine { parentItem.id = appConfig.defaultRootId; // Should give something like 12345ABCDE1234A1!101 } else { // Query the parent path online - addLogEntry("Attempting to query Local Database for this parent path: " ~ parentPath, ["debug"]); + if (debugLogging) {addLogEntry("Attempting to query Local Database for this parent path: " ~ parentPath, ["debug"]);} // Attempt a 2 step process to work out where to create 
the directory // Step 1: Query the DB first for the parent path, to try and avoid an API call @@ -5103,26 +5180,29 @@ class SyncEngine { bool parentPathFoundInDB = false; foreach (driveId; onlineDriveDetails.keys) { - addLogEntry("Query DB with this driveID for the Parent Path: " ~ driveId, ["debug"]); + if (debugLogging) {addLogEntry("Query DB with this driveID for the Parent Path: " ~ driveId, ["debug"]);} // Query the database for this parent path using each driveId that we know about if (itemDB.selectByPath(parentPath, driveId, databaseItem)) { parentPathFoundInDB = true; - addLogEntry("Parent databaseItem: " ~ to!string(databaseItem), ["debug"]); - addLogEntry("parentPathFoundInDB: " ~ to!string(parentPathFoundInDB), ["debug"]); + if (debugLogging) { + addLogEntry("Parent databaseItem: " ~ to!string(databaseItem), ["debug"]); + addLogEntry("parentPathFoundInDB: " ~ to!string(parentPathFoundInDB), ["debug"]); + } + // Set parentItem to the item returned from the database parentItem = databaseItem; } } // After querying all DB entries for each driveID for the parent path, what are the details in parentItem? 
- addLogEntry("Parent parentItem after DB Query exhausted: " ~ to!string(parentItem), ["debug"]); + if (debugLogging) {addLogEntry("Parent parentItem after DB Query exhausted: " ~ to!string(parentItem), ["debug"]);} // Step 2: Query for the path online if not found in the local database if (!parentPathFoundInDB) { // parent path not found in database try { - addLogEntry("Attempting to query OneDrive Online for this parent path as path not found in local database: " ~ parentPath, ["debug"]); + if (debugLogging) {addLogEntry("Attempting to query OneDrive Online for this parent path as path not found in local database: " ~ parentPath, ["debug"]);} onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetails(parentPath); - addLogEntry("Online Parent Path Query Response: " ~ to!string(onlinePathData), ["debug"]); + if (debugLogging) {addLogEntry("Online Parent Path Query Response: " ~ to!string(onlinePathData), ["debug"]);} // Save item to the database saveItem(onlinePathData); @@ -5130,7 +5210,7 @@ class SyncEngine { } catch (OneDriveException exception) { if (exception.httpStatusCode == 404) { // Parent does not exist ... 
need to create parent - addLogEntry("Parent path does not exist online: " ~ parentPath, ["debug"]); + if (debugLogging) {addLogEntry("Parent path does not exist online: " ~ parentPath, ["debug"]);} createDirectoryOnline(parentPath); // no return here as we need to continue, but need to re-query the OneDrive API to get the right parental details now that they exist onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetails(parentPath); @@ -5149,8 +5229,10 @@ class SyncEngine { // Make sure the full path does not exist online, this should generate a 404 response, to which then the folder will be created online try { // Try and query the OneDrive API for the path we need to create - addLogEntry("Attempting to query OneDrive API for this path: " ~ thisNewPathToCreate, ["debug"]); - addLogEntry("parentItem details: " ~ to!string(parentItem), ["debug"]); + if (debugLogging) { + addLogEntry("Attempting to query OneDrive API for this path: " ~ thisNewPathToCreate, ["debug"]); + addLogEntry("parentItem details: " ~ to!string(parentItem), ["debug"]); + } // Depending on the data within parentItem, will depend on what method we are using to search // A Shared Folder will be 'remote' so we need to check the remote parent id, rather than parentItem details @@ -5158,7 +5240,7 @@ class SyncEngine { if (parentItem.type == ItemType.remote) { // This folder is a potential shared object - addLogEntry("ParentItem is a remote item object", ["debug"]); + if (debugLogging) {addLogEntry("ParentItem is a remote item object", ["debug"]);} // Need to create the DB Tie for this shared object to ensure this exists in the database createDatabaseTieRecordForOnlineSharedFolder(parentItem); // Update the queryItem values @@ -5166,23 +5248,23 @@ class SyncEngine { queryItem.id = parentItem.remoteId; } else { // Use parent item for the query item - addLogEntry("Standard Query, use parentItem", ["debug"]); + if (debugLogging) {addLogEntry("Standard Query, use parentItem", ["debug"]);} 
queryItem = parentItem; } if (queryItem.driveId == appConfig.defaultDriveId) { // Use getPathDetailsByDriveId - addLogEntry("Selecting getPathDetailsByDriveId to query OneDrive API for path data", ["debug"]); + if (debugLogging) {addLogEntry("Selecting getPathDetailsByDriveId to query OneDrive API for path data", ["debug"]);} onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsByDriveId(queryItem.driveId, thisNewPathToCreate); } else { // Use searchDriveForPath to query OneDrive - addLogEntry("Selecting searchDriveForPath to query OneDrive API for path data", ["debug"]); + if (debugLogging) {addLogEntry("Selecting searchDriveForPath to query OneDrive API for path data", ["debug"]);} // If the queryItem.driveId is not our driveId - the path we are looking for will not be at the logical location that getPathDetailsByDriveId // can use - as it will always return a 404 .. even if the path actually exists (which is the whole point of this test) // Search the queryItem.driveId for any folder name match that we are going to create, then compare response JSON items with queryItem.id // If no match, the folder we want to create does not exist at the location we are seeking to create it at, thus generate a 404 onlinePathData = createDirectoryOnlineOneDriveApiInstance.searchDriveForPath(queryItem.driveId, baseName(thisNewPathToCreate)); - addLogEntry("onlinePathData: " ~to!string(onlinePathData), ["debug"]); + if (debugLogging) {addLogEntry("onlinePathData: " ~to!string(onlinePathData), ["debug"]);} // Process the response from searching the drive ulong responseCount = count(onlinePathData["value"].array); @@ -5198,8 +5280,10 @@ class SyncEngine { // Direct Match Check if ((queryItem.id == thisChildItem.parentId) && (baseName(thisNewPathToCreate) == thisChildItem.name)) { // High confidence that this child folder is a direct match we are trying to create and it already exists online - addLogEntry("Path we are searching for exists online (Direct Match): " ~ 
baseName(thisNewPathToCreate), ["debug"]); - addLogEntry("childJSON: " ~ to!string(childJSON), ["debug"]); + if (debugLogging) { + addLogEntry("Path we are searching for exists online (Direct Match): " ~ baseName(thisNewPathToCreate), ["debug"]); + addLogEntry("childJSON: " ~ to!string(childJSON), ["debug"]); + } foundDirectoryOnline = true; foundDirectoryJSONItem = childJSON; break; @@ -5218,8 +5302,10 @@ class SyncEngine { if (queryItem.id == thisChildItem.parentId) { // Found the directory in the location, using case in-sensitive matching - addLogEntry("Path we are searching for exists online (POSIX 'case in-sensitive match'): " ~ baseName(thisNewPathToCreate), ["debug"]); - addLogEntry("childJSON: " ~ to!string(childJSON), ["debug"]); + if (debugLogging) { + addLogEntry("Path we are searching for exists online (POSIX 'case in-sensitive match'): " ~ baseName(thisNewPathToCreate), ["debug"]); + addLogEntry("childJSON: " ~ to!string(childJSON), ["debug"]); + } foundDirectoryOnline = true; foundDirectoryJSONItem = childJSON; break; @@ -5230,7 +5316,7 @@ class SyncEngine { if (foundDirectoryOnline) { // Directory we are seeking was found online ... 
- addLogEntry("The directory we are seeking was found online by using searchDriveForPath ...", ["debug"]); + if (debugLogging) {addLogEntry("The directory we are seeking was found online by using searchDriveForPath ...", ["debug"]);} onlinePathData = foundDirectoryJSONItem; } else { // No 'search item matches found' - raise a 404 so that the exception handling will take over to create the folder @@ -5245,7 +5331,7 @@ class SyncEngine { if (exception.httpStatusCode == 404) { // This is a good error - it means that the directory to create 100% does not exist online // The directory was not found on the drive id we queried - addLogEntry("The requested directory to create was not found on OneDrive - creating remote directory: " ~ thisNewPathToCreate, ["verbose"]); + if (verboseLogging) {addLogEntry("The requested directory to create was not found on OneDrive - creating remote directory: " ~ thisNewPathToCreate, ["verbose"]);} // Build up the online create directory request JSONValue createDirectoryOnlineAPIResponse; @@ -5265,7 +5351,7 @@ class SyncEngine { // Is the item a Remote Object (Shared Folder) ? if (parentItem.type == ItemType.remote) { // Yes .. Shared Folder - addLogEntry("parentItem data: " ~ to!string(parentItem), ["debug"]); + if (debugLogging) {addLogEntry("parentItem data: " ~ to!string(parentItem), ["debug"]);} requiredDriveId = parentItem.remoteDriveId; requiredParentItemId = parentItem.remoteId; } else { @@ -5275,9 +5361,11 @@ class SyncEngine { } // Where are we creating this new folder? 
- addLogEntry("requiredDriveId: " ~ requiredDriveId, ["debug"]); - addLogEntry("requiredParentItemId: " ~ requiredParentItemId, ["debug"]); - addLogEntry("newDriveItem JSON: " ~ to!string(newDriveItem), ["debug"]); + if (debugLogging) { + addLogEntry("requiredDriveId: " ~ requiredDriveId, ["debug"]); + addLogEntry("requiredParentItemId: " ~ requiredParentItemId, ["debug"]); + addLogEntry("newDriveItem JSON: " ~ to!string(newDriveItem), ["debug"]); + } // Create the new folder createDirectoryOnlineAPIResponse = createDirectoryOnlineOneDriveApiInstance.createById(requiredDriveId, requiredParentItemId, newDriveItem); @@ -5289,7 +5377,7 @@ class SyncEngine { if (exception.httpStatusCode == 409) { // OneDrive API returned a 404 (above) to say the directory did not exist // but when we attempted to create it, OneDrive responded that it now already exists - addLogEntry("OneDrive reported that " ~ thisNewPathToCreate ~ " already exists .. OneDrive API race condition", ["verbose"]); + if (verboseLogging) {addLogEntry("OneDrive reported that " ~ thisNewPathToCreate ~ " already exists .. 
OneDrive API race condition", ["verbose"]);} // Shutdown this API instance, as we will create API instances as required, when required createDirectoryOnlineOneDriveApiInstance.releaseCurlEngine(); // Free object and memory @@ -5338,7 +5426,7 @@ class SyncEngine { createDirectoryOnline(thisNewPathToCreate); } else { // We cant create this directory online - addLogEntry("This folder cannot be created online: " ~ buildNormalizedPath(absolutePath(thisNewPathToCreate)), ["debug"]); + if (debugLogging) {addLogEntry("This folder cannot be created online: " ~ buildNormalizedPath(absolutePath(thisNewPathToCreate)), ["debug"]);} } } } @@ -5351,17 +5439,17 @@ class SyncEngine { // OneDrive 'name' matches local path name if (appConfig.accountType == "business") { // We are a business account, this existing online folder, could be a Shared Online Folder could be a 'Add shortcut to My files' item - addLogEntry("onlinePathData: " ~ to!string(onlinePathData), ["debug"]); + if (debugLogging) {addLogEntry("onlinePathData: " ~ to!string(onlinePathData), ["debug"]);} // Is this a remote folder if (isItemRemote(onlinePathData)) { // The folder is a remote item ... we do not want to create this ... - addLogEntry("Existing Remote Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'", ["debug"]); + if (debugLogging) {addLogEntry("Existing Remote Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'", ["debug"]);} // Is Shared Business Folder Syncing enabled ? 
if (!appConfig.getValueBool("sync_business_shared_items")) { // Shared Business Folder Syncing is NOT enabled - addLogEntry("We need to skip this path: " ~ thisNewPathToCreate, ["debug"]); + if (debugLogging) {addLogEntry("We need to skip this path: " ~ thisNewPathToCreate, ["debug"]);} // Add this path to businessSharedFoldersOnlineToSkip businessSharedFoldersOnlineToSkip ~= [thisNewPathToCreate]; // no save to database, no online create @@ -5388,7 +5476,7 @@ class SyncEngine { } // Path found online - addLogEntry("The requested directory to create was found on OneDrive - skipping creating the directory: " ~ thisNewPathToCreate, ["verbose"]); + if (verboseLogging) {addLogEntry("The requested directory to create was found on OneDrive - skipping creating the directory: " ~ thisNewPathToCreate, ["verbose"]);} // Is the response a valid JSON object - validation checking done in saveItem saveItem(onlinePathData); @@ -5432,14 +5520,13 @@ class SyncEngine { // Test that the online name actually matches the requested local name bool performPosixTest(string localNameToCheck, string onlineName) { - // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, // even though some file systems (such as a POSIX-compliant file system) may consider them as different. // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior. 
- bool posixIssue = false; + // Is the name different if (localNameToCheck != onlineName) { // POSIX Error // Local item name has a 'case-insensitive match' to an existing item on OneDrive @@ -5465,16 +5552,16 @@ class SyncEngine { void uploadNewLocalFileItemsInParallel(string[] array) { // This function received an array of string items to upload, the number of elements based on appConfig.getValueLong("threads") foreach (i, fileToUpload; processPool.parallel(array)) { - addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]); + if (debugLogging) {addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]);} uploadNewFile(fileToUpload); - addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]); + if (debugLogging) {addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]);} } } // Upload a new file to OneDrive void uploadNewFile(string fileToUpload) { // Debug for the moment - addLogEntry("fileToUpload: " ~ fileToUpload, ["debug"]); + if (debugLogging) {addLogEntry("fileToUpload: " ~ fileToUpload, ["debug"]);} // These are the details of the item we need to upload // How much space is remaining on OneDrive @@ -5523,7 +5610,7 @@ class SyncEngine { // If the parent path was found in the DB, to ensure we are uploading the the right location 'parentItem.driveId' must not be empty if ((parentPathFoundInDB) && (parentItem.driveId.empty)) { // switch to using defaultDriveId - addLogEntry("parentItem.driveId is empty - using defaultDriveId for upload API calls", ["debug"]); + if (debugLogging) {addLogEntry("parentItem.driveId is empty - using defaultDriveId for upload API calls", ["debug"]);} parentItem.driveId = appConfig.defaultDriveId; } @@ -5568,8 +5655,10 @@ class SyncEngine { calculatedSpaceOnlinePostUpload = remainingFreeSpaceOnline - thisFileSize; // Based on what we know, for this thread - 
can we safely upload this modified local file? - addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpaceOnline), ["debug"]); - addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]); + if (debugLogging) { + addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpaceOnline), ["debug"]); + addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]); + } // If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true @@ -5597,15 +5686,15 @@ class SyncEngine { if (parentItem.driveId != appConfig.defaultDriveId) { // Different message depending on account type if (appConfig.accountType == "personal") { - addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed.", ["verbose"]); + if (verboseLogging) {addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed.", ["verbose"]);} } else { - addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); + if (verboseLogging) {addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]);} } } else { if (appConfig.accountType == "personal") { - addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. 
Space available online cannot be guaranteed.", ["verbose"]); + if (verboseLogging) {addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed.", ["verbose"]);} } else { - addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); + if (verboseLogging) {addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]);} } } // Space available online is being restricted - so we have no way to really know if there is space available online @@ -5663,10 +5752,10 @@ class SyncEngine { // If we get to this point, the OneDrive API returned a 200 OK with valid JSON data that indicates a 'file' exists at this location already // and that it matches the POSIX filename of the local item we are trying to upload as a new file - addLogEntry("The file we are attempting to upload as a new file already exists on Microsoft OneDrive: " ~ fileToUpload, ["verbose"]); + if (verboseLogging) {addLogEntry("The file we are attempting to upload as a new file already exists on Microsoft OneDrive: " ~ fileToUpload, ["verbose"]);} // No 404 or otherwise was triggered, meaning that the file already exists online and passes the POSIX test ... - addLogEntry("fileDetailsFromOneDrive after exist online check: " ~ to!string(fileDetailsFromOneDrive), ["debug"]); + if (debugLogging) {addLogEntry("fileDetailsFromOneDrive after exist online check: " ~ to!string(fileDetailsFromOneDrive), ["debug"]);} // Does the data from online match our local file that we are attempting to upload as a new file? 
if (!disableUploadValidation && performUploadIntegrityValidationChecks(fileDetailsFromOneDrive, fileToUpload, thisFileSize)) { @@ -5674,7 +5763,7 @@ class SyncEngine { saveItem(fileDetailsFromOneDrive); } else { // The local file we are attempting to upload as a new file is different to the existing file online - addLogEntry("Triggering newfile upload target already exists edge case, where the online item does not match what we are trying to upload", ["debug"]); + if (debugLogging) {addLogEntry("Triggering newfile upload target already exists edge case, where the online item does not match what we are trying to upload", ["debug"]);} // Issue #2626 | Case 2-2 (resync) @@ -5723,7 +5812,7 @@ class SyncEngine { // If we get a 404 .. the file is not online .. this is what we want .. file does not exist online if (exception.httpStatusCode == 404) { // The file has been checked, client side filtering checked, does not exist online - we need to upload it - addLogEntry("fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); generated a 404 - file does not exist online - must upload it", ["debug"]); + if (debugLogging) {addLogEntry("fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); generated a 404 - file does not exist online - must upload it", ["debug"]);} uploadFailed = performNewFileUpload(parentItem, fileToUpload, thisFileSize); } else { // some other error @@ -5742,7 +5831,7 @@ class SyncEngine { uploadFailed = true; } catch (JsonResponseException e) { // Display JSON error message - addLogEntry(e.msg, ["debug"]); + if (debugLogging) {addLogEntry(e.msg, ["debug"]);} uploadFailed = true; } } else { @@ -5899,17 +5988,17 @@ class SyncEngine { // Validate that we have the following items which we need if (!hasUploadURL(uploadSessionData)) { sessionDataValid = false; - addLogEntry("Session data missing 'uploadUrl'", ["debug"]); + if (debugLogging) 
{addLogEntry("Session data missing 'uploadUrl'", ["debug"]);} } if (!hasNextExpectedRanges(uploadSessionData)) { sessionDataValid = false; - addLogEntry("Session data missing 'nextExpectedRanges'", ["debug"]); + if (debugLogging) {addLogEntry("Session data missing 'nextExpectedRanges'", ["debug"]);} } if (!hasLocalPath(uploadSessionData)) { sessionDataValid = false; - addLogEntry("Session data missing 'localPath'", ["debug"]); + if (debugLogging) {addLogEntry("Session data missing 'localPath'", ["debug"]);} } if (sessionDataValid) { @@ -5937,7 +6026,7 @@ class SyncEngine { } } else { // No Upload URL or nextExpectedRanges or localPath .. not a valid JSON we can use - addLogEntry("Session data is missing required elements to perform a session upload.", ["verbose"]); + if (verboseLogging) {addLogEntry("Session data is missing required elements to perform a session upload.", ["verbose"]);} addLogEntry("Uploading new file: " ~ fileToUpload ~ " ... failed!", ["info", "notify"]); } } else { @@ -5964,10 +6053,12 @@ class SyncEngine { if (!uploadFailed) { // Upload did not fail ... 
auto uploadDuration = uploadFinishTime - uploadStartTime; - addLogEntry("File Size: " ~ to!string(thisFileSize) ~ " Bytes", ["debug"]); - addLogEntry("Upload Duration: " ~ to!string((uploadDuration.total!"msecs"/1e3)) ~ " Seconds", ["debug"]); - auto uploadSpeed = (thisFileSize / (uploadDuration.total!"msecs"/1e3)/ 1024 / 1024); - addLogEntry("Upload Speed: " ~ to!string(uploadSpeed) ~ " Mbps (approx)", ["debug"]); + if (debugLogging) { + addLogEntry("File Size: " ~ to!string(thisFileSize) ~ " Bytes", ["debug"]); + addLogEntry("Upload Duration: " ~ to!string((uploadDuration.total!"msecs"/1e3)) ~ " Seconds", ["debug"]); + auto uploadSpeed = (thisFileSize / (uploadDuration.total!"msecs"/1e3)/ 1024 / 1024); + addLogEntry("Upload Speed: " ~ to!string(uploadSpeed) ~ " Mbps (approx)", ["debug"]); + } // OK as the upload did not fail, we need to save the response from OneDrive, but it has to be a valid JSON response if (uploadResponse.type() == JSONType.object) { @@ -5996,7 +6087,7 @@ class SyncEngine { // Are we in a --dry-run scenario? if (!dryRun) { // No --dry-run ... 
process local file delete - addLogEntry("Removing local file: " ~ fileToUpload, ["debug"]); + if (debugLogging) {addLogEntry("Removing local file: " ~ fileToUpload, ["debug"]);} safeRemove(fileToUpload); } } @@ -6006,7 +6097,7 @@ class SyncEngine { } } else { // Log that an invalid JSON object was returned - addLogEntry("uploadFileOneDriveApiInstance.simpleUpload or session.upload call returned an invalid JSON Object from the OneDrive API", ["debug"]); + if (debugLogging) {addLogEntry("uploadFileOneDriveApiInstance.simpleUpload or session.upload call returned an invalid JSON Object from the OneDrive API", ["debug"]);} } } @@ -6047,7 +6138,7 @@ class SyncEngine { } } else { // no valid session was created - addLogEntry("Creation of OneDrive API Upload Session failed.", ["verbose"]); + if (verboseLogging) {addLogEntry("Creation of OneDrive API Upload Session failed.", ["verbose"]);} // return upload() will return a JSONValue response, create an empty JSONValue response to return uploadSession = null; } @@ -6087,7 +6178,7 @@ class SyncEngine { // Start the session upload using the active API instance for this thread while (true) { fragmentCount++; - addLogEntry("Fragment: " ~ to!string(fragmentCount) ~ " of " ~ to!string(expected_total_fragments), ["debug"]); + if (debugLogging) {addLogEntry("Fragment: " ~ to!string(fragmentCount) ~ " of " ~ to!string(expected_total_fragments), ["debug"]);} // What ETA string do we use? auto eta = calc_eta((fragmentCount -1), expected_total_fragments, start_unix_time); @@ -6107,15 +6198,15 @@ class SyncEngine { addLogEntry(uploadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); // What fragment size will be used? 
- addLogEntry("fragmentSize: " ~ to!string(fragmentSize) ~ " offset: " ~ to!string(offset) ~ " thisFileSize: " ~ to!string(thisFileSize), ["debug"]); + if (debugLogging) {addLogEntry("fragmentSize: " ~ to!string(fragmentSize) ~ " offset: " ~ to!string(offset) ~ " thisFileSize: " ~ to!string(thisFileSize), ["debug"]);} fragSize = fragmentSize < thisFileSize - offset ? fragmentSize : thisFileSize - offset; - addLogEntry("Using fragSize: " ~ to!string(fragSize), ["debug"]); + if (debugLogging) {addLogEntry("Using fragSize: " ~ to!string(fragSize), ["debug"]);} // fragSize must not be a negative value if (fragSize < 0) { // Session upload will fail // not a JSON object - fragment upload failed - addLogEntry("File upload session failed - invalid calculation of fragment size", ["verbose"]); + if (verboseLogging) {addLogEntry("File upload session failed - invalid calculation of fragment size", ["verbose"]);} if (exists(threadUploadSessionFilePath)) { remove(threadUploadSessionFilePath); } @@ -6154,11 +6245,11 @@ class SyncEngine { // 504 - Gateway Timeout // Insert a new line as well, so that the below error is inserted on the console in the right location - addLogEntry("Fragment upload failed - received an exception response from OneDrive API", ["verbose"]); + if (verboseLogging) {addLogEntry("Fragment upload failed - received an exception response from OneDrive API", ["verbose"]);} // display what the error is displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); // retry fragment upload in case error is transient - addLogEntry("Retrying fragment upload", ["verbose"]); + if (verboseLogging) {addLogEntry("Retrying fragment upload", ["verbose"]);} try { uploadResponse = activeOneDriveApiInstance.uploadFragment( @@ -6170,7 +6261,7 @@ class SyncEngine { ); } catch (OneDriveException e) { // OneDrive threw another error on retry - addLogEntry("Retry to upload fragment failed", ["verbose"]); + if (verboseLogging) {addLogEntry("Retry to upload fragment failed", 
["verbose"]);} // display what the error is displayOneDriveErrorMessage(e.msg, getFunctionName!({})); // set uploadResponse to null as the fragment upload was in error twice @@ -6200,7 +6291,7 @@ class SyncEngine { saveSessionFile(threadUploadSessionFilePath, uploadSessionData); } else { // not a JSON object - fragment upload failed - addLogEntry("File upload session failed - invalid response from OneDrive API", ["verbose"]); + if (verboseLogging) {addLogEntry("File upload session failed - invalid response from OneDrive API", ["verbose"]);} // cleanup session data if (exists(threadUploadSessionFilePath)) { @@ -6237,10 +6328,10 @@ class SyncEngine { if (noRemoteDelete) { if ((itemToDelete.type == ItemType.dir)) { // Do not process remote directory delete - addLogEntry("Skipping remote directory delete as --upload-only & --no-remote-delete configured", ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping remote directory delete as --upload-only & --no-remote-delete configured", ["verbose"]);} } else { // Do not process remote file delete - addLogEntry("Skipping remote file delete as --upload-only & --no-remote-delete configured", ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping remote file delete as --upload-only & --no-remote-delete configured", ["verbose"]);} } } else { @@ -6258,7 +6349,7 @@ class SyncEngine { children = getChildren(itemToDelete.driveId, itemToDelete.id); // Count the returned items + the original item (1) itemsToDelete = count(children) + 1; - addLogEntry("Number of items online to delete: " ~ to!string(itemsToDelete), ["debug"]); + if (debugLogging) {addLogEntry("Number of items online to delete: " ~ to!string(itemsToDelete), ["debug"]);} } else { itemsToDelete = 1; } @@ -6283,10 +6374,11 @@ class SyncEngine { // Are we in a --dry-run scenario? if (!dryRun) { // We are not in a dry run scenario - addLogEntry("itemToDelete: " ~ to!string(itemToDelete), ["debug"]); - - // what item are we trying to delete? 
- addLogEntry("Attempting to delete this single item id: " ~ itemToDelete.id ~ " from drive: " ~ itemToDelete.driveId, ["debug"]); + if (debugLogging) { + addLogEntry("itemToDelete: " ~ to!string(itemToDelete), ["debug"]); + // what item are we trying to delete? + addLogEntry("Attempting to delete this single item id: " ~ itemToDelete.id ~ " from drive: " ~ itemToDelete.driveId, ["debug"]); + } // Configure these item variables to handle OneDrive Business Shared Folder Deletion Item actualItemToDelete; @@ -6309,12 +6401,12 @@ class SyncEngine { // Configure actualItemToDelete if (remoteShortcutLinkItem.id != "") { // A DB entry was returned - addLogEntry("remoteShortcutLinkItem: " ~ to!string(remoteShortcutLinkItem), ["debug"]); + if (debugLogging) {addLogEntry("remoteShortcutLinkItem: " ~ to!string(remoteShortcutLinkItem), ["debug"]);} // Set actualItemToDelete to this data actualItemToDelete = remoteShortcutLinkItem; // Delete the shortcut reference in the local database itemDB.deleteById(remoteShortcutLinkItem.driveId, remoteShortcutLinkItem.id); - addLogEntry("Deleted OneDrive Business Shared Folder 'Shorcut Link'", ["debug"]); + if (debugLogging) {addLogEntry("Deleted OneDrive Business Shared Folder 'Shorcut Link'", ["debug"]);} } else { // No data was returned, use the original data actualItemToDelete = itemToDelete; @@ -6338,7 +6430,7 @@ class SyncEngine { } catch (OneDriveException e) { if (e.httpStatusCode == 404) { // item.id, item.eTag could not be found on the specified driveId - addLogEntry("OneDrive reported: The resource could not be found to be deleted.", ["verbose"]); + if (verboseLogging) {addLogEntry("OneDrive reported: The resource could not be found to be deleted.", ["verbose"]);} } // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory @@ -6360,7 +6452,7 @@ class SyncEngine { } } else { // --download-only operation, we are not uploading any delete event to OneDrive - addLogEntry("Not pushing local delete to Microsoft 
OneDrive due to --download-only being used", ["debug"]); + if (debugLogging) {addLogEntry("Not pushing local delete to Microsoft OneDrive due to --download-only being used", ["debug"]);} } } } @@ -6383,7 +6475,7 @@ class SyncEngine { void performReverseDeletionOfOneDriveItems(Item[] children, Item itemToDelete) { // Log what is happening - addLogEntry("Attempting a reverse delete of all child objects from OneDrive", ["debug"]); + if (debugLogging) {addLogEntry("Attempting a reverse delete of all child objects from OneDrive", ["debug"]);} // Create a new API Instance for this thread and initialise it OneDriveApi performReverseDeletionOneDriveApiInstance; @@ -6392,7 +6484,7 @@ class SyncEngine { foreach_reverse (Item child; children) { // Log the action - addLogEntry("Attempting to delete this child item id: " ~ child.id ~ " from drive: " ~ child.driveId, ["debug"]); + if (debugLogging) {addLogEntry("Attempting to delete this child item id: " ~ child.id ~ " from drive: " ~ child.driveId, ["debug"]);} // perform the delete via the default OneDrive API instance performReverseDeletionOneDriveApiInstance.deleteById(child.driveId, child.id, child.eTag); @@ -6400,7 +6492,7 @@ class SyncEngine { itemDB.deleteById(child.driveId, child.id); } // Log the action - addLogEntry("Attempting to delete this parent item id: " ~ itemToDelete.id ~ " from drive: " ~ itemToDelete.driveId, ["debug"]); + if (debugLogging) {addLogEntry("Attempting to delete this parent item id: " ~ itemToDelete.id ~ " from drive: " ~ itemToDelete.driveId, ["debug"]);} // Perform the delete via the default OneDrive API instance performReverseDeletionOneDriveApiInstance.deleteById(itemToDelete.driveId, itemToDelete.id, itemToDelete.eTag); @@ -6476,7 +6568,7 @@ class SyncEngine { fakeResponse["folder"] = JSONValue(""); } - addLogEntry("Generated Fake OneDrive Response: " ~ to!string(fakeResponse), ["debug"]); + if (debugLogging) {addLogEntry("Generated Fake OneDrive Response: " ~ to!string(fakeResponse), 
["debug"]);} return fakeResponse; } @@ -6491,29 +6583,29 @@ class SyncEngine { // If the item is a directory, we need to add this to the DB, if this is a file, we dont add this, the parent path is not in DB, thus any new files in this directory are not added if ((uploadOnly) && (localDeleteAfterUpload) && (isItemFile(jsonItem))) { // Log that we skipping adding item to the local DB and the reason why - addLogEntry("Skipping adding to database as --upload-only & --remove-source-files configured", ["debug"]); + if (debugLogging) {addLogEntry("Skipping adding to database as --upload-only & --remove-source-files configured", ["debug"]);} } else { // What is the JSON item we are trying to create a DB record with? - addLogEntry("saveItem - creating DB item from this JSON: " ~ to!string(jsonItem), ["debug"]); + if (debugLogging) {addLogEntry("saveItem - creating DB item from this JSON: " ~ to!string(jsonItem), ["debug"]);} // Takes a JSON input and formats to an item which can be used by the database Item item = makeItem(jsonItem); // Is this JSON item a 'root' item? if ((isItemRoot(jsonItem)) && (item.name == "root")) { - addLogEntry("Updating DB Item object with correct values as this is a 'root' object", ["debug"]); + if (debugLogging) {addLogEntry("Updating DB Item object with correct values as this is a 'root' object", ["debug"]);} item.parentId = null; // ensures that this database entry has no parent // Check for parentReference if (hasParentReference(jsonItem)) { // Set the correct item.driveId - addLogEntry("ROOT JSON Item HAS parentReference .... setting item.driveId = jsonItem['parentReference']['driveId'].str", ["debug"]); + if (debugLogging) {addLogEntry("ROOT JSON Item HAS parentReference .... 
setting item.driveId = jsonItem['parentReference']['driveId'].str", ["debug"]);} item.driveId = jsonItem["parentReference"]["driveId"].str; } // We only should be adding our account 'root' to the database, not shared folder 'root' items if (item.driveId != appConfig.defaultDriveId) { // Shared Folder drive 'root' object .. we dont want this item - addLogEntry("NOT adding 'remote root' object to database: " ~ to!string(item), ["debug"]); + if (debugLogging) {addLogEntry("NOT adding 'remote root' object to database: " ~ to!string(item), ["debug"]);} return; } } @@ -6563,7 +6655,7 @@ class SyncEngine { // Microsoft OneDrive OneNote objects will report as files but have 'application/msonenote' and 'application/octet-stream' as mime types if ((isMicrosoftOneNoteMimeType1(onedriveJSONItem)) || (isMicrosoftOneNoteMimeType2(onedriveJSONItem))) { // Debug log output that this is a potential OneNote object - addLogEntry("This item is potentially an associated Microsoft OneNote Object Item", ["debug"]); + if (debugLogging) {addLogEntry("This item is potentially an associated Microsoft OneNote Object Item", ["debug"]);} } else { // Not a Microsoft OneNote Mime Type Object .. string apiWarningMessage = "WARNING: OneDrive API inconsistency - this file does not have any hash: "; @@ -6578,7 +6670,7 @@ class SyncEngine { // Parent is not in the database .. why? // Check if the parent item had been skipped .. if (newDatabaseItem.parentId in skippedItems) { - addLogEntry(apiWarningMessage ~ "newDatabaseItem.parentId listed within skippedItems", ["debug"]); + if (debugLogging) {addLogEntry(apiWarningMessage ~ "newDatabaseItem.parentId listed within skippedItems", ["debug"]);} } else { // Use the item ID .. 
there is no other reference available, parent is not being skipped, so we should have been able to calculate this - but we could not addLogEntry(apiWarningMessage ~ newDatabaseItem.id); @@ -6589,7 +6681,7 @@ class SyncEngine { } } else { // zero file size - addLogEntry("This item file is zero size - potentially no hash provided by the OneDrive API", ["debug"]); + if (debugLogging) {addLogEntry("This item file is zero size - potentially no hash provided by the OneDrive API", ["debug"]);} } } } @@ -6700,14 +6792,14 @@ class SyncEngine { // Before we get any data from the OneDrive API, flag any child object in the database as out-of-sync for this driveId & and object id // Downgrade ONLY files associated with this driveId and idToQuery - addLogEntry("Downgrading all children for this searchItem.driveId (" ~ searchItem.driveId ~ ") and searchItem.id (" ~ searchItem.id ~ ") to an out-of-sync state", ["debug"]); + if (debugLogging) {addLogEntry("Downgrading all children for this searchItem.driveId (" ~ searchItem.driveId ~ ") and searchItem.id (" ~ searchItem.id ~ ") to an out-of-sync state", ["debug"]);} Item[] drivePathChildren = getChildren(searchItem.driveId, searchItem.id); if (count(drivePathChildren) > 0) { // Children to process and flag as out-of-sync foreach (drivePathChild; drivePathChildren) { // Flag any object in the database as out-of-sync for this driveId & and object id - addLogEntry("Downgrading item as out-of-sync: " ~ drivePathChild.id, ["debug"]); + if (debugLogging) {addLogEntry("Downgrading item as out-of-sync: " ~ drivePathChild.id, ["debug"]);} itemDB.downgradeSyncStatusFlag(drivePathChild.driveId, drivePathChild.id); } } @@ -6718,7 +6810,7 @@ class SyncEngine { try { driveData = generateDeltaResponseOneDriveApiInstance.getPathDetailsById(searchItem.driveId, searchItem.id); } catch (OneDriveException exception) { - addLogEntry("driveData = generateDeltaResponseOneDriveApiInstance.getPathDetailsById(searchItem.driveId, searchItem.id) generated a 
OneDriveException", ["debug"]); + if (debugLogging) {addLogEntry("driveData = generateDeltaResponseOneDriveApiInstance.getPathDetailsById(searchItem.driveId, searchItem.id) generated a OneDriveException", ["debug"]);} string thisFunctionName = getFunctionName!({}); // Default operation if not 408,429,503,504 errors @@ -6736,7 +6828,7 @@ class SyncEngine { addProcessingLogHeaderEntry("Generating a /delta response from the OneDrive API from this Item ID: " ~ searchItem.id, appConfig.verbosityCount); } } else { - addLogEntry("Generating a /delta response from the OneDrive API from this Item ID: " ~ searchItem.id, ["verbose"]); + if (verboseLogging) {addLogEntry("Generating a /delta response from the OneDrive API from this Item ID: " ~ searchItem.id, ["verbose"]);} } // Process this initial JSON response @@ -6747,7 +6839,7 @@ class SyncEngine { try { rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId); } catch (OneDriveException exception) { - addLogEntry("rootData = onedrive.getDriveIdRoot(searchItem.driveId) generated a OneDriveException", ["debug"]); + if (debugLogging) {addLogEntry("rootData = onedrive.getDriveIdRoot(searchItem.driveId) generated a OneDriveException", ["debug"]);} string thisFunctionName = getFunctionName!({}); // Default operation if not 408,429,503,504 errors @@ -6756,13 +6848,13 @@ class SyncEngine { displayOneDriveErrorMessage(exception.msg, thisFunctionName); } // Add driveData JSON data to array - addLogEntry("Adding OneDrive root details for processing", ["verbose"]); + if (verboseLogging) {addLogEntry("Adding OneDrive root details for processing", ["verbose"]);} childrenData ~= rootData; } } // Add driveData JSON data to array - addLogEntry("Adding OneDrive folder details for processing", ["verbose"]); + if (verboseLogging) {addLogEntry("Adding OneDrive folder details for processing", ["verbose"]);} childrenData ~= driveData; } else { // driveData is an invalid JSON object @@ -6790,11 +6882,13 @@ class 
SyncEngine { topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink); } catch (OneDriveException exception) { // OneDrive threw an error - addLogEntry("------------------------------------------------------------------", ["debug"]); - addLogEntry("Query Error: topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink)", ["debug"]); - addLogEntry("driveId: " ~ searchItem.driveId, ["debug"]); - addLogEntry("idToQuery: " ~ searchItem.id, ["debug"]); - addLogEntry("nextLink: " ~ nextLink, ["debug"]); + if (debugLogging) { + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Query Error: topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink)", ["debug"]); + addLogEntry("driveId: " ~ searchItem.driveId, ["debug"]); + addLogEntry("idToQuery: " ~ searchItem.id, ["debug"]); + addLogEntry("nextLink: " ~ nextLink, ["debug"]); + } string thisFunctionName = getFunctionName!({}); // Default operation if not 408,429,503,504 errors @@ -6806,10 +6900,10 @@ class SyncEngine { // Process top level children if (!remotePathObject) { // Main account root folder - addLogEntry("Adding " ~ to!string(count(topLevelChildren["value"].array)) ~ " OneDrive items for processing from the OneDrive 'root' Folder", ["verbose"]); + if (verboseLogging) {addLogEntry("Adding " ~ to!string(count(topLevelChildren["value"].array)) ~ " OneDrive items for processing from the OneDrive 'root' Folder", ["verbose"]);} } else { // Shared Folder - addLogEntry("Adding " ~ to!string(count(topLevelChildren["value"].array)) ~ " OneDrive items for processing from the OneDrive Shared Folder", ["verbose"]); + if (verboseLogging) {addLogEntry("Adding " ~ to!string(count(topLevelChildren["value"].array)) ~ " OneDrive items for processing from the OneDrive Shared Folder", ["verbose"]);} 
} foreach (child; topLevelChildren["value"].array) { @@ -6851,7 +6945,7 @@ class SyncEngine { // to indicate more items are available and provide the request URL for the next page of items. if ("@odata.nextLink" in topLevelChildren) { // Update nextLink to next changeSet bundle - addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + if (debugLogging) {addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]);} nextLink = topLevelChildren["@odata.nextLink"].str; } else break; @@ -6927,7 +7021,7 @@ class SyncEngine { // /Level 1/Level 2/Level 3/Child Shared Folder/some folder/another folder // But 'Child Shared Folder' is what is shared, thus '/Level 1/Level 2/Level 3/' is a potential information leak if logged. // Plus, the application output now shows accurately what is being shared - so that is a good thing. - addLogEntry("Adding " ~ to!string(count(thisLevelChildren["value"].array)) ~ " OneDrive items for processing from " ~ pathForLogging, ["verbose"]); + if (verboseLogging) {addLogEntry("Adding " ~ to!string(count(thisLevelChildren["value"].array)) ~ " OneDrive items for processing from " ~ pathForLogging, ["verbose"]);} } foreach (child; thisLevelChildren["value"].array) { // Check for any Client Side Filtering here ... we should skip querying the OneDrive API for 'folders' that we are going to just process and skip anyway. 
@@ -6960,17 +7054,17 @@ class SyncEngine { if ("@odata.nextLink" in thisLevelChildren) { // Update nextLink to next changeSet bundle nextLink = thisLevelChildren["@odata.nextLink"].str; - addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + if (debugLogging) {addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]);} } else break; } else { // Invalid JSON response when querying this level children - addLogEntry("INVALID JSON response when attempting a retry of parent function - queryForChildren(driveId, idToQuery, childParentPath, pathForLogging)", ["debug"]); + if (debugLogging) {addLogEntry("INVALID JSON response when attempting a retry of parent function - queryForChildren(driveId, idToQuery, childParentPath, pathForLogging)", ["debug"]);} // retry thisLevelChildren = queryThisLevelChildren - addLogEntry("Thread sleeping for an additional 30 seconds", ["debug"]); + if (debugLogging) {addLogEntry("Thread sleeping for an additional 30 seconds", ["debug"]);} Thread.sleep(dur!"seconds"(30)); - addLogEntry("Retry this call thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance)", ["debug"]); + if (debugLogging) {addLogEntry("Retry this call thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance)", ["debug"]);} thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance); } @@ -6997,16 +7091,18 @@ class SyncEngine { // query children try { // attempt API call - addLogEntry("Attempting Query: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)", ["debug"]); + if (debugLogging) {addLogEntry("Attempting Query: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)", ["debug"]);} thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink); - addLogEntry("Query 
'thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)' performed successfully", ["debug"]); + if (debugLogging) {addLogEntry("Query 'thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)' performed successfully", ["debug"]);} } catch (OneDriveException exception) { // OneDrive threw an error - addLogEntry("------------------------------------------------------------------", ["debug"]); - addLogEntry("Query Error: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)", ["debug"]); - addLogEntry("driveId: " ~ driveId, ["debug"]); - addLogEntry("idToQuery: " ~ idToQuery, ["debug"]); - addLogEntry("nextLink: " ~ nextLink, ["debug"]); + if (debugLogging) { + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Query Error: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)", ["debug"]); + addLogEntry("driveId: " ~ driveId, ["debug"]); + addLogEntry("idToQuery: " ~ idToQuery, ["debug"]); + addLogEntry("nextLink: " ~ nextLink, ["debug"]); + } string thisFunctionName = getFunctionName!({}); // Default operation if not 408,429,503,504 errors @@ -7046,7 +7142,7 @@ class SyncEngine { queryOneDriveForSpecificPath.initialise(); foreach (thisFolderName; pathSplitter(thisNewPathToSearch)) { - addLogEntry("Testing for the existence online of this folder path: " ~ thisFolderName, ["debug"]); + if (debugLogging) {addLogEntry("Testing for the existence online of this folder path: " ~ thisFolderName, ["debug"]);} directoryFoundOnline = false; // If this is '.' 
this is the account root @@ -7057,7 +7153,7 @@ class SyncEngine { } // What path are we querying - addLogEntry("Attempting to query OneDrive for this path: " ~ currentPathTree, ["debug"]); + if (debugLogging) {addLogEntry("Attempting to query OneDrive for this path: " ~ currentPathTree, ["debug"]);} // What query do we use? if (thisFolderName == ".") { @@ -7106,7 +7202,7 @@ class SyncEngine { directoryFoundOnline = true; // Is this JSON a remote object - addLogEntry("Testing if this is a remote Shared Folder", ["debug"]); + if (debugLogging) {addLogEntry("Testing if this is a remote Shared Folder", ["debug"]);} if (isItemRemote(getPathDetailsAPIResponse)) { // Remote Directory .. need a DB Tie Record createDatabaseTieRecordForOnlineSharedFolder(parentDetails); @@ -7142,11 +7238,11 @@ class SyncEngine { addLogEntry("ERROR: Requested directory to search for and potentially create has a 'case-insensitive match' to an existing directory on Microsoft OneDrive online."); addLogEntry("ERROR: To resolve, rename this local directory: " ~ currentPathTree); } catch (JsonResponseException e) { - addLogEntry(e.msg, ["debug"]); + if (debugLogging) {addLogEntry(e.msg, ["debug"]);} } } else { // parentDetails.driveId is not the account drive id - thus will be a remote shared item - addLogEntry("This parent directory is a remote object this next path will be on a remote drive", ["debug"]); + if (debugLogging) {addLogEntry("This parent directory is a remote object this next path will be on a remote drive", ["debug"]);} // For this parentDetails.driveId, parentDetails.id object, query the OneDrive API for it's children while (true) { @@ -7202,7 +7298,7 @@ class SyncEngine { // to indicate more items are available and provide the request URL for the next page of items. 
if ("@odata.nextLink" in topLevelChildren) { // Update nextLink to next changeSet bundle - addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + if (debugLogging) {addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]);} nextLink = topLevelChildren["@odata.nextLink"].str; } else break; @@ -7222,10 +7318,12 @@ class SyncEngine { // No POSIX issue if (createPathIfMissing) { // Create this path as it is missing on OneDrive online and there is no POSIX issue with a 'case-insensitive match' - addLogEntry("FOLDER NOT FOUND ONLINE AND WE ARE REQUESTED TO CREATE IT", ["debug"]); - addLogEntry("Create folder on this drive: " ~ parentDetails.driveId, ["debug"]); - addLogEntry("Create folder as a child on this object: " ~ parentDetails.id, ["debug"]); - addLogEntry("Create this folder name: " ~ thisFolderName, ["debug"]); + if (debugLogging) { + addLogEntry("FOLDER NOT FOUND ONLINE AND WE ARE REQUESTED TO CREATE IT", ["debug"]); + addLogEntry("Create folder on this drive: " ~ parentDetails.driveId, ["debug"]); + addLogEntry("Create folder as a child on this object: " ~ parentDetails.id, ["debug"]); + addLogEntry("Create this folder name: " ~ thisFolderName, ["debug"]); + } // Generate the JSON needed to create the folder online JSONValue newDriveItem = [ @@ -7248,7 +7346,7 @@ class SyncEngine { // 409 - API Race Condition if (e.httpStatusCode == 409) { // When we attempted to create it, OneDrive responded that it now already exists - addLogEntry("OneDrive reported that " ~ thisFolderName ~ " already exists .. OneDrive API race condition", ["verbose"]); + if (verboseLogging) {addLogEntry("OneDrive reported that " ~ thisFolderName ~ " already exists .. 
OneDrive API race condition", ["verbose"]);} } else { // some other error from OneDrive was returned - display what it is addLogEntry("OneDrive generated an error when creating this path: " ~ thisFolderName); @@ -7274,7 +7372,7 @@ class SyncEngine { GC.collect(); // Output our search results - addLogEntry("queryOneDriveForSpecificPathAndCreateIfMissing.getPathDetailsAPIResponse = " ~ to!string(getPathDetailsAPIResponse), ["debug"]); + if (debugLogging) {addLogEntry("queryOneDriveForSpecificPathAndCreateIfMissing.getPathDetailsAPIResponse = " ~ to!string(getPathDetailsAPIResponse), ["debug"]);} return getPathDetailsAPIResponse; } @@ -7318,7 +7416,7 @@ class SyncEngine { try { if (noRemoteDelete) { // do not process remote delete - addLogEntry("Skipping remote delete as --upload-only & --no-remote-delete configured", ["verbose"]); + if (verboseLogging) {addLogEntry("Skipping remote delete as --upload-only & --no-remote-delete configured", ["verbose"]);} } else { uploadDeletedItem(dbItem, path); } @@ -7361,7 +7459,7 @@ class SyncEngine { GC.collect(); // Log that an error was generated - addLogEntry("deleteByPathNoSyncAPIInstance.getPathDetails(path) generated a OneDriveException", ["debug"]); + if (debugLogging) {addLogEntry("deleteByPathNoSyncAPIInstance.getPathDetails(path) generated a OneDriveException", ["debug"]);} if (exception.httpStatusCode == 404) { // The directory was not found on OneDrive - no need to delete it addLogEntry("The requested directory to delete was not found on OneDrive - skipping removing the remote directory online as it does not exist"); @@ -7491,7 +7589,7 @@ class SyncEngine { if (!exists(newPath)) { // is this --monitor use? 
if (appConfig.getValueBool("monitor")) { - addLogEntry("uploadMoveItem target has disappeared: " ~ newPath, ["verbose"]); + if (verboseLogging) {addLogEntry("uploadMoveItem target has disappeared: " ~ newPath, ["verbose"]);} return; } } @@ -7537,8 +7635,8 @@ class SyncEngine { if (e.httpStatusCode == 412) { // OneDrive threw a 412 error, most likely: ETag does not match current item's value // Retry without eTag - addLogEntry("File Move Failed - OneDrive eTag / cTag match issue", ["debug"]); - addLogEntry("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting to move the file - gracefully handling error", ["verbose"]); + if (debugLogging) {addLogEntry("File Move Failed - OneDrive eTag / cTag match issue", ["debug"]);} + if (verboseLogging) {addLogEntry("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting to move the file - gracefully handling error", ["verbose"]);} eTag = null; // Retry to move the file but without the eTag, via the for() loop } else if (e.httpStatusCode == 409) { @@ -7593,7 +7691,7 @@ class SyncEngine { // compare values if ((localFileSize == uploadFileSize) && (localFileHash == uploadFileHash)) { // Uploaded file integrity intact - addLogEntry("Uploaded local file matches reported online size and hash values", ["debug"]); + if (debugLogging) {addLogEntry("Uploaded local file matches reported online size and hash values", ["debug"]);} integrityValid = true; } else { // Upload integrity failure .. what failed? @@ -7604,20 +7702,22 @@ class SyncEngine { // What integrity failed - size? if (localFileSize != uploadFileSize) { - addLogEntry("WARNING: Online file integrity failure - Size Mismatch", ["verbose"]); + if (verboseLogging) {addLogEntry("WARNING: Online file integrity failure - Size Mismatch", ["verbose"]);} } // What integrity failed - hash? 
if (localFileHash != uploadFileHash) { - addLogEntry("WARNING: Online file integrity failure - Hash Mismatch", ["verbose"]); + if (verboseLogging) {addLogEntry("WARNING: Online file integrity failure - Hash Mismatch", ["verbose"]);} } // What account type is this? if (appConfig.accountType != "personal") { // Not a personal account, thus the integrity failure is most likely due to SharePoint - addLogEntry("CAUTION: When you upload files to Microsoft OneDrive that uses SharePoint as its backend, Microsoft OneDrive will alter your files post upload.", ["verbose"]); - addLogEntry("CAUTION: This will lead to technical differences between the version stored online and your local original file, potentially causing issues with the accuracy or consistency of your data.", ["verbose"]); - addLogEntry("CAUTION: Please read https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details.", ["verbose"]); + if (verboseLogging) { + addLogEntry("CAUTION: When you upload files to Microsoft OneDrive that uses SharePoint as its backend, Microsoft OneDrive will alter your files post upload.", ["verbose"]); + addLogEntry("CAUTION: This will lead to technical differences between the version stored online and your local original file, potentially causing issues with the accuracy or consistency of your data.", ["verbose"]); + addLogEntry("CAUTION: Please read https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details.", ["verbose"]); + } } // How can this be disabled? 
addLogEntry("To disable the integrity checking of uploaded files use --disable-upload-validation"); @@ -7628,7 +7728,7 @@ class SyncEngine { } } else { // We are bypassing integrity checks due to --disable-upload-validation - addLogEntry("Online file validation disabled due to --disable-upload-validation", ["debug"]); + if (debugLogging) {addLogEntry("Online file validation disabled due to --disable-upload-validation", ["debug"]);} addLogEntry("WARNING: Skipping upload integrity check for: " ~ localFilePath, ["info", "notify"]); } @@ -7726,11 +7826,11 @@ class SyncEngine { // is siteQuery a valid JSON object & contain data we can use? if ((siteQuery.type() == JSONType.object) && ("value" in siteQuery)) { // valid JSON object - addLogEntry("O365 Query Response: " ~ to!string(siteQuery), ["debug"]); + if (debugLogging) {addLogEntry("O365 Query Response: " ~ to!string(siteQuery), ["debug"]);} foreach (searchResult; siteQuery["value"].array) { // Need an 'exclusive' match here with sharepointLibraryNameToQuery as entered - addLogEntry("Found O365 Site: " ~ to!string(searchResult), ["debug"]); + if (debugLogging) {addLogEntry("Found O365 Site: " ~ to!string(searchResult), ["debug"]);} // 'displayName' and 'id' have to be present in the search result record in order to query the site if (("displayName" in searchResult) && ("id" in searchResult)) { @@ -7763,7 +7863,7 @@ class SyncEngine { // Display results found = true; addLogEntry("-----------------------------------------------"); - addLogEntry("Site Details: " ~ to!string(driveResult), ["debug"]); + if (debugLogging) {addLogEntry("Site Details: " ~ to!string(driveResult), ["debug"]);} addLogEntry("Site Name: " ~ searchResult["displayName"].str); addLogEntry("Library Name: " ~ driveResult["name"].str); addLogEntry("drive_id: " ~ driveResult["id"].str); @@ -7775,7 +7875,7 @@ class SyncEngine { if ("@odata.nextLink" in siteDriveQuery) { // Update nextLink to next set of SharePoint library names nextLinkDrive = 
siteDriveQuery["@odata.nextLink"].str; - addLogEntry("Setting nextLinkDrive to (@odata.nextLink): " ~ nextLinkDrive, ["debug"]); + if (debugLogging) {addLogEntry("Setting nextLinkDrive to (@odata.nextLink): " ~ nextLinkDrive, ["debug"]);} // Sleep for a while to avoid busy-waiting Thread.sleep(dur!"msecs"(100)); // Adjust the sleep duration as needed @@ -7812,9 +7912,11 @@ class SyncEngine { addLogEntry("ERROR: SharePoint Site details not provided for: " ~ siteNameAvailable); addLogEntry("ERROR: The SharePoint Site results returned from OneDrive API do not contain the required items to match. Please check your permissions with your site administrator."); addLogEntry("ERROR: Your site security settings is preventing the following details from being accessed: 'displayName' or 'id'"); - addLogEntry(" - Is 'displayName' available = " ~ to!string(displayNameAvailable), ["verbose"]); - addLogEntry(" - Is 'id' available = " ~ to!string(idAvailable), ["verbose"]); - addLogEntry("ERROR: To debug this further, please increase verbosity (--verbose or --verbose --verbose) to provide further insight as to what details are actually being returned."); + if (verboseLogging) { + addLogEntry(" - Is 'displayName' available = " ~ to!string(displayNameAvailable), ["verbose"]); + addLogEntry(" - Is 'id' available = " ~ to!string(idAvailable), ["verbose"]); + } + addLogEntry("ERROR: To debug this further, please increase application output verbosity to provide further insight as to what details are actually being returned."); } } @@ -7835,7 +7937,7 @@ class SyncEngine { siteSearchResults ~= siteSearchResultsEntry; } else { // displayName and id unavailable, display in debug log the entry - addLogEntry("Bad SharePoint Data for site: " ~ to!string(searchResult), ["debug"]); + if (debugLogging) {addLogEntry("Bad SharePoint Data for site: " ~ to!string(searchResult), ["debug"]);} } } } @@ -7858,7 +7960,7 @@ class SyncEngine { if ("@odata.nextLink" in siteQuery) { // Update nextLink to next 
set of SharePoint library names nextLink = siteQuery["@odata.nextLink"].str; - addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + if (debugLogging) {addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]);} } else break; // Sleep for a while to avoid busy-waiting @@ -7937,7 +8039,7 @@ class SyncEngine { // While the response is not a JSON Object or the Exit Handler has not been triggered while (deltaChanges.type() != JSONType.object) { // Handle the invalid JSON response and retry - addLogEntry("ERROR: Query of the OneDrive API via deltaChanges = getDeltaChangesByItemId() returned an invalid JSON response", ["debug"]); + if (debugLogging) {addLogEntry("ERROR: Query of the OneDrive API via deltaChanges = getDeltaChangesByItemId() returned an invalid JSON response", ["debug"]);} deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, deltaLink, getDeltaDataOneDriveApiInstance); } } @@ -7991,13 +8093,13 @@ class SyncEngine { // The response may contain either @odata.deltaLink or @odata.nextLink if ("@odata.deltaLink" in deltaChanges) { deltaLink = deltaChanges["@odata.deltaLink"].str; - addLogEntry("Setting next deltaLink to (@odata.deltaLink): " ~ deltaLink, ["debug"]); + if (debugLogging) {addLogEntry("Setting next deltaLink to (@odata.deltaLink): " ~ deltaLink, ["debug"]);} } // Update deltaLink to next changeSet bundle if ("@odata.nextLink" in deltaChanges) { deltaLink = deltaChanges["@odata.nextLink"].str; - addLogEntry("Setting next deltaLink to (@odata.nextLink): " ~ deltaLink, ["debug"]); + if (debugLogging) {addLogEntry("Setting next deltaLink to (@odata.nextLink): " ~ deltaLink, ["debug"]);} } else break; // Sleep for a while to avoid busy-waiting @@ -8130,7 +8232,7 @@ class SyncEngine { if (fileDetailsFromOneDrive.type() == JSONType.object) { // debug output of response - addLogEntry("API Response: " ~ to!string(fileDetailsFromOneDrive), ["debug"]); + if (debugLogging) {addLogEntry("API 
Response: " ~ to!string(fileDetailsFromOneDrive), ["debug"]);} // What sort of response to we generate // --get-file-link response @@ -8238,7 +8340,7 @@ class SyncEngine { // Create a new OneDrive API instance getCurrentDriveQuotaApiInstance = new OneDriveApi(appConfig); getCurrentDriveQuotaApiInstance.initialise(); - addLogEntry("Seeking available quota for this drive id: " ~ driveId, ["debug"]); + if (debugLogging) {addLogEntry("Seeking available quota for this drive id: " ~ driveId, ["debug"]);} currentDriveQuota = getCurrentDriveQuotaApiInstance.getDriveQuota(driveId); // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory @@ -8248,7 +8350,7 @@ class SyncEngine { GC.collect(); } catch (OneDriveException e) { - addLogEntry("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException", ["debug"]); + if (debugLogging) {addLogEntry("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException", ["debug"]);} // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory getCurrentDriveQuotaApiInstance.releaseCurlEngine(); getCurrentDriveQuotaApiInstance = null; @@ -8262,7 +8364,7 @@ class SyncEngine { if ("quota" in currentDriveQuota) { // debug output of response - addLogEntry("currentDriveQuota: " ~ to!string(currentDriveQuota), ["debug"]); + if (debugLogging) {addLogEntry("currentDriveQuota: " ~ to!string(currentDriveQuota), ["debug"]);} // human readable output of response string deletedValue = "Not Provided"; @@ -8354,7 +8456,7 @@ class SyncEngine { if (!validateUploadSessionFileData(sessionFilePath)) { // Remove upload_session file as it is invalid // upload_session file file contains an error - cant resume this session - addLogEntry("Restore file upload session failed - cleaning up resumable session data file: " ~ sessionFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("Restore file upload session failed - cleaning up resumable session data file: " ~ sessionFilePath, 
["verbose"]);} // cleanup session path if (exists(sessionFilePath)) { @@ -8391,18 +8493,18 @@ class SyncEngine { try { sessionFileData = readText(sessionFilePath).parseJSON(); } catch (JSONException e) { - addLogEntry("SESSION-RESUME: Invalid JSON data in: " ~ sessionFilePath, ["debug"]); + if (debugLogging) {addLogEntry("SESSION-RESUME: Invalid JSON data in: " ~ sessionFilePath, ["debug"]);} return false; } // Does the file we wish to resume uploading exist locally still? if ("localPath" in sessionFileData) { string sessionLocalFilePath = sessionFileData["localPath"].str; - addLogEntry("SESSION-RESUME: sessionLocalFilePath: " ~ sessionLocalFilePath, ["debug"]); + if (debugLogging) {addLogEntry("SESSION-RESUME: sessionLocalFilePath: " ~ sessionLocalFilePath, ["debug"]);} // Does the file exist? if (!exists(sessionLocalFilePath)) { - addLogEntry("The local file to upload does not exist locally anymore", ["verbose"]); + if (verboseLogging) {addLogEntry("The local file to upload does not exist locally anymore", ["verbose"]);} return false; } @@ -8413,7 +8515,7 @@ class SyncEngine { } } else { - addLogEntry("SESSION-RESUME: No localPath data in: " ~ sessionFilePath, ["debug"]); + if (debugLogging) {addLogEntry("SESSION-RESUME: No localPath data in: " ~ sessionFilePath, ["debug"]);} return false; } @@ -8436,11 +8538,11 @@ class SyncEngine { // valid timestamp if (expiration < Clock.currTime()) { - addLogEntry("The upload session has expired for: " ~ sessionFilePath, ["verbose"]); + if (verboseLogging) {addLogEntry("The upload session has expired for: " ~ sessionFilePath, ["verbose"]);} return false; } } else { - addLogEntry("SESSION-RESUME: No expirationDateTime data in: " ~ sessionFilePath, ["debug"]); + if (debugLogging) {addLogEntry("SESSION-RESUME: No expirationDateTime data in: " ~ sessionFilePath, ["debug"]);} return false; } @@ -8464,7 +8566,7 @@ class SyncEngine { } catch (OneDriveException e) { // handle any onedrive error response as invalid - 
addLogEntry("SESSION-RESUME: Invalid response when using uploadUrl in: " ~ sessionFilePath, ["debug"]); + if (debugLogging) {addLogEntry("SESSION-RESUME: Invalid response when using uploadUrl in: " ~ sessionFilePath, ["debug"]);} // OneDrive API Instance Cleanup - Shutdown API, free curl object and memory validateUploadSessionFileDataApiInstance.releaseCurlEngine(); @@ -8483,20 +8585,20 @@ class SyncEngine { sessionFileData["nextExpectedRanges"] = response["nextExpectedRanges"]; if (sessionFileData["nextExpectedRanges"].array.length == 0) { - addLogEntry("The upload session was already completed", ["verbose"]); + if (verboseLogging) {addLogEntry("The upload session was already completed", ["verbose"]);} return false; } } else { - addLogEntry("SESSION-RESUME: No expirationDateTime & nextExpectedRanges data in Microsoft OneDrive API response: " ~ to!string(response), ["debug"]); + if (debugLogging) {addLogEntry("SESSION-RESUME: No expirationDateTime & nextExpectedRanges data in Microsoft OneDrive API response: " ~ to!string(response), ["debug"]);} return false; } } else { // not a JSON object - addLogEntry("Restore file upload session failed - invalid response from Microsoft OneDrive", ["verbose"]); + if (verboseLogging) {addLogEntry("Restore file upload session failed - invalid response from Microsoft OneDrive", ["verbose"]);} return false; } } else { - addLogEntry("SESSION-RESUME: No uploadUrl data in: " ~ sessionFilePath, ["debug"]); + if (debugLogging) {addLogEntry("SESSION-RESUME: No uploadUrl data in: " ~ sessionFilePath, ["debug"]);} return false; } @@ -8552,12 +8654,12 @@ class SyncEngine { // Only perform the delete if we have a valid file path if (exists(jsonItemToResume["localPath"].str)) { // file exists - addLogEntry("Removing local file: " ~ jsonItemToResume["localPath"].str, ["debug"]); + if (debugLogging) {addLogEntry("Removing local file: " ~ jsonItemToResume["localPath"].str, ["debug"]);} safeRemove(jsonItemToResume["localPath"].str); } } // as file 
is removed, we have nothing to add to the local database - addLogEntry("Skipping adding to database as --upload-only & --remove-source-files configured", ["debug"]); + if (debugLogging) {addLogEntry("Skipping adding to database as --upload-only & --remove-source-files configured", ["debug"]);} } else { // Save JSON item in database saveItem(uploadResponse); @@ -8573,9 +8675,9 @@ class SyncEngine { string processPathToRemoveRootReference(ref string pathToCheck) { size_t colonIndex = pathToCheck.indexOf(":"); if (colonIndex != -1) { - addLogEntry("Updating " ~ pathToCheck ~ " to remove prefix up to ':'", ["debug"]); + if (debugLogging) {addLogEntry("Updating " ~ pathToCheck ~ " to remove prefix up to ':'", ["debug"]);} pathToCheck = pathToCheck[colonIndex + 1 .. $]; - addLogEntry("Updated path for 'skip_dir' check: " ~ pathToCheck, ["debug"]); + if (debugLogging) {addLogEntry("Updated path for 'skip_dir' check: " ~ pathToCheck, ["debug"]);} } return pathToCheck; } @@ -8607,7 +8709,7 @@ class SyncEngine { onlineDriveDetails[driveId] = DriveDetailsCache(driveId, quotaRestricted, quotaAvailable, quotaRemaining); // Debug log what the cached array now contains - addLogEntry("onlineDriveDetails: " ~ to!string(onlineDriveDetails), ["debug"]); + if (debugLogging) {addLogEntry("onlineDriveDetails: " ~ to!string(onlineDriveDetails), ["debug"]);} } // Return a specific 'driveId' details from 'onlineDriveDetails' @@ -8646,11 +8748,13 @@ class SyncEngine { thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink); } catch (OneDriveException exception) { // OneDrive threw an error - addLogEntry("------------------------------------------------------------------", ["debug"]); - addLogEntry("Query Error: thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink)", ["debug"]); - addLogEntry("driveId: " ~ parentItemDriveId, ["debug"]); - addLogEntry("idToQuery: " ~ parentItemId, ["debug"]); - 
addLogEntry("nextLink: " ~ nextLink, ["debug"]); + if (debugLogging) { + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Query Error: thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink)", ["debug"]); + addLogEntry("driveId: " ~ parentItemDriveId, ["debug"]); + addLogEntry("idToQuery: " ~ parentItemId, ["debug"]); + addLogEntry("nextLink: " ~ nextLink, ["debug"]); + } string thisFunctionName = getFunctionName!({}); // Default operation if not 408,429,503,504 errors @@ -8681,7 +8785,7 @@ class SyncEngine { // to indicate more items are available and provide the request URL for the next page of items. if ("@odata.nextLink" in thisLevelChildren) { // Update nextLink to next changeSet bundle - addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + if (debugLogging) {addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]);} nextLink = thisLevelChildren["@odata.nextLink"].str; } else break; @@ -8722,7 +8826,7 @@ class SyncEngine { } } else { // zero space available is being reported, maybe being restricted? - addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); + if (verboseLogging) {addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. 
Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]);} quotaRemaining = 0; quotaRestricted = true; } @@ -8736,7 +8840,7 @@ class SyncEngine { void freshenCachedDriveQuotaDetails() { foreach (driveId; onlineDriveDetails.keys) { // Update this driveid quota details - addLogEntry("Freshen Quota Details for this driveId: " ~ driveId, ["debug"]); + if (debugLogging) {addLogEntry("Freshen Quota Details for this driveId: " ~ driveId, ["debug"]);} addOrUpdateOneDriveOnlineDetails(driveId); } } @@ -8744,8 +8848,10 @@ class SyncEngine { // Create a 'root' DB Tie Record for a Shared Folder from the JSON data void createDatabaseRootTieRecordForOnlineSharedFolder(JSONValue onedriveJSONItem) { // Creating|Updating a DB Tie - addLogEntry("Creating|Updating a 'root' DB Tie Record for this Shared Folder: " ~ onedriveJSONItem["name"].str, ["debug"]); - addLogEntry("Raw JSON for 'root' DB Tie Record: " ~ to!string(onedriveJSONItem), ["debug"]); + if (debugLogging) { + addLogEntry("Creating|Updating a 'root' DB Tie Record for this Shared Folder: " ~ onedriveJSONItem["name"].str, ["debug"]); + addLogEntry("Raw JSON for 'root' DB Tie Record: " ~ to!string(onedriveJSONItem), ["debug"]); + } // New DB Tie Item to detail the 'root' of the Shared Folder Item tieDBItem; @@ -8794,15 +8900,17 @@ class SyncEngine { tieDBItem.parentId = null; // Add this DB Tie parent record to the local database - addLogEntry("Creating|Updating into local database a 'root' DB Tie record: " ~ to!string(tieDBItem), ["debug"]); + if (debugLogging) {addLogEntry("Creating|Updating into local database a 'root' DB Tie record: " ~ to!string(tieDBItem), ["debug"]);} itemDB.upsert(tieDBItem); } // Create a DB Tie Record for a Shared Folder void createDatabaseTieRecordForOnlineSharedFolder(Item parentItem) { // Creating|Updating a DB Tie - addLogEntry("Creating|Updating a DB Tie Record for this Shared Folder: " ~ parentItem.name, ["debug"]); - addLogEntry("Parent Item Record: " ~ 
to!string(parentItem), ["debug"]); + if (debugLogging) { + addLogEntry("Creating|Updating a DB Tie Record for this Shared Folder: " ~ parentItem.name, ["debug"]); + addLogEntry("Parent Item Record: " ~ to!string(parentItem), ["debug"]); + } // New DB Tie Item to bind the 'remote' path to our parent path Item tieDBItem; @@ -8830,7 +8938,7 @@ class SyncEngine { } // Add tie DB record to the local database - addLogEntry("Creating|Updating into local database a DB Tie record: " ~ to!string(tieDBItem), ["debug"]); + if (debugLogging) {addLogEntry("Creating|Updating into local database a DB Tie record: " ~ to!string(tieDBItem), ["debug"]);} itemDB.upsert(tieDBItem); } @@ -8881,7 +8989,7 @@ class SyncEngine { string sharedByEmail; // Debug response output - addLogEntry("shared folder entry: " ~ to!string(searchResult), ["debug"]); + if (debugLogging) {addLogEntry("shared folder entry: " ~ to!string(searchResult), ["debug"]);} // Configure 'who' this was shared by if ("sharedBy" in searchResult["remoteItem"]["shared"]) { @@ -8912,11 +9020,13 @@ class SyncEngine { } // More detail if --verbose is being used - addLogEntry("Item Id: " ~ searchResult["remoteItem"]["id"].str, ["verbose"]); - addLogEntry("Parent Drive Id: " ~ searchResult["remoteItem"]["parentReference"]["driveId"].str, ["verbose"]); - if ("id" in searchResult["remoteItem"]["parentReference"]) { - addLogEntry("Parent Item Id: " ~ searchResult["remoteItem"]["parentReference"]["id"].str, ["verbose"]); - } + if (verboseLogging) { + addLogEntry("Item Id: " ~ searchResult["remoteItem"]["id"].str, ["verbose"]); + addLogEntry("Parent Drive Id: " ~ searchResult["remoteItem"]["parentReference"]["driveId"].str, ["verbose"]); + if ("id" in searchResult["remoteItem"]["parentReference"]) { + addLogEntry("Parent Item Id: " ~ searchResult["remoteItem"]["parentReference"]["id"].str, ["verbose"]); + } + } } // Close out the loop @@ -8976,7 +9086,7 @@ class SyncEngine { if (isItemFile(searchResult)) { // Debug response output - 
addLogEntry("getSharedWithMe Response Shared File JSON: " ~ to!string(searchResult), ["debug"]); + if (debugLogging) {addLogEntry("getSharedWithMe Response Shared File JSON: " ~ to!string(searchResult), ["debug"]);} // Make a DB item from this JSON Item sharedFileOriginalData = makeItem(searchResult); @@ -9031,7 +9141,7 @@ class SyncEngine { sharedFilesPath.parentId = sharedFilesRootDirectoryDatabaseRecord.id; // Add DB record to the local database - addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]); + if (debugLogging) {addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]);} itemDB.upsert(sharedFilesPath); } else { // Folder exists locally, is the folder in the database? @@ -9045,7 +9155,7 @@ class SyncEngine { sharedFilesPath.parentId = sharedFilesRootDirectoryDatabaseRecord.id; // Add DB record to the local database - addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]); + if (debugLogging) {addLogEntry("Creating|Updating into local database a DB record for storing OneDrive Business Shared Files: " ~ to!string(sharedFilesPath), ["debug"]);} itemDB.upsert(sharedFilesPath); } } @@ -9106,7 +9216,7 @@ class SyncEngine { fileToDownload["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"] = latestOnlineDetails["fileSystemInfo"]["lastModifiedDateTime"].str; // Final JSON that will be used to download the file - addLogEntry("Final fileToDownload: " ~ to!string(fileToDownload), ["debug"]); + if (debugLogging) {addLogEntry("Final fileToDownload: " ~ to!string(fileToDownload), ["debug"]);} // Make the new DB item from the consolidated JSON item Item downloadSharedFileDbItem = makeItem(fileToDownload); @@ -9270,9 +9380,9 @@ class SyncEngine { } else { // Need to query to obtain the 
details try { - addLogEntry("Attempting to query OneDrive Online for this parent path: " ~ parentPath, ["debug"]); + if (debugLogging) {addLogEntry("Attempting to query OneDrive Online for this parent path: " ~ parentPath, ["debug"]);} parentPathData = onlineMoveApiInstance.getPathDetails(parentPath); - addLogEntry("Online Parent Path Query Response: " ~ to!string(parentPathData), ["debug"]); + if (debugLogging) {addLogEntry("Online Parent Path Query Response: " ~ to!string(parentPathData), ["debug"]);} parentItem = makeItem(parentPathData); } catch (OneDriveException exception) { if (exception.httpStatusCode == 404) { diff --git a/src/util.d b/src/util.d index 15ecfaa78..6fd3aac47 100644 --- a/src/util.d +++ b/src/util.d @@ -75,7 +75,7 @@ void safeBackup(const(char)[] path, bool dryRun, out string renamedPath) { newPath ~= ext; // Log that we are perform the backup by renaming the file - addLogEntry("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: " ~ to!string(path) ~ " -> " ~ to!string(newPath) , ["verbose"]); + if (verboseLogging) {addLogEntry("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: " ~ to!string(path) ~ " -> " ~ to!string(newPath) , ["verbose"]);} if (!dryRun) { // Not a --dry-run scenario - do the file rename @@ -99,7 +99,7 @@ void safeBackup(const(char)[] path, bool dryRun, out string renamedPath) { addLogEntry("Renaming of local file failed for " ~ to!string(path) ~ ": " ~ e.msg, ["error"]); } } else { - addLogEntry("DRY-RUN: Skipping renaming local file to preserve existing file and prevent data loss: " ~ to!string(path) ~ " -> " ~ to!string(newPath), ["debug"]); + if (debugLogging) {addLogEntry("DRY-RUN: Skipping renaming local file to preserve existing file and prevent data loss: " ~ to!string(path) ~ " -> " ~ to!string(newPath), ["debug"]);} } } @@ -107,11 +107,11 @@ void safeBackup(const(char)[] path, bool dryRun, out 
string renamedPath) { void safeRename(const(char)[] oldPath, const(char)[] newPath, bool dryRun) { // Perform the rename if (!dryRun) { - addLogEntry("Calling rename(oldPath, newPath)", ["debug"]); + if (debugLogging) {addLogEntry("Calling rename(oldPath, newPath)", ["debug"]);} // Use rename() as Linux is POSIX compliant, we have an atomic operation where at no point in time the 'to' is missing. rename(oldPath, newPath); } else { - addLogEntry("DRY-RUN: Skipping local file rename", ["debug"]); + if (debugLogging) {addLogEntry("DRY-RUN: Skipping local file rename", ["debug"]);} } } @@ -275,8 +275,10 @@ bool retryInternetConnectivtyTest(ApplicationConfig appConfig) { backoffInterval = min(backoffInterval * 2, maxBackoffInterval); // exponential increase } - addLogEntry(" Retry Attempt: " ~ to!string(retryAttempts + 1), ["debug"]); - addLogEntry(" Retry In (seconds): " ~ to!string(backoffInterval), ["debug"]); + if (debugLogging) { + addLogEntry(" Retry Attempt: " ~ to!string(retryAttempts + 1), ["debug"]); + addLogEntry(" Retry In (seconds): " ~ to!string(backoffInterval), ["debug"]); + } Thread.sleep(dur!"seconds"(backoffInterval)); isOnline = testInternetReachability(appConfig); // assuming this function is defined elsewhere @@ -607,7 +609,7 @@ void displayOneDriveErrorMessage(string message, string callingFunction) { if (errorReason.startsWith(" 0 ? cast(int) ceil(eta_sec) : 0; } else {