diff --git a/.gitignore b/.gitignore index 1db29c4e1..1c744c759 100644 --- a/.gitignore +++ b/.gitignore @@ -31,3 +31,4 @@ Tests/FunctionalTests/TestApp/package-lock.json # Ignore log files npm-debug.log +undefined/temp/appInsights-node/applicationinsights.log diff --git a/Library/FileAccessControl.ts b/Library/FileAccessControl.ts new file mode 100644 index 000000000..038ffb1f0 --- /dev/null +++ b/Library/FileAccessControl.ts @@ -0,0 +1,164 @@ +import fs = require("fs"); +import os = require("os"); +import child_process = require("child_process"); + +import Logging = require("./Logging"); + + +export class FileAccessControl { + private static TAG = "FileAccessControl"; + + private static ICACLS_PATH = `${process.env.systemdrive}/windows/system32/icacls.exe`; + private static POWERSHELL_PATH = `${process.env.systemdrive}/windows/system32/windowspowershell/v1.0/powershell.exe`; + private static ACLED_DIRECTORIES: { [id: string]: boolean } = {}; + private static ACL_IDENTITY: string = null; + private static OS_FILE_PROTECTION_CHECKED = false; + public static OS_PROVIDES_FILE_PROTECTION = false; + public static USE_ICACLS = os.type() === "Windows_NT"; + + + // Check if file access control could be enabled + public static checkFileProtection() { + if (!FileAccessControl.OS_PROVIDES_FILE_PROTECTION && !FileAccessControl.OS_FILE_PROTECTION_CHECKED) { + FileAccessControl.OS_FILE_PROTECTION_CHECKED = true; + // Node's chmod levels do not appropriately restrict file access on Windows + // Use the built-in command line tool ICACLS on Windows to properly restrict + // access to the temporary directory used for disk retry mode. + if (FileAccessControl.USE_ICACLS) { + // This should be async - but it's currently safer to have this synchronous + // This guarantees we can immediately fail setDiskRetryMode if we need to + try { + FileAccessControl.OS_PROVIDES_FILE_PROTECTION = fs.existsSync(FileAccessControl.ICACLS_PATH); + } catch (e) { } + if (!FileAccessControl.OS_PROVIDES_FILE_PROTECTION) { + Logging.warn(FileAccessControl.TAG, "Could not find ICACLS in expected location! 
This is necessary to use disk retry mode on Windows.") + } + } else { + // chmod works everywhere else + FileAccessControl.OS_PROVIDES_FILE_PROTECTION = true; + } + } + } + + public static async applyACLRules(directory: string): Promise { + if (FileAccessControl.USE_ICACLS) { + if (FileAccessControl.ACLED_DIRECTORIES[directory] === undefined) { + // Avoid multiple calls race condition by setting ACLED_DIRECTORIES to false for this directory immediately + // If batches are being failed faster than the processes spawned below return, some data won't be stored to disk + // This is better than the alternative of potentially infinitely spawned processes + FileAccessControl.ACLED_DIRECTORIES[directory] = false; + try { + // Restrict this directory to only current user and administrator access + let identity = await this._getACLIdentity(); + await this._runICACLS(this._getACLArguments(directory, identity)); + FileAccessControl.ACLED_DIRECTORIES[directory] = true; + } + catch (ex) { + FileAccessControl.ACLED_DIRECTORIES[directory] = false; // false is used to cache failed (vs undefined which is "not yet tried") + throw ex; + } + } else { + if (!FileAccessControl.ACLED_DIRECTORIES[directory]) { + throw new Error("Setting ACL restrictions did not succeed (cached result)"); + } + } + } + } + + public static applyACLRulesSync(directory: string) { + if (FileAccessControl.USE_ICACLS) { + // For performance, only run ACL rules if we haven't already during this session + if (FileAccessControl.ACLED_DIRECTORIES[directory] === undefined) { + this._runICACLSSync(this._getACLArguments(directory, this._getACLIdentitySync())); + FileAccessControl.ACLED_DIRECTORIES[directory] = true; // If we get here, it succeeded. _runIACLSSync will throw on failures + return; + } else if (!FileAccessControl.ACLED_DIRECTORIES[directory]) { // falsy but not undefined + throw new Error("Setting ACL restrictions did not succeed (cached result)"); + } + } + } + + private static _runICACLS(args: string[]): Promise { + return new Promise((resolve, reject) => { + var aclProc = child_process.spawn(FileAccessControl.ICACLS_PATH, args, { windowsHide: true }); + aclProc.on("error", (e: Error) => reject(e)); + aclProc.on("close", (code: number, signal: string) => { + if (code === 0) { + resolve(); + } + else { + reject(new Error(`Setting ACL restrictions did not succeed (ICACLS returned code ${code})`)); + } + }); + }); + } + + private static _runICACLSSync(args: string[]) { + // Some very old versions of Node (< 0.11) don't have this + if (child_process.spawnSync) { + var aclProc = child_process.spawnSync(FileAccessControl.ICACLS_PATH, args, { windowsHide: true }); + if (aclProc.error) { + throw aclProc.error; + } else if (aclProc.status !== 0) { + throw new Error(`Setting ACL restrictions did not succeed (ICACLS returned code ${aclProc.status})`); + } + } else { + throw new Error("Could not synchronously call ICACLS under current version of Node.js"); + } + } + + private static _getACLIdentity(): Promise { + return new Promise((resolve, reject) => { + if (FileAccessControl.ACL_IDENTITY) { + resolve(FileAccessControl.ACL_IDENTITY); + } + var psProc = child_process.spawn(FileAccessControl.POWERSHELL_PATH, + ["-Command", "[System.Security.Principal.WindowsIdentity]::GetCurrent().Name"], { + windowsHide: true, + stdio: ['ignore', 'pipe', 'pipe'] // Needed to prevent hanging on Win 7 + }); + let data = ""; + psProc.stdout.on("data", (d: string) => data += d); + psProc.on("error", (e: Error) => reject(e)); + psProc.on("close", (code: number, 
signal: string) => { + FileAccessControl.ACL_IDENTITY = data && data.trim(); + if (code === 0) { + resolve(FileAccessControl.ACL_IDENTITY); + } + else { + reject(new Error(`Getting ACL identity did not succeed (PS returned code ${code})`)); + } + }); + }); + } + + private static _getACLIdentitySync() { + if (FileAccessControl.ACL_IDENTITY) { + return FileAccessControl.ACL_IDENTITY; + } + // Some very old versions of Node (< 0.11) don't have this + if (child_process.spawnSync) { + var psProc = child_process.spawnSync(FileAccessControl.POWERSHELL_PATH, + ["-Command", "[System.Security.Principal.WindowsIdentity]::GetCurrent().Name"], { + windowsHide: true, + stdio: ['ignore', 'pipe', 'pipe'] // Needed to prevent hanging on Win 7 + }); + if (psProc.error) { + throw psProc.error; + } else if (psProc.status !== 0) { + throw new Error(`Getting ACL identity did not succeed (PS returned code ${psProc.status})`); + } + FileAccessControl.ACL_IDENTITY = psProc.stdout && psProc.stdout.toString().trim(); + return FileAccessControl.ACL_IDENTITY; + } else { + throw new Error("Could not synchronously get ACL identity under current version of Node.js"); + } + } + + private static _getACLArguments(directory: string, identity: string) { + return [directory, + "/grant", "*S-1-5-32-544:(OI)(CI)F", // Full permission for Administrators + "/grant", `${identity}:(OI)(CI)F`, // Full permission for current user + "/inheritance:r"]; // Remove all inherited permissions + } +} diff --git a/Library/FileSystemHelper.ts b/Library/FileSystemHelper.ts new file mode 100644 index 000000000..8277fbbaa --- /dev/null +++ b/Library/FileSystemHelper.ts @@ -0,0 +1,76 @@ +import * as fs from "fs"; +import path = require("path"); +import { promisify } from "util"; + +export const statAsync = promisify(fs.stat); +export const lstatAsync = promisify(fs.lstat); +export const mkdirAsync = promisify(fs.mkdir); +export const accessAsync = promisify(fs.access); +export const appendFileAsync = promisify(fs.appendFile); +export const writeFileAsync = promisify(fs.writeFile); +export const readFileAsync = promisify(fs.readFile); +export const readdirAsync = promisify(fs.readdir); +export const unlinkAsync = promisify(fs.unlink); + +/** + * Validate directory exists. + */ +export const confirmDirExists = async (directory: string): Promise => { + try { + const stats = await lstatAsync(directory); + if (!stats.isDirectory()) { + throw new Error("Path existed but was not a directory"); + } + } catch (err) { + if (err && err.code === "ENOENT") { + try { + await mkdirAsync(directory); + } catch (mkdirErr) { + if (mkdirErr && mkdirErr.code !== "EEXIST") { + // Handle race condition by ignoring EEXIST + throw mkdirErr; + } + } + } + } +}; + +/** + * Computes the size (in bytes) of all files in a directory at the root level. Asynchronously. + */ +export const getShallowDirectorySize = async (directory: string): Promise => { + // Get the directory listing + const files = await readdirAsync(directory); + let totalSize = 0; + // Query all file sizes + for (const file of files) { + const fileStats = await statAsync(path.join(directory, file)); + if (fileStats.isFile()) { + totalSize += fileStats.size; + } + } + return totalSize; +}; + +/** +* Computes the size (in bytes) of all files in a directory at the root level. Synchronously. 
+*/ +export const getShallowDirectorySizeSync = (directory: string): number => { + let files = fs.readdirSync(directory); + let totalSize = 0; + for (let i = 0; i < files.length; i++) { + totalSize += fs.statSync(path.join(directory, files[i])).size; + } + return totalSize; +} + +/** +* Computes the size (in bytes) of a file asynchronously. +*/ +export const getShallowFileSize = async (filePath: string): Promise => { + const fileStats = await statAsync(filePath); + if (fileStats.isFile()) { + return fileStats.size; + } +} + diff --git a/Library/InternalAzureLogger.ts b/Library/InternalAzureLogger.ts new file mode 100644 index 000000000..70938078c --- /dev/null +++ b/Library/InternalAzureLogger.ts @@ -0,0 +1,159 @@ +import fs = require("fs"); +import os = require("os"); +import path = require("path"); +import { AzureLogger, setLogLevel, createClientLogger } from "@azure/logger"; +import FileSystemHelper = require("./FileSystemHelper"); + + +class InternalAzureLogger { + + private static _instance: InternalAzureLogger; + public logger: AzureLogger; + public maxHistory: number; + public maxSizeBytes: number; + + private TAG = "Logger"; + private _cleanupTimeOut = 60 * 30 * 1000; // 30 minutes; + private static _fileCleanupTimer: NodeJS.Timer = null; + private _tempDir: string; + public _logFileName: string; + private _fileFullPath: string; + private _backUpNameFormat: string; + + constructor() { + setLogLevel("verbose"); // Verbose so we can control log level in our side + let logDestination = process.env.APPLICATIONINSIGHTS_LOG_DESTINATION; // destination can be one of file, console or file+console + let logToFile = false; + let logToConsole = true; + if (logDestination == "file+console") { + logToFile = true; + } + if (logDestination == "file") { + logToFile = true; + logToConsole = false; + } + this.maxSizeBytes = 50000; + this.maxHistory = 1; + this._logFileName = "applicationinsights.log"; + + // If custom path not provided use temp folder, /tmp for *nix and USERDIR/AppData/Local/Temp for Windows + let logFilePath = process.env.APPLICATIONINSIGHTS_LOGDIR; + if (!logFilePath) { + this._tempDir = path.join(os.tmpdir(), "appInsights-node"); + } + else { + if (path.isAbsolute(logFilePath)) { + this._tempDir = logFilePath; + } + else { + this._tempDir = path.join(process.cwd(), logFilePath); + } + } + this._fileFullPath = path.join(this._tempDir, this._logFileName); + this._backUpNameFormat = "." 
+ this._logFileName; // {currentime}.applicationinsights.log + + // Override AzureLogger to also enable logs to be stored in disk + AzureLogger.log = (...args) => { + if (logToFile) { + this._storeToDisk(args); + } + if (logToConsole) { + console.log(...args); + } + }; + this.logger = createClientLogger('ApplicationInsights'); + if (logToFile) { + if (!InternalAzureLogger._fileCleanupTimer) { + InternalAzureLogger._fileCleanupTimer = setInterval(() => { this._fileCleanupTask(); }, this._cleanupTimeOut); + InternalAzureLogger._fileCleanupTimer.unref(); + } + } + } + + static getInstance() { + if (!InternalAzureLogger._instance) { + InternalAzureLogger._instance = new InternalAzureLogger(); + } + return InternalAzureLogger._instance; + } + + private async _storeToDisk(args: any): Promise { + let data = args + "\r\n"; + + try { + await FileSystemHelper.confirmDirExists(this._tempDir); + } + catch (err) { + console.log(this.TAG, "Failed to create directory for log file: " + (err && err.message)); + return; + } + try { + await FileSystemHelper.accessAsync(this._fileFullPath, fs.constants.F_OK); + } + catch (err) { + // No file create one + await FileSystemHelper.appendFileAsync(this._fileFullPath, data).catch((appendError) => { + console.log(this.TAG, "Failed to put log into file: " + (appendError && appendError.message)); + }); + return; + } + try { + // Check size + let size = await FileSystemHelper.getShallowFileSize(this._fileFullPath); + if (size > this.maxSizeBytes) { + await this._createBackupFile(data); + } + else { + await FileSystemHelper.appendFileAsync(this._fileFullPath, data); + } + } + catch (err) { + console.log(this.TAG, "Failed to create backup file: " + (err && err.message)); + } + } + + private async _createBackupFile(data: string): Promise { + try { + let buffer = await FileSystemHelper.readFileAsync(this._fileFullPath); + let backupPath = path.join(this._tempDir, new Date().getTime() + "." 
+ this._logFileName); + await FileSystemHelper.writeFileAsync(backupPath, buffer); + } + catch (err) { + console.log(`Failed to generate backup log file`, err); + } + finally { + // Store logs + FileSystemHelper.writeFileAsync(this._fileFullPath, data); + } + } + + private async _fileCleanupTask(): Promise { + try { + let files = await FileSystemHelper.readdirAsync(this._tempDir); + // Filter only backup files + files = files.filter(f => path.basename(f).indexOf(this._backUpNameFormat) > -1); + // Sort by creation date + files.sort((a: string, b: String) => { + // Check expiration + let aCreationDate: Date = new Date(parseInt(a.split(this._backUpNameFormat)[0])); + let bCreationDate: Date = new Date(parseInt(b.split(this._backUpNameFormat)[0])); + if (aCreationDate < bCreationDate) { + return -1; + } + if (aCreationDate >= bCreationDate) { + return 1; + } + }); + let totalFiles = files.length; + for (let i = 0; i < totalFiles - this.maxHistory; i++) { + let pathToDelete = path.join(this._tempDir, files[i]); + await FileSystemHelper.unlinkAsync(pathToDelete); + } + } + catch (err) { + console.log(this.TAG, "Failed to cleanup log files: " + (err && err.message)); + } + } +} + +export = InternalAzureLogger; \ No newline at end of file diff --git a/Library/Logging.ts b/Library/Logging.ts index 0c87aef44..9ff1fdab2 100644 --- a/Library/Logging.ts +++ b/Library/Logging.ts @@ -1,20 +1,20 @@ -import { createClientLogger, AzureLogger } from "@azure/logger"; +import InternalAzureLogger = require("./InternalAzureLogger"); class Logging { public static enableDebug = false; public static disableWarnings = false; + private static TAG = "ApplicationInsights:"; - public static logger = createClientLogger('ApplicationInsights') as AzureLogger; public static info(message?: any, ...optionalParams: any[]) { - if(Logging.enableDebug) { - this.logger.info(Logging.TAG + message, optionalParams); + if (this.enableDebug) { + InternalAzureLogger.getInstance().logger.info(this.TAG + message, optionalParams); } } public static warn(message?: any, ...optionalParams: any[]) { - if(!Logging.disableWarnings) { - this.logger.warning(Logging.TAG + message, optionalParams); + if (!this.disableWarnings) { + InternalAzureLogger.getInstance().logger.warning(this.TAG + message, optionalParams); } } } diff --git a/Library/Sender.ts b/Library/Sender.ts index adb4d90c7..747084c2e 100644 --- a/Library/Sender.ts +++ b/Library/Sender.ts @@ -3,26 +3,23 @@ import http = require("http"); import os = require("os"); import path = require("path"); import zlib = require("zlib"); -import child_process = require("child_process"); + import AuthorizationHandler = require("./AuthorizationHandler"); import Config = require("./Config") import Contracts = require("../Declarations/Contracts"); import Constants = require("../Declarations/Constants"); import AutoCollectHttpDependencies = require("../AutoCollection/HttpDependencies"); import Statsbeat = require("../AutoCollection/Statsbeat"); +import FileSystemHelper = require("./FileSystemHelper"); import Util = require("./Util"); import { URL } from "url"; import Logging = require("./Logging"); +import { FileAccessControl } from "./FileAccessControl"; +import { Envelope } from "../Declarations/Contracts"; class Sender { private static TAG = "Sender"; - private static ICACLS_PATH = `${process.env.systemdrive}/windows/system32/icacls.exe`; - private static POWERSHELL_PATH = `${process.env.systemdrive}/windows/system32/windowspowershell/v1.0/powershell.exe`; - private static ACLED_DIRECTORIES: { [id: 
string]: boolean } = {}; - private static ACL_IDENTITY: string = null; - private static OS_FILE_PROTECTION_CHECKED = false; - // the amount of time the SDK will wait between resending cached data, this buffer is to avoid any throttling from the service side public static WAIT_BETWEEN_RESEND = 60 * 1000; // 1 minute public static MAX_BYTES_ON_DISK = 50 * 1024 * 1024; // 50 mb @@ -30,8 +27,6 @@ class Sender { public static CLEANUP_TIMEOUT = 60 * 60 * 1000; // 1 hour public static FILE_RETEMPTION_PERIOD = 7 * 24 * 60 * 60 * 1000; // 7 days public static TEMPDIR_PREFIX: string = "appInsights-node"; - public static OS_PROVIDES_FILE_PROTECTION = false; - public static USE_ICACLS = os.type() === "Windows_NT"; private _config: Config; private _statsbeat: Statsbeat; @@ -48,7 +43,7 @@ class Sender { protected _resendInterval: number; protected _maxBytesOnDisk: number; -constructor(config: Config, getAuthorizationHandler?: (config: Config) => AuthorizationHandler, onSuccess?: (response: string) => void, onError?: (error: Error) => void, statsbeat?: Statsbeat) { + constructor(config: Config, getAuthorizationHandler?: (config: Config) => AuthorizationHandler, onSuccess?: (response: string) => void, onError?: (error: Error) => void, statsbeat?: Statsbeat) { this._config = config; this._onSuccess = onSuccess; this._onError = onError; @@ -65,36 +60,14 @@ constructor(config: Config, getAuthorizationHandler?: (config: Config) => Author this._tempDir = path.join(os.tmpdir(), Sender.TEMPDIR_PREFIX + this._config.instrumentationKey); } - private static _checkFileProtection() { - if (!Sender.OS_PROVIDES_FILE_PROTECTION && !Sender.OS_FILE_PROTECTION_CHECKED) { - Sender.OS_FILE_PROTECTION_CHECKED = true; - // Node's chmod levels do not appropriately restrict file access on Windows - // Use the built-in command line tool ICACLS on Windows to properly restrict - // access to the temporary directory used for disk retry mode. - if (Sender.USE_ICACLS) { - // This should be async - but it's currently safer to have this synchronous - // This guarantees we can immediately fail setDiskRetryMode if we need to - try { - Sender.OS_PROVIDES_FILE_PROTECTION = fs.existsSync(Sender.ICACLS_PATH); - } catch (e) { } - if (!Sender.OS_PROVIDES_FILE_PROTECTION) { - Logging.warn(Sender.TAG, "Could not find ICACLS in expected location! This is necessary to use disk retry mode on Windows.") - } - } else { - // chmod works everywhere else - Sender.OS_PROVIDES_FILE_PROTECTION = true; - } - } - } - /** * Enable or disable offline mode */ public setDiskRetryMode(value: boolean, resendInterval?: number, maxBytesOnDisk?: number) { - if (!Sender.OS_FILE_PROTECTION_CHECKED && value) { - Sender._checkFileProtection(); // Only check file protection when disk retry is enabled + if (value) { + FileAccessControl.checkFileProtection(); // Only check file protection when disk retry is enabled } - this._enableDiskRetryMode = Sender.OS_PROVIDES_FILE_PROTECTION && value; + this._enableDiskRetryMode = FileAccessControl.OS_PROVIDES_FILE_PROTECTION && value; if (typeof resendInterval === 'number' && resendInterval >= 0) { this._resendInterval = Math.floor(resendInterval); } @@ -102,7 +75,7 @@ constructor(config: Config, getAuthorizationHandler?: (config: Config) => Author this._maxBytesOnDisk = Math.floor(maxBytesOnDisk); } - if (value && !Sender.OS_PROVIDES_FILE_PROTECTION) { + if (value && !FileAccessControl.OS_PROVIDES_FILE_PROTECTION) { this._enableDiskRetryMode = false; Logging.warn(Sender.TAG, "Ignoring request to enable disk retry mode. 
Sufficient file protection capabilities were not detected.") } @@ -164,7 +137,7 @@ constructor(config: Config, getAuthorizationHandler?: (config: Config) => Author let batch: string = ""; envelopes.forEach(envelope => { - var payload: string = this._stringify(envelope); + var payload: string = Util.stringify(envelope); if (typeof payload !== "string") { return; } @@ -228,14 +201,17 @@ constructor(config: Config, getAuthorizationHandler?: (config: Config) => Author } const breezeResponse = JSON.parse(responseString) as Contracts.BreezeResponse; let filteredEnvelopes: Contracts.EnvelopeTelemetry[] = []; - breezeResponse.errors.forEach(error => { - if (this._isRetriable(error.statusCode)) { - filteredEnvelopes.push(envelopes[error.index]); + if (breezeResponse.errors) { + breezeResponse.errors.forEach(error => { + if (this._isRetriable(error.statusCode)) { + filteredEnvelopes.push(envelopes[error.index]); + } + }); + if (filteredEnvelopes.length > 0) { + this._storeToDisk(filteredEnvelopes); } - }); - if (filteredEnvelopes.length > 0) { - this._storeToDisk(filteredEnvelopes); } + } catch (ex) { this._storeToDisk(envelopes); // Retriable status code with not valid Breeze response @@ -328,7 +304,7 @@ constructor(config: Config, getAuthorizationHandler?: (config: Config) => Author public saveOnCrash(envelopes: Contracts.EnvelopeTelemetry[]) { if (this._enableDiskRetryMode) { - this._storeToDiskSync(this._stringify(envelopes)); + this._storeToDiskSync(Util.stringify(envelopes)); } } @@ -345,231 +321,48 @@ constructor(config: Config, getAuthorizationHandler?: (config: Config) => Author ); } - private _runICACLS(args: string[], callback: (err: Error) => void) { - var aclProc = child_process.spawn(Sender.ICACLS_PATH, args, { windowsHide: true }); - aclProc.on("error", (e: Error) => callback(e)); - aclProc.on("close", (code: number, signal: string) => { - return callback(code === 0 ? null : new Error(`Setting ACL restrictions did not succeed (ICACLS returned code ${code})`)); - }); - } - - private _runICACLSSync(args: string[]) { - // Some very old versions of Node (< 0.11) don't have this - if (child_process.spawnSync) { - var aclProc = child_process.spawnSync(Sender.ICACLS_PATH, args, { windowsHide: true }); - if (aclProc.error) { - throw aclProc.error; - } else if (aclProc.status !== 0) { - throw new Error(`Setting ACL restrictions did not succeed (ICACLS returned code ${aclProc.status})`); - } - } else { - throw new Error("Could not synchronously call ICACLS under current version of Node.js"); - } - } - - private _getACLIdentity(callback: (error: Error, identity: string) => void) { - if (Sender.ACL_IDENTITY) { - return callback(null, Sender.ACL_IDENTITY); - } - var psProc = child_process.spawn(Sender.POWERSHELL_PATH, - ["-Command", "[System.Security.Principal.WindowsIdentity]::GetCurrent().Name"], { - windowsHide: true, - stdio: ['ignore', 'pipe', 'pipe'] // Needed to prevent hanging on Win 7 - }); - let data = ""; - psProc.stdout.on("data", (d: string) => data += d); - psProc.on("error", (e: Error) => callback(e, null)); - psProc.on("close", (code: number, signal: string) => { - Sender.ACL_IDENTITY = data && data.trim(); - return callback( - code === 0 ? 
null : new Error(`Getting ACL identity did not succeed (PS returned code ${code})`), - Sender.ACL_IDENTITY); - }); - } - - private _getACLIdentitySync() { - if (Sender.ACL_IDENTITY) { - return Sender.ACL_IDENTITY; - } - // Some very old versions of Node (< 0.11) don't have this - if (child_process.spawnSync) { - var psProc = child_process.spawnSync(Sender.POWERSHELL_PATH, - ["-Command", "[System.Security.Principal.WindowsIdentity]::GetCurrent().Name"], { - windowsHide: true, - stdio: ['ignore', 'pipe', 'pipe'] // Needed to prevent hanging on Win 7 - }); - if (psProc.error) { - throw psProc.error; - } else if (psProc.status !== 0) { - throw new Error(`Getting ACL identity did not succeed (PS returned code ${psProc.status})`); - } - Sender.ACL_IDENTITY = psProc.stdout && psProc.stdout.toString().trim(); - return Sender.ACL_IDENTITY; - } else { - throw new Error("Could not synchronously get ACL identity under current version of Node.js"); + /** + * Stores the payload as a json file on disk in the temp directory + */ + private async _storeToDisk(envelopes: Contracts.EnvelopeTelemetry[]): Promise { + try { + Logging.info(Sender.TAG, "Checking existence of data storage directory: " + this._tempDir); + await FileSystemHelper.confirmDirExists(this._tempDir); } - } - - private _getACLArguments(directory: string, identity: string) { - return [directory, - "/grant", "*S-1-5-32-544:(OI)(CI)F", // Full permission for Administrators - "/grant", `${identity}:(OI)(CI)F`, // Full permission for current user - "/inheritance:r"]; // Remove all inherited permissions - } - - private _applyACLRules(directory: string, callback: (err: Error) => void) { - if (!Sender.USE_ICACLS) { - return callback(null); + catch (ex) { + Logging.warn(Sender.TAG, "Failed to create folder to put telemetry: " + (ex && ex.message)); + this._onErrorHelper(ex); + return; } - - // For performance, only run ACL rules if we haven't already during this session - if (Sender.ACLED_DIRECTORIES[directory] === undefined) { - // Avoid multiple calls race condition by setting ACLED_DIRECTORIES to false for this directory immediately - // If batches are being failed faster than the processes spawned below return, some data won't be stored to disk - // This is better than the alternative of potentially infinitely spawned processes - Sender.ACLED_DIRECTORIES[directory] = false; - - // Restrict this directory to only current user and administrator access - this._getACLIdentity((err, identity) => { - if (err) { - Sender.ACLED_DIRECTORIES[directory] = false; // false is used to cache failed (vs undefined which is "not yet tried") - return callback(err); - } else { - this._runICACLS(this._getACLArguments(directory, identity), (err) => { - Sender.ACLED_DIRECTORIES[directory] = !err; - return callback(err); - }); - } - }); - } else { - return callback(Sender.ACLED_DIRECTORIES[directory] ? null : - new Error("Setting ACL restrictions did not succeed (cached result)")); + try { + await FileAccessControl.applyACLRules(this._tempDir); } - } - - private _applyACLRulesSync(directory: string) { - if (Sender.USE_ICACLS) { - // For performance, only run ACL rules if we haven't already during this session - if (Sender.ACLED_DIRECTORIES[directory] === undefined) { - this._runICACLSSync(this._getACLArguments(directory, this._getACLIdentitySync())); - Sender.ACLED_DIRECTORIES[directory] = true; // If we get here, it succeeded. 
_runIACLSSync will throw on failures - return; - } else if (!Sender.ACLED_DIRECTORIES[directory]) { // falsy but not undefined - throw new Error("Setting ACL restrictions did not succeed (cached result)"); - } + catch (ex) { + Logging.warn(Sender.TAG, "Failed to apply file access control to folder: " + (ex && ex.message)); + this._onErrorHelper(ex); + return; } - } - - private _confirmDirExists(directory: string, callback: (err: NodeJS.ErrnoException) => void): void { - fs.lstat(directory, (err, stats) => { - if (err && err.code === 'ENOENT') { - fs.mkdir(directory, (err) => { - if (err && err.code !== 'EEXIST') { // Handle race condition by ignoring EEXIST - callback(err); - } else { - this._applyACLRules(directory, callback); - } - }); - } else if (!err && stats.isDirectory()) { - this._applyACLRules(directory, callback); - } else { - callback(err || new Error("Path existed but was not a directory")); - } - }); - } - - /** - * Computes the size (in bytes) of all files in a directory at the root level. Asynchronously. - */ - private _getShallowDirectorySize(directory: string, callback: (err: NodeJS.ErrnoException, size: number) => void) { - // Get the directory listing - fs.readdir(directory, (err, files) => { - if (err) { - return callback(err, -1); - } - - let error: NodeJS.ErrnoException = null; - let totalSize = 0; - let count = 0; - - if (files.length === 0) { - callback(null, 0); + try { + let size = await FileSystemHelper.getShallowDirectorySize(this._tempDir); + if (size > this._maxBytesOnDisk) { + Logging.warn(Sender.TAG, "Not saving data due to max size limit being met. Directory size in bytes is: " + size); return; } + //create file - file name for now is the timestamp, a better approach would be a UUID but that + //would require an external dependency + var fileName = new Date().getTime() + ".ai.json"; + var fileFullPath = path.join(this._tempDir, fileName); - // Query all file sizes - for (let i = 0; i < files.length; i++) { - fs.stat(path.join(directory, files[i]), (err, fileStats) => { - count++; - - if (err) { - error = err; - } else { - if (fileStats.isFile()) { - totalSize += fileStats.size; - } - } - - if (count === files.length) { - // Did we get an error? - if (error) { - callback(error, -1); - } else { - callback(error, totalSize); - } - } - }); - } - }); - } - - /** - * Computes the size (in bytes) of all files in a directory at the root level. Synchronously. 
- */ - private _getShallowDirectorySizeSync(directory: string): number { - let files = fs.readdirSync(directory); - let totalSize = 0; - for (let i = 0; i < files.length; i++) { - totalSize += fs.statSync(path.join(directory, files[i])).size; + // Mode 600 is w/r for creator and no read access for others (only applies on *nix) + // For Windows, ACL rules are applied to the entire directory (see logic in _confirmDirExists and _applyACLRules) + Logging.info(Sender.TAG, "saving data to disk at: " + fileFullPath); + FileSystemHelper.writeFileAsync(fileFullPath, Util.stringify(envelopes), { mode: 0o600 }); + } + catch (ex) { + Logging.warn(Sender.TAG, "Failed to persist telemetry to disk: " + (ex && ex.message)); + this._onErrorHelper(ex); + return; } - return totalSize; - } - - /** - * Stores the payload as a json file on disk in the temp directory - */ - private _storeToDisk(envelopes: Contracts.EnvelopeTelemetry[]) { - // This will create the dir if it does not exist - // Default permissions on *nix are directory listing from other users but no file creations - Logging.info(Sender.TAG, "Checking existence of data storage directory: " + this._tempDir); - this._confirmDirExists(this._tempDir, (error) => { - if (error) { - Logging.warn(Sender.TAG, "Error while checking/creating directory: " + (error && error.message)); - this._onErrorHelper(error); - return; - } - - this._getShallowDirectorySize(this._tempDir, (err, size) => { - if (err || size < 0) { - Logging.warn(Sender.TAG, "Error while checking directory size: " + (err && err.message)); - this._onErrorHelper(err); - return; - } else if (size > this._maxBytesOnDisk) { - Logging.warn(Sender.TAG, "Not saving data due to max size limit being met. Directory size in bytes is: " + size); - return; - } - - //create file - file name for now is the timestamp, a better approach would be a UUID but that - //would require an external dependency - var fileName = new Date().getTime() + ".ai.json"; - var fileFullPath = path.join(this._tempDir, fileName); - - // Mode 600 is w/r for creator and no read access for others (only applies on *nix) - // For Windows, ACL rules are applied to the entire directory (see logic in _confirmDirExists and _applyACLRules) - Logging.info(Sender.TAG, "saving data to disk at: " + fileFullPath); - fs.writeFile(fileFullPath, this._stringify(envelopes), { mode: 0o600 }, (error) => this._onErrorHelper(error)); - }); - }); } /** @@ -584,9 +377,9 @@ constructor(config: Config, getAuthorizationHandler?: (config: Config) => Author } // Make sure permissions are valid - this._applyACLRulesSync(this._tempDir); + FileAccessControl.applyACLRulesSync(this._tempDir); - let dirSize = this._getShallowDirectorySizeSync(this._tempDir); + let dirSize = FileSystemHelper.getShallowDirectorySizeSync(this._tempDir); if (dirSize > this._maxBytesOnDisk) { Logging.info(Sender.TAG, "Not saving data due to max size limit being met. 
Directory size in bytes is: " + dirSize); return; @@ -611,43 +404,23 @@ constructor(config: Config, getAuthorizationHandler?: (config: Config) => Author * Check for temp telemetry files * reads the first file if exist, deletes it and tries to send its load */ - private _sendFirstFileOnDisk(): void { - - fs.exists(this._tempDir, (exists: boolean) => { - if (exists) { - fs.readdir(this._tempDir, (error, files) => { - if (!error) { - files = files.filter(f => path.basename(f).indexOf(".ai.json") > -1); - if (files.length > 0) { - var firstFile = files[0]; - var filePath = path.join(this._tempDir, firstFile); - fs.readFile(filePath, (error, buffer) => { - if (!error) { - // delete the file first to prevent double sending - fs.unlink(filePath, (error) => { - if (!error) { - try { - let envelopes: Contracts.EnvelopeTelemetry[] = JSON.parse(buffer.toString()); - this.send(envelopes); - } - catch (error) { - Logging.warn(Sender.TAG, "Failed to read persisted file", error); - } - } else { - this._onErrorHelper(error); - } - }); - } else { - this._onErrorHelper(error); - } - }); - } - } else { - this._onErrorHelper(error); - } - }); + private async _sendFirstFileOnDisk(): Promise { + try { + let files = await FileSystemHelper.readdirAsync(this._tempDir); + files = files.filter(f => path.basename(f).indexOf(".ai.json") > -1); + if (files.length > 0) { + var firstFile = files[0]; + var filePath = path.join(this._tempDir, firstFile); + let buffer = await FileSystemHelper.readFileAsync(filePath); + // delete the file first to prevent double sending + await FileSystemHelper.unlinkAsync(filePath); + let envelopes: Contracts.EnvelopeTelemetry[] = JSON.parse(buffer.toString()); + await this.send(envelopes); } - }); + } + catch (err) { + this._onErrorHelper(err); + } } private _onErrorHelper(error: Error): void { @@ -656,42 +429,29 @@ constructor(config: Config, getAuthorizationHandler?: (config: Config) => Author } } - private _stringify(payload: any) { + private async _fileCleanupTask(): Promise { try { - return JSON.stringify(payload); - } catch (error) { - Logging.warn(Sender.TAG, "Failed to serialize payload", error, payload); - } - } - - private _fileCleanupTask() { - fs.exists(this._tempDir, (exists: boolean) => { - if (exists) { - fs.readdir(this._tempDir, (error, files) => { - if (!error) { - files = files.filter(f => path.basename(f).indexOf(".ai.json") > -1); - if (files.length > 0) { - - files.forEach(file => { - // Check expiration - let fileCreationDate: Date = new Date(parseInt(file.split(".ai.json")[0])); - let expired = new Date(+(new Date()) - Sender.FILE_RETEMPTION_PERIOD) > fileCreationDate; - if (expired) { - var filePath = path.join(this._tempDir, file); - fs.unlink(filePath, (error) => { - if (error) { - this._onErrorHelper(error); - } - }); - } - }); - } - } else { - this._onErrorHelper(error); + let files = await FileSystemHelper.readdirAsync(this._tempDir); + files = files.filter(f => path.basename(f).indexOf(".ai.json") > -1); + if (files.length > 0) { + for (let i = 0; i < files.length; i++) { + // Check expiration + let fileCreationDate: Date = new Date(parseInt(files[i].split(".ai.json")[0])); + let expired = new Date(+(new Date()) - Sender.FILE_RETEMPTION_PERIOD) > fileCreationDate; + if (expired) { + var filePath = path.join(this._tempDir, files[i]); + await FileSystemHelper.unlinkAsync(filePath).catch((err) => { + this._onErrorHelper(err); + }); } - }); + } } - }); + } + catch (err) { + if (err.code != "ENOENT") { + this._onErrorHelper(err); + } + } } } diff --git 
a/Library/Util.ts b/Library/Util.ts index 481b3d863..ba88f4922 100644 --- a/Library/Util.ts +++ b/Library/Util.ts @@ -299,8 +299,8 @@ class Util { requestUrl: string, requestOptions: http.RequestOptions | https.RequestOptions, requestCallback: (res: http.IncomingMessage) => void, - useProxy= true, - useAgent= true): http.ClientRequest { + useProxy = true, + useAgent = true): http.ClientRequest { if (requestUrl && requestUrl.indexOf('//') === 0) { requestUrl = 'https:' + requestUrl; diff --git a/Tests/EndToEnd.tests.ts b/Tests/EndToEnd.tests.ts index 43e5cd51c..56095bb9e 100644 --- a/Tests/EndToEnd.tests.ts +++ b/Tests/EndToEnd.tests.ts @@ -19,6 +19,8 @@ import HeartBeat = require("../AutoCollection/HeartBeat"); import TelemetryClient = require("../Library/TelemetryClient"); import Context = require("../Library/Context"); import Util = require("../Library/Util"); +import { FileAccessControl } from "../Library/FileAccessControl"; +import FileSystemHelper = require("../Library/FileSystemHelper"); /** * A fake response class that passes by default @@ -172,6 +174,9 @@ describe("EndToEnd", () => { var newEnv = <{ [id: string]: string }>{}; Util.tlsRestrictedAgent = new https.Agent(); newEnv["APPLICATION_INSIGHTS_NO_STATSBEAT"] = "true"; + newEnv["TMP"] = process.env.TMP; + newEnv["TMPDIR"] = process.env.TMPDIR; + newEnv["TEMP"] = process.env.TEMP; originalEnv = process.env; process.env = newEnv; @@ -326,7 +331,7 @@ describe("EndToEnd", () => { process.nextTick(c); return eventEmitter; }); - + let generateContextSpy = sandbox.spy(CorrelationContextManager, "generateContextObject"); AppInsights .setup("ikey") @@ -437,7 +442,6 @@ describe("EndToEnd", () => { var readFile: sinon.SinonStub; var lstat: sinon.SinonStub; var mkdir: sinon.SinonStub; - var exists: sinon.SinonStub; var existsSync: sinon.SinonStub; var readdir: sinon.SinonStub; var readdirSync: sinon.SinonStub; @@ -450,21 +454,20 @@ describe("EndToEnd", () => { let nockScope: nock.Scope; beforeEach(() => { - nockScope = interceptor.reply(503, {}); + nockScope = interceptor.reply(503, { "errors": [{ "index": 0, "statusCode": 503 }] }); AppInsights.defaultClient = undefined; sandbox.stub(CorrelationIdManager, 'queryCorrelationId'); // TODO: Fix method of stubbing requests to allow CID to be part of E2E tests - writeFile = sandbox.stub(fs, 'writeFile'); + writeFile = sandbox.stub(FileSystemHelper, 'writeFileAsync'); writeFileSync = sandbox.stub(fs, 'writeFileSync'); - exists = sandbox.stub(fs, 'exists').yields(true); existsSync = sandbox.stub(fs, 'existsSync').returns(true); - readdir = sandbox.stub(fs, 'readdir').yields(null, ['1.ai.json']); + readdir = sandbox.stub(FileSystemHelper, 'readdirAsync').returns(['1.ai.json']); readdirSync = sandbox.stub(fs, 'readdirSync').returns(['1.ai.json']); - stat = sandbox.stub(fs, 'stat').yields(null, { isFile: () => true, size: 8000 }); + stat = sandbox.stub(FileSystemHelper, 'statAsync').returns({ isFile: () => true, size: 8000 }); statSync = sandbox.stub(fs, 'statSync').returns({ isFile: () => true, size: 8000 }); - lstat = sandbox.stub(fs, 'lstat').yields(null, { isDirectory: () => true }); - mkdir = sandbox.stub(fs, 'mkdir').yields(null); + lstat = sandbox.stub(FileSystemHelper, 'lstatAsync').returns({ isDirectory: () => true }); + mkdir = sandbox.stub(FileSystemHelper, 'mkdirAsync').returns(null); mkdirSync = sandbox.stub(fs, 'mkdirSync').returns(null); - readFile = sandbox.stub(fs, 'readFile').yields(null, ''); + readFile = sandbox.stub(FileSystemHelper, 'readFileAsync').returns(''); spawn = 
sandbox.stub(child_process, 'spawn').returns({ on: (type: string, cb: any) => { if (type === 'close') { @@ -513,7 +516,6 @@ describe("EndToEnd", () => { // yield for the caching behavior setImmediate(() => { assert.equal(writeFile.callCount, 1); - assert.equal(spawn.callCount, os.type() === "Windows_NT" ? 2 : 0); done(); }); } @@ -530,12 +532,12 @@ describe("EndToEnd", () => { callback: (response: any) => { // yield for the caching behavior setImmediate(() => { - assert(writeFile.callCount === 1); + assert.equal(writeFile.callCount, 1); assert.equal( path.dirname(writeFile.firstCall.args[0]), path.join(os.tmpdir(), Sender.TEMPDIR_PREFIX + "key")); assert.equal(writeFile.firstCall.args[2].mode, 0o600, "File must not have weak permissions"); - assert.equal(spawn.callCount, 0); // Should always be 0 because of caching after first call to ICACLS + assert.equal(spawn.callCount, 0); done(); }); } @@ -545,12 +547,12 @@ describe("EndToEnd", () => { it("uses WindowsIdentity to get the identity for ICACLS", (done) => { var client = new AppInsights.TelemetryClient("uniquekey"); client.channel.setUseDiskRetryCaching(true); - var origICACLS = (client.channel._sender.constructor).USE_ICACLS; - (client.channel._sender.constructor).USE_ICACLS = true; // Simulate ICACLS environment even on *nix + var origICACLS = FileAccessControl.USE_ICACLS; + FileAccessControl.USE_ICACLS = true; // Simulate ICACLS environment even on *nix // Clear ICACLS caches for test purposes - (client.channel._sender.constructor).ACL_IDENTITY = null; - (client.channel._sender.constructor).ACLED_DIRECTORIES = {}; + FileAccessControl["ACLED_DIRECTORIES"] = {}; + FileAccessControl["ACL_IDENTITY"] = null; client.trackEvent({ name: "test event" }); client.flush({ @@ -564,14 +566,14 @@ describe("EndToEnd", () => { assert(spawn.firstCall.args[0].indexOf('powershell.exe')); assert.equal(spawn.firstCall.args[1][0], "-Command"); assert.equal(spawn.firstCall.args[1][1], "[System.Security.Principal.WindowsIdentity]::GetCurrent().Name"); - assert.equal((client.channel._sender.constructor).ACL_IDENTITY, 'stdoutmock'); + assert.equal(FileAccessControl["ACL_IDENTITY"], 'stdoutmock'); // Next call should be to ICACLS (with the acquired identity) assert(spawn.lastCall.args[0].indexOf('icacls.exe')); assert.equal(spawn.lastCall.args[1][3], "/grant"); assert.equal(spawn.lastCall.args[1][4], "stdoutmock:(OI)(CI)F"); - (client.channel._sender.constructor).USE_ICACLS = origICACLS; + FileAccessControl["USE_ICACLS"] = origICACLS; done(); }); } @@ -594,12 +596,11 @@ describe("EndToEnd", () => { }); var client = new AppInsights.TelemetryClient("uniquekey"); client.channel.setUseDiskRetryCaching(true); - var origICACLS = (client.channel._sender.constructor).USE_ICACLS; - (client.channel._sender.constructor).USE_ICACLS = true; // Simulate ICACLS environment even on *nix - - // Set ICACLS caches for test purposes - (client.channel._sender.constructor).ACL_IDENTITY = null; - (client.channel._sender.constructor).ACLED_DIRECTORIES = {}; + var origICACLS = FileAccessControl.USE_ICACLS; + FileAccessControl.USE_ICACLS = true; // Simulate ICACLS environment even on *nix + // Clear ICACLS caches for test purposes + FileAccessControl["ACLED_DIRECTORIES"] = {}; + FileAccessControl["ACL_IDENTITY"] = null; client.trackEvent({ name: "test event" }); client.flush({ @@ -608,7 +609,7 @@ describe("EndToEnd", () => { setImmediate(() => { assert(writeFile.callCount === 0); assert.equal(tempSpawn.callCount, 1); - (client.channel._sender.constructor).USE_ICACLS = origICACLS; + 
FileAccessControl.USE_ICACLS = origICACLS; done(); }); } @@ -631,12 +632,12 @@ describe("EndToEnd", () => { }); var client = new AppInsights.TelemetryClient("uniquekey"); client.channel.setUseDiskRetryCaching(true); - var origICACLS = (client.channel._sender.constructor).USE_ICACLS; - (client.channel._sender.constructor).USE_ICACLS = true; // Simulate ICACLS environment even on *nix + var origICACLS = FileAccessControl.USE_ICACLS; + FileAccessControl.USE_ICACLS = true; // Simulate ICACLS environment even on *nix - // Set ICACLS caches for test purposes - (client.channel._sender.constructor).ACL_IDENTITY = null; - (client.channel._sender.constructor).ACLED_DIRECTORIES = {}; + // Clear ICACLS caches for test purposes + FileAccessControl["ACLED_DIRECTORIES"] = {}; + FileAccessControl["ACL_IDENTITY"] = null; client.trackEvent({ name: "test event" }); @@ -656,7 +657,7 @@ describe("EndToEnd", () => { // The call counts shouldnt have changed assert(writeFile.callCount === 0); assert.equal(tempSpawn.callCount, 1); - (client.channel._sender.constructor).USE_ICACLS = origICACLS; + FileAccessControl.USE_ICACLS = origICACLS; done(); }); } @@ -681,12 +682,12 @@ describe("EndToEnd", () => { var client = new AppInsights.TelemetryClient("uniquekey"); client.channel.setUseDiskRetryCaching(true); - var origICACLS = (client.channel._sender.constructor).USE_ICACLS; - (client.channel._sender.constructor).USE_ICACLS = true; // Simulate ICACLS environment even on *nix + var origICACLS = FileAccessControl.USE_ICACLS; + FileAccessControl.USE_ICACLS = true; // Simulate ICACLS environment even on *nix - // Set ICACLS caches for test purposes - (client.channel._sender.constructor).ACL_IDENTITY = null; - (client.channel._sender.constructor).ACLED_DIRECTORIES = {}; + // Clear ICACLS caches for test purposes + FileAccessControl["ACLED_DIRECTORIES"] = {}; + FileAccessControl["ACL_IDENTITY"] = null; client.trackEvent({ name: "test event" }); client.flush({ @@ -703,7 +704,7 @@ describe("EndToEnd", () => { // The call counts shouldnt have changed assert(writeFile.callCount === 0); assert.equal(tempSpawn.callCount, 1); - (client.channel._sender.constructor).USE_ICACLS = origICACLS; + FileAccessControl.USE_ICACLS = origICACLS; done(); }); } @@ -725,12 +726,12 @@ describe("EndToEnd", () => { var client = new AppInsights.TelemetryClient("uniquekey"); client.channel.setUseDiskRetryCaching(true); - var origICACLS = (client.channel._sender.constructor).USE_ICACLS; - (client.channel._sender.constructor).USE_ICACLS = true; // Simulate ICACLS environment even on *nix + var origICACLS = FileAccessControl.USE_ICACLS; + FileAccessControl.USE_ICACLS = true; // Simulate ICACLS environment even on *nix - // Set ICACLS caches for test purposes - (client.channel._sender.constructor).ACL_IDENTITY = 'testidentity'; // Don't use spawn for identity - (client.channel._sender.constructor).ACLED_DIRECTORIES = {}; + // Clear ICACLS caches for test purposes + FileAccessControl["ACLED_DIRECTORIES"] = {}; + FileAccessControl["ACL_IDENTITY"] = null; client.trackEvent({ name: "test event" }); client.flush({ @@ -739,7 +740,7 @@ describe("EndToEnd", () => { setImmediate(() => { assert(writeFile.callCount === 0); assert.equal(tempSpawn.callCount, 1); - (client.channel._sender.constructor).USE_ICACLS = origICACLS; + FileAccessControl.USE_ICACLS = origICACLS; done(); }); } @@ -748,7 +749,7 @@ describe("EndToEnd", () => { it("creates directory when nonexistent", (done) => { lstat.restore(); - sandbox.stub(fs, 'lstat').yields({ code: "ENOENT" }, null); + 
sandbox.stub(FileSystemHelper, 'lstatAsync').throws({ code: "ENOENT" }); var client = new AppInsights.TelemetryClient("key"); client.channel.setUseDiskRetryCaching(true); @@ -834,12 +835,12 @@ describe("EndToEnd", () => { it("use WindowsIdentity to get ACL identity when process crashes (ICACLS)", () => { var client = new AppInsights.TelemetryClient("key22"); client.channel.setUseDiskRetryCaching(true); - var origICACLS = (client.channel._sender.constructor).USE_ICACLS; - (client.channel._sender.constructor).USE_ICACLS = true; // Simulate ICACLS environment even on *nix + var origICACLS = FileAccessControl.USE_ICACLS; + FileAccessControl.USE_ICACLS = true; // Simulate ICACLS environment even on *nix - // Set ICACLS caches for test purposes - (client.channel._sender.constructor).ACL_IDENTITY = null; - (client.channel._sender.constructor).ACLED_DIRECTORIES = {}; + // Clear ICACLS caches for test purposes + FileAccessControl["ACLED_DIRECTORIES"] = {}; + FileAccessControl["ACL_IDENTITY"] = null; client.trackEvent({ name: "test event" }); client.channel.triggerSend(true); @@ -848,14 +849,14 @@ describe("EndToEnd", () => { assert(spawnSync.firstCall.args[0].indexOf('powershell.exe')); assert.equal(spawnSync.firstCall.args[1][0], "-Command"); assert.equal(spawnSync.firstCall.args[1][1], "[System.Security.Principal.WindowsIdentity]::GetCurrent().Name"); - assert.equal((client.channel._sender.constructor).ACL_IDENTITY, 'stdoutmock'); + assert.equal(FileAccessControl["ACL_IDENTITY"], 'stdoutmock'); // Next call should be to ICACLS (with the acquired identity) assert(spawnSync.lastCall.args[0].indexOf('icacls.exe')); assert.equal(spawnSync.lastCall.args[1][3], "/grant"); assert.equal(spawnSync.lastCall.args[1][4], "stdoutmock:(OI)(CI)F"); - (client.channel._sender.constructor).USE_ICACLS = origICACLS; + FileAccessControl.USE_ICACLS = origICACLS; }); it("refuses to cache payload when process crashes if ICACLS fails", () => { @@ -863,8 +864,8 @@ describe("EndToEnd", () => { var tempSpawnSync = sandbox.stub(child_process, 'spawnSync').returns({ status: 2000 }); var client = new AppInsights.TelemetryClient("key3"); // avoid icacls cache by making key unique client.channel.setUseDiskRetryCaching(true); - var origICACLS = (client.channel._sender.constructor).USE_ICACLS; - (client.channel._sender.constructor).USE_ICACLS = true; // Simulate ICACLS environment even on *nix + var origICACLS = FileAccessControl.USE_ICACLS; + FileAccessControl.USE_ICACLS = true; // Simulate ICACLS environment even on *nix client.trackEvent({ name: "test event" }); client.channel.triggerSend(true); @@ -874,7 +875,7 @@ describe("EndToEnd", () => { if (child_process.spawnSync) { assert.equal(tempSpawnSync.callCount, 1); - (client.channel._sender.constructor).USE_ICACLS = origICACLS; + FileAccessControl.USE_ICACLS = origICACLS; } }); }); diff --git a/Tests/Library/InternalAzureLogger.tests.ts b/Tests/Library/InternalAzureLogger.tests.ts new file mode 100644 index 000000000..dba7fbdb1 --- /dev/null +++ b/Tests/Library/InternalAzureLogger.tests.ts @@ -0,0 +1,114 @@ +import assert = require("assert"); +import fs = require("fs"); +import sinon = require("sinon"); + +import InternalAzureLogger = require("../../Library/InternalAzureLogger"); +import FileSystemHelper = require("../../Library/FileSystemHelper"); + +describe("Library/InternalAzureLogger", () => { + + var sandbox: sinon.SinonSandbox; + + before(() => { + sandbox = sinon.sandbox.create(); + }); + + beforeEach(() => { + InternalAzureLogger["_instance"] = null; + 
InternalAzureLogger["_fileCleanupTimer"] = setInterval(() => { }, 0); + }); + + afterEach(() => { + sandbox.restore(); + }); + + describe("Write to file", () => { + + let internalLogger: InternalAzureLogger = null; + var originalEnv = process.env["APPLICATIONINSIGHTS_LOG_DESTINATION"]; + + before(() => { + process.env["APPLICATIONINSIGHTS_LOG_DESTINATION"] = "file"; + internalLogger = InternalAzureLogger.getInstance(); + }); + + after(() => { + process.env["APPLICATIONINSIGHTS_LOG_DESTINATION"] = originalEnv; + }) + + it("should log message to file", (done) => { + var writeSpy = sandbox.spy(FileSystemHelper, "appendFileAsync"); + internalLogger["_storeToDisk"]("testMessage").then(() => { + assert.ok(writeSpy.called); + assert.ok(writeSpy.lastCall.args[0].indexOf("applicationinsights.log") > 0); + assert.equal(writeSpy.lastCall.args[1], "testMessage\r\n"); + done(); + }).catch((error) => { done(error); }); + }); + + it("should create backup file", (done) => { + var writeSpy = sandbox.spy(FileSystemHelper, "writeFileAsync"); + var readSpy = sandbox.spy(FileSystemHelper, "readFileAsync"); + internalLogger.maxSizeBytes = 0; + internalLogger["_storeToDisk"]("backupTestMessage").then(() => { + assert.ok(readSpy.calledOnce); + assert.ok(writeSpy.calledTwice); + //assert.equal(writeSpy.args[0][0], "C:\Users\hectorh\AppData\Local\Temp\appInsights-node\1636481017787.applicationinsights.log"); // Backup file format + assert.ok(typeof writeSpy.args[0][1]); + //assert.equal(writeSpy.args[1][0], "C:\Users\hectorh\AppData\Local\Temp\appInsights-node\applicationinsights.log"); // Main file format + assert.equal(writeSpy.args[1][1], "backupTestMessage\r\n"); + done(); + }).catch((error) => { done(error); }); + }); + + it("should create multiple backup files", (done) => { + var writeSpy = sandbox.spy(FileSystemHelper, "writeFileAsync"); + var readSpy = sandbox.spy(FileSystemHelper, "readFileAsync"); + internalLogger.maxSizeBytes = 0; + internalLogger.maxHistory = 2; + internalLogger["_storeToDisk"]("testMessage").then(() => { + internalLogger["_storeToDisk"]("testMessage").then(() => { + assert.equal(writeSpy.callCount, 4); + assert.ok(readSpy.calledTwice); + done(); + }).catch((error) => { done(error); }); + }).catch((error) => { done(error); }); + }); + + it("should start file cleanup task", () => { + InternalAzureLogger["_fileCleanupTimer"] = null; + var setIntervalSpy = sandbox.spy(global, "setInterval"); + internalLogger = InternalAzureLogger.getInstance(); + assert.ok(setIntervalSpy.called); + }); + + it("should remove backup files", (done) => { + var unlinkSpy = sandbox.spy(FileSystemHelper, "unlinkAsync"); + internalLogger.maxHistory = 0; + internalLogger["_fileCleanupTask"]().then(() => { + assert.ok(unlinkSpy.called); + FileSystemHelper.readdirAsync(internalLogger["_tempDir"]).then((files) => { + assert.equal(files.length, 1); + done(); + }); + }); + }); + + it("cleanup should keep configured number of backups", (done) => { + var unlinkSpy = sandbox.spy(FileSystemHelper, "unlinkAsync"); + internalLogger.maxHistory = 1; + internalLogger.maxSizeBytes = 0; + internalLogger["_storeToDisk"]("testMessage").then(() => { + internalLogger["_storeToDisk"]("testMessage").then(() => { + internalLogger["_fileCleanupTask"]().then(() => { + assert.ok(unlinkSpy.called); + FileSystemHelper.readdirAsync(internalLogger["_tempDir"]).then((files) => { + assert.equal(files.length, 2); + done(); + }).catch((error) => { done(error); }); + }).catch((error) => { done(error); }); + }).catch((error) => { done(error); }); + 
}).catch((error) => { done(error); }); + }); + }); +}); diff --git a/Tests/Library/Logging.tests.ts b/Tests/Library/Logging.tests.ts index 47db8b12e..31ee70dd5 100644 --- a/Tests/Library/Logging.tests.ts +++ b/Tests/Library/Logging.tests.ts @@ -2,27 +2,63 @@ import assert = require("assert"); import sinon = require("sinon"); import Logging = require("../../Library/Logging"); +import InternalAzureLogger = require("../../Library/InternalAzureLogger"); describe("Library/Logging", () => { + var sandbox: sinon.SinonSandbox; + + beforeEach(() => { + sandbox = sinon.sandbox.create(); + InternalAzureLogger["_instance"] = null; + }); + + afterEach(() => { + sandbox.restore(); + }); + + describe("Log to console", () => { + it("should log message to console", () => { + var env1 = <{ [id: string]: string }>{}; + env1["APPLICATIONINSIGHTS_LOG_DESTINATION"] = "console"; + var originalEnv = process.env; + process.env = env1; + Logging.enableDebug = true; + var consoleStub = sandbox.stub(console, "log"); + Logging.info("test"); + process.env = originalEnv; + assert.ok(consoleStub.called); + }); + + it("should not log message to console if disabled", () => { + var env1 = <{ [id: string]: string }>{}; + env1["APPLICATIONINSIGHTS_LOG_DESTINATION"] = "file"; + var originalEnv = process.env; + process.env = env1; + Logging.enableDebug = true; + var consoleStub = sandbox.stub(console, "log"); + Logging.info("test"); + process.env = originalEnv; + assert.ok(consoleStub.notCalled); + }); + }); + describe("#info(message, ...optionalParams: any)", () => { it("should do nothing if disabled", () => { var originalSetting = Logging.disableWarnings; Logging.enableDebug = false; - var infoStub = sinon.stub(Logging.logger, "info"); + var infoStub = sandbox.stub(InternalAzureLogger.getInstance().logger, "info"); Logging.info("test"); assert.ok(infoStub.notCalled); - infoStub.restore(); Logging.enableDebug = originalSetting; }); it("should log 'info' if called", () => { var originalSetting = Logging.enableDebug; Logging.enableDebug = true; - var infoStub = sinon.stub(Logging.logger, "info"); + var infoStub = sandbox.stub(InternalAzureLogger.getInstance().logger, "info"); Logging.info("test"); assert.ok(infoStub.calledOnce); - infoStub.restore(); Logging.enableDebug = originalSetting; }); }); @@ -31,21 +67,45 @@ describe("Library/Logging", () => { it("should do nothing if disabled", () => { var originalSetting = Logging.disableWarnings; Logging.disableWarnings = true - var warnStub = sinon.stub(Logging.logger, "warning"); + var warnStub = sandbox.stub(InternalAzureLogger.getInstance().logger, "warning"); Logging.warn("test"); assert.ok(warnStub.notCalled); - warnStub.restore(); Logging.enableDebug = originalSetting; }); it("should log 'warn' if enabled", () => { var originalSetting = Logging.disableWarnings; Logging.disableWarnings = false; - var warnStub = sinon.stub(Logging.logger, "warning"); + var warnStub = sandbox.stub(InternalAzureLogger.getInstance().logger, "warning"); Logging.warn("test"); assert.ok(warnStub.calledOnce); - warnStub.restore(); Logging.enableDebug = originalSetting; }); }); + + describe("Log to file", () => { + it("should log message to file", () => { + var env1 = <{ [id: string]: string }>{}; + env1["APPLICATIONINSIGHTS_LOG_DESTINATION"] = "file"; + var originalEnv = process.env; + process.env = env1; + Logging.enableDebug = true; + var fileStub = sandbox.stub(InternalAzureLogger.getInstance(), "_storeToDisk"); + Logging.info("test"); + process.env = originalEnv; + assert.ok(fileStub.called); + }); 
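For reference, the routing these "Log to file" tests exercise is driven entirely by environment variables read when InternalAzureLogger is first constructed. A minimal consumer-side sketch, assuming the published applicationinsights package; the directory name below is illustrative, while "file", "console" and "file+console" are the values the constructor recognizes:

    // Sketch: route SDK diagnostics to disk as well as the console.
    // These variables would normally come from the shell or host configuration.
    process.env.APPLICATIONINSIGHTS_LOG_DESTINATION = "file+console"; // "file" alone suppresses console output
    process.env.APPLICATIONINSIGHTS_LOGDIR = "insights-logs";         // illustrative; relative paths resolve against process.cwd()

    const appInsights = require("applicationinsights");
    appInsights.setup("<instrumentation key>").start();
    // SDK warnings (and, with Logging.enableDebug, info messages) now also land in
    // <cwd>/insights-logs/applicationinsights.log, capped at 50 KB with one backup file kept.
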
+ + it("should not log message to file if disabled", () => { + var env1 = <{ [id: string]: string }>{}; + env1["APPLICATIONINSIGHTS_LOG_DESTINATION"] = "console"; + var originalEnv = process.env; + process.env = env1; + Logging.enableDebug = true; + var fileStub = sandbox.stub(InternalAzureLogger.getInstance(), "_storeToDisk"); + Logging.info("test"); + process.env = originalEnv; + assert.ok(fileStub.notCalled); + }); + }); }); diff --git a/Tests/Library/Sender.tests.ts b/Tests/Library/Sender.tests.ts index c29d9729b..81ec71b6c 100644 --- a/Tests/Library/Sender.tests.ts +++ b/Tests/Library/Sender.tests.ts @@ -1,5 +1,4 @@ import assert = require("assert"); -import fs = require("fs"); import https = require("https"); import sinon = require("sinon"); import nock = require("nock"); @@ -12,6 +11,8 @@ import AuthorizationHandler = require("../../Library/AuthorizationHandler"); import Util = require("../../Library/Util"); import Statsbeat = require("../../AutoCollection/Statsbeat"); import Logging = require("../../Library/Logging"); +import { FileAccessControl } from "../../Library/FileAccessControl"; +import FileSystemHelper = require("../../Library/FileSystemHelper"); class SenderMock extends Sender { public getResendInterval() { @@ -55,12 +56,12 @@ describe("Library/Sender", () => { before(() => { sender = new Sender(new Config("2bb22222-bbbb-1ccc-8ddd-eeeeffff3333")); - (sender.constructor).USE_ICACLS = false; + FileAccessControl.USE_ICACLS = false; sender.setDiskRetryMode(true); }); after(() => { - (sender.constructor).USE_ICACLS = true; + FileAccessControl["USE_ICACLS"] = true; sender.setDiskRetryMode(false); }); @@ -70,7 +71,6 @@ describe("Library/Sender", () => { var warnStub = sandbox.stub(Logging, "warn"); assert.doesNotThrow(() => sender.send([a])); assert.ok(warnStub.calledOnce); - warnStub.restore(); }); it("should try to send telemetry from disk when 200", (done) => { @@ -273,14 +273,14 @@ describe("Library/Sender", () => { var sender: Sender; after(() => { - (sender.constructor).USE_ICACLS = true; + FileAccessControl["USE_ICACLS"] = true; sender.setDiskRetryMode(false); }); it("must clean old files from temp location", (done) => { - var deleteSpy = sandbox.spy(fs, "unlink"); + var deleteSpy = sandbox.spy(FileSystemHelper, "unlinkAsync"); sender = new Sender(new Config("3bb33333-bbbb-1ccc-8ddd-eeeeffff3333")); - (sender.constructor).USE_ICACLS = false; + FileAccessControl["USE_ICACLS"] = false; (sender.constructor).CLEANUP_TIMEOUT = 500; (sender.constructor).FILE_RETEMPTION_PERIOD = 1; var taskSpy = sandbox.spy(sender, "_fileCleanupTask"); diff --git a/package-lock.json b/package-lock.json index b9d5931eb..f31ae8a92 100644 --- a/package-lock.json +++ b/package-lock.json @@ -27,9 +27,9 @@ } }, "@azure/core-http": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@azure/core-http/-/core-http-2.2.1.tgz", - "integrity": "sha512-7ATnV3OGzCO2K9kMrh3NKUM8b4v+xasmlUhkNZz6uMbm+8XH/AexLkhRGsoo0GyKNlEGvyGEfytqTk0nUY2I4A==", + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/@azure/core-http/-/core-http-2.2.2.tgz", + "integrity": "sha512-V1DdoO9V/sFimKpdWoNBgsE+QUjQgpXYnxrTdUp5RyhsTJjvEVn/HKmTQXIHuLUUo6IyIWj+B+Dg4VaXse9dIA==", "requires": { "@azure/abort-controller": "^1.0.0", "@azure/core-asynciterator-polyfill": "^1.0.0", diff --git a/package.json b/package.json index 2806bf79b..8119728dd 100644 --- a/package.json +++ b/package.json @@ -55,11 +55,11 @@ "typescript": "4.1.2" }, "dependencies": { + "@azure/core-http": "^2.2.2", "@azure/logger": "^1.0.1", - 
"@azure/core-http": "^2.2.1", "@opentelemetry/api": "^1.0.3", - "@opentelemetry/semantic-conventions": "^0.24.0", "@opentelemetry/core": "^0.23.0", + "@opentelemetry/semantic-conventions": "^0.24.0", "@opentelemetry/tracing": "^0.23.0", "cls-hooked": "^4.2.2", "continuation-local-storage": "^3.2.1",