diff --git a/.idea/dictionaries/develar.xml b/.idea/dictionaries/develar.xml
index 436290e2dcd..e5ef57894ec 100644
--- a/.idea/dictionaries/develar.xml
+++ b/.idea/dictionaries/develar.xml
@@ -64,6 +64,7 @@
digester
dirname
disturl
+ dmgbuild
docdash
docstrap
dpkg
@@ -119,6 +120,7 @@
iconexclamation
iconset
iconutil
+ iconv
icvo
idms
idretry
@@ -168,6 +170,7 @@
lzop
macos
macroend
+ macroman
makeappx
makecert
makedeb
@@ -225,6 +228,7 @@
promisify
psmdcp
pulseaudio
+ pythonpath
rcedit
readpass
redux
diff --git a/.idea/electron-builder.iml b/.idea/electron-builder.iml
index e55862f7fbc..763d4fb57b0 100644
--- a/.idea/electron-builder.iml
+++ b/.idea/electron-builder.iml
@@ -1,5 +1,10 @@
+
+
+
+
+
diff --git a/package.json b/package.json
index 9891dc43b7b..fe333fb1d41 100644
--- a/package.json
+++ b/package.json
@@ -31,7 +31,7 @@
"ajv-keywords": "^2.1.0",
"archiver": "^2.0.0",
"async-exit-hook": "^2.0.1",
- "aws-sdk": "^2.101.0",
+ "aws-sdk": "^2.102.0",
"bluebird-lst": "^1.0.3",
"chalk": "^2.1.0",
"chromium-pickle-js": "^0.2.0",
@@ -46,6 +46,7 @@
"fcopy-pre-bundled": "0.3.4",
"fs-extra-p": "^4.4.0",
"hosted-git-info": "^2.5.0",
+ "iconv-lite": "^0.4.18",
"ini": "^1.3.4",
"is-ci": "^1.0.10",
"isbinaryfile": "^3.0.2",
diff --git a/packages/electron-builder/package.json b/packages/electron-builder/package.json
index 15840cfbc39..f97235417c5 100644
--- a/packages/electron-builder/package.json
+++ b/packages/electron-builder/package.json
@@ -84,7 +84,8 @@
"dotenv": "^4.0.0",
"dotenv-expand": "^4.0.1",
"temp-file": "^2.0.2",
- "ejs": "^2.5.7"
+ "ejs": "^2.5.7",
+ "iconv-lite": "^0.4.18"
},
"typings": "./out/index.d.ts",
"publishConfig": {
diff --git a/packages/electron-builder/src/macPackager.ts b/packages/electron-builder/src/macPackager.ts
index 08f478d757e..29c08195a15 100644
--- a/packages/electron-builder/src/macPackager.ts
+++ b/packages/electron-builder/src/macPackager.ts
@@ -10,7 +10,7 @@ import { DIR_TARGET, Platform, Target } from "./core"
import { MacOptions, MasBuildOptions } from "./options/macOptions"
import { Packager } from "./packager"
import { PlatformPackager } from "./platformPackager"
-import { DmgTarget } from "./targets/dmg"
+import { DmgTarget } from "./targets/dmg/dmg"
import { PkgTarget, prepareProductBuildArgs } from "./targets/pkg"
import { createCommonTarget, NoOpTarget } from "./targets/targetFactory"
import { AsyncTaskManager } from "./util/asyncTaskManager"
diff --git a/packages/electron-builder/src/options/macOptions.ts b/packages/electron-builder/src/options/macOptions.ts
index a4511684d6e..b5773fbb207 100644
--- a/packages/electron-builder/src/options/macOptions.ts
+++ b/packages/electron-builder/src/options/macOptions.ts
@@ -177,7 +177,7 @@ export interface DmgOptions extends TargetSpecificOptions {
/**
* The content — to customize icon locations.
*/
- readonly contents?: Array<DmgContent>
+ contents?: Array<DmgContent>
/**
* The disk image format. `ULFO` (lzfse-compressed image, OS X 10.11+ only).
diff --git a/packages/electron-builder/src/targets/dmg.ts b/packages/electron-builder/src/targets/dmg/dmg.ts
similarity index 85%
rename from packages/electron-builder/src/targets/dmg.ts
rename to packages/electron-builder/src/targets/dmg/dmg.ts
index 41696752948..bae3264d8ec 100644
--- a/packages/electron-builder/src/targets/dmg.ts
+++ b/packages/electron-builder/src/targets/dmg/dmg.ts
@@ -1,20 +1,21 @@
import BluebirdPromise from "bluebird-lst"
import { Arch, debug, exec, isEmptyOrSpaces, log, spawn, warn } from "electron-builder-util"
import { copyFile, exists, statOrNull } from "electron-builder-util/out/fs"
-import { executeFinally } from "electron-builder-util/out/promise"
import { outputFile, readFile, remove, unlink } from "fs-extra-p"
import * as path from "path"
import { deepAssign } from "read-config-file/out/deepAssign"
import sanitizeFileName from "sanitize-filename"
-import { Target } from "../core"
-import { DmgOptions, MacOptions } from "../options/macOptions"
-import { PlatformPackager } from "../platformPackager"
+import { DmgOptions, MacOptions } from "../../options/macOptions"
+import { PlatformPackager } from "../../platformPackager"
import { addLicenseToDmg } from "./dmgLicense"
+import { Target } from "../../core"
+import { attachAndExecute, detach } from "./dmgUtil"
+import { getTemplatePath, getVendorPath } from "../../util/pathManager"
export class DmgTarget extends Target {
readonly options: DmgOptions = this.packager.config.dmg || Object.create(null)
- private helperDir = path.join(__dirname, "..", "..", "templates", "dmg")
+ private helperDir = getTemplatePath("dmg")
constructor(private readonly packager: PlatformPackager<MacOptions>, readonly outDir: string) {
super("dmg")
@@ -25,6 +26,9 @@ export class DmgTarget extends Target {
log("Building DMG")
const specification = await this.computeDmgOptions()
+ const volumeName = sanitizeFileName(this.computeVolumeName(specification.title))
+ const artifactName = packager.expandArtifactNamePattern(packager.config.dmg, "dmg")
+ const artifactPath = path.join(this.outDir, artifactName)
const tempDmg = await packager.getTempFile(".dmg")
const backgroundDir = path.join(await packager.getTempDir("dmg"), ".background")
@@ -44,7 +48,6 @@ export class DmgTarget extends Target {
// allocate space for .DS_Store
await outputFile(path.join(backgroundDir, "DSStorePlaceHolder"), new Buffer(preallocatedSize))
- const volumeName = sanitizeFileName(this.computeVolumeName(specification.title))
//noinspection SpellCheckingInspection
await spawn("hdiutil", addVerboseIfNeed(["create",
"-srcfolder", backgroundDir,
@@ -65,18 +68,6 @@ export class DmgTarget extends Target {
specification.background == null ? remove(`${volumePath}/.background`) : unlink(`${volumePath}/.background/DSStorePlaceHolder`),
]
- let contents = specification.contents
- if (contents == null) {
- contents = [
- {
- x: 130, y: 220
- },
- {
- x: 410, y: 220, type: "link", path: "/Applications"
- }
- ]
- }
-
const window = specification.window!
const env: any = {
...process.env,
@@ -125,7 +116,7 @@ export class DmgTarget extends Target {
}
let entries = ""
- for (const c of contents) {
+ for (const c of specification.contents!!) {
if (c.path != null && c.path.endsWith(".app") && c.type !== "link") {
warn(`Do not specify path for application: "${c.path}". Actual path to app will be used instead.`)
}
@@ -150,7 +141,7 @@ export class DmgTarget extends Target {
await BluebirdPromise.all(promises)
await exec("/usr/bin/perl", [dmgPropertiesFile], {
- cwd: path.join(__dirname, "..", "..", "vendor"),
+ cwd: getVendorPath(),
env
})
@@ -163,9 +154,6 @@ export class DmgTarget extends Target {
return
}
- const artifactName = packager.expandArtifactNamePattern(packager.config.dmg, "dmg")
- const artifactPath = path.join(this.outDir, artifactName)
-
// dmg file must not exist otherwise hdiutil failed (https://github.com/electron-userland/electron-builder/issues/1308#issuecomment-282847594), so, -ov must be specified
//noinspection SpellCheckingInspection
const args = ["convert", tempDmg, "-ov", "-format", specification.format!, "-o", artifactPath]
@@ -274,46 +262,20 @@ export class DmgTarget extends Target {
}
}
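+ // no contents specified: default layout is the app icon on the left and an /Applications link on the right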
+ if (specification.contents == null) {
+ specification.contents = [
+ {
+ x: 130, y: 220
+ },
+ {
+ x: 410, y: 220, type: "link", path: "/Applications"
+ }
+ ]
+ }
return specification
}
}
-async function detach(name: string) {
- try {
- await exec("hdiutil", ["detach", name])
- }
- catch (e) {
- await new BluebirdPromise((resolve, reject) => {
- setTimeout(() => {
- exec("hdiutil", ["detach", "-force", name])
- .then(resolve)
- .catch(reject)
- }, 1000)
- })
- }
-}
-
-export async function attachAndExecute(dmgPath: string, readWrite: boolean, task: () => Promise<any>) {
- //noinspection SpellCheckingInspection
- const args = ["attach", "-noverify", "-noautoopen"]
- if (readWrite) {
- args.push("-readwrite")
- }
-
- // otherwise hangs
- // addVerboseIfNeed(args)
-
- args.push(dmgPath)
- const attachResult = await exec("hdiutil", args, {maxBuffer: 2 * 1024 * 1024})
- const deviceResult = attachResult == null ? null : /^(\/dev\/\w+)/.exec(attachResult)
- const device = deviceResult == null || deviceResult.length !== 2 ? null : deviceResult[1]
- if (device == null) {
- throw new Error(`Cannot mount: ${attachResult}`)
- }
-
- return await executeFinally(task(), () => detach(device))
-}
-
function addVerboseIfNeed(args: Array<string>): Array<string> {
if (process.env.DEBUG_DMG === "true") {
args.push("-verbose")
diff --git a/packages/electron-builder/src/targets/dmg/dmgLicense.ts b/packages/electron-builder/src/targets/dmg/dmgLicense.ts
new file mode 100644
index 00000000000..ac1de2b6ea9
--- /dev/null
+++ b/packages/electron-builder/src/targets/dmg/dmgLicense.ts
@@ -0,0 +1,296 @@
+import { debug, exec } from "electron-builder-util"
+import { outputFile, readFile } from "fs-extra-p"
+import { PlatformPackager } from "../../platformPackager"
+import { getLicenseFiles } from "../license"
+import { getVendorPath } from "../../util/pathManager"
+
+/** @internal */
+export async function addLicenseToDmg(packager: PlatformPackager<any>, dmgPath: string) {
+ const licenseFiles = await getLicenseFiles(packager)
+ if (licenseFiles.length === 0) {
+ return
+ }
+
+ if (debug.enabled) {
+ debug(`License files: ${licenseFiles.join(" ")}`)
+ }
+
+ let licenses = ""
+ const iconv = require("iconv-lite")
+ const indent = " "
+ for (const item of licenseFiles) {
+ const encoding = getEncoderName(item.langWithRegion)
+ if (!iconv.encodingExists(encoding)) {
+ throw new Error(`${encoding} is not supported by iconv-lite`)
+ }
+
+ const fileData = await readFile(item.file, "utf-8")
+ const data = iconv.encode(fileData, encoding)
+ const isRtf = item.file.endsWith(".rtf") || item.file.endsWith(".RTF")
+ licenses += [
+ `${indent}'${item.langWithRegion}': {`,
+ `${indent} 'data': bytearray.fromhex('${data.toString("hex")}'),`,
+ `${indent} 'isRtf': ${isRtf ? "True" : "False"}`,
+ `${indent}}`,
+ ].join("\n") + ",\n"
+ }
+
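+ // build a small Python script that passes the encoded license data to the vendored dmgbuild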
+ const script = [
+ "# -*- coding: utf-8 -*-",
+ "from __future__ import unicode_literals",
+ "import dmgbuild.licensing",
+ "license = {",
+ " 'default-language': 'en_US',",
+ " 'licenses': {",
+ licenses,
+ " }",
+ "}",
+ `dmgbuild.licensing.add_license('${dmgPath}', license)`
+ ]
+
+ const tempFile = await packager.getTempFile(".py")
+ await outputFile(tempFile, script.join("\n"))
+
+ if (debug.enabled) {
+ debug(`License: ${script.join("\n")}`)
+ }
+
+ await exec("hdiutil", ["unflatten", "-quiet", dmgPath])
+
+ await exec("/usr/bin/python", [tempFile], {
+ env: {
+ ...process.env,
+ PYTHONPATH: getVendorPath(),
+ LC_CTYPE: "en_US.UTF-8",
+ LANG: "en_US.UTF-8",
+ }
+ })
+
+ await exec("hdiutil", ["flatten", "-quiet", dmgPath])
+}
+
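+// License text is stored in classic Mac resources, which use a legacy per-language encoding:
+// resolve langWithRegion -> Mac region code -> script code -> iconv-lite encoding name.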
+function getEncoderName(langWithRegion: string): string {
+ const regionCode = regionCodes[langWithRegion]
+ if (regionCode == null) {
+ throw new Error(`Cannot determine region code for ${langWithRegion}`)
+ }
+
+ const scriptCode = scriptCodes[regionCode]
+ if (scriptCode == null) {
+ throw new Error(`Cannot determine script code for ${langWithRegion}`)
+ }
+
+ const encodingName = encodingsMap[scriptCode]
+ if (encodingName == null) {
+ throw new Error(`Cannot determine mac encoding for ${langWithRegion}`)
+ }
+ return encodingName
+}
+
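+// classic Mac region codes (verUS = 0, verFrance = 1, verBritain = 2, ...)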
+// noinspection SpellCheckingInspection
+const regionCodes: any = {
+ en_US: 0,
+ fr_FR: 1,
+ en_GB: 2,
+ de_DE: 3,
+ it_IT: 4,
+ nl_NL: 5,
+ nl_BE: 6,
+ sv_SE: 7,
+ es_ES: 8,
+ da_DK: 9,
+ pt_PT: 10,
+ fr_CA: 11,
+ nb_NO: 12,
+ he_IL: 13,
+ ja_JP: 14,
+ en_AU: 15,
+ ar: 16,
+ fi_FI: 17,
+ fr_CH: 18,
+ de_CH: 19,
+ el_GR: 20,
+ is_IS: 21,
+ mt_MT: 22,
+ el_CY: 23,
+ tr_TR: 24,
+ hi_IN: 33,
+ ur_PK: 34,
+ it_CH: 36,
+ ro_RO: 39,
+ grc: 40,
+ lt_LT: 41,
+ pl_PL: 42,
+ hu_HU: 43,
+ et_EE: 44,
+ lv_LV: 45,
+ se: 46,
+ fo_FO: 47,
+ fa_IR: 48,
+ ru_RU: 49,
+ ga_IE: 50,
+ ko_KR: 51,
+ zh_CN: 52,
+ zh_TW: 53,
+ th_TH: 54,
+ cs_CZ: 56,
+ sk_SK: 57,
+ bn: 60,
+ be_BY: 61,
+ uk_UA: 62,
+ sr_RS: 65,
+ sl_SI: 66,
+ mk_MK: 67,
+ hr_HR: 68,
+ pt_BR: 71,
+ bg_BG: 72,
+ ca_ES: 73,
+ gd: 75,
+ gv: 76,
+ br: 77,
+ iu_CA: 78,
+ cy: 79,
+ "ga-Latg_IE": 81,
+ en_CA: 82,
+ dz_BT: 83,
+ hy_AM: 84,
+ ka_GE: 85,
+ es_419: 86,
+ to_TO: 88,
+ fr_001: 91,
+ de_AT: 92,
+ gu_IN: 94,
+ pa: 95,
+ ur_IN: 96,
+ vi_VN: 97,
+ fr_BE: 98,
+ uz_UZ: 99,
+ en_SG: 100,
+ nn_NO: 101,
+ af_ZA: 102,
+ eo: 103,
+ mr_IN: 104,
+ bo: 105,
+ ne_NP: 106,
+ kl: 107,
+ en_IE: 108
+}
+
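+// Mac script code for each region code above (smRoman = 0, smJapanese = 1, smTradChinese = 2, ...)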
+const scriptCodes: any = {
+ 0: 0,
+ 1: 0,
+ 2: 0,
+ 3: 0,
+ 4: 0,
+ 5: 0,
+ 6: 0,
+ 7: 0,
+ 8: 0,
+ 9: 0,
+ 10: 0,
+ 11: 0,
+ 12: 0,
+ 13: 5,
+ 14: 1,
+ 15: 0,
+ 16: 4,
+ 17: 0,
+ 18: 0,
+ 19: 0,
+ 20: 6,
+ 21: 37,
+ 22: 0,
+ 23: 6,
+ 24: 35,
+ 25: 36,
+ 26: 0,
+ 27: 0,
+ 30: 0,
+ 31: 0,
+ 32: 0,
+ 33: 9,
+ 34: 4,
+ 35: 35,
+ 36: 0,
+ 37: 0,
+ 39: 38,
+ 40: 6,
+ 41: 29,
+ 42: 29,
+ 43: 29,
+ 44: 29,
+ 45: 29,
+ 46: 0,
+ 47: 37,
+ 48: 140,
+ 49: 7,
+ 50: 39,
+ 51: 3,
+ 52: 25,
+ 53: 2,
+ 54: 21,
+ 56: 29,
+ 57: 29,
+ 59: 29,
+ 60: 13,
+ 61: 7,
+ 62: 7,
+ 64: 6,
+ 65: 7,
+ 66: 36,
+ 67: 7,
+ 68: 36,
+ 70: 0,
+ 71: 0,
+ 72: 7,
+ 73: 0,
+ 75: 39,
+ 76: 39,
+ 77: 39,
+ 78: 236,
+ 79: 39,
+ 81: 40,
+ 82: 0,
+ 83: 26,
+ 84: 24,
+ 85: 23,
+ 86: 0,
+ 88: 0,
+ 91: 0,
+ 92: 0,
+ 94: 11,
+ 95: 10,
+ 96: 4,
+ 97: 30,
+ 98: 0,
+ 99: 7,
+ 100: 0,
+ 101: 0,
+ 102: 0,
+ 103: 0,
+ 104: 9,
+ 105: 26,
+ 106: 9,
+ 107: 0,
+ 108: 0
+}
+
+// https://github.com/ashtuchkin/iconv-lite/wiki/Supported-Encodings
+// noinspection SpellCheckingInspection
+const encodingsMap: any = {
+ 0: "macroman",
+ 1: "Shift_JIS",
+ 2: "Big5",
+ 3: "EUC-KR",
+ 4: "mac_arabic",
+ 6: "macgreek",
+ 7: "maccyrillic",
+ 21: "ISO-8859-1",
+ 25: "EUC-CN",
+ 29: "maccenteuro",
+ 35: "macturkish",
+ 36: "maccroatian",
+ 37: "maciceland",
+ 38: "macromania",
+ 140: "macfarsi"
+}
\ No newline at end of file
diff --git a/packages/electron-builder/src/targets/dmg/dmgUtil.ts b/packages/electron-builder/src/targets/dmg/dmgUtil.ts
new file mode 100644
index 00000000000..2cd04a7cbdf
--- /dev/null
+++ b/packages/electron-builder/src/targets/dmg/dmgUtil.ts
@@ -0,0 +1,40 @@
+import BluebirdPromise from "bluebird-lst"
+import { executeFinally } from "electron-builder-util/out/promise"
+import { exec } from "electron-builder-util"
+
+export async function attachAndExecute(dmgPath: string, readWrite: boolean, task: () => Promise<any>) {
+ //noinspection SpellCheckingInspection
+ const args = ["attach", "-noverify", "-noautoopen"]
+ if (readWrite) {
+ args.push("-readwrite")
+ }
+
+ // otherwise hangs
+ // addVerboseIfNeed(args)
+
+ args.push(dmgPath)
+ const attachResult = await exec("hdiutil", args, {maxBuffer: 2 * 1024 * 1024})
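+ // hdiutil attach prints the attached device node (e.g. /dev/disk2) at the start of its output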
+ const deviceResult = attachResult == null ? null : /^(\/dev\/\w+)/.exec(attachResult)
+ const device = deviceResult == null || deviceResult.length !== 2 ? null : deviceResult[1]
+ if (device == null) {
+ throw new Error(`Cannot mount: ${attachResult}`)
+ }
+
+ return await executeFinally(task(), () => detach(device))
+}
+
+/** @internal */
+export async function detach(name: string) {
+ try {
+ await exec("hdiutil", ["detach", name])
+ }
+ catch (e) {
+ await new BluebirdPromise((resolve, reject) => {
+ setTimeout(() => {
+ exec("hdiutil", ["detach", "-force", name])
+ .then(resolve)
+ .catch(reject)
+ }, 1000)
+ })
+ }
+}
\ No newline at end of file
diff --git a/packages/electron-builder/src/targets/dmgLicense.ts b/packages/electron-builder/src/targets/dmgLicense.ts
deleted file mode 100644
index 4b05a63ef8b..00000000000
--- a/packages/electron-builder/src/targets/dmgLicense.ts
+++ /dev/null
@@ -1,47 +0,0 @@
-import { debug, exec } from "electron-builder-util"
-import { readFile, writeFile } from "fs-extra-p"
-import * as path from "path"
-import { PlatformPackager } from "../platformPackager"
-import { getLicenseFiles } from "./license"
-
-/** @internal */
-export async function addLicenseToDmg(packager: PlatformPackager<any>, dmgPath: string) {
- // http://www.owsiak.org/?p=700
- const licenseFiles = await getLicenseFiles(packager)
- if (licenseFiles.length === 0) {
- return
- }
-
- if (debug.enabled) {
- debug(`License files: ${licenseFiles.join(" ")}`)
- }
-
- let data = await readFile(path.join(__dirname, "..", "..", "templates", "dmg", "license.txt"), "utf8")
- let counter = 5000
- for (const item of licenseFiles) {
- const kind = item.file.toLowerCase().endsWith(".rtf") ? "RTF" : "TEXT"
- data += `data '${kind}' (${counter}, "${item.langName} SLA") {\n`
-
- const hex = (await readFile(item.file)).toString("hex").toUpperCase()
- for (let i = 0; i < hex.length; i += 32) {
- data += '$"' + hex.substring(i, Math.min(i + 32, hex.length)) + '"\n'
- }
-
- data += "};\n\n"
- // noinspection SpellCheckingInspection
- data += `data 'styl' (${counter}, "${item.langName} SLA") {
- $"0003 0000 0000 000C 0009 0014 0000 0000"
- $"0000 0000 0000 0000 0027 000C 0009 0014"
- $"0100 0000 0000 0000 0000 0000 002A 000C"
- $"0009 0014 0000 0000 0000 0000 0000"
-};`
-
- counter++
- }
-
- const tempFile = await packager.getTempFile(".r")
- await writeFile(tempFile, data)
- await exec("hdiutil", ["unflatten", dmgPath])
- await exec("Rez", ["-a", tempFile, "-o", dmgPath])
- await exec("hdiutil", ["flatten", dmgPath])
-}
\ No newline at end of file
diff --git a/packages/electron-builder/src/targets/license.ts b/packages/electron-builder/src/targets/license.ts
index 77a771b05a4..56ccce95ef1 100644
--- a/packages/electron-builder/src/targets/license.ts
+++ b/packages/electron-builder/src/targets/license.ts
@@ -2,6 +2,7 @@ import * as path from "path"
import { PlatformPackager } from "../platformPackager"
import { langIdToName, toLangWithRegion } from "../util/langs"
+/** @internal */
export async function getLicenseFiles(packager: PlatformPackager<any>): Promise<Array<LicenseFile>> {
const files = (await packager.resourceList)
.filter(it => {
diff --git a/packages/electron-builder/src/targets/nsis/nsisUtil.ts b/packages/electron-builder/src/targets/nsis/nsisUtil.ts
index 4793102d556..8fdf9432417 100644
--- a/packages/electron-builder/src/targets/nsis/nsisUtil.ts
+++ b/packages/electron-builder/src/targets/nsis/nsisUtil.ts
@@ -1,10 +1,10 @@
import BluebirdPromise from "bluebird-lst"
import { Arch, subTask } from "electron-builder-util"
import { unlink } from "fs-extra-p"
-import * as path from "path"
import { NsisTarget } from "./nsis"
+import { getTemplatePath } from "../../util/pathManager"
-export const nsisTemplatesDir = path.join(__dirname, "..", "..", "..", "templates", "nsis")
+export const nsisTemplatesDir = getTemplatePath("nsis")
interface PackageFileInfo {
file: string
diff --git a/packages/electron-builder/src/util/pathManager.ts b/packages/electron-builder/src/util/pathManager.ts
new file mode 100644
index 00000000000..93f9886066c
--- /dev/null
+++ b/packages/electron-builder/src/util/pathManager.ts
@@ -0,0 +1,11 @@
+import * as path from "path"
+
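+// compiled code lives in out/util, so the package root (containing templates/ and vendor/) is two levels up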
+const root = path.join(__dirname, "..", "..")
+
+export function getTemplatePath(file: string) {
+ return path.join(root, "templates", file)
+}
+
+export function getVendorPath(file?: string) {
+ return file == null ? path.join(root, "vendor") : path.join(root, "vendor", file)
+}
\ No newline at end of file
diff --git a/packages/electron-builder/templates/dmg/settings.py b/packages/electron-builder/templates/dmg/settings.py
new file mode 100644
index 00000000000..2b791f14a1b
--- /dev/null
+++ b/packages/electron-builder/templates/dmg/settings.py
@@ -0,0 +1,230 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import biplist
+import os.path
+
+application = defines.get('app', '/Applications/TextEdit.app')
+appname = os.path.basename(application)
+
+def icon_from_app(app_path):
+ plist_path = os.path.join(app_path, 'Contents', 'Info.plist')
+ plist = biplist.readPlist(plist_path)
+ icon_name = plist['CFBundleIconFile']
+ icon_root,icon_ext = os.path.splitext(icon_name)
+ if not icon_ext:
+ icon_ext = '.icns'
+ icon_name = icon_root + icon_ext
+ return os.path.join(app_path, 'Contents', 'Resources', icon_name)
+
+# .. Basics ....................................................................
+
+# Volume format (see hdiutil create -help)
+format = defines.get('format')
+
+# Compression level (if relevant)
+compression_level = defines.get('compressionLevel', 9)
+
+# Volume size
+size = defines.get('size', None)
+
+# Files to include
+files = [ application ]
+
+# Symlinks to create
+symlinks = { 'Applications': '/Applications' }
+
+# Volume icon
+#
+# You can either define icon, in which case that icon file will be copied to the
+# image, *or* you can define badge_icon, in which case the icon file you specify
+# will be used to badge the system's Removable Disk icon
+#
+#icon = '/path/to/icon.icns'
+badge_icon = icon_from_app(application)
+
+# Where to put the icons
+icon_locations = {
+ appname: (140, 120),
+ 'Applications': (500, 120)
+ }
+
+# .. Window configuration ......................................................
+
+# Background
+#
+# This is a STRING containing any of the following:
+#
+# #3344ff - web-style RGB color
+# #34f - web-style RGB color, short form (#34f == #3344ff)
+# rgb(1,0,0) - RGB color, each value is between 0 and 1
+# hsl(120,1,.5) - HSL (hue saturation lightness) color
+# hwb(300,0,0) - HWB (hue whiteness blackness) color
+# cmyk(0,1,0,0) - CMYK color
+# goldenrod - X11/SVG named color
+# builtin-arrow - A simple built-in background with a blue arrow
+# /foo/bar/baz.png - The path to an image file
+#
+# The hue component in hsl() and hwb() may include a unit; it defaults to
+# degrees ('deg'), but also supports radians ('rad') and gradians ('grad'
+# or 'gon').
+#
+# Other color components may be expressed either in the range 0 to 1, or
+# as percentages (e.g. 60% is equivalent to 0.6).
+background = 'builtin-arrow'
+
+show_status_bar = False
+show_tab_view = False
+show_toolbar = False
+show_pathbar = False
+show_sidebar = False
+sidebar_width = 180
+
+# Window position in ((x, y), (w, h)) format
+window_rect = ((100, 100), (640, 280))
+
+# Select the default view; must be one of
+#
+# 'icon-view'
+# 'list-view'
+# 'column-view'
+# 'coverflow'
+#
+default_view = 'icon-view'
+
+# General view configuration
+show_icon_preview = False
+
+# Set these to True to force inclusion of icon/list view settings (otherwise
+# we only include settings for the default view)
+include_icon_view_settings = 'auto'
+include_list_view_settings = 'auto'
+
+# .. Icon view configuration ...................................................
+
+arrange_by = None
+grid_offset = (0, 0)
+grid_spacing = 100
+scroll_position = (0, 0)
+label_pos = 'bottom' # or 'right'
+text_size = 16
+icon_size = 128
+
+# .. List view configuration ...................................................
+
+# Column names are as follows:
+#
+# name
+# date-modified
+# date-created
+# date-added
+# date-last-opened
+# size
+# kind
+# label
+# version
+# comments
+#
+list_icon_size = 16
+list_text_size = 12
+list_scroll_position = (0, 0)
+list_sort_by = 'name'
+list_use_relative_dates = True
+list_calculate_all_sizes = False
+list_columns = ('name', 'date-modified', 'size', 'kind', 'date-added')
+list_column_widths = {
+ 'name': 300,
+ 'date-modified': 181,
+ 'date-created': 181,
+ 'date-added': 181,
+ 'date-last-opened': 181,
+ 'size': 97,
+ 'kind': 115,
+ 'label': 100,
+ 'version': 75,
+ 'comments': 300,
+ }
+list_column_sort_directions = {
+ 'name': 'ascending',
+ 'date-modified': 'descending',
+ 'date-created': 'descending',
+ 'date-added': 'descending',
+ 'date-last-opened': 'descending',
+ 'size': 'descending',
+ 'kind': 'ascending',
+ 'label': 'ascending',
+ 'version': 'ascending',
+ 'comments': 'ascending',
+ }
+
+# .. License configuration .....................................................
+
+# Text in the license configuration is stored in the resources, which means
+# it gets stored in a legacy Mac encoding according to the language. dmgbuild
+# will *try* to convert Unicode strings to the appropriate encoding, *but*
+# you should be aware that Python doesn't support all of the necessary encodings;
+# in many cases you will need to encode the text yourself and use byte strings
+# instead here.
+
+# Recognized language names are:
+#
+# af_ZA, ar, be_BY, bg_BG, bn, bo, br, ca_ES, cs_CZ, cy, da_DK, de_AT, de_CH,
+# de_DE, dz_BT, el_CY, el_GR, en_AU, en_CA, en_GB, en_IE, en_SG, en_US, eo,
+# es_419, es_ES, et_EE, fa_IR, fi_FI, fo_FO, fr_001, fr_BE, fr_CA, fr_CH,
+# fr_FR, ga-Latg_IE, ga_IE, gd, grc, gu_IN, gv, he_IL, hi_IN, hr_HR, hu_HU,
+# hy_AM, is_IS, it_CH, it_IT, iu_CA, ja_JP, ka_GE, kl, ko_KR, lt_LT, lv_LV,
+# mk_MK, mr_IN, mt_MT, nb_NO, ne_NP, nl_BE, nl_NL, nn_NO, pa, pl_PL, pt_BR,
+# pt_PT, ro_RO, ru_RU, se, sk_SK, sl_SI, sr_RS, sv_SE, th_TH, to_TO, tr_TR,
+# uk_UA, ur_IN, ur_PK, uz_UZ, vi_VN, zh_CN, zh_TW
+
+# license = {
+# 'default-language': 'en_US',
+# 'licenses': {
+# # For each language, the text of the license. This can be plain text,
+# # RTF (in which case it must start "{\rtf1"), or a path to a file
+# # containing the license text. If you're using RTF,
+# # watch out for Python escaping (or read it from a file).
+# 'English': b'''{\\rtf1\\ansi\\ansicpg1252\\cocoartf1504\\cocoasubrtf820
+# {\\fonttbl\\f0\\fnil\\fcharset0 Helvetica-Bold;\\f1\\fnil\\fcharset0 Helvetica;}
+# {\\colortbl;\\red255\\green255\\blue255;\\red0\\green0\\blue0;}
+# {\\*\\expandedcolortbl;;\\cssrgb\\c0\\c0\\c0;}
+# \\paperw11905\\paperh16837\\margl1133\\margr1133\\margb1133\\margt1133
+# \\deftab720
+# \\pard\\pardeftab720\\sa160\\partightenfactor0
+
+# \\f0\\b\\fs60 \\cf2 \\expnd0\\expndtw0\\kerning0
+# \\up0 \\nosupersub \\ulnone \\outl0\\strokewidth0 \\strokec2 Test License\\
+# \\pard\\pardeftab720\\sa160\\partightenfactor0
+
+# \\fs36 \\cf2 \\strokec2 What is this?\\
+# \\pard\\pardeftab720\\sa160\\partightenfactor0
+
+# \\f1\\b0\\fs22 \\cf2 \\strokec2 This is the English license. It says what you are allowed to do with this software.\\
+# \\
+# }''',
+# },
+# 'buttons': {
+# # For each language, text for the buttons on the licensing window.
+# #
+# # Default buttons and text are built-in for the following languages:
+# #
+# # English (en_US), German (de_DE), Spanish (es_ES), French (fr_FR),
+# # Italian (it_IT), Japanese (ja_JP), Dutch (nl_NL), Swedish (sv_SE),
+# # Brazilian Portuguese (pt_BR), Simplified Chinese (zh_CN),
+# # Traditional Chinese (zh_TW), Danish (da_DK), Finnish (fi_FI),
+# # Korean (ko_KR), Norwegian (nb_NO)
+# #
+# # You don't need to specify them for those languages; if you fail to
+# # specify them for some other language, English will be used instead.
+
+# 'en_US': (
+# b'English',
+# b'Agree',
+# b'Disagree',
+# b'Print',
+# b'Save',
+# b'If you agree with the terms of this license, press "Agree" to '
+# b'install the software. If you do not agree, press "Disagree".'
+# ),
+# },
+# }
diff --git a/packages/electron-builder/vendor/biplist/__init__.py b/packages/electron-builder/vendor/biplist/__init__.py
new file mode 100644
index 00000000000..17d35178d14
--- /dev/null
+++ b/packages/electron-builder/vendor/biplist/__init__.py
@@ -0,0 +1,873 @@
+"""biplist -- a library for reading and writing binary property list files.
+
+Binary Property List (plist) files provide a faster and smaller serialization
+format for property lists on OS X. This is a library for generating binary
+plists which can be read by OS X, iOS, or other clients.
+
+The API models the plistlib API, and will call through to plistlib when
+XML serialization or deserialization is required.
+
+To generate plists with UID values, wrap the values with the Uid object. The
+value must be an int.
+
+To generate plists with NSData/CFData values, wrap the values with the
+Data object. The value must be a string.
+
+Date values can only be datetime.datetime objects.
+
+The exceptions InvalidPlistException and NotBinaryPlistException may be
+thrown to indicate that the data cannot be serialized or deserialized as
+a binary plist.
+
+Plist generation example:
+
+ from biplist import *
+ from datetime import datetime
+ plist = {'aKey':'aValue',
+ '0':1.322,
+ 'now':datetime.now(),
+ 'list':[1,2,3],
+ 'tuple':('a','b','c')
+ }
+ try:
+ writePlist(plist, "example.plist")
+ except (InvalidPlistException, NotBinaryPlistException), e:
+ print "Something bad happened:", e
+
+Plist parsing example:
+
+ from biplist import *
+ try:
+ plist = readPlist("example.plist")
+ print plist
+ except (InvalidPlistException, NotBinaryPlistException), e:
+ print "Not a plist:", e
+"""
+
+from collections import namedtuple
+import datetime
+import io
+import math
+import plistlib
+from struct import pack, unpack, unpack_from
+from struct import error as struct_error
+import sys
+import time
+
+try:
+ unicode
+ unicodeEmpty = r''
+except NameError:
+ unicode = str
+ unicodeEmpty = ''
+try:
+ long
+except NameError:
+ long = int
+try:
+ {}.iteritems
+ iteritems = lambda x: x.iteritems()
+except AttributeError:
+ iteritems = lambda x: x.items()
+
+__all__ = [
+ 'Uid', 'Data', 'readPlist', 'writePlist', 'readPlistFromString',
+ 'writePlistToString', 'InvalidPlistException', 'NotBinaryPlistException'
+]
+
+# Apple uses Jan 1, 2001 as a base for all plist date/times.
+apple_reference_date = datetime.datetime.utcfromtimestamp(978307200)
+
+class Uid(object):
+ """Wrapper around integers for representing UID values. This
+ is used in keyed archiving."""
+ integer = 0
+ def __init__(self, integer):
+ self.integer = integer
+
+ def __repr__(self):
+ return "Uid(%d)" % self.integer
+
+ def __eq__(self, other):
+ if isinstance(self, Uid) and isinstance(other, Uid):
+ return self.integer == other.integer
+ return False
+
+ def __cmp__(self, other):
+ return self.integer - other.integer
+
+ def __lt__(self, other):
+ return self.integer < other.integer
+
+ def __hash__(self):
+ return self.integer
+
+ def __int__(self):
+ return int(self.integer)
+
+class Data(bytes):
+ """Wrapper around bytes to distinguish Data values."""
+
+class InvalidPlistException(Exception):
+ """Raised when the plist is incorrectly formatted."""
+
+class NotBinaryPlistException(Exception):
+ """Raised when a binary plist was expected but not encountered."""
+
+def readPlist(pathOrFile):
+ """Raises NotBinaryPlistException, InvalidPlistException"""
+ didOpen = False
+ result = None
+ if isinstance(pathOrFile, (bytes, unicode)):
+ pathOrFile = open(pathOrFile, 'rb')
+ didOpen = True
+ try:
+ reader = PlistReader(pathOrFile)
+ result = reader.parse()
+ except NotBinaryPlistException as e:
+ try:
+ pathOrFile.seek(0)
+ result = None
+ if hasattr(plistlib, 'loads'):
+ contents = None
+ if isinstance(pathOrFile, (bytes, unicode)):
+ with open(pathOrFile, 'rb') as f:
+ contents = f.read()
+ else:
+ contents = pathOrFile.read()
+ result = plistlib.loads(contents)
+ else:
+ result = plistlib.readPlist(pathOrFile)
+ result = wrapDataObject(result, for_binary=True)
+ except Exception as e:
+ raise InvalidPlistException(e)
+ finally:
+ if didOpen:
+ pathOrFile.close()
+ return result
+
+def wrapDataObject(o, for_binary=False):
+ if isinstance(o, Data) and not for_binary:
+ v = sys.version_info
+ if not (v[0] >= 3 and v[1] >= 4):
+ o = plistlib.Data(o)
+ elif isinstance(o, (bytes, plistlib.Data)) and for_binary:
+ if hasattr(o, 'data'):
+ o = Data(o.data)
+ elif isinstance(o, tuple):
+ o = wrapDataObject(list(o), for_binary)
+ o = tuple(o)
+ elif isinstance(o, list):
+ for i in range(len(o)):
+ o[i] = wrapDataObject(o[i], for_binary)
+ elif isinstance(o, dict):
+ for k in o:
+ o[k] = wrapDataObject(o[k], for_binary)
+ return o
+
+def writePlist(rootObject, pathOrFile, binary=True):
+ if not binary:
+ rootObject = wrapDataObject(rootObject, binary)
+ if hasattr(plistlib, "dump"):
+ if isinstance(pathOrFile, (bytes, unicode)):
+ with open(pathOrFile, 'wb') as f:
+ return plistlib.dump(rootObject, f)
+ else:
+ return plistlib.dump(rootObject, pathOrFile)
+ else:
+ return plistlib.writePlist(rootObject, pathOrFile)
+ else:
+ didOpen = False
+ if isinstance(pathOrFile, (bytes, unicode)):
+ pathOrFile = open(pathOrFile, 'wb')
+ didOpen = True
+ writer = PlistWriter(pathOrFile)
+ result = writer.writeRoot(rootObject)
+ if didOpen:
+ pathOrFile.close()
+ return result
+
+def readPlistFromString(data):
+ return readPlist(io.BytesIO(data))
+
+def writePlistToString(rootObject, binary=True):
+ if not binary:
+ rootObject = wrapDataObject(rootObject, binary)
+ if hasattr(plistlib, "dumps"):
+ return plistlib.dumps(rootObject)
+ elif hasattr(plistlib, "writePlistToBytes"):
+ return plistlib.writePlistToBytes(rootObject)
+ else:
+ return plistlib.writePlistToString(rootObject)
+ else:
+ ioObject = io.BytesIO()
+ writer = PlistWriter(ioObject)
+ writer.writeRoot(rootObject)
+ return ioObject.getvalue()
+
+def is_stream_binary_plist(stream):
+ stream.seek(0)
+ header = stream.read(7)
+ if header == b'bplist0':
+ return True
+ else:
+ return False
+
+PlistTrailer = namedtuple('PlistTrailer', 'offsetSize, objectRefSize, offsetCount, topLevelObjectNumber, offsetTableOffset')
+PlistByteCounts = namedtuple('PlistByteCounts', 'nullBytes, boolBytes, intBytes, realBytes, dateBytes, dataBytes, stringBytes, uidBytes, arrayBytes, setBytes, dictBytes')
+
+class PlistReader(object):
+ file = None
+ contents = ''
+ offsets = None
+ trailer = None
+ currentOffset = 0
+
+ def __init__(self, fileOrStream):
+ """Raises NotBinaryPlistException."""
+ self.reset()
+ self.file = fileOrStream
+
+ def parse(self):
+ return self.readRoot()
+
+ def reset(self):
+ self.trailer = None
+ self.contents = ''
+ self.offsets = []
+ self.currentOffset = 0
+
+ def readRoot(self):
+ result = None
+ self.reset()
+ # Get the header, make sure it's a valid file.
+ if not is_stream_binary_plist(self.file):
+ raise NotBinaryPlistException()
+ self.file.seek(0)
+ self.contents = self.file.read()
+ if len(self.contents) < 32:
+ raise InvalidPlistException("File is too short.")
+ trailerContents = self.contents[-32:]
+ try:
+ self.trailer = PlistTrailer._make(unpack("!xxxxxxBBQQQ", trailerContents))
+ offset_size = self.trailer.offsetSize * self.trailer.offsetCount
+ offset = self.trailer.offsetTableOffset
+ offset_contents = self.contents[offset:offset+offset_size]
+ offset_i = 0
+ while offset_i < self.trailer.offsetCount:
+ begin = self.trailer.offsetSize*offset_i
+ tmp_contents = offset_contents[begin:begin+self.trailer.offsetSize]
+ tmp_sized = self.getSizedInteger(tmp_contents, self.trailer.offsetSize)
+ self.offsets.append(tmp_sized)
+ offset_i += 1
+ self.setCurrentOffsetToObjectNumber(self.trailer.topLevelObjectNumber)
+ result = self.readObject()
+ except TypeError as e:
+ raise InvalidPlistException(e)
+ return result
+
+ def setCurrentOffsetToObjectNumber(self, objectNumber):
+ self.currentOffset = self.offsets[objectNumber]
+
+ def readObject(self):
+ result = None
+ tmp_byte = self.contents[self.currentOffset:self.currentOffset+1]
+ marker_byte = unpack("!B", tmp_byte)[0]
+ format = (marker_byte >> 4) & 0x0f
+ extra = marker_byte & 0x0f
+ self.currentOffset += 1
+
+ def proc_extra(extra):
+ if extra == 0b1111:
+ #self.currentOffset += 1
+ extra = self.readObject()
+ return extra
+
+ # bool, null, or fill byte
+ if format == 0b0000:
+ if extra == 0b0000:
+ result = None
+ elif extra == 0b1000:
+ result = False
+ elif extra == 0b1001:
+ result = True
+ elif extra == 0b1111:
+ pass # fill byte
+ else:
+ raise InvalidPlistException("Invalid object found at offset: %d" % (self.currentOffset - 1))
+ # int
+ elif format == 0b0001:
+ extra = proc_extra(extra)
+ result = self.readInteger(pow(2, extra))
+ # real
+ elif format == 0b0010:
+ extra = proc_extra(extra)
+ result = self.readReal(extra)
+ # date
+ elif format == 0b0011 and extra == 0b0011:
+ result = self.readDate()
+ # data
+ elif format == 0b0100:
+ extra = proc_extra(extra)
+ result = self.readData(extra)
+ # ascii string
+ elif format == 0b0101:
+ extra = proc_extra(extra)
+ result = self.readAsciiString(extra)
+ # Unicode string
+ elif format == 0b0110:
+ extra = proc_extra(extra)
+ result = self.readUnicode(extra)
+ # uid
+ elif format == 0b1000:
+ result = self.readUid(extra)
+ # array
+ elif format == 0b1010:
+ extra = proc_extra(extra)
+ result = self.readArray(extra)
+ # set
+ elif format == 0b1100:
+ extra = proc_extra(extra)
+ result = set(self.readArray(extra))
+ # dict
+ elif format == 0b1101:
+ extra = proc_extra(extra)
+ result = self.readDict(extra)
+ else:
+ raise InvalidPlistException("Invalid object found: {format: %s, extra: %s}" % (bin(format), bin(extra)))
+ return result
+
+ def readInteger(self, byteSize):
+ result = 0
+ original_offset = self.currentOffset
+ data = self.contents[self.currentOffset:self.currentOffset + byteSize]
+ result = self.getSizedInteger(data, byteSize, as_number=True)
+ self.currentOffset = original_offset + byteSize
+ return result
+
+ def readReal(self, length):
+ result = 0.0
+ to_read = pow(2, length)
+ data = self.contents[self.currentOffset:self.currentOffset+to_read]
+ if length == 2: # 4 bytes
+ result = unpack('>f', data)[0]
+ elif length == 3: # 8 bytes
+ result = unpack('>d', data)[0]
+ else:
+ raise InvalidPlistException("Unknown real of length %d bytes" % to_read)
+ return result
+
+ def readRefs(self, count):
+ refs = []
+ i = 0
+ while i < count:
+ fragment = self.contents[self.currentOffset:self.currentOffset+self.trailer.objectRefSize]
+ ref = self.getSizedInteger(fragment, len(fragment))
+ refs.append(ref)
+ self.currentOffset += self.trailer.objectRefSize
+ i += 1
+ return refs
+
+ def readArray(self, count):
+ result = []
+ values = self.readRefs(count)
+ i = 0
+ while i < len(values):
+ self.setCurrentOffsetToObjectNumber(values[i])
+ value = self.readObject()
+ result.append(value)
+ i += 1
+ return result
+
+ def readDict(self, count):
+ result = {}
+ keys = self.readRefs(count)
+ values = self.readRefs(count)
+ i = 0
+ while i < len(keys):
+ self.setCurrentOffsetToObjectNumber(keys[i])
+ key = self.readObject()
+ self.setCurrentOffsetToObjectNumber(values[i])
+ value = self.readObject()
+ result[key] = value
+ i += 1
+ return result
+
+ def readAsciiString(self, length):
+ result = unpack("!%ds" % length, self.contents[self.currentOffset:self.currentOffset+length])[0]
+ self.currentOffset += length
+ return str(result.decode('ascii'))
+
+ def readUnicode(self, length):
+ actual_length = length*2
+ data = self.contents[self.currentOffset:self.currentOffset+actual_length]
+ # unpack not needed?!! data = unpack(">%ds" % (actual_length), data)[0]
+ self.currentOffset += actual_length
+ return data.decode('utf_16_be')
+
+ def readDate(self):
+ result = unpack(">d", self.contents[self.currentOffset:self.currentOffset+8])[0]
+ # Use timedelta to workaround time_t size limitation on 32-bit python.
+ result = datetime.timedelta(seconds=result) + apple_reference_date
+ self.currentOffset += 8
+ return result
+
+ def readData(self, length):
+ result = self.contents[self.currentOffset:self.currentOffset+length]
+ self.currentOffset += length
+ return Data(result)
+
+ def readUid(self, length):
+ return Uid(self.readInteger(length+1))
+
+ def getSizedInteger(self, data, byteSize, as_number=False):
+ """Numbers of 8 bytes are signed integers when they refer to numbers, but unsigned otherwise."""
+ result = 0
+ # 1, 2, and 4 byte integers are unsigned
+ if byteSize == 1:
+ result = unpack('>B', data)[0]
+ elif byteSize == 2:
+ result = unpack('>H', data)[0]
+ elif byteSize == 4:
+ result = unpack('>L', data)[0]
+ elif byteSize == 8:
+ if as_number:
+ result = unpack('>q', data)[0]
+ else:
+ result = unpack('>Q', data)[0]
+ elif byteSize <= 16:
+ # Handle odd-sized or integers larger than 8 bytes
+ # Don't naively go over 16 bytes, in order to prevent infinite loops.
+ result = 0
+ if hasattr(int, 'from_bytes'):
+ result = int.from_bytes(data, 'big')
+ else:
+ for byte in data:
+ if not isinstance(byte, int): # Python3.0-3.1.x return ints, 2.x return str
+ byte = unpack_from('>B', byte)[0]
+ result = (result << 8) | byte
+ else:
+ raise InvalidPlistException("Encountered integer longer than 16 bytes.")
+ return result
+
+class HashableWrapper(object):
+ def __init__(self, value):
+ self.value = value
+ def __repr__(self):
+ return "" % [self.value]
+
+class BoolWrapper(object):
+ def __init__(self, value):
+ self.value = value
+ def __repr__(self):
+ return "" % self.value
+
+class FloatWrapper(object):
+ _instances = {}
+ def __new__(klass, value):
+ # Ensure FloatWrapper(x) for a given float x is always the same object
+ wrapper = klass._instances.get(value)
+ if wrapper is None:
+ wrapper = object.__new__(klass)
+ wrapper.value = value
+ klass._instances[value] = wrapper
+ return wrapper
+ def __repr__(self):
+ return "" % self.value
+
+class StringWrapper(object):
+ __instances = {}
+
+ encodedValue = None
+ encoding = None
+
+ def __new__(cls, value):
+ '''Ensure we only have a only one instance for any string,
+ and that we encode ascii as 1-byte-per character when possible'''
+
+ encodedValue = None
+
+ for encoding in ('ascii', 'utf_16_be'):
+ try:
+ encodedValue = value.encode(encoding)
+ except: pass
+ if encodedValue is not None:
+ if encodedValue not in cls.__instances:
+ cls.__instances[encodedValue] = super(StringWrapper, cls).__new__(cls)
+ cls.__instances[encodedValue].encodedValue = encodedValue
+ cls.__instances[encodedValue].encoding = encoding
+ return cls.__instances[encodedValue]
+
+ raise ValueError('Unable to get ascii or utf_16_be encoding for %s' % repr(value))
+
+ def __len__(self):
+ '''Return roughly the number of characters in this string (half the byte length)'''
+ if self.encoding == 'ascii':
+ return len(self.encodedValue)
+ else:
+ return len(self.encodedValue)//2
+
+ def __lt__(self, other):
+ return self.encodedValue < other.encodedValue
+
+ @property
+ def encodingMarker(self):
+ if self.encoding == 'ascii':
+ return 0b0101
+ else:
+ return 0b0110
+
+ def __repr__(self):
+ return '<StringWrapper: %s: %s>' % (self.encoding, self.encodedValue)
+
+class PlistWriter(object):
+ header = b'bplist00bybiplist1.0'
+ file = None
+ byteCounts = None
+ trailer = None
+ computedUniques = None
+ writtenReferences = None
+ referencePositions = None
+ wrappedTrue = None
+ wrappedFalse = None
+
+ def __init__(self, file):
+ self.reset()
+ self.file = file
+ self.wrappedTrue = BoolWrapper(True)
+ self.wrappedFalse = BoolWrapper(False)
+
+ def reset(self):
+ self.byteCounts = PlistByteCounts(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+ self.trailer = PlistTrailer(0, 0, 0, 0, 0)
+
+ # A set of all the uniques which have been computed.
+ self.computedUniques = set()
+ # A list of all the uniques which have been written.
+ self.writtenReferences = {}
+ # A dict of the positions of the written uniques.
+ self.referencePositions = {}
+
+ def positionOfObjectReference(self, obj):
+ """If the given object has been written already, return its
+ position in the offset table. Otherwise, return None."""
+ return self.writtenReferences.get(obj)
+
+ def writeRoot(self, root):
+ """
+ Strategy is:
+ - write header
+ - wrap root object so everything is hashable
+ - compute size of objects which will be written
+ - need to do this in order to know how large the object refs
+ will be in the list/dict/set reference lists
+ - write objects
+ - keep objects in writtenReferences
+ - keep positions of object references in referencePositions
+ - write object references with the length computed previously
- compute object reference length
+ - write object reference positions
+ - write trailer
+ """
+ output = self.header
+ wrapped_root = self.wrapRoot(root)
+ self.computeOffsets(wrapped_root, asReference=True, isRoot=True)
+ self.trailer = self.trailer._replace(**{'objectRefSize':self.intSize(len(self.computedUniques))})
+ self.writeObjectReference(wrapped_root, output)
+ output = self.writeObject(wrapped_root, output, setReferencePosition=True)
+
+ # output size at this point is an upper bound on how big the
+ # object reference offsets need to be.
+ self.trailer = self.trailer._replace(**{
+ 'offsetSize':self.intSize(len(output)),
+ 'offsetCount':len(self.computedUniques),
+ 'offsetTableOffset':len(output),
+ 'topLevelObjectNumber':0
+ })
+
+ output = self.writeOffsetTable(output)
+ output += pack('!xxxxxxBBQQQ', *self.trailer)
+ self.file.write(output)
+
+ def wrapRoot(self, root):
+ if isinstance(root, bool):
+ if root is True:
+ return self.wrappedTrue
+ else:
+ return self.wrappedFalse
+ elif isinstance(root, float):
+ return FloatWrapper(root)
+ elif isinstance(root, set):
+ n = set()
+ for value in root:
+ n.add(self.wrapRoot(value))
+ return HashableWrapper(n)
+ elif isinstance(root, dict):
+ n = {}
+ for key, value in iteritems(root):
+ n[self.wrapRoot(key)] = self.wrapRoot(value)
+ return HashableWrapper(n)
+ elif isinstance(root, list):
+ n = []
+ for value in root:
+ n.append(self.wrapRoot(value))
+ return HashableWrapper(n)
+ elif isinstance(root, tuple):
+ n = tuple([self.wrapRoot(value) for value in root])
+ return HashableWrapper(n)
+ elif isinstance(root, (str, unicode)) and not isinstance(root, Data):
+ return StringWrapper(root)
+ elif isinstance(root, bytes):
+ return Data(root)
+ else:
+ return root
+
+ def incrementByteCount(self, field, incr=1):
+ self.byteCounts = self.byteCounts._replace(**{field:self.byteCounts.__getattribute__(field) + incr})
+
+ def computeOffsets(self, obj, asReference=False, isRoot=False):
+ def check_key(key):
+ if key is None:
+ raise InvalidPlistException('Dictionary keys cannot be null in plists.')
+ elif isinstance(key, Data):
+ raise InvalidPlistException('Data cannot be dictionary keys in plists.')
+ elif not isinstance(key, StringWrapper):
+ raise InvalidPlistException('Keys must be strings.')
+
+ def proc_size(size):
+ if size > 0b1110:
+ size += self.intSize(size)
+ return size
+ # If this should be a reference, then we keep a record of it in the
+ # uniques table.
+ if asReference:
+ if obj in self.computedUniques:
+ return
+ else:
+ self.computedUniques.add(obj)
+
+ if obj is None:
+ self.incrementByteCount('nullBytes')
+ elif isinstance(obj, BoolWrapper):
+ self.incrementByteCount('boolBytes')
+ elif isinstance(obj, Uid):
+ size = self.intSize(obj.integer)
+ self.incrementByteCount('uidBytes', incr=1+size)
+ elif isinstance(obj, (int, long)):
+ size = self.intSize(obj)
+ self.incrementByteCount('intBytes', incr=1+size)
+ elif isinstance(obj, FloatWrapper):
+ size = self.realSize(obj)
+ self.incrementByteCount('realBytes', incr=1+size)
+ elif isinstance(obj, datetime.datetime):
+ self.incrementByteCount('dateBytes', incr=2)
+ elif isinstance(obj, Data):
+ size = proc_size(len(obj))
+ self.incrementByteCount('dataBytes', incr=1+size)
+ elif isinstance(obj, StringWrapper):
+ size = proc_size(len(obj))
+ self.incrementByteCount('stringBytes', incr=1+size)
+ elif isinstance(obj, HashableWrapper):
+ obj = obj.value
+ if isinstance(obj, set):
+ size = proc_size(len(obj))
+ self.incrementByteCount('setBytes', incr=1+size)
+ for value in obj:
+ self.computeOffsets(value, asReference=True)
+ elif isinstance(obj, (list, tuple)):
+ size = proc_size(len(obj))
+ self.incrementByteCount('arrayBytes', incr=1+size)
+ for value in obj:
+ asRef = True
+ self.computeOffsets(value, asReference=True)
+ elif isinstance(obj, dict):
+ size = proc_size(len(obj))
+ self.incrementByteCount('dictBytes', incr=1+size)
+ for key, value in iteritems(obj):
+ check_key(key)
+ self.computeOffsets(key, asReference=True)
+ self.computeOffsets(value, asReference=True)
+ else:
+ raise InvalidPlistException("Unknown object type: %s (%s)" % (type(obj).__name__, repr(obj)))
+
+ def writeObjectReference(self, obj, output):
+ """Tries to write an object reference, adding it to the references
+ table. Does not write the actual object bytes or set the reference
+ position. Returns a tuple of whether the object was a new reference
+ (True if it was, False if it already was in the reference table)
+ and the new output.
+ """
+ position = self.positionOfObjectReference(obj)
+ if position is None:
+ self.writtenReferences[obj] = len(self.writtenReferences)
+ output += self.binaryInt(len(self.writtenReferences) - 1, byteSize=self.trailer.objectRefSize)
+ return (True, output)
+ else:
+ output += self.binaryInt(position, byteSize=self.trailer.objectRefSize)
+ return (False, output)
+
+ def writeObject(self, obj, output, setReferencePosition=False):
+ """Serializes the given object to the output. Returns output.
+ If setReferencePosition is True, will set the position the
+ object was written.
+ """
+ def proc_variable_length(format, length):
+ result = b''
+ if length > 0b1110:
+ result += pack('!B', (format << 4) | 0b1111)
+ result = self.writeObject(length, result)
+ else:
+ result += pack('!B', (format << 4) | length)
+ return result
+
+ def timedelta_total_seconds(td):
+ # Shim for Python 2.6 compatibility, which doesn't have total_seconds.
+ # Make one argument a float to ensure the right calculation.
+ return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10.0**6) / 10.0**6
+
+ if setReferencePosition:
+ self.referencePositions[obj] = len(output)
+
+ if obj is None:
+ output += pack('!B', 0b00000000)
+ elif isinstance(obj, BoolWrapper):
+ if obj.value is False:
+ output += pack('!B', 0b00001000)
+ else:
+ output += pack('!B', 0b00001001)
+ elif isinstance(obj, Uid):
+ size = self.intSize(obj.integer)
+ output += pack('!B', (0b1000 << 4) | size - 1)
+ output += self.binaryInt(obj.integer)
+ elif isinstance(obj, (int, long)):
+ byteSize = self.intSize(obj)
+ root = math.log(byteSize, 2)
+ output += pack('!B', (0b0001 << 4) | int(root))
+ output += self.binaryInt(obj, as_number=True)
+ elif isinstance(obj, FloatWrapper):
+ # just use doubles
+ output += pack('!B', (0b0010 << 4) | 3)
+ output += self.binaryReal(obj)
+ elif isinstance(obj, datetime.datetime):
+ try:
+ timestamp = (obj - apple_reference_date).total_seconds()
+ except AttributeError:
+ timestamp = timedelta_total_seconds(obj - apple_reference_date)
+ output += pack('!B', 0b00110011)
+ output += pack('!d', float(timestamp))
+ elif isinstance(obj, Data):
+ output += proc_variable_length(0b0100, len(obj))
+ output += obj
+ elif isinstance(obj, StringWrapper):
+ output += proc_variable_length(obj.encodingMarker, len(obj))
+ output += obj.encodedValue
+ elif isinstance(obj, bytes):
+ output += proc_variable_length(0b0101, len(obj))
+ output += obj
+ elif isinstance(obj, HashableWrapper):
+ obj = obj.value
+ if isinstance(obj, (set, list, tuple)):
+ if isinstance(obj, set):
+ output += proc_variable_length(0b1100, len(obj))
+ else:
+ output += proc_variable_length(0b1010, len(obj))
+
+ objectsToWrite = []
+ for objRef in sorted(obj) if isinstance(obj, set) else obj:
+ (isNew, output) = self.writeObjectReference(objRef, output)
+ if isNew:
+ objectsToWrite.append(objRef)
+ for objRef in objectsToWrite:
+ output = self.writeObject(objRef, output, setReferencePosition=True)
+ elif isinstance(obj, dict):
+ output += proc_variable_length(0b1101, len(obj))
+ keys = []
+ values = []
+ objectsToWrite = []
+ for key, value in sorted(iteritems(obj)):
+ keys.append(key)
+ values.append(value)
+ for key in keys:
+ (isNew, output) = self.writeObjectReference(key, output)
+ if isNew:
+ objectsToWrite.append(key)
+ for value in values:
+ (isNew, output) = self.writeObjectReference(value, output)
+ if isNew:
+ objectsToWrite.append(value)
+ for objRef in objectsToWrite:
+ output = self.writeObject(objRef, output, setReferencePosition=True)
+ return output
+
+ def writeOffsetTable(self, output):
+ """Writes all of the object reference offsets."""
+ all_positions = []
+ writtenReferences = list(self.writtenReferences.items())
+ writtenReferences.sort(key=lambda x: x[1])
+ for obj,order in writtenReferences:
+ # Porting note: Elsewhere we deliberately replace empty unicode strings
+ # with empty binary strings, but the empty unicode string
+ # goes into writtenReferences. This isn't an issue in Py2
+ # because u'' and b'' have the same hash; but it is in
+ # Py3, where they don't.
+ if bytes != str and obj == unicodeEmpty:
+ obj = b''
+ position = self.referencePositions.get(obj)
+ if position is None:
+ raise InvalidPlistException("Error while writing offsets table. Object not found. %s" % obj)
+ output += self.binaryInt(position, self.trailer.offsetSize)
+ all_positions.append(position)
+ return output
+
+ def binaryReal(self, obj):
+ # just use doubles
+ result = pack('>d', obj.value)
+ return result
+
+ def binaryInt(self, obj, byteSize=None, as_number=False):
+ result = b''
+ if byteSize is None:
+ byteSize = self.intSize(obj)
+ if byteSize == 1:
+ result += pack('>B', obj)
+ elif byteSize == 2:
+ result += pack('>H', obj)
+ elif byteSize == 4:
+ result += pack('>L', obj)
+ elif byteSize == 8:
+ if as_number:
+ result += pack('>q', obj)
+ else:
+ result += pack('>Q', obj)
+ elif byteSize <= 16:
+ try:
+ result = pack('>Q', 0) + pack('>Q', obj)
+ except struct_error as e:
+ raise InvalidPlistException("Unable to pack integer %d: %s" % (obj, e))
+ else:
+ raise InvalidPlistException("Core Foundation can't handle integers with size greater than 16 bytes.")
+ return result
+
+ def intSize(self, obj):
+ """Returns the number of bytes necessary to store the given integer."""
+ # SIGNED
+ if obj < 0: # Signed integer, always 8 bytes
+ return 8
+ # UNSIGNED
+ elif obj <= 0xFF: # 1 byte
+ return 1
+ elif obj <= 0xFFFF: # 2 bytes
+ return 2
+ elif obj <= 0xFFFFFFFF: # 4 bytes
+ return 4
+ # SIGNED
+ # 0x7FFFFFFFFFFFFFFF is the max.
+ elif obj <= 0x7FFFFFFFFFFFFFFF: # 8 bytes signed
+ return 8
+ elif obj <= 0xffffffffffffffff: # 8 bytes unsigned
+ return 16
+ else:
+ raise InvalidPlistException("Core Foundation can't handle integers with size greater than 8 bytes.")
+
+ def realSize(self, obj):
+ return 8
diff --git a/packages/electron-builder/vendor/dmgbuild/__init__.py b/packages/electron-builder/vendor/dmgbuild/__init__.py
new file mode 100644
index 00000000000..e7f985c3223
--- /dev/null
+++ b/packages/electron-builder/vendor/dmgbuild/__init__.py
@@ -0,0 +1,3 @@
+from .core import build_dmg
+
+__all__ = ['build_dmg']
diff --git a/packages/electron-builder/vendor/dmgbuild/badge.py b/packages/electron-builder/vendor/dmgbuild/badge.py
new file mode 100644
index 00000000000..159a53708b2
--- /dev/null
+++ b/packages/electron-builder/vendor/dmgbuild/badge.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from Quartz import *
+import math
+
+_REMOVABLE_DISK_PATH = '/System/Library/Extensions/IOStorageFamily.kext/Contents/Resources/Removable.icns'
+
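+# Composite the badge image over every representation of the system removable-disk icon.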
+def badge_disk_icon(badge_file, output_file):
+ # Load the Removable disk icon
+ url = CFURLCreateWithFileSystemPath(None, _REMOVABLE_DISK_PATH,
+ kCFURLPOSIXPathStyle, False)
+ backdrop = CGImageSourceCreateWithURL(url, None)
+ backdropCount = CGImageSourceGetCount(backdrop)
+
+ # Load the badge
+ url = CFURLCreateWithFileSystemPath(None, badge_file,
+ kCFURLPOSIXPathStyle, False)
+ badge = CGImageSourceCreateWithURL(url, None)
+ assert badge is not None, 'Unable to process image file: %s' % badge_file
+ badgeCount = CGImageSourceGetCount(badge)
+
+ # Set up a destination for our target
+ url = CFURLCreateWithFileSystemPath(None, output_file,
+ kCFURLPOSIXPathStyle, False)
+ target = CGImageDestinationCreateWithURL(url, 'com.apple.icns',
+ backdropCount, None)
+
+ # Get the RGB colorspace
+ rgbColorSpace = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB)
+
+ # Scale
+ scale = 1.0
+
+ # Perspective transform
+ corners = ((0.2, 0.95), (0.8, 0.95), (0.85, 0.35), (0.15, 0.35))
+
+ # Translation
+ position = (0.5, 0.5)
+
+ for n in range(backdropCount):
+ props = CGImageSourceCopyPropertiesAtIndex(backdrop, n, None)
+ width = props['PixelWidth']
+ height = props['PixelHeight']
+ dpi = props['DPIWidth']
+ depth = props['Depth']
+
+ # Choose the best sized badge image
+ bestWidth = None
+ bestHeight = None
+ bestBadge = None
+ bestDepth = None
+ bestDPI = None
+ for m in range(badgeCount):
+ badgeProps = CGImageSourceCopyPropertiesAtIndex(badge, m, None)
+ badgeWidth = badgeProps['PixelWidth']
+ badgeHeight = badgeProps['PixelHeight']
+ badgeDPI = badgeProps['DPIWidth']
+ badgeDepth = badgeProps['Depth']
+
+ if bestBadge is None or (badgeWidth <= width
+ and (bestWidth > width
+ or badgeWidth > bestWidth
+ or (badgeWidth == bestWidth
+ and badgeDPI == dpi
+ and badgeDepth <= depth
+ and (bestDepth is None
+ or badgeDepth > bestDepth)))):
+ bestBadge = m
+ bestWidth = badgeWidth
+ bestHeight = badgeHeight
+ bestDPI = badgeDPI
+ bestDepth = badgeDepth
+
+ badgeImage = CGImageSourceCreateImageAtIndex(badge, bestBadge, None)
+ badgeCI = CIImage.imageWithCGImage_(badgeImage)
+
+ backgroundImage = CGImageSourceCreateImageAtIndex(backdrop, n, None)
+ backgroundCI = CIImage.imageWithCGImage_(backgroundImage)
+
+ compositor = CIFilter.filterWithName_('CISourceOverCompositing')
+ lanczos = CIFilter.filterWithName_('CILanczosScaleTransform')
+ perspective = CIFilter.filterWithName_('CIPerspectiveTransform')
+ transform = CIFilter.filterWithName_('CIAffineTransform')
+
+ lanczos.setValue_forKey_(badgeCI, kCIInputImageKey)
+ lanczos.setValue_forKey_(scale * float(width)/bestWidth, kCIInputScaleKey)
+ lanczos.setValue_forKey_(1.0, kCIInputAspectRatioKey)
+
+ topLeft = (width * scale * corners[0][0],
+ width * scale * corners[0][1])
+ topRight = (width * scale * corners[1][0],
+ width * scale * corners[1][1])
+ bottomRight = (width * scale * corners[2][0],
+ width * scale * corners[2][1])
+ bottomLeft = (width * scale * corners[3][0],
+ width * scale * corners[3][1])
+
+ out = lanczos.valueForKey_(kCIOutputImageKey)
+ if width >= 16:
+ perspective.setValue_forKey_(out, kCIInputImageKey)
+ perspective.setValue_forKey_(CIVector.vectorWithX_Y_(*topLeft),
+ 'inputTopLeft')
+ perspective.setValue_forKey_(CIVector.vectorWithX_Y_(*topRight),
+ 'inputTopRight')
+ perspective.setValue_forKey_(CIVector.vectorWithX_Y_(*bottomRight),
+ 'inputBottomRight')
+ perspective.setValue_forKey_(CIVector.vectorWithX_Y_(*bottomLeft),
+ 'inputBottomLeft')
+ out = perspective.valueForKey_(kCIOutputImageKey)
+
+ tfm = NSAffineTransform.transform()
+ tfm.translateXBy_yBy_(math.floor((position[0] - 0.5 * scale) * width),
+ math.floor((position[1] - 0.5 * scale) * height))
+
+ transform.setValue_forKey_(out, kCIInputImageKey)
+ transform.setValue_forKey_(tfm, 'inputTransform')
+ out = transform.valueForKey_(kCIOutputImageKey)
+
+ compositor.setValue_forKey_(out, kCIInputImageKey)
+ compositor.setValue_forKey_(backgroundCI, kCIInputBackgroundImageKey)
+
+ result = compositor.valueForKey_(kCIOutputImageKey)
+
+ cgContext = CGBitmapContextCreate(None,
+ width,
+ height,
+ 8,
+ 0,
+ rgbColorSpace,
+ kCGImageAlphaPremultipliedLast)
+ context = CIContext.contextWithCGContext_options_(cgContext, None)
+
+ context.drawImage_inRect_fromRect_(result,
+ ((0, 0), (width, height)),
+ ((0, 0), (width, height)))
+
+ image = CGBitmapContextCreateImage(cgContext)
+
+ CGImageDestinationAddImage(target, image, props)
+
+ CGImageDestinationFinalize(target)
+
diff --git a/packages/electron-builder/vendor/dmgbuild/colors.py b/packages/electron-builder/vendor/dmgbuild/colors.py
new file mode 100644
index 00000000000..1d252a6bd19
--- /dev/null
+++ b/packages/electron-builder/vendor/dmgbuild/colors.py
@@ -0,0 +1,494 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+import math
+
+class Color (object):
+ def to_rgb(self):
+ raise Exception('Must implement to_rgb() in subclasses')
+
+class RGB (Color):
+ def __init__(self, r, g, b):
+ self.r = r
+ self.g = g
+ self.b = b
+
+ def to_rgb(self):
+ return self
+
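+# CSS3-style HSL color: hue in degrees, saturation and lightness in 0..1.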
+class HSL (Color):
+ def __init__(self, h, s, l):
+ self.h = h
+ self.s = s
+ self.l = l
+
+ @staticmethod
+ def _hue_to_rgb(t1, t2, hue):
+ if hue < 0:
+ hue += 6
+ elif hue >= 6:
+ hue -= 6
+
+ if hue < 1:
+ return (t2 - t1) * hue + t1
+ elif hue < 3:
+ return t2
+ elif hue < 4:
+ return (t2 - t1) * (4 - hue) + t1
+ else:
+ return t1
+
+ def to_rgb(self):
+ hue = self.h / 60.0
+ if self.l <= 0.5:
+ t2 = self.l * (self.s + 1)
+ else:
+ t2 = self.l + self.s - (self.l * self.s)
+ t1 = self.l * 2 - t2
+ r = self._hue_to_rgb(t1, t2, hue + 2)
+ g = self._hue_to_rgb(t1, t2, hue)
+ b = self._hue_to_rgb(t1, t2, hue - 2)
+ return RGB(r, g, b)
+
+class HWB (Color):
+ def __init__(self, h, w, b):
+ self.h = h
+ self.w = w
+ self.b = b
+
+ @staticmethod
+ def _hue_to_rgb(hue):
+ if hue < 0:
+ hue += 6
+ elif hue >= 6:
+ hue -= 6
+
+ if hue < 1:
+ return hue
+ elif hue < 3:
+ return 1
+ elif hue < 4:
+ return (4 - hue)
+ else:
+ return 0
+
+ def to_rgb(self):
+ hue = self.h / 60.0
+ t1 = 1 - self.w - self.b
+ r = self._hue_to_rgb(hue + 2) * t1 + self.w
+ g = self._hue_to_rgb(hue) * t1 + self.w
+ b = self._hue_to_rgb(hue - 2) * t1 + self.w
+ return RGB(r, g, b)
+
+class CMYK (Color):
+ def __init__(self, c, m, y, k):
+ self.c = c
+ self.m = m
+ self.y = y
+ self.k = k
+
+ def to_rgb(self):
+ r = 1.0 - min(1.0, self.c + self.k)
+ g = 1.0 - min(1.0, self.m + self.k)
+ b = 1.0 - min(1.0, self.y + self.k)
+ return RGB(r, g, b)
+
+class Gray (Color):
+ def __init__(self, g):
+ self.g = g
+
+ def to_rgb(self):
+ return RGB(self.g, self.g, self.g)
+
+_x11_colors = {
+ 'aliceblue': (240, 248, 255),
+ 'antiquewhite': (250, 235, 215),
+ 'aqua': ( 0, 255, 255),
+ 'aquamarine': (127, 255, 212),
+ 'azure': (240, 255, 255),
+ 'beige': (245, 245, 220),
+ 'bisque': (255, 228, 196),
+ 'black': ( 0, 0, 0),
+ 'blanchedalmond': (255, 235, 205),
+ 'blue': ( 0, 0, 255),
+ 'blueviolet': (138, 43, 226),
+ 'brown': (165, 42, 42),
+ 'burlywood': (222, 184, 135),
+ 'cadetblue': ( 95, 158, 160),
+ 'chartreuse': (127, 255, 0),
+ 'chocolate': (210, 105, 30),
+ 'coral': (255, 127, 80),
+ 'cornflowerblue': (100, 149, 237),
+ 'cornsilk': (255, 248, 220),
+ 'crimson': (220, 20, 60),
+ 'cyan': ( 0, 255, 255),
+ 'darkblue': ( 0, 0, 139),
+ 'darkcyan': ( 0, 139, 139),
+ 'darkgoldenrod': (184, 134, 11),
+ 'darkgray': (169, 169, 169),
+ 'darkgreen': ( 0, 100, 0),
+ 'darkgrey': (169, 169, 169),
+ 'darkkhaki': (189, 183, 107),
+ 'darkmagenta': (139, 0, 139),
+ 'darkolivegreen': ( 85, 107, 47),
+ 'darkorange': (255, 140, 0),
+ 'darkorchid': (153, 50, 204),
+ 'darkred': (139, 0, 0),
+ 'darksalmon': (233, 150, 122),
+ 'darkseagreen': (143, 188, 143),
+ 'darkslateblue': ( 72, 61, 139),
+ 'darkslategray': ( 47, 79, 79),
+ 'darkslategrey': ( 47, 79, 79),
+ 'darkturquoise': ( 0, 206, 209),
+ 'darkviolet': (148, 0, 211),
+ 'deeppink': (255, 20, 147),
+ 'deepskyblue': ( 0, 191, 255),
+ 'dimgray': (105, 105, 105),
+ 'dimgrey': (105, 105, 105),
+ 'dodgerblue': ( 30, 144, 255),
+ 'firebrick': (178, 34, 34),
+ 'floralwhite': (255, 250, 240),
+ 'forestgreen': ( 34, 139, 34),
+ 'fuchsia': (255, 0, 255),
+ 'gainsboro': (220, 220, 220),
+ 'ghostwhite': (248, 248, 255),
+ 'gold': (255, 215, 0),
+ 'goldenrod': (218, 165, 32),
+ 'gray': (128, 128, 128),
+ 'grey': (128, 128, 128),
+ 'green': ( 0, 128, 0),
+ 'greenyellow': (173, 255, 47),
+ 'honeydew': (240, 255, 240),
+ 'hotpink': (255, 105, 180),
+ 'indianred': (205, 92, 92),
+ 'indigo': ( 75, 0, 130),
+ 'ivory': (255, 255, 240),
+ 'khaki': (240, 230, 140),
+ 'lavender': (230, 230, 250),
+ 'lavenderblush': (255, 240, 245),
+ 'lawngreen': (124, 252, 0),
+ 'lemonchiffon': (255, 250, 205),
+ 'lightblue': (173, 216, 230),
+ 'lightcoral': (240, 128, 128),
+ 'lightcyan': (224, 255, 255),
+ 'lightgoldenrodyellow': (250, 250, 210),
+ 'lightgray': (211, 211, 211),
+ 'lightgreen': (144, 238, 144),
+ 'lightgrey': (211, 211, 211),
+ 'lightpink': (255, 182, 193),
+ 'lightsalmon': (255, 160, 122),
+ 'lightseagreen': ( 32, 178, 170),
+ 'lightskyblue': (135, 206, 250),
+ 'lightslategray': (119, 136, 153),
+ 'lightslategrey': (119, 136, 153),
+ 'lightsteelblue': (176, 196, 222),
+ 'lightyellow': (255, 255, 224),
+ 'lime': ( 0, 255, 0),
+ 'limegreen': ( 50, 205, 50),
+ 'linen': (250, 240, 230),
+ 'magenta': (255, 0, 255),
+ 'maroon': (128, 0, 0),
+ 'mediumaquamarine': (102, 205, 170),
+ 'mediumblue': ( 0, 0, 205),
+ 'mediumorchid': (186, 85, 211),
+ 'mediumpurple': (147, 112, 219),
+ 'mediumseagreen': ( 60, 179, 113),
+ 'mediumslateblue': (123, 104, 238),
+ 'mediumspringgreen': ( 0, 250, 154),
+ 'mediumturquoise': ( 72, 209, 204),
+ 'mediumvioletred': (199, 21, 133),
+ 'midnightblue': ( 25, 25, 112),
+ 'mintcream': (245, 255, 250),
+ 'mistyrose': (255, 228, 225),
+ 'moccasin': (255, 228, 181),
+ 'navajowhite': (255, 222, 173),
+ 'navy': ( 0, 0, 128),
+ 'oldlace': (253, 245, 230),
+ 'olive': (128, 128, 0),
+ 'olivedrab': (107, 142, 35),
+ 'orange': (255, 165, 0),
+ 'orangered': (255, 69, 0),
+ 'orchid': (218, 112, 214),
+ 'palegoldenrod': (238, 232, 170),
+ 'palegreen': (152, 251, 152),
+ 'paleturquoise': (175, 238, 238),
+ 'palevioletred': (219, 112, 147),
+ 'papayawhip': (255, 239, 213),
+ 'peachpuff': (255, 218, 185),
+ 'peru': (205, 133, 63),
+ 'pink': (255, 192, 203),
+ 'plum': (221, 160, 221),
+ 'powderblue': (176, 224, 230),
+ 'purple': (128, 0, 128),
+ 'red': (255, 0, 0),
+ 'rosybrown': (188, 143, 143),
+ 'royalblue': ( 65, 105, 225),
+ 'saddlebrown': (139, 69, 19),
+ 'salmon': (250, 128, 114),
+ 'sandybrown': (244, 164, 96),
+ 'seagreen': ( 46, 139, 87),
+ 'seashell': (255, 245, 238),
+ 'sienna': (160, 82, 45),
+ 'silver': (192, 192, 192),
+ 'skyblue': (135, 206, 235),
+ 'slateblue': (106, 90, 205),
+ 'slategray': (112, 128, 144),
+ 'slategrey': (112, 128, 144),
+ 'snow': (255, 250, 250),
+ 'springgreen': ( 0, 255, 127),
+ 'steelblue': ( 70, 130, 180),
+ 'tan': (210, 180, 140),
+ 'teal': ( 0, 128, 128),
+ 'thistle': (216, 191, 216),
+ 'tomato': (255, 99, 71),
+ 'turquoise': ( 64, 224, 208),
+ 'violet': (238, 130, 238),
+ 'wheat': (245, 222, 179),
+ 'white': (255, 255, 255),
+ 'whitesmoke': (245, 245, 245),
+ 'yellow': (255, 255, 0),
+ 'yellowgreen': (154, 205, 50)
+ }
+
+_ws_re = re.compile(r'\s+')
+_token_re = re.compile(r'[A-Za-z_][A-Za-z0-9_]*')
+_hex_re = re.compile(r'#([0-9a-f]{3}(?:[0-9a-f]{3})?)$')
+_number_re = re.compile(r'[0-9]*(\.[0-9]*)?')
+
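+# Recursive-descent parser for a small CSS-like color syntax: hex forms
+# ('#rgb', '#rrggbb'), the functional forms rgb(), hsl(), hwb(), cmyk() and
+# gray()/grey(), plus the X11 color names defined above.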
+class ColorParser (object):
+ def __init__(self, s):
+ self._string = s
+ self._pos = 0
+
+ def skipws(self):
+ m = _ws_re.match(self._string, self._pos)
+ if m:
+ self._pos = m.end(0)
+
+ def expect(self, s, context=''):
+ if len(self._string) - self._pos < len(s) \
+ or self._string[self._pos:self._pos + len(s)] != s:
+ raise ValueError('bad color "%s" - expected "%s"%s'
+ % (self._string, s, context))
+ self._pos += len(s)
+
+ def expectEnd(self):
+ if self._pos != len(self._string):
+ raise ValueError('junk at end of color "%s"' % self._string)
+
+ def getToken(self):
+ m = _token_re.match(self._string, self._pos)
+ if m:
+ token = m.group(0)
+
+ self._pos = m.end(0)
+ return token
+ return None
+
+ def parseNumber(self, context=''):
+ m = _number_re.match(self._string, self._pos)
+ if m:
+ self._pos = m.end(0)
+ return float(m.group(0))
+ raise ValueError('bad color "%s" - expected a number%s'
+ % (self._string, context))
+
+ def parseColor(self):
+ self.skipws()
+
+ token = self.getToken()
+ if token:
+ if token == 'rgb':
+ return self.parseRGB()
+ elif token == 'hsl':
+ return self.parseHSL()
+ elif token == 'hwb':
+ return self.parseHWB()
+ elif token == 'cmyk':
+ return self.parseCMYK()
+ elif token == 'gray' or token == 'grey':
+ return self.parseGray()
+
+ try:
+ r, g, b = _x11_colors[token]
+ except KeyError:
+ raise ValueError('unknown color name "%s"' % token)
+
+ self.expectEnd()
+
+ return RGB(r / 255.0, g / 255.0, b / 255.0)
+
+ m = _hex_re.match(self._string, self._pos)
+ if m:
+ hrgb = m.group(1)
+
+ if len(hrgb) == 3:
+ r = int('0x' + 2 * hrgb[0], 16)
+ g = int('0x' + 2 * hrgb[1], 16)
+ b = int('0x' + 2 * hrgb[2], 16)
+ else:
+ r = int('0x' + hrgb[0:2], 16)
+ g = int('0x' + hrgb[2:4], 16)
+ b = int('0x' + hrgb[4:6], 16)
+
+ self._pos = m.end(0)
+ self.skipws()
+
+ self.expectEnd()
+
+ return RGB(r / 255.0, g / 255.0, b / 255.0)
+
+ raise ValueError('bad color syntax "%s"' % self._string)
+
+ def parseRGB(self):
+ self.expect('(', 'after "rgb"')
+ self.skipws()
+
+ r = self.parseValue()
+
+ self.skipws()
+ self.expect(',', 'in "rgb"')
+ self.skipws()
+
+ g = self.parseValue()
+
+ self.skipws()
+ self.expect(',', 'in "rgb"')
+ self.skipws()
+
+ b = self.parseValue()
+
+ self.skipws()
+ self.expect(')', 'at end of "rgb"')
+
+ self.skipws()
+ self.expectEnd()
+
+ return RGB(r, g, b)
+
+ def parseHSL(self):
+ self.expect('(', 'after "hsl"')
+ self.skipws()
+
+ h = self.parseAngle()
+
+ self.skipws()
+ self.expect(',', 'in "hsl"')
+ self.skipws()
+
+ s = self.parseValue()
+
+ self.skipws()
+ self.expect(',', 'in "hsl"')
+ self.skipws()
+
+ l = self.parseValue()
+
+ self.skipws()
+ self.expect(')', 'at end of "hsl"')
+
+ self.skipws()
+ self.expectEnd()
+
+ return HSL(h, s, l)
+
+ def parseHWB(self):
+ self.expect('(', 'after "hwb"')
+ self.skipws()
+
+ h = self.parseAngle()
+
+ self.skipws()
+ self.expect(',', 'in "hwb"')
+ self.skipws()
+
+ w = self.parseValue()
+
+ self.skipws()
+ self.expect(',', 'in "hwb"')
+ self.skipws()
+
+ b = self.parseValue()
+
+ self.skipws()
+ self.expect(')', 'at end of "hwb"')
+
+ self.skipws()
+ self.expectEnd()
+
+ return HWB(h, w, b)
+
+ def parseCMYK(self):
+ self.expect('(', 'after "cmyk"')
+ self.skipws()
+
+ c = self.parseValue()
+
+ self.skipws()
+ self.expect(',', 'in "cmyk"')
+ self.skipws()
+
+ m = self.parseValue()
+
+ self.skipws()
+ self.expect(',', 'in "cmyk"')
+ self.skipws()
+
+ y = self.parseValue()
+
+ self.skipws()
+ self.expect(',', 'in "cmyk"')
+ self.skipws()
+
+ k = self.parseValue()
+
+ self.skipws()
+ self.expect(')', 'at end of "cmyk"')
+
+ self.skipws()
+ self.expectEnd()
+
+ return CMYK(c, m, y, k)
+
+ def parseGray(self):
+ self.expect('(', 'after "gray"')
+ self.skipws()
+
+ g = self.parseValue()
+
+ self.skipws()
+ self.expect(')', 'at end of "gray"')
+
+ self.skipws()
+ self.expectEnd()
+
+ return Gray(g)
+
+ def parseValue(self):
+ n = self.parseNumber()
+ self.skipws()
+ if self._pos < len(self._string) and self._string[self._pos] == '%':
+ n = n / 100.0
+ self._pos += 1
+ return n
+
+ def parseAngle(self):
+ n = self.parseNumber()
+ self.skipws()
+ tok = self.getToken()
+ if tok == 'rad':
+ n = n * 180.0 / math.pi
+ elif tok == 'grad' or tok == 'gon':
+ n = n * 0.9
+ elif tok != 'deg':
+ raise ValueError('bad angle unit "%s"' % tok)
+ return n
+
+_color_re = re.compile(r'\s*(#|rgb|hsl|hwb|cmyk|gray|grey|%s)'
+ % '|'.join(_x11_colors.keys()))
+def isAColor(s):
+ return _color_re.match(s)
+
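+# e.g. parseColor('#3f0'), parseColor('rgb(100%, 50%, 0%)') or
+# parseColor('gray(25%)'); call .to_rgb() on the result for RGB components.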
+def parseColor(s):
+ return ColorParser(s).parseColor()
diff --git a/packages/electron-builder/vendor/dmgbuild/core.py b/packages/electron-builder/vendor/dmgbuild/core.py
new file mode 100644
index 00000000000..9ffcd3790e7
--- /dev/null
+++ b/packages/electron-builder/vendor/dmgbuild/core.py
@@ -0,0 +1,597 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import os
+import pkg_resources
+import re
+import shutil
+import stat
+import subprocess
+import sys
+import tempfile
+import tokenize
+import json
+
+try:
+ {}.iteritems
+ iteritems = lambda x: x.iteritems()
+ iterkeys = lambda x: x.iterkeys()
+except AttributeError:
+ iteritems = lambda x: x.items()
+ iterkeys = lambda x: x.keys()
+try:
+ unicode
+except NameError:
+ unicode = str
+
+import biplist
+from mac_alias import *
+from ds_store import *
+
+from . import colors
+from . import licensing
+
+try:
+ from . import badge
+except ImportError:
+ badge = None
+
+_hexcolor_re = re.compile(r'#[0-9a-f]{3}(?:[0-9a-f]{3})?')
+
+class DMGError(Exception):
+ pass
+
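+# Thin wrapper around /usr/bin/hdiutil; unless plist=False is passed, appends
+# -plist and parses the XML property-list output with biplist, returning a
+# (returncode, results) tuple.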
+def hdiutil(cmd, *args, **kwargs):
+ plist = kwargs.get('plist', True)
+ all_args = ['/usr/bin/hdiutil', cmd]
+ all_args.extend(args)
+ if plist:
+ all_args.append('-plist')
+ p = subprocess.Popen(all_args, stdout=subprocess.PIPE, close_fds=True)
+ output, errors = p.communicate()
+ if plist:
+ results = biplist.readPlistFromString(output)
+ else:
+ results = output
+ retcode = p.wait()
+ return retcode, results
+
+# On Python 2 we can just execfile() it, but Python 3 removed that builtin
+def load_settings(filename, settings):
+ if sys.version_info[0] == 2:
+ execfile(filename, settings, settings)
+ else:
+ encoding = 'utf-8'
+ with open(filename, 'rb') as fp:
+ try:
+ encoding = tokenize.detect_encoding(fp.readline)[0]
+ except SyntaxError:
+ pass
+
+ with open(filename, 'r', encoding=encoding) as fp:
+ exec(compile(fp.read(), filename, 'exec'), settings, settings)
+
+def load_json(filename, settings):
+ """Read an appdmg .json spec. Uses the defaults for appdmg, rather than
+ the usual defaults for dmgbuild. """
+
+ with open(filename, 'r') as fp:
+ json_data = json.load(fp)
+
+ if 'title' not in json_data:
+ raise ValueError('missing \'title\' in JSON settings file')
+ if 'contents' not in json_data:
+ raise ValueError('missing \'contents\' in JSON settings file')
+
+ settings['volume_name'] = json_data['title']
+ settings['icon'] = json_data.get('icon', None)
+ settings['badge_icon'] = json_data.get('badge-icon', None)
+ bk = json_data.get('background', None)
+ if bk is None:
+ bk = json_data.get('background-color', None)
+ if bk is not None:
+ settings['background'] = bk
+ settings['icon_size'] = json_data.get('icon-size', 80)
+ wnd = json_data.get('window', { 'position': (100, 100),
+ 'size': (640, 480) })
+ pos = wnd.get('position', { 'x': 100, 'y': 100 })
+ siz = wnd.get('size', { 'width': 640, 'height': 480 })
+ settings['window_rect'] = ((pos.get('x', 100), pos.get('y', 100)),
+ (siz.get('width', 640), siz.get('height', 480)))
+ settings['format'] = json_data.get('format', 'UDZO')
+ settings['compression_level'] = json_data.get('compression-level', None)
+ settings['license'] = json_data.get('license', None)
+ files = []
+ symlinks = {}
+ icon_locations = {}
+ for fileinfo in json_data.get('contents', []):
+ if 'path' not in fileinfo:
+ raise ValueError('missing \'path\' in contents in JSON settings file')
+ if 'x' not in fileinfo:
+ raise ValueError('missing \'x\' in contents in JSON settings file')
+ if 'y' not in fileinfo:
+ raise ValueError('missing \'y\' in contents in JSON settings file')
+
+ kind = fileinfo.get('type', 'file')
+ path = fileinfo['path']
+ name = fileinfo.get('name', os.path.basename(path.rstrip('/')))
+ if kind == 'file':
+ files.append((path, name))
+ elif kind == 'link':
+ symlinks[name] = path
+ elif kind == 'position':
+ pass
+ icon_locations[name] = (fileinfo['x'], fileinfo['y'])
+
+ settings['files'] = files
+ settings['symlinks'] = symlinks
+ settings['icon_locations'] = icon_locations
+
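+# Typical use (illustrative values; the paths are hypothetical):
+#   build_dmg('MyApp.dmg', 'MyApp',
+#             settings={'files': ['dist/MyApp.app'],
+#                       'symlinks': {'Applications': '/Applications'}})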
+def build_dmg(filename, volume_name, settings_file=None, settings={},
+ defines={}, lookForHiDPI=True):
+ options = {
+ # Default settings
+ 'filename': filename,
+ 'volume_name': volume_name,
+ 'format': 'UDBZ',
+ 'compression_level': None,
+ 'size': None,
+ 'files': [],
+ 'symlinks': {},
+ 'icon': None,
+ 'badge_icon': None,
+ 'background': None,
+ 'show_status_bar': False,
+ 'show_tab_view': False,
+ 'show_toolbar': False,
+ 'show_pathbar': False,
+ 'show_sidebar': False,
+ 'sidebar_width': 180,
+ 'arrange_by': None,
+ 'grid_offset': (0, 0),
+ 'grid_spacing': 100.0,
+ 'scroll_position': (0.0, 0.0),
+ 'show_icon_preview': False,
+ 'show_item_info': False,
+ 'label_pos': 'bottom',
+ 'text_size': 16.0,
+ 'icon_size': 128.0,
+ 'include_icon_view_settings': 'auto',
+ 'include_list_view_settings': 'auto',
+ 'list_icon_size': 16.0,
+ 'list_text_size': 12.0,
+ 'list_scroll_position': (0, 0),
+ 'list_sort_by': 'name',
+ 'list_use_relative_dates': True,
+ 'list_calculate_all_sizes': False,
+ 'list_columns': ('name', 'date-modified', 'size', 'kind', 'date-added'),
+ 'list_column_widths': {
+ 'name': 300,
+ 'date-modified': 181,
+ 'date-created': 181,
+ 'date-added': 181,
+ 'date-last-opened': 181,
+ 'size': 97,
+ 'kind': 115,
+ 'label': 100,
+ 'version': 75,
+ 'comments': 300,
+ },
+ 'list_column_sort_directions': {
+ 'name': 'ascending',
+ 'date-modified': 'descending',
+ 'date-created': 'descending',
+ 'date-added': 'descending',
+ 'date-last-opened': 'descending',
+ 'size': 'descending',
+ 'kind': 'ascending',
+ 'label': 'ascending',
+ 'version': 'ascending',
+ 'comments': 'ascending',
+ },
+ 'window_rect': ((100, 100), (640, 280)),
+ 'default_view': 'icon-view',
+ 'icon_locations': {},
+ 'license': None,
+ 'defines': defines
+ }
+
+ # Execute the settings file
+ if settings_file:
+ # We now support JSON settings files using appdmg's format
+ if settings_file.endswith('.json'):
+ load_json(settings_file, options)
+ else:
+ load_settings(settings_file, options)
+
+ # Add any overrides
+ options.update(settings)
+
+ # Set up the finder data
+ bounds = options['window_rect']
+
+ bounds_string = '{{%s, %s}, {%s, %s}}' % (bounds[0][0],
+ bounds[0][1],
+ bounds[1][0],
+ bounds[1][1])
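+ # Finder window settings; written into the volume's .DS_Store as 'bwsp'.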
+ bwsp = {
+ 'ShowStatusBar': options['show_status_bar'],
+ 'WindowBounds': bounds_string.encode('utf-8'),
+ 'ContainerShowSidebar': False,
+ 'PreviewPaneVisibility': False,
+ 'SidebarWidth': options['sidebar_width'],
+ 'ShowTabView': options['show_tab_view'],
+ 'ShowToolbar': options['show_toolbar'],
+ 'ShowPathbar': options['show_pathbar'],
+ 'ShowSidebar': options['show_sidebar']
+ }
+
+ arrange_options = {
+ 'name': 'name',
+ 'date-modified': 'dateModified',
+ 'date-created': 'dateCreated',
+ 'date-added': 'dateAdded',
+ 'date-last-opened': 'dateLastOpened',
+ 'size': 'size',
+ 'kind': 'kind',
+ 'label': 'label',
+ }
+
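+ # Icon-view preferences ('icvp'): background, grid, label and icon metrics.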
+ icvp = {
+ 'viewOptionsVersion': 1,
+ 'backgroundType': 0,
+ 'backgroundColorRed': 1.0,
+ 'backgroundColorGreen': 1.0,
+ 'backgroundColorBlue': 1.0,
+ 'gridOffsetX': float(options['grid_offset'][0]),
+ 'gridOffsetY': float(options['grid_offset'][1]),
+ 'gridSpacing': float(options['grid_spacing']),
+ 'arrangeBy': str(arrange_options.get(options['arrange_by'], 'none')),
+ 'showIconPreview': options['show_icon_preview'] == True,
+ 'showItemInfo': options['show_item_info'] == True,
+ 'labelOnBottom': options['label_pos'] == 'bottom',
+ 'textSize': float(options['text_size']),
+ 'iconSize': float(options['icon_size']),
+ 'scrollPositionX': float(options['scroll_position'][0]),
+ 'scrollPositionY': float(options['scroll_position'][1])
+ }
+
+ background = options['background']
+
+ columns = {
+ 'name': 'name',
+ 'date-modified': 'dateModified',
+ 'date-created': 'dateCreated',
+ 'date-added': 'dateAdded',
+ 'date-last-opened': 'dateLastOpened',
+ 'size': 'size',
+ 'kind': 'kind',
+ 'label': 'label',
+ 'version': 'version',
+ 'comments': 'comments'
+ }
+
+ default_widths = {
+ 'name': 300,
+ 'date-modified': 181,
+ 'date-created': 181,
+ 'date-added': 181,
+ 'date-last-opened': 181,
+ 'size': 97,
+ 'kind': 115,
+ 'label': 100,
+ 'version': 75,
+ 'comments': 300,
+ }
+
+ default_sort_directions = {
+ 'name': 'ascending',
+ 'date-modified': 'descending',
+ 'date-created': 'descending',
+ 'date-added': 'descending',
+ 'date-last-opened': 'descending',
+ 'size': 'descending',
+ 'kind': 'ascending',
+ 'label': 'ascending',
+ 'version': 'ascending',
+ 'comments': 'ascending',
+ }
+
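+ # List-view preferences ('lsvp'); per-column layout is filled in below.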
+ lsvp = {
+ 'viewOptionsVersion': 1,
+ 'sortColumn': columns.get(options['list_sort_by'], 'name'),
+ 'textSize': float(options['list_text_size']),
+ 'iconSize': float(options['list_icon_size']),
+ 'showIconPreview': options['show_icon_preview'],
+ 'scrollPositionX': options['list_scroll_position'][0],
+ 'scrollPositionY': options['list_scroll_position'][1],
+ 'useRelativeDates': options['list_use_relative_dates'],
+ 'calculateAllSizes': options['list_calculate_all_sizes'],
+ }
+
+ lsvp['columns'] = {}
+ cndx = {}
+
+ for n, column in enumerate(options['list_columns']):
+ cndx[column] = n
+ width = options['list_column_widths'].get(column,
+ default_widths[column])
+ asc = 'ascending' == options['list_column_sort_directions'].get(column,
+ default_sort_directions[column])
+
+ lsvp['columns'][columns[column]] = {
+ 'index': n,
+ 'width': width,
+ 'identifier': columns[column],
+ 'visible': True,
+ 'ascending': asc
+ }
+
+ n = len(options['list_columns'])
+ for k in iterkeys(columns):
+ if cndx.get(k, None) is None:
+ cndx[k] = n
+ width = default_widths[k]
+ asc = 'ascending' == default_sort_directions[k]
+
+ lsvp['columns'][columns[column]] = {
+ 'index': n,
+ 'width': width,
+ 'identifier': columns[column],
+ 'visible': False,
+ 'ascending': asc
+ }
+
+ n += 1
+
+ default_view = options['default_view']
+ views = {
+ 'icon-view': b'icnv',
+ 'column-view': b'clmv',
+ 'list-view': b'Nlsv',
+ 'coverflow': b'Flwv'
+ }
+
+ icvl = (b'type', views.get(default_view, b'icnv'))
+
+ include_icon_view_settings = default_view == 'icon-view' \
+ or options['include_icon_view_settings'] not in \
+ ('auto', 'no', 0, False, None)
+ include_list_view_settings = default_view in ('list-view', 'coverflow') \
+ or options['include_list_view_settings'] not in \
+ ('auto', 'no', 0, False, None)
+
+ filename = options['filename']
+ volume_name = options['volume_name']
+
+ # Construct a writeable image to start with
+ dirname, basename = os.path.split(os.path.realpath(filename))
+ if not basename.endswith('.dmg'):
+ basename += '.dmg'
+ writableFile = tempfile.NamedTemporaryFile(dir=dirname, prefix='.temp',
+ suffix=basename)
+
+ total_size = options['size']
+ if total_size is None:
+ # Start with a size of 128 MB - this way we don't need to calculate the
+ # size of the background image, volume icon, and .DS_Store file (and
+ # 128 MB should be more than sufficient for even the most outlandish
+ # image sizes, like an uncompressed 5K multi-resolution TIFF)
+ total_size = 128 * 1024 * 1024
+
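+ # Round x up to the next multiple of n (file sizes are counted in 4 KB
+ # allocation blocks when estimating the image size).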
+ def roundup(x, n):
+ return x if x % n == 0 else x + n - x % n
+
+ for path in options['files']:
+ if isinstance(path, tuple):
+ path = path[0]
+
+ if not os.path.islink(path) and os.path.isdir(path):
+ for dirpath, dirnames, filenames in os.walk(path):
+ for f in filenames:
+ fp = os.path.join(dirpath, f)
+ total_size += roundup(os.lstat(fp).st_size, 4096)
+ else:
+ total_size += roundup(os.lstat(path).st_size, 4096)
+
+ for name,target in iteritems(options['symlinks']):
+ total_size += 4096
+
+ # Floor division so hdiutil is always given an integral size in KB
+ total_size = str(max(total_size // 1024, 1024)) + 'K'
+
+ ret, output = hdiutil('create',
+ '-ov',
+ '-volname', volume_name,
+ '-fs', 'HFS+',
+ '-fsargs', '-c c=64,a=16,e=16',
+ '-size', total_size,
+ writableFile.name)
+
+ if ret:
+ raise DMGError('Unable to create disk image')
+
+ ret, output = hdiutil('attach',
+ '-nobrowse',
+ '-owners', 'off',
+ '-noidme',
+ writableFile.name)
+
+ if ret:
+ raise DMGError('Unable to attach disk image')
+
+ try:
+ for info in output['system-entities']:
+ if info.get('mount-point', None):
+ device = info['dev-entry']
+ mount_point = info['mount-point']
+
+ icon = options['icon']
+ if badge:
+ badge_icon = options['badge_icon']
+ else:
+ badge_icon = None
+ icon_target_path = os.path.join(mount_point, '.VolumeIcon.icns')
+ if icon:
+ shutil.copyfile(icon, icon_target_path)
+ elif badge_icon:
+ badge.badge_disk_icon(badge_icon, icon_target_path)
+
+ if icon or badge_icon:
+ subprocess.call(['/usr/bin/SetFile', '-a', 'C', mount_point])
+
+ background_bmk = None
+
+ if not isinstance(background, (str, unicode)):
+ pass
+ elif colors.isAColor(background):
+ c = colors.parseColor(background).to_rgb()
+
+ icvp['backgroundType'] = 1
+ icvp['backgroundColorRed'] = float(c.r)
+ icvp['backgroundColorGreen'] = float(c.g)
+ icvp['backgroundColorBlue'] = float(c.b)
+ else:
+ if os.path.isfile(background):
+ # look to see if there are HiDPI resources available
+
+ if lookForHiDPI is True:
+ name, extension = os.path.splitext(os.path.basename(background))
+ orderedImages = [background]
+ imageDirectory = os.path.dirname(background)
+ if imageDirectory == '':
+ imageDirectory = '.'
+ for candidateName in os.listdir(imageDirectory):
+ hasScale = re.match(
+ '^(?P<name>.+)@(?P<scale>\d+)x(?P<extension>\.\w+)$',
+ candidateName)
+ if hasScale and name == hasScale.group('name') and \
+ extension == hasScale.group('extension'):
+ scale = int(hasScale.group('scale'))
+ if len(orderedImages) < scale:
+ orderedImages += [None] * (scale - len(orderedImages))
+ orderedImages[scale - 1] = os.path.join(imageDirectory, candidateName)
+
+ if len(orderedImages) > 1:
+ # compile the grouped tiff
+ backgroundFile = tempfile.NamedTemporaryFile(suffix='.tiff')
+ background = backgroundFile.name
+ output = tempfile.TemporaryFile(mode='w+')
+ try:
+ subprocess.check_call(
+ ['/usr/bin/tiffutil', '-cathidpicheck'] +
+ list(filter(None, orderedImages)) +
+ ['-out', background], stdout=output, stderr=output)
+ except Exception as e:
+ output.seek(0)
+ raise ValueError(
+ 'unable to compile combined HiDPI file "%s" got error: %s\noutput: %s'
+ % (background, str(e), output.read()))
+
+ _, kind = os.path.splitext(background)
+ path_in_image = os.path.join(mount_point, '.background' + kind)
+ shutil.copyfile(background, path_in_image)
+ elif pkg_resources.resource_exists('dmgbuild', 'resources/' + background + '.tiff'):
+ tiffdata = pkg_resources.resource_string(
+ 'dmgbuild',
+ 'resources/' + background + '.tiff')
+ path_in_image = os.path.join(mount_point, '.background.tiff')
+
+ with open(path_in_image, 'wb') as f:
+ f.write(tiffdata)
+ else:
+ raise ValueError('background file "%s" not found' % background)
+
+ alias = Alias.for_file(path_in_image)
+ background_bmk = Bookmark.for_file(path_in_image)
+
+ icvp['backgroundType'] = 2
+ icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
+
+ for f in options['files']:
+ if isinstance(f, tuple):
+ f_in_image = os.path.join(mount_point, f[1])
+ f = f[0]
+ else:
+ basename = os.path.basename(f.rstrip('/'))
+ f_in_image = os.path.join(mount_point, basename)
+
+ # use system ditto command to preserve code signing, etc.
+ subprocess.call(['/usr/bin/ditto', f, f_in_image])
+
+ for name,target in iteritems(options['symlinks']):
+ name_in_image = os.path.join(mount_point, name)
+ os.symlink(target, name_in_image)
+
+ userfn = options.get('create_hook', None)
+ if callable(userfn):
+ userfn(mount_point, options)
+
+ image_dsstore = os.path.join(mount_point, '.DS_Store')
+
+ with DSStore.open(image_dsstore, 'w+') as d:
+ d['.']['vSrn'] = ('long', 1)
+ d['.']['bwsp'] = bwsp
+ if include_icon_view_settings:
+ d['.']['icvp'] = icvp
+ if background_bmk:
+ d['.']['pBBk'] = background_bmk
+ if include_list_view_settings:
+ d['.']['lsvp'] = lsvp
+ d['.']['icvl'] = icvl
+
+ for k,v in iteritems(options['icon_locations']):
+ d[k]['Iloc'] = v
+
+ # Delete .Trashes, if it gets created
+ shutil.rmtree(os.path.join(mount_point, '.Trashes'), True)
+ except:
+ # Always try to detach
+ hdiutil('detach', '-force', device, plist=False)
+ raise
+
+ ret, output = hdiutil('detach', device, plist=False)
+
+ if ret:
+ hdiutil('detach', '-force', device, plist=False)
+ raise DMGError('Unable to detach device cleanly')
+
+ # Shrink the output to the minimum possible size
+ ret, output = hdiutil('resize',
+ '-quiet',
+ '-sectors', 'min',
+ writableFile.name,
+ plist=False)
+
+ if ret:
+ raise DMGError('Unable to shrink')
+
+ key_prefix = {'UDZO': 'zlib', 'UDBZ': 'bzip2', 'ULFO': 'lzfse'}
+ compression_level = options['compression_level']
+ if options['format'] in key_prefix and compression_level:
+ compression_args = [
+ '-imagekey',
+ key_prefix[options['format']] + '-level=' + str(compression_level)
+ ]
+ else:
+ compression_args = []
+
+ ret, output = hdiutil('convert', writableFile.name,
+ '-format', options['format'],
+ '-ov',
+ '-o', filename, *compression_args)
+
+ if ret:
+ raise DMGError('Unable to convert')
+
+ if options['license']:
+ ret, output = hdiutil('unflatten', '-quiet', filename, plist=False)
+
+ if ret:
+ raise DMGError('Unable to unflatten to add license')
+
+ licensing.add_license(filename, options['license'])
+
+ ret, output = hdiutil('flatten', '-quiet', filename, plist=False)
+
+ if ret:
+ raise DMGError('Unable to flatten after adding license')
diff --git a/packages/electron-builder/vendor/dmgbuild/licensing.py b/packages/electron-builder/vendor/dmgbuild/licensing.py
new file mode 100644
index 00000000000..850ecf84a28
--- /dev/null
+++ b/packages/electron-builder/vendor/dmgbuild/licensing.py
@@ -0,0 +1,458 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import os
+import struct
+
+from .resources import *
+
+# ISO language and country codes to Macintosh Region codes (from Script.h)
+# <language>_<country> == CFLocaleCreateCanonicalLocaleIdentifierFromScriptManagerCodes(NULL,
+# kTextLanguageDontCare,
+# <region code>)
+region_codes = {
+ "en_US": 0,
+ "fr_FR": 1,
+ "en_GB": 2,
+ "de_DE": 3,
+ "it_IT": 4,
+ "nl_NL": 5,
+ "nl_BE": 6,
+ "sv_SE": 7,
+ "es_ES": 8,
+ "da_DK": 9,
+ "pt_PT": 10,
+ "fr_CA": 11,
+ "nb_NO": 12,
+ "he_IL": 13,
+ "ja_JP": 14,
+ "en_AU": 15,
+ "ar": 16,
+ "fi_FI": 17,
+ "fr_CH": 18,
+ "de_CH": 19,
+ "el_GR": 20,
+ "is_IS": 21,
+ "mt_MT": 22,
+ "el_CY": 23,
+ "tr_TR": 24,
+ "hi_IN": 33,
+ "ur_PK": 34,
+ "it_CH": 36,
+ "ro_RO": 39,
+ "grc": 40,
+ "lt_LT": 41,
+ "pl_PL": 42,
+ "hu_HU": 43,
+ "et_EE": 44,
+ "lv_LV": 45,
+ "se": 46,
+ "fo_FO": 47,
+ "fa_IR": 48,
+ "ru_RU": 49,
+ "ga_IE": 50,
+ "ko_KR": 51,
+ "zh_CN": 52,
+ "zh_TW": 53,
+ "th_TH": 54,
+ "cs_CZ": 56,
+ "sk_SK": 57,
+ "bn": 60,
+ "be_BY": 61,
+ "uk_UA": 62,
+ "sr_RS": 65,
+ "sl_SI": 66,
+ "mk_MK": 67,
+ "hr_HR": 68,
+ "pt_BR": 71,
+ "bg_BG": 72,
+ "ca_ES": 73,
+ "gd": 75,
+ "gv": 76,
+ "br": 77,
+ "iu_CA": 78,
+ "cy": 79,
+ "ga-Latg_IE": 81,
+ "en_CA": 82,
+ "dz_BT": 83,
+ "hy_AM": 84,
+ "ka_GE": 85,
+ "es_419": 86,
+ "to_TO": 88,
+ "fr_001": 91,
+ "de_AT": 92,
+ "gu_IN": 94,
+ "pa": 95,
+ "ur_IN": 96,
+ "vi_VN": 97,
+ "fr_BE": 98,
+ "uz_UZ": 99,
+ "en_SG": 100,
+ "nn_NO": 101,
+ "af_ZA": 102,
+ "eo": 103,
+ "mr_IN": 104,
+ "bo": 105,
+ "ne_NP": 106,
+ "kl": 107,
+ "en_IE": 108
+}
+
+# Map of region constants to script constants (from Script.h)
+# TextEncoding textEncoding;
+# GetTextEncodingFromScriptInfo(kTextScriptDontCare, kTextLanguageDontCare, <region code>, &textEncoding);
+# <script code> == GetTextEncodingBase(textEncoding);
+script_codes = {
+ 0: 0,
+ 1: 0,
+ 2: 0,
+ 3: 0,
+ 4: 0,
+ 5: 0,
+ 6: 0,
+ 7: 0,
+ 8: 0,
+ 9: 0,
+ 10: 0,
+ 11: 0,
+ 12: 0,
+ 13: 5,
+ 14: 1,
+ 15: 0,
+ 16: 4,
+ 17: 0,
+ 18: 0,
+ 19: 0,
+ 20: 6,
+ 21: 37,
+ 22: 0,
+ 23: 6,
+ 24: 35,
+ 25: 36,
+ 26: 0,
+ 27: 0,
+ 30: 0,
+ 31: 0,
+ 32: 0,
+ 33: 9,
+ 34: 4,
+ 35: 35,
+ 36: 0,
+ 37: 0,
+ 39: 38,
+ 40: 6,
+ 41: 29,
+ 42: 29,
+ 43: 29,
+ 44: 29,
+ 45: 29,
+ 46: 0,
+ 47: 37,
+ 48: 140,
+ 49: 7,
+ 50: 39,
+ 51: 3,
+ 52: 25,
+ 53: 2,
+ 54: 21,
+ 56: 29,
+ 57: 29,
+ 59: 29,
+ 60: 13,
+ 61: 7,
+ 62: 7,
+ 64: 6,
+ 65: 7,
+ 66: 36,
+ 67: 7,
+ 68: 36,
+ 70: 0,
+ 71: 0,
+ 72: 7,
+ 73: 0,
+ 75: 39,
+ 76: 39,
+ 77: 39,
+ 78: 236,
+ 79: 39,
+ 81: 40,
+ 82: 0,
+ 83: 26,
+ 84: 24,
+ 85: 23,
+ 86: 0,
+ 88: 0,
+ 91: 0,
+ 92: 0,
+ 94: 11,
+ 95: 10,
+ 96: 4,
+ 97: 30,
+ 98: 0,
+ 99: 7,
+ 100: 0,
+ 101: 0,
+ 102: 0,
+ 103: 0,
+ 104: 9,
+ 105: 26,
+ 106: 9,
+ 107: 0,
+ 108: 0
+}
+
+# Map of TextEncodingBase constants to Python encoder names (from TextCommon.h)
+encodings_map = {
+ 0: 'mac_roman', # kTextEncodingMacRoman
+ 1: 'shift_jis', # kTextEncodingMacJapanese
+ 2: 'big5', # kTextEncodingMacChineseTrad
+ 3: 'euc_kr', # kTextEncodingMacKorean
+ 4: 'mac_arabic', # kTextEncodingMacArabic
+ 6: 'mac_greek', # kTextEncodingMacGreek
+ 7: 'mac_cyrillic', # kTextEncodingMacCyrillic
+ 21: 'iso8859_11', # kTextEncodingMacThai
+ 25: 'euc-cn', # kTextEncodingMacChineseSimp
+ 29: 'mac_centeuro', # kTextEncodingMacCentralEurRoman
+ 35: 'mac_turkish', # kTextEncodingMacTurkish
+ 36: 'mac_croatian', # kTextEncodingMacCroatian
+ 37: 'mac_iceland', # kTextEncodingMacIcelandic
+ 38: 'mac_romanian', # kTextEncodingMacRomanian
+ 140: 'mac_farsi' # kTextEncodingMacFarsi
+}
+
+# Standard fonts
+fonts = {
+ 'New York': 2,
+ 'Geneva': 3,
+ 'Monaco': 4,
+ 'Venice': 5,
+ 'London': 6,
+ 'Athens': 7,
+ 'San Francisco': 8,
+ 'Toronto': 9,
+ 'Cairo': 11,
+ 'Los Angeles': 12,
+ 'Times': 20,
+ 'Helvetica': 21,
+ 'Courier': 22,
+ 'Symbol': 23,
+ 'Mobile': 24
+}
+
+# Buttons (these come from the SLAResources file which you can find in the SLA
+# SDK on developer.apple.com)
+default_buttons = {
+ 0: (
+ b'English',
+ b'Agree',
+ b'Disagree',
+ b'Print',
+ b'Save',
+ b'If you agree with the terms of this license, press "Agree" to '
+ b'install the software. If you do not agree, press "Disagree".'
+ ),
+
+ 3: (
+ b'Deutsch',
+ b'Akzeptieren',
+ b'Ablehnen',
+ b'Drucken',
+ b'Sichern...',
+ b'Klicken Sie in \xd2Akzeptieren\xd3, wenn Sie mit den Bestimmungen des Software-Lizenzvertrags einverstanden sind. Falls nicht, bitte \xd2Ablehnen\xd3 anklicken. Sie k\x9annen die Software nur installieren, wenn Sie \xd2Akzeptieren\xd3 angeklickt haben.'
+ ),
+
+ 8: (
+ b'Espa\x96ol',
+ b'Aceptar',
+ b'No aceptar',
+ b'Imprimir',
+ b'Guardar...',
+ b'Si est\x87 de acuerdo con los t\x8erminos de esta licencia, pulse "Aceptar" para instalar el software. En el supuesto de que no est\x8e de acuerdo con los t\x8erminos de esta licencia, pulse "No aceptar."'
+ ),
+
+ 1: (
+ b'Fran\x8dais',
+ b'Accepter',
+ b'Refuser',
+ b'Imprimer',
+ b'Enregistrer...',
+ b'Si vous acceptez les termes de la pr\x8esente licence, cliquez sur "Accepter" afin d\'installer le logiciel. Si vous n\'\x90tes pas d\'accord avec les termes de la licence, cliquez sur "Refuser".'
+ ),
+
+ 4: (
+ b'Italiano',
+ b'Accetto',
+ b'Rifiuto',
+ b'Stampa',
+ b'Registra...',
+ b'Se accetti le condizioni di questa licenza, fai clic su "Accetto" per installare il software. Altrimenti fai clic su "Rifiuto".'
+ ),
+
+ 14: (
+ b'Japanese',
+ b'\x93\xaf\x88\xd3\x82\xb5\x82\xdc\x82\xb7',
+ b'\x93\xaf\x88\xd3\x82\xb5\x82\xdc\x82\xb9\x82\xf1',
+ b'\x88\xf3\x8d\xfc\x82\xb7\x82\xe9',
+ b'\x95\xdb\x91\xb6...',
+ b'\x96{\x83\\\x83t\x83g\x83E\x83G\x83A\x8eg\x97p\x8b\x96\x91\xf8\x8c_\x96\xf1\x82\xcc\x8f\xf0\x8c\x8f\x82\xc9\x93\xaf\x88\xd3\x82\xb3\x82\xea\x82\xe9\x8f\xea\x8d\x87\x82\xc9\x82\xcd\x81A\x83\\\x83t\x83g\x83E\x83G\x83A\x82\xf0\x83C\x83\x93\x83X\x83g\x81[\x83\x8b\x82\xb7\x82\xe9\x82\xbd\x82\xdf\x82\xc9\x81u\x93\xaf\x88\xd3\x82\xb5\x82\xdc\x82\xb7\x81v\x82\xf0\x89\x9f\x82\xb5\x82\xc4\x82\xad\x82\xbe\x82\xb3\x82\xa2\x81B\x81@\x93\xaf\x88\xd3\x82\xb3\x82\xea\x82\xc8\x82\xa2\x8f\xea\x8d\x87\x82\xc9\x82\xcd\x81A\x81u\x93\xaf\x88\xd3\x82\xb5\x82\xdc\x82\xb9\x82\xf1\x81v\x82\xf0\x89\x9f\x82\xb5\x82\xc4\x82\xad\x82\xbe\x82\xb3\x82\xa2\x81B'
+ ),
+
+ 5: (
+ b'Nederlands',
+ b'Ja',
+ b'Nee',
+ b'Print',
+ b'Bewaar...',
+ b'Indien u akkoord gaat met de voorwaarden van deze licentie, kunt u op \'Ja\' klikken om de programmatuur te installeren. Indien u niet akkoord gaat, klikt u op \'Nee\'.'
+ ),
+
+ 7: (
+ b'Svensk',
+ b'Godk\x8anns',
+ b'Avb\x9ajs',
+ b'Skriv ut',
+ b'Spara...',
+ b'Om Du godk\x8anner licensvillkoren klicka p\x8c "Godk\x8anns" f\x9ar att installera programprodukten. Om Du inte godk\x8anner licensvillkoren, klicka p\x8c "Avb\x9ajs".'
+ ),
+
+ 71: (
+ b'Portugu\x90s',
+ b'Concordar',
+ b'Discordar',
+ b'Imprimir',
+ b'Salvar...',
+ b'Se est\x87 de acordo com os termos desta licen\x8da, pressione "Concordar" para instalar o software. Se n\x8bo est\x87 de acordo, pressione "Discordar".'
+ ),
+
+ 52: (
+ b'Simplified Chinese',
+ b'\xcd\xac\xd2\xe2',
+ b'\xb2\xbb\xcd\xac\xd2\xe2',
+ b'\xb4\xf2\xd3\xa1',
+ b'\xb4\xe6\xb4\xa2\xa1\xad',
+ b'\xc8\xe7\xb9\xfb\xc4\xfa\xcd\xac\xd2\xe2\xb1\xbe\xd0\xed\xbf\xc9\xd0\xad\xd2\xe9\xb5\xc4\xcc\xf5\xbf\xee\xa3\xac\xc7\xeb\xb0\xb4\xa1\xb0\xcd\xac\xd2\xe2\xa1\xb1\xc0\xb4\xb0\xb2\xd7\xb0\xb4\xcb\xc8\xed\xbc\xfe\xa1\xa3\xc8\xe7\xb9\xfb\xc4\xfa\xb2\xbb\xcd\xac\xd2\xe2\xa3\xac\xc7\xeb\xb0\xb4\xa1\xb0\xb2\xbb\xcd\xac\xd2\xe2\xa1\xb1\xa1\xa3'
+ ),
+
+ 53: (
+ b'Traditional Chinese',
+ b'\xa6P\xb7N',
+ b'\xa4\xa3\xa6P\xb7N',
+ b'\xa6C\xa6L',
+ b'\xc0x\xa6s\xa1K',
+ b'\xa6p\xaaG\xb1z\xa6P\xb7N\xa5\xbb\xb3\\\xa5i\xc3\xd2\xb8\xcc\xaa\xba\xb1\xf8\xb4\xda\xa1A\xbd\xd0\xab\xf6\xa1\xa7\xa6P\xb7N\xa1\xa8\xa5H\xa6w\xb8\xcb\xb3n\xc5\xe9\xa1C\xa6p\xaaG\xa4\xa3\xa6P\xb7N\xa1A\xbd\xd0\xab\xf6\xa1\xa7\xa4\xa3\xa6P\xb7N\xa1\xa8\xa1C'
+ ),
+
+ 9: (
+ b'Dansk',
+ b'Enig',
+ b'Uenig',
+ b'Udskriv',
+ b'Arkiver...',
+ b'Hvis du accepterer betingelserne i licensaftalen, skal du klikke p\x8c \xd2Enig\xd3 for at installere softwaren. Klik p\x8c \xd2Uenig\xd3 for at annullere installeringen.'
+ ),
+
+ 17: (
+ b'Suomi',
+ b'Hyv\x8aksyn',
+ b'En hyv\x8aksy',
+ b'Tulosta',
+ b'Tallenna\xc9',
+ b'Hyv\x8aksy lisenssisopimuksen ehdot osoittamalla \xd5Hyv\x8aksy\xd5. Jos et hyv\x8aksy sopimuksen ehtoja, osoita \xd5En hyv\x8aksy\xd5.'
+ ),
+
+ 51: (
+ b'Korean',
+ b'\xb5\xbf\xc0\xc7',
+ b'\xb5\xbf\xc0\xc7 \xbe\xc8\xc7\xd4',
+ b'\xc7\xc1\xb8\xb0\xc6\xae',
+ b'\xc0\xfa\xc0\xe5...',
+ b'\xbb\xe7\xbf\xeb \xb0\xe8\xbe\xe0\xbc\xad\xc0\xc7 \xb3\xbb\xbf\xeb\xbf\xa1 \xb5\xbf\xc0\xc7\xc7\xcf\xb8\xe9, "\xb5\xbf\xc0\xc7" \xb4\xdc\xc3\xdf\xb8\xa6 \xb4\xad\xb7\xaf \xbc\xd2\xc7\xc1\xc6\xae\xbf\xfe\xbe\xee\xb8\xa6 \xbc\xb3\xc4\xa1\xc7\xcf\xbd\xca\xbd\xc3\xbf\xc0. \xb5\xbf\xc0\xc7\xc7\xcf\xc1\xf6 \xbe\xca\xb4\xc2\xb4\xd9\xb8\xe9, "\xb5\xbf\xc0\xc7 \xbe\xc8\xc7\xd4" \xb4\xdc\xc3\xdf\xb8\xa6 \xb4\xa9\xb8\xa3\xbd\xca\xbd\xc3\xbf\xc0.'
+ ),
+
+ 12: (
+ b'Norsk',
+ b'Enig',
+ b'Ikke enig',
+ b'Skriv ut',
+ b'Arkiver...',
+ b'Hvis De er enig i bestemmelsene i denne lisensavtalen, klikker De p\x8c "Enig"-knappen for \x8c installere programvaren. Hvis De ikke er enig, klikker De p\x8c "Ikke enig".'
+ ),
+}
+
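+# 'LPic' resource: the default language ID and entry count, followed by one
+# (language ID, resource index, two-byte-script flag) triple per localization.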
+class LPicResource (Resource):
+ def __init__(self, res_id, res_name, default_lang, lpic, res_attrs=0):
+ data = []
+ data.append(struct.pack(b'>HH', default_lang, len(lpic)))
+ for lang,rid,two_byte in lpic:
+ data.append(struct.pack(b'>HHH', lang, rid, int(two_byte)))
+ super(LPicResource, self).__init__(b'LPic', res_id, res_name,
+ b''.join(data), res_attrs)
+
+def get_encoder_name(locale):
+ if locale not in region_codes:
+ raise Exception("Cannot determine region code for locale '%s'" % locale)
+ region_code = region_codes[locale]
+
+ if region_code not in script_codes:
+ raise Exception("Cannot determine script code for locale '%s'" % locale)
+ script_code = script_codes[region_code]
+
+ if script_code not in encodings_map:
+ raise Exception("Cannot determine Python encoder name for locale '%s' - "
+ "encode the string data manually as a byte array instead" % locale)
+ return encodings_map[script_code]
+
+def maybe_encode(s, encoding):
+ if isinstance(s, bytes):
+ return s
+ return s.encode(encoding)
+
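+# Expected shape of `license_info` (a sketch inferred from the code below):
+#   {'default-language': 'en_US',
+#    'licenses': {'en_US': {'data': '...license text...', 'isRtf': False}},
+#    'buttons': {'en_US': ('English', 'Agree', 'Disagree', 'Print', 'Save',
+#                          '...prompt text...')}}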
+def add_license(filename, license_info):
+ """Add a license agreement to the specified disk image file, which should
+ have been unflattened first."""
+
+ fork = ResourceFork.from_file(filename)
+
+ default_lang = license_info.get('default-language', 'en_US')
+ default_lang_id = region_codes.get(default_lang, 0)
+
+ lpic = []
+ ndx = 1
+ for language,language_license in license_info['licenses'].items():
+ if language not in region_codes:
+ raise Exception("Unknown language '" + language + "'. Valid languages are: " +
+ ", ".join(sorted(region_codes.keys())))
+ encoding_name = get_encoder_name(language)
+ lang_id = region_codes[language]
+
+ is_two_byte = lang_id in (14, 51, 52, 53) # Japanese, Korean, SimpChinese, TradChinese
+
+ license_data = language_license.get('data')
+
+ if language_license.get('isRtf'):
+ fork.add(Resource(b'RTF ', 5000 + ndx, language + ' SLA',
+ str(license_data)))
+ else:
+ fork.add(TextResource(5000 + ndx, language + ' SLA', license_data))
+ fork.add(StyleResource(5000 + ndx, language + ' SLA',
+ [Style(0, 12, 9, Style.Helvetica,
+ 0, 0, (0, 0, 0))]))
+
+ buttons = license_info.get('buttons', {}).get(language, None)
+ if buttons is None:
+ buttons = default_buttons.get(lang_id, None)
+ if buttons is None:
+ buttons = default_buttons[0]
+
+ buttons = [maybe_encode(b, encoding_name) for b in buttons]
+
+ fork.add(StringListResource(5000 + ndx, language + ' Buttons',
+ buttons))
+
+ lpic.append((lang_id, ndx, is_two_byte))
+
+ ndx += 1
+
+ fork.add(LPicResource(5000, None, default_lang_id, lpic))
+
+ fork.write_to_file(filename)
diff --git a/packages/electron-builder/vendor/dmgbuild/resources.py b/packages/electron-builder/vendor/dmgbuild/resources.py
new file mode 100644
index 00000000000..d2f58e64a4d
--- /dev/null
+++ b/packages/electron-builder/vendor/dmgbuild/resources.py
@@ -0,0 +1,355 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import struct
+
+try:
+ basestring
+except NameError:
+ basestring = str
+
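+# Minimal classic Mac resource-fork builder/parser - just enough to create the
+# LPic, TEXT, styl, STR# and 'RTF ' resources a DMG license agreement needs.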
+class Resource (object):
+ def __init__(self, res_type, res_id, res_name, data=None, res_attrs=0):
+ self.res_type = str(res_type)
+ self.res_id = res_id
+ if isinstance(res_name, basestring):
+ res_name = str(res_name)
+ self.res_name = res_name
+ self.res_attrs = res_attrs
+ if data is None:
+ self.data = None
+ else:
+ self.data = str(data)
+
+ self.data_offset = None
+ self.name_offset = None
+
+ def __repr__(self):
+ return 'Resource(%r, %r, %r, data=%r, res_attrs=%r)' % (self.res_type,
+ self.res_id,
+ self.res_name,
+ self.data,
+ self.res_attrs)
+
+class TMPLResource (Resource):
+ def __init__(self, res_id, res_name, tmpl, res_attrs=0):
+ data = []
+ for name,typecode in tmpl:
+ data.append(struct.pack(b'B', len(name)))
+ data.append(str(name))
+ data.append(str(typecode))
+ super(TMPLResource, self).__init__(b'TMPL', res_id, res_name,
+ b''.join(data), res_attrs)
+
+class StringListResource (Resource):
+ def __init__(self, res_id, res_name, strings, res_attrs=0):
+ data = []
+ data.append(struct.pack(b'>H', len(strings)))
+ for s in strings:
+ data.append(struct.pack(b'B', len(s)))
+ data.append(str(s))
+ super(StringListResource, self).__init__(b'STR#', res_id, res_name,
+ b''.join(data), res_attrs)
+
+class TextResource (Resource):
+ def __init__(self, res_id, res_name, string, res_attrs=0):
+ super(TextResource, self).__init__(b'TEXT', res_id, res_name,
+ str(string), res_attrs)
+
+class Style (object):
+ # Fonts
+ NewYork = 2
+ Geneva = 3
+ Monaco = 4
+ Venice = 5
+ London = 6
+ Athens = 7
+ SanFrancisco = 8
+ Toronto = 9
+ Cairo = 11
+ LosAngeles = 12
+ Times = 20
+ Helvetica = 21
+ Courier = 22
+ Symbol = 23
+ Mobile = 24
+
+ # Styles
+ Bold = 0x0100
+ Italic = 0x0200
+ Underline = 0x0400
+ Outline = 0x0800
+ Shadow = 0x1000
+ Condense = 0x2000
+ Expand = 0x4000
+
+ def __init__(self, start_character, height, ascent, font_id, face,
+ size, color):
+ self.start_character = start_character
+ self.height = height
+ self.ascent = ascent
+ self.font_id = font_id
+ self.face = face
+ self.size = size
+ self.color = color
+
+ def __repr__(self):
+ styles = []
+ if self.face & Style.Bold:
+ styles.append('Style.Bold')
+ if self.face & Style.Italic:
+ styles.append('Style.Italic')
+ if self.face & Style.Underline:
+ styles.append('Style.Underline')
+ if self.face & Style.Outline:
+ styles.append('Style.Outline')
+ if self.face & Style.Shadow:
+ styles.append('Style.Shadow')
+ if self.face & Style.Condense:
+ styles.append('Style.Condense')
+ if self.face & Style.Expand:
+ styles.append('Style.Expand')
+ if self.face & ~0x4f00:
+ styles.append('%#06x' % (self.face & ~0x4f00))
+ if styles:
+ styles = '|'.join(styles)
+ else:
+ styles = '0'
+
+ font_revmap = {
+ 2: 'Style.NewYork',
+ 3: 'Style.Geneva',
+ 4: 'Style.Monaco',
+ 5: 'Style.Venice',
+ 6: 'Style.London',
+ 7: 'Style.Athens',
+ 8: 'Style.SanFrancisco',
+ 9: 'Style.Toronto',
+ 11: 'Style.Cairo',
+ 12: 'Style.LosAngeles',
+ 20: 'Style.Times',
+ 21: 'Style.Helvetica',
+ 22: 'Style.Courier',
+ 23: 'Style.Symbol',
+ 24: 'Style.Mobile'
+ }
+
+ font = font_revmap.get(self.font_id, '%s' % self.font_id)
+
+ return 'Style(%r, %r, %r, %s, %s, %r, %r)' % (
+ self.start_character,
+ self.height,
+ self.ascent,
+ font,
+ styles,
+ self.size,
+ self.color)
+
+class StyleResource (Resource):
+ def __init__(self, res_id, res_name, styles, res_attrs=0):
+ data = []
+ data.append(struct.pack(b'>H', len(styles)))
+ for style in styles:
+ data.append(struct.pack(b'>LHHHHHHHH',
+ style.start_character,
+ style.height,
+ style.ascent,
+ style.font_id,
+ style.face,
+ style.size,
+ style.color[0],
+ style.color[1],
+ style.color[2]))
+ super(StyleResource, self).__init__(b'styl', res_id, res_name,
+ b''.join(data), res_attrs)
+
+class ResourceFork (object):
+ def __init__(self, resources=None):
+ self.types = {}
+ self.attrs = 0
+ if resources is not None:
+ for res in resources:
+ self.add(res)
+
+ @classmethod
+ def from_data(clss, data):
+ if len(data) < 16:
+ raise ValueError('Bad resource data - data too short')
+
+ # Read the header
+ data_start, map_start, data_len, map_len = struct.unpack(b'>LLLL',
+ data[0:16])
+
+ if data_start + data_len > len(data):
+ raise ValueError('Bad resource data - data out of range')
+ if map_start + map_len > len(data):
+ raise ValueError('Bad resource data - map out of range')
+ if map_len < 30:
+ raise ValueError('Bad resource data - map too short')
+
+ # Read the map header
+ fork_attrs, type_offset, name_offset, max_type_ndx \
+ = struct.unpack(b'>HHHH', data[map_start + 22:map_start + 30])
+ num_types = max_type_ndx + 1
+
+ if type_offset + 8 * num_types > map_len:
+ raise ValueError('Bad resource data - type data outside map')
+
+ if name_offset > map_len:
+ raise ValueError('Bad resource data - names outside map')
+
+ type_offset += map_start
+ name_offset += map_start
+
+ result = ResourceFork()
+
+ # Now read the type list
+ for ntype in range(0, num_types):
+ type_pos = 2 + type_offset + 8 * ntype
+ res_type, max_item_ndx, ref_offset \
+ = struct.unpack(b'>4sHH', data[type_pos:type_pos+8])
+ num_items = max_item_ndx + 1
+
+ result.types[res_type] = []
+
+ ref_list_offset = type_offset + ref_offset
+ if ref_list_offset + 12 * num_items > map_start + map_len:
+ raise ValueError('Bad resource data - ref list outside map')
+
+ for nitem in range(0, num_items):
+ ref_elt = ref_list_offset + 12 * nitem
+ res_id, res_name_offset, data_offset \
+ = struct.unpack(b'>hHL', data[ref_elt:ref_elt+8])
+
+ res_attrs = data_offset >> 24
+ data_offset &= 0xffffff
+
+ if data_offset >= data_len:
+ raise ValueError('Bad resource data - item data out of range')
+
+ data_offset += data_start
+ res_len = struct.unpack(b'>L', data[data_offset:data_offset+4])[0]
+ if data_offset + res_len >= data_start + data_len:
+ raise ValueError('Bad resource data - item data too large')
+
+ res_data = data[data_offset + 4:data_offset + res_len + 4]
+
+ if res_name_offset == 0xffff:
+ res_name = None
+ else:
+ res_name_offset += name_offset
+ if res_name_offset >= map_start + map_len:
+ raise ValueError('Bad resource data - name out of range')
+ res_name_len = struct.unpack(b'B', data[res_name_offset])[0]
+ res_name = data[res_name_offset + 1:res_name_offset + res_name_len + 1]
+
+ result.types[res_type].append(Resource(res_type, res_id,
+ res_name,
+ res_data, res_attrs))
+
+ return result
+
+ @classmethod
+ def from_file(clss, filename):
+ with open(filename + '/..namedfork/rsrc', 'rb') as f:
+ data = f.read()
+ return clss.from_data(data)
+
+ def to_data(self):
+ data = []
+ data_len = 0
+ names = []
+ names_len = 0
+ types_len = len(self.types) * 8
+ types_data = []
+ reflist_data = []
+ reflist_len = 0
+
+ for res_type, items in self.types.items():
+ types_data.append(struct.pack(b'>4sHH',
+ res_type,
+ len(items) - 1,
+ 2 + types_len + reflist_len))
+ for item in items:
+ data_offset = data_len
+
+ if item.res_name is None:
+ name_offset = 65535
+ else:
+ name_offset = names_len
+ n = str(item.res_name)
+ names.append(struct.pack(b'B', len(n)) + n)
+ names_len += 1 + len(n)
+
+ if item.data is None:
+ data_len += 4
+ else:
+ data_len += 4 + (len(item.data) + 3) & ~3
+
+ reflist_len += 12
+ reflist_data.append(struct.pack(b'>hHLL',
+ item.res_id,
+ name_offset,
+ (item.res_attrs << 24) \
+ | data_offset,
+ 0))
+
+ # Header
+ data.append(struct.pack(b'>LLLL240s', 256, 256 + data_len, data_len,
+ 30 + types_len + reflist_len + names_len,
+ b''))
+
+ # Resource data
+ for res_type, items in self.types.items():
+ for item in items:
+ if item.data is None:
+ dlen = 0
+ else:
+ dlen = len(item.data)
+ plen = (dlen + 3) & ~3
+ data.append(struct.pack(b'>L', dlen))
+ if item.data is not None:
+ data.append(item.data)
+ if plen != dlen:
+ data.append(b'\0' * (plen - dlen))
+
+ # Resource map header
+ data.append(struct.pack(b'>16sLHHHHH',
+ b'', 0, 0,
+ self.attrs, 28, 30 + types_len + reflist_len,
+ len(self.types) - 1))
+
+ # Type list
+ data.append(b''.join(types_data))
+
+ # Reference lists
+ data.append(b''.join(reflist_data))
+
+ # Name list
+ data.append(b''.join(names))
+
+ return b''.join(data)
+
+ def write_to_file(self, filename):
+ with open(filename + '/..namedfork/rsrc', 'wb') as f:
+ f.write(self.to_data())
+
+ def __len__(self):
+ return len(self.types)
+
+ def __getitem__(self, key):
+ return self.types[key]
+
+ def __iter__(self):
+ for res_type, items in self.types.items():
+ for item in items:
+ yield item
+
+ def __repr__(self):
+ output = []
+ for item in self:
+ output.append(repr(item))
+ return 'ResourceFork([%s])' % ', '.join(output)
+
+ def add(self, res):
+ if res.res_type in self.types:
+ self.types[res.res_type].append(res)
+ else:
+ self.types[res.res_type] = [res]
+
+ def remove(self, res):
+ self.types[res.res_type].remove(res)
diff --git a/packages/electron-builder/vendor/ds_store/__init__.py b/packages/electron-builder/vendor/ds_store/__init__.py
new file mode 100644
index 00000000000..a6b81210495
--- /dev/null
+++ b/packages/electron-builder/vendor/ds_store/__init__.py
@@ -0,0 +1,3 @@
+from .store import DSStore, DSStoreEntry
+
+__all__ = ['DSStore', 'DSStoreEntry']
diff --git a/packages/electron-builder/vendor/ds_store/buddy.py b/packages/electron-builder/vendor/ds_store/buddy.py
new file mode 100644
index 00000000000..320768cd3e6
--- /dev/null
+++ b/packages/electron-builder/vendor/ds_store/buddy.py
@@ -0,0 +1,478 @@
+# -*- coding: utf-8 -*-
+import os
+import bisect
+import struct
+import binascii
+
+try:
+ {}.iterkeys
+ iterkeys = lambda x: x.iterkeys()
+except AttributeError:
+ iterkeys = lambda x: x.keys()
+try:
+ unicode
+except NameError:
+ unicode = str
+
+class BuddyError(Exception):
+ pass
+
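+# A Block is a buffered view of one allocated region of the buddy file;
+# writes modify an in-memory bytearray and are persisted on flush()/close().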
+class Block(object):
+ def __init__(self, allocator, offset, size):
+ self._allocator = allocator
+ self._offset = offset
+ self._size = size
+ self._value = bytearray(allocator.read(offset, size))
+ self._pos = 0
+ self._dirty = False
+
+ def __len__(self):
+ return self._size
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ def close(self):
+ if self._dirty:
+ self.flush()
+
+ def flush(self):
+ if self._dirty:
+ self._dirty = False
+ self._allocator.write(self._offset, self._value)
+
+ def invalidate(self):
+ self._dirty = False
+
+ def zero_fill(self):
+ len = self._size - self._pos
+ zeroes = b'\0' * len
+ self._value[self._pos:self._size] = zeroes
+ self._dirty = True
+
+ def tell(self):
+ return self._pos
+
+ def seek(self, pos, whence=os.SEEK_SET):
+ if whence == os.SEEK_CUR:
+ pos += self._pos
+ elif whence == os.SEEK_END:
+ pos = self._size - pos
+
+ if pos < 0 or pos > self._size:
+ raise ValueError('Seek out of range in Block instance')
+
+ self._pos = pos
+
+ def read(self, size_or_format):
+ if isinstance(size_or_format, (str, unicode, bytes)):
+ size = struct.calcsize(size_or_format)
+ fmt = size_or_format
+ else:
+ size = size_or_format
+ fmt = None
+
+ if self._size - self._pos < size:
+ raise BuddyError('Unable to read %lu bytes in block' % size)
+
+ data = self._value[self._pos:self._pos + size]
+ self._pos += size
+
+ if fmt is not None:
+ if isinstance(data, bytearray):
+ return struct.unpack_from(fmt, bytes(data))
+ else:
+ return struct.unpack(fmt, data)
+ else:
+ return data
+
+ def write(self, data_or_format, *args):
+ if len(args):
+ data = struct.pack(data_or_format, *args)
+ else:
+ data = data_or_format
+
+ if self._pos + len(data) > self._size:
+ raise ValueError('Attempt to write past end of Block')
+
+ self._value[self._pos:self._pos + len(data)] = data
+ self._pos += len(data)
+
+ self._dirty = True
+
+ def insert(self, data_or_format, *args):
+ if len(args):
+ data = struct.pack(data_or_format, *args)
+ else:
+ data = data_or_format
+
+ del self._value[-len(data):]
+ self._value[self._pos:self._pos] = data
+ self._pos += len(data)
+
+ self._dirty = True
+
+ def delete(self, size):
+ if self._pos + size > self._size:
+ raise ValueError('Attempt to delete past end of Block')
+ del self._value[self._pos:self._pos + size]
+ self._value += b'\0' * size
+ self._dirty = True
+
+ def __str__(self):
+ return binascii.b2a_hex(self._value)
+
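+# Buddy allocator backing the .DS_Store container format: a 'Bud1' header,
+# a table of block addresses (offset | log2(size) packed into one integer),
+# a name-to-block TOC, and 32 free lists, one per power-of-two block size.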
+class Allocator(object):
+ def __init__(self, the_file):
+ self._file = the_file
+ self._dirty = False
+
+ self._file.seek(0)
+
+ # Read the header
+ magic1, magic2, offset, size, offset2, self._unknown1 \
+ = self.read(-4, '>I4sIII16s')
+
+ if magic2 != b'Bud1' or magic1 != 1:
+ raise BuddyError('Not a buddy file')
+
+ if offset != offset2:
+ raise BuddyError('Root addresses differ')
+
+ self._root = Block(self, offset, size)
+
+ # Read the block offsets
+ count, self._unknown2 = self._root.read('>II')
+ self._offsets = []
+ c = (count + 255) & ~255
+ while c:
+ self._offsets += self._root.read('>256I')
+ c -= 256
+ self._offsets = self._offsets[:count]
+
+ # Read the TOC
+ self._toc = {}
+ count = self._root.read('>I')[0]
+ for n in range(count):
+ nlen = self._root.read('B')[0]
+ name = bytes(self._root.read(nlen))
+ value = self._root.read('>I')[0]
+ self._toc[name] = value
+
+ # Read the free lists
+ self._free = []
+ for n in range(32):
+ count = self._root.read('>I')
+ self._free.append(list(self._root.read('>%uI' % count)))
+
+ @classmethod
+ def open(cls, file_or_name, mode='r+'):
+ if isinstance(file_or_name, (str, unicode)):
+ if not 'b' in mode:
+ mode = mode[:1] + 'b' + mode[1:]
+ f = open(file_or_name, mode)
+ else:
+ f = file_or_name
+
+ if 'w' in mode:
+ # Create an empty file in this case
+ f.truncate()
+
+ # An empty root block needs 1264 bytes:
+ #
+ # 0 4 offset count
+ # 4 4 unknown
+ # 8 4 root block offset (2048)
+ # 12 255 * 4 padding (offsets are in multiples of 256)
+ # 1032 4 toc count (0)
+ # 1036 228 free list
+ # total 1264
+
+ # The free list will contain the following:
+ #
+ # 0 5 * 4 no blocks of width less than 5
+ # 20 6 * 8 1 block each of widths 5 to 10
+ # 68 4 no blocks of width 11 (allocated for the root)
+ # 72 19 * 8 1 block each of widths 12 to 30
+ # 224 4 no blocks of width 31
+ # total 228
+ #
+ # (The reason for this layout is that we allocate 2**5 bytes for
+ # the header, which splits the initial 2GB region into every size
+ # below 2**31, including *two* blocks of size 2**5, one of which
+ # we take. The root block itself then needs a block of size
+ # 2**11. Conveniently, each of these initial blocks will be
+ # located at offset 2**n where n is its width.)
+
+ # Write the header
+ header = struct.pack(b'>I4sIII16s',
+ 1, b'Bud1',
+ 2048, 1264, 2048,
+ b'\x00\x00\x10\x0c'
+ b'\x00\x00\x00\x87'
+ b'\x00\x00\x20\x0b'
+ b'\x00\x00\x00\x00')
+ f.write(header)
+ f.write(b'\0' * 2016)
+
+ # Write the root block
+ free_list = [struct.pack(b'>5I', 0, 0, 0, 0, 0)]
+ for n in range(5, 11):
+ free_list.append(struct.pack(b'>II', 1, 2**n))
+ free_list.append(struct.pack(b'>I', 0))
+ for n in range(12, 31):
+ free_list.append(struct.pack(b'>II', 1, 2**n))
+ free_list.append(struct.pack(b'>I', 0))
+
+ root = b''.join([struct.pack(b'>III', 1, 0, 2048 | 5),
+ struct.pack(b'>I', 0) * 255,
+ struct.pack(b'>I', 0)] + free_list)
+ f.write(root)
+
+ return Allocator(f)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ def close(self):
+ self.flush()
+ self._file.close()
+
+ def flush(self):
+ if self._dirty:
+ size = self._root_block_size()
+ self.allocate(size, 0)
+ with self.get_block(0) as rblk:
+ self._write_root_block_into(rblk)
+
+ addr = self._offsets[0]
+ offset = addr & ~0x1f
+ size = 1 << (addr & 0x1f)
+
+ self._file.seek(0, os.SEEK_SET)
+ self._file.write(struct.pack(b'>I4sIII16s',
+ 1, b'Bud1',
+ offset, size, offset,
+ self._unknown1))
+
+ self._dirty = False
+
+ self._file.flush()
+
+ def read(self, offset, size_or_format):
+ """Read data at `offset', or raise an exception. `size_or_format'
+ may either be a byte count, in which case we return raw data,
+ or a format string for `struct.unpack', in which case we
+ work out the size and unpack the data before returning it."""
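+ # Illustrative examples (not from the upstream source): with this
+ # dual API, self.read(0x800, 4) returns four raw bytes, whereas
+ # self.read(0x800, '>I') returns a 1-tuple such as (42,).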
+ # N.B. There is a fixed offset of four bytes(!)
+ self._file.seek(offset + 4, os.SEEK_SET)
+
+ if isinstance(size_or_format, (str, unicode)):
+ size = struct.calcsize(size_or_format)
+ fmt = size_or_format
+ else:
+ size = size_or_format
+ fmt = None
+
+ ret = self._file.read(size)
+ if len(ret) < size:
+ ret += b'\0' * (size - len(ret))
+
+ if fmt is not None:
+ if isinstance(ret, bytearray):
+ ret = struct.unpack_from(fmt, bytes(ret))
+ else:
+ ret = struct.unpack(fmt, ret)
+
+ return ret
+
+ def write(self, offset, data_or_format, *args):
+ """Write data at `offset', or raise an exception. `data_or_format'
+ may either be the data to write, or a format string for `struct.pack',
+ in which case we pack the additional arguments and write the
+ resulting data."""
+ # N.B. There is a fixed offset of four bytes(!)
+ self._file.seek(offset + 4, os.SEEK_SET)
+
+ if len(args):
+ data = struct.pack(data_or_format, *args)
+ else:
+ data = data_or_format
+
+ self._file.write(data)
+
+ def get_block(self, block):
+ try:
+ addr = self._offsets[block]
+ except IndexError:
+ return None
+
+ offset = addr & ~0x1f
+ size = 1 << (addr & 0x1f)
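+ # e.g. (illustrative) an address word of 0x80b decodes to offset
+ # 0x800 and size 1 << 0xb == 2048; the low five bits hold the width.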
+
+ return Block(self, offset, size)
+
+ def _root_block_size(self):
+ """Return the number of bytes required by the root block."""
+ # Offsets
+ size = 8
+ size += 4 * ((len(self._offsets) + 255) & ~255)
+
+ # TOC
+ size += 4
+ size += sum([5 + len(s) for s in self._toc])
+
+ # Free list
+ size += sum([4 + 4 * len(fl) for fl in self._free])
+
+ return size
+
+ def _write_root_block_into(self, block):
+ # Offsets
+ block.write('>II', len(self._offsets), self._unknown2)
+ block.write('>%uI' % len(self._offsets), *self._offsets)
+ extra = len(self._offsets) & 255
+ if extra:
+ block.write(b'\0\0\0\0' * (256 - extra))
+
+ # TOC
+ keys = list(self._toc.keys())
+ keys.sort()
+
+ block.write('>I', len(keys))
+ for k in keys:
+ block.write('B', len(k))
+ block.write(k)
+ block.write('>I', self._toc[k])
+
+ # Free list
+ for w, f in enumerate(self._free):
+ block.write('>I', len(f))
+ if len(f):
+ block.write('>%uI' % len(f), *f)
+
+ def _buddy(self, offset, width):
+ f = self._free[width]
+ b = offset ^ (1 << width)
+
+ try:
+ ndx = f.index(b)
+ except ValueError:
+ ndx = None
+
+ return (f, b, ndx)
+
+ def _release(self, offset, width):
+ # Coalesce
+ while True:
+ f,b,ndx = self._buddy(offset, width)
+
+ if ndx is None:
+ break
+
+ offset &= b
+ width += 1
+ del f[ndx]
+
+ # Add to the list
+ bisect.insort(f, offset)
+
+ # Mark as dirty
+ self._dirty = True
+
+ def _alloc(self, width):
+ w = width
+ while not self._free[w]:
+ w += 1
+ while w > width:
+ offset = self._free[w].pop(0)
+ w -= 1
+ self._free[w] = [offset, offset ^ (1 << w)]
+ self._dirty = True
+ return self._free[width].pop(0)
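+ # Worked example (illustrative): if only self._free[7] == [0x80] is
+ # populated, _alloc(5) pops 0x80 and splits it twice, leaving
+ # self._free[6] == [0xc0] and self._free[5] == [0xa0], then returns
+ # offset 0x80 as the newly allocated width-5 block.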
+
+ def allocate(self, bytes, block=None):
+ """Allocate or reallocate a block such that it has space for at least
+ `bytes' bytes."""
+ if block is None:
+ # Find the first unused block
+ try:
+ block = self._offsets.index(0)
+ except ValueError:
+ block = len(self._offsets)
+ self._offsets.append(0)
+
+ # Compute block width
+ width = max(bytes.bit_length(), 5)
+
+ addr = self._offsets[block]
+ offset = addr & ~0x1f
+
+ if addr:
+ blkwidth = addr & 0x1f
+ if blkwidth == width:
+ return block
+ self._release(offset, width)
+ self._offsets[block] = 0
+
+ offset = self._alloc(width)
+ self._offsets[block] = offset | width
+ return block
+
+ def release(self, block):
+ addr = self._offsets[block]
+
+ if addr:
+ width = addr & 0x1f
+ offset = addr & ~0x1f
+ self._release(offset, width)
+
+ if block == len(self._offsets) - 1:
+ del self._offsets[block]
+ else:
+ self._offsets[block] = 0
+
+ def __len__(self):
+ return len(self._toc)
+
+ def __getitem__(self, key):
+ if not isinstance(key, (str, unicode)):
+ raise TypeError('Keys must be of string type')
+ if not isinstance(key, bytes):
+ key = key.encode('latin_1')
+ return self._toc[key]
+
+ def __setitem__(self, key, value):
+ if not isinstance(key, (str, unicode)):
+ raise TypeError('Keys must be of string type')
+ if not isinstance(key, bytes):
+ key = key.encode('latin_1')
+ self._toc[key] = value
+ self._dirty = True
+
+ def __delitem__(self, key):
+ if not isinstance(key, (str, unicode)):
+ raise TypeError('Keys must be of string type')
+ if not isinstance(key, bytes):
+ key = key.encode('latin_1')
+ del self._toc[key]
+ self._dirty = True
+
+ def iterkeys(self):
+ return iterkeys(self._toc)
+
+ def keys(self):
+ return iterkeys(self._toc)
+
+ def __iter__(self):
+ return iterkeys(self._toc)
+
+ def __contains__(self, key):
+ return key in self._toc
+
diff --git a/packages/electron-builder/vendor/ds_store/store.py b/packages/electron-builder/vendor/ds_store/store.py
new file mode 100644
index 00000000000..b6f805b24c1
--- /dev/null
+++ b/packages/electron-builder/vendor/ds_store/store.py
@@ -0,0 +1,1251 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+
+import binascii
+import struct
+import biplist
+import mac_alias
+
+try:
+ next
+except NameError:
+ next = lambda x: x.next()
+try:
+ unicode
+except NameError:
+ unicode = str
+
+from . import buddy
+
+class ILocCodec(object):
+ @staticmethod
+ def encode(point):
+ return struct.pack(b'>IIII', point[0], point[1],
+ 0xffffffff, 0xffff0000)
+
+ @staticmethod
+ def decode(bytesData):
+ if isinstance(bytesData, bytearray):
+ x, y = struct.unpack_from(b'>II', bytes(bytesData[:8]))
+ else:
+ x, y = struct.unpack(b'>II', bytesData[:8])
+ return (x, y)
+
+class PlistCodec(object):
+ @staticmethod
+ def encode(plist):
+ return biplist.writePlistToString(plist)
+
+ @staticmethod
+ def decode(bytes):
+ return biplist.readPlistFromString(bytes)
+
+class BookmarkCodec(object):
+ @staticmethod
+ def encode(bmk):
+ return bmk.to_bytes()
+
+ @staticmethod
+ def decode(bytes):
+ return mac_alias.Bookmark.from_bytes(bytes)
+
+# This list tells the code how to decode particular kinds of entry in the
+# .DS_Store file. This is really a convenience, and we currently only
+# support a tiny subset of the possible entry types.
+codecs = {
+ b'Iloc': ILocCodec,
+ b'bwsp': PlistCodec,
+ b'lsvp': PlistCodec,
+ b'lsvP': PlistCodec,
+ b'icvp': PlistCodec,
+ b'pBBk': BookmarkCodec
+ }
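+# A round-trip sketch (illustrative, not part of the vendored code): an
+# 'Iloc' value is a 16-byte blob whose first eight bytes are the icon's
+# (x, y) position as big-endian 32-bit integers:
+#
+# >>> ILocCodec.encode((128, 128))[:8]
+# b'\x00\x00\x00\x80\x00\x00\x00\x80'
+# >>> ILocCodec.decode(ILocCodec.encode((128, 128)))
+# (128, 128)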
+
+class DSStoreEntry(object):
+ """Holds the data from an entry in a ``.DS_Store`` file. Note that this is
+ not meant to represent the entry itself---i.e. if you change the type
+ or value, your changes will *not* be reflected in the underlying file.
+
+ If you want to make a change, you should either use the :class:`DSStore`
+ object's :meth:`DSStore.insert` method (which will replace a key if it
+ already exists), or the mapping access mode for :class:`DSStore` (often
+ simpler anyway).
+ """
+ def __init__(self, filename, code, typecode, value=None):
+ if str != bytes and type(filename) == bytes:
+ filename = filename.decode('utf-8')
+
+ if not isinstance(code, bytes):
+ code = code.encode('latin_1')
+
+ self.filename = filename
+ self.code = code
+ self.type = typecode
+ self.value = value
+
+ @classmethod
+ def read(cls, block):
+ """Read a ``.DS_Store`` entry from the containing Block"""
+ # First read the filename
+ nlen = block.read(b'>I')[0]
+ filename = block.read(2 * nlen).decode('utf-16be')
+
+ # Next, read the code and type
+ code, typecode = block.read(b'>4s4s')
+
+ # Finally, read the data
+ if typecode == b'bool':
+ value = block.read(b'>?')[0]
+ elif typecode == b'long' or typecode == b'shor':
+ value = block.read(b'>I')[0]
+ elif typecode == b'blob':
+ vlen = block.read(b'>I')[0]
+ value = block.read(vlen)
+
+ codec = codecs.get(code, None)
+ if codec:
+ value = codec.decode(value)
+ typecode = codec
+ elif typecode == b'ustr':
+ vlen = block.read(b'>I')[0]
+ value = block.read(2 * vlen).decode('utf-16be')
+ elif typecode == b'type':
+ value = block.read(b'>4s')[0]
+ elif typecode == b'comp' or typecode == b'dutc':
+ value = block.read(b'>Q')[0]
+ else:
+ raise ValueError('Unknown type code "%s"' % typecode)
+
+ return DSStoreEntry(filename, code, typecode, value)
+
+ def __lt__(self, other):
+ if not isinstance(other, DSStoreEntry):
+ raise TypeError('Can only compare against other DSStoreEntry objects')
+ sfl = self.filename.lower()
+ ofl = other.filename.lower()
+ return (sfl < ofl
+ or (self.filename == other.filename
+ and self.code < other.code))
+
+ def __le__(self, other):
+ if not isinstance(other, DSStoreEntry):
+ raise TypeError('Can only compare against other DSStoreEntry objects')
+ sfl = self.filename.lower()
+ ofl = other.filename.lower()
+ return (sfl < ofl
+ or (sfl == ofl
+ and self.code <= other.code))
+
+ def __eq__(self, other):
+ if not isinstance(other, DSStoreEntry):
+ raise TypeError('Can only compare against other DSStoreEntry objects')
+ sfl = self.filename.lower()
+ ofl = other.filename.lower()
+ return (sfl == ofl
+ and self.code == other.code)
+
+ def __ne__(self, other):
+ if not isinstance(other, DSStoreEntry):
+ raise TypeError('Can only compare against other DSStoreEntry objects')
+ sfl = self.filename.lower()
+ ofl = other.filename.lower()
+ return (sfl != ofl
+ or self.code != other.code)
+
+ def __gt__(self, other):
+ if not isinstance(other, DSStoreEntry):
+ raise TypeError('Can only compare against other DSStoreEntry objects')
+ sfl = self.filename.lower()
+ ofl = other.filename.lower()
+
+ selfCode = self.code
+ if str != bytes and type(selfCode) is bytes:
+ selfCode = selfCode.decode('utf-8')
+ otherCode = other.code
+ if str != bytes and type(otherCode) is bytes:
+ otherCode = otherCode.decode('utf-8')
+
+ return (sfl > ofl or (sfl == ofl and selfCode > otherCode))
+
+ def __ge__(self, other):
+ if not isinstance(other, DSStoreEntry):
+ raise TypeError('Can only compare against other DSStoreEntry objects')
+ sfl = self.filename.lower()
+ ofl = other.filename.lower()
+ return (sfl > ofl
+ or (sfl == ofl
+ and self.code >= other.code))
+
+ def __cmp__(self, other):
+ if not isinstance(other, DSStoreEntry):
+ raise TypeError('Can only compare against other DSStoreEntry objects')
+ r = cmp(self.filename.lower(), other.filename.lower())
+ if r:
+ return r
+ return cmp(self.code, other.code)
+
+ def byte_length(self):
+ """Compute the length of this entry, in bytes"""
+ utf16 = self.filename.encode('utf-16be')
+ l = 4 + len(utf16) + 8
+
+ if isinstance(self.type, unicode):
+ entry_type = self.type.encode('latin_1')
+ value = self.value
+ elif isinstance(self.type, (bytes, str)):
+ entry_type = self.type
+ value = self.value
+ else:
+ entry_type = b'blob'
+ value = self.type.encode(self.value)
+
+ if entry_type == b'bool':
+ l += 1
+ elif entry_type == b'long' or entry_type == b'shor':
+ l += 4
+ elif entry_type == b'blob':
+ l += 4 + len(value)
+ elif entry_type == b'ustr':
+ utf16 = value.encode('utf-16be')
+ l += 4 + len(utf16)
+ elif entry_type == b'type':
+ l += 4
+ elif entry_type == b'comp' or entry_type == b'dutc':
+ l += 8
+ else:
+ raise ValueError('Unknown type code "%s"' % entry_type)
+
+ return l
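+ # e.g. (illustrative) an 'Iloc' entry for u'bam' measures
+ # 4 + 6 (UTF-16 name) + 8 (code + type) + 4 + 16 (blob) = 38 bytes.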
+
+ def write(self, block, insert=False):
+ """Write this entry to the specified Block"""
+ if insert:
+ w = block.insert
+ else:
+ w = block.write
+
+ if isinstance(self.type, unicode):
+ entry_type = self.type.encode('latin_1')
+ value = self.value
+ elif isinstance(self.type, (bytes, str)):
+ entry_type = self.type
+ value = self.value
+ else:
+ entry_type = b'blob'
+ value = self.type.encode(self.value)
+
+ utf16 = self.filename.encode('utf-16be')
+ w(b'>I', len(utf16) // 2)
+ w(utf16)
+ w(b'>4s4s', self.code, entry_type)
+
+ if entry_type == b'bool':
+ w(b'>?', value)
+ elif entry_type == b'long' or entry_type == b'shor':
+ w(b'>I', value)
+ elif entry_type == b'blob':
+ w(b'>I', len(value))
+ w(value)
+ elif entry_type == b'ustr':
+ utf16 = value.encode('utf-16be')
+ w(b'>I', len(utf16) // 2)
+ w(utf16)
+ elif entry_type == b'type':
+ if isinstance(value, unicode):
+ value = value.encode('latin_1')
+ w(b'>4s', value)
+ elif entry_type == b'comp' or entry_type == b'dutc':
+ w(b'>Q', value)
+ else:
+ raise ValueError('Unknown type code "%s"' % entry_type)
+
+ def __repr__(self):
+ return '<%s %s>' % (self.filename, self.code)
+
+class DSStore(object):
+ """Python interface to a ``.DS_Store`` file. Works by manipulating the file
+ on the disk---so this code will work with ``.DS_Store`` files for *very*
+ large directories.
+
+ A :class:`DSStore` object can be used as if it was a mapping, e.g.::
+
+ d['foobar.dat']['Iloc']
+
+ will fetch the "Iloc" record for "foobar.dat", or raise :class:`KeyError` if
+ there is no such record. If used in this manner, the :class:`DSStore` object
+ will return (type, value) tuples, unless the type is "blob" and the module
+ knows how to decode it.
+
+ Currently, we know how to decode "Iloc", "bwsp", "lsvp", "lsvP" and "icvp"
+ blobs. "Iloc" decodes to an (x, y) tuple, while the others are all decoded
+ using ``biplist``.
+
+ Assignment also works, e.g.::
+
+ d['foobar.dat']['note'] = ('ustr', u'Hello World!')
+
+ as does deletion with ``del``::
+
+ del d['foobar.dat']['note']
+
+ This is usually going to be the most convenient interface, though
+ occasionally (for instance when creating a new ``.DS_Store`` file) you
+ may wish to drop down to using :class:`DSStoreEntry` objects directly."""
+ def __init__(self, store):
+ self._store = store
+ self._superblk = self._store['DSDB']
+ with self._get_block(self._superblk) as s:
+ self._rootnode, self._levels, self._records, \
+ self._nodes, self._page_size = s.read(b'>IIIII')
+ self._min_usage = 2 * self._page_size // 3
+ self._dirty = False
+
+ @classmethod
+ def open(cls, file_or_name, mode='r+', initial_entries=None):
+ """Open a ``.DS_Store`` file; pass either a Python file object, or a
+ filename in the ``file_or_name`` argument and a file access mode in
+ the ``mode`` argument. If you are creating a new file using the "w"
+ or "w+" modes, you may also specify a list of entries with which
+ to initialise the file."""
+ store = buddy.Allocator.open(file_or_name, mode)
+
+ if mode == 'w' or mode == 'w+':
+ superblk = store.allocate(20)
+ store['DSDB'] = superblk
+ page_size = 4096
+
+ if not initial_entries:
+ root = store.allocate(page_size)
+
+ with store.get_block(root) as rootblk:
+ rootblk.zero_fill()
+
+ with store.get_block(superblk) as s:
+ s.write(b'>IIIII', root, 0, 0, 1, page_size)
+ else:
+ # Make sure they're in sorted order
+ initial_entries = list(initial_entries)
+ initial_entries.sort()
+
+ # Construct the tree
+ current_level = initial_entries
+ next_level = []
+ levels = []
+ ptr_size = 0
+ node_count = 0
+ while True:
+ total = 8
+ nodes = []
+ node = []
+ for e in current_level:
+ new_total = total + ptr_size + e.byte_length()
+ if new_total > page_size:
+ nodes.append(node)
+ next_level.append(e)
+ total = 8
+ node = []
+ else:
+ total = new_total
+ node.append(e)
+ if node:
+ nodes.append(node)
+
+ node_count += len(nodes)
+ levels.append(nodes)
+
+ if len(nodes) == 1:
+ break
+
+ current_level = next_level
+ next_level = []
+ ptr_size = 4
+
+ # Allocate nodes
+ ptrs = [store.allocate(page_size) for n in range(node_count)]
+
+ # Generate nodes
+ pointers = []
+ prev_pointers = None
+ for level in levels:
+ ppndx = 0
+ lptrs = ptrs[-len(level):]
+ del ptrs[-len(level):]
+ for node in level:
+ ndx = lptrs.pop(0)
+ if prev_pointers is None:
+ with store.get_block(ndx) as block:
+ block.write(b'>II', 0, len(node))
+ for e in node:
+ e.write(block)
+ else:
+ next_node = prev_pointers[ppndx + len(node)]
+ node_ptrs = prev_pointers[ppndx:ppndx+len(node)]
+
+ with store.get_block(ndx) as block:
+ block.write(b'>II', next_node, len(node))
+ for ptr, e in zip(node_ptrs, node):
+ block.write(b'>I', ptr)
+ e.write(block)
+
+ pointers.append(ndx)
+ prev_pointers = pointers
+ pointers = []
+
+ root = prev_pointers[0]
+
+ with store.get_block(superblk) as s:
+ s.write(b'>IIIII', root, len(levels), len(initial_entries),
+ node_count, page_size)
+
+ return DSStore(store)
+
+ def _get_block(self, number):
+ return self._store.get_block(number)
+
+ def flush(self):
+ """Flush any dirty data back to the file."""
+ if self._dirty:
+ self._dirty = False
+
+ with self._get_block(self._superblk) as s:
+ s.write(b'>IIIII', self._rootnode, self._levels, self._records,
+ self._nodes, self._page_size)
+ self._store.flush()
+
+ def close(self):
+ """Flush dirty data and close the underlying file."""
+ self.flush()
+ self._store.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.close()
+
+ # Internal B-Tree nodes look like this:
+ #
+ # [ next | count | (ptr0 | rec0) | (ptr1 | rec1) ... (ptrN | recN) ]
+
+ # Leaf nodes look like this:
+ #
+ # [ 0 | count | rec0 | rec1 ... recN ]
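+ # For instance (illustrative), a freshly zeroed page reads back as an
+ # empty leaf: struct.unpack(b'>II', page[:8]) == (0, 0), i.e. no
+ # "next" pointer and a record count of zero.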
+
+ # Iterate over the tree, starting at `node'
+ def _traverse(self, node):
+ if node is None:
+ node = self._rootnode
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+ if next_node:
+ for n in range(count):
+ ptr = block.read(b'>I')[0]
+ for e in self._traverse(ptr):
+ yield e
+ e = DSStoreEntry.read(block)
+ yield e
+ for e in self._traverse(next_node):
+ yield e
+ else:
+ for n in range(count):
+ e = DSStoreEntry.read(block)
+ yield e
+
+ # Display the data in `node'
+ def _dump_node(self, node):
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+ print('next: %u\ncount: %u\n' % (next_node, count))
+ for n in range(count):
+ if next_node:
+ ptr = block.read(b'>I')[0]
+ print('%8u ' % ptr, end=' ')
+ else:
+ print(' ', end=' ')
+ e = DSStoreEntry.read(block)
+ print(e, ' (%u)' % e.byte_length())
+ print('used: %u' % block.tell())
+
+ # Display the data in the super block
+ def _dump_super(self):
+ print('root: %u\nlevels: %u\nrecords: %u\nnodes: %u\npage-size: %u' \
+ % (self._rootnode, self._levels, self._records,
+ self._nodes, self._page_size))
+
+ # Splits entries across two blocks, returning one pivot
+ #
+ # Tries to balance the block usage across the two as best it can
+ def _split2(self, blocks, entries, pointers, before, internal):
+ left_block = blocks[0]
+ right_block = blocks[1]
+
+ count = len(entries)
+
+ # Find the feasible splits
+ best_split = None
+ best_diff = None
+ total = before[count]
+
+ if 8 + total <= self._page_size:
+ # We can use a *single* node for this
+ best_split = count
+ else:
+ # Split into two nodes
+ for n in range(1, count - 1):
+ left_size = 8 + before[n]
+ right_size = 8 + total - before[n + 1]
+
+ if left_size > self._page_size:
+ break
+ if right_size > self._page_size:
+ continue
+
+ diff = abs(left_size - right_size)
+
+ if best_split is None or diff < best_diff:
+ best_split = n
+ best_diff = diff
+
+ if best_split is None:
+ return None
+
+ # Write the nodes
+ left_block.seek(0)
+ if internal:
+ next_node = pointers[best_split]
+ else:
+ next_node = 0
+ left_block.write(b'>II', next_node, best_split)
+
+ for n in range(best_split):
+ if internal:
+ left_block.write(b'>I', pointers[n])
+ entries[n].write(left_block)
+
+ left_block.zero_fill()
+
+ if best_split == count:
+ return []
+
+ right_block.seek(0)
+ if internal:
+ next_node = pointers[count]
+ else:
+ next_node = 0
+ right_block.write(b'>II', next_node, count - best_split - 1)
+
+ for n in range(best_split + 1, count):
+ if internal:
+ right_block.write(b'>I', pointers[n])
+ entries[n].write(right_block)
+
+ right_block.zero_fill()
+
+ pivot = entries[best_split]
+
+ return [pivot]
+
+ def _split(self, node, entry, right_ptr=0):
+ self._nodes += 1
+ self._dirty = True
+ new_right = self._store.allocate(self._page_size)
+ with self._get_block(node) as block, \
+ self._get_block(new_right) as right_block:
+
+ # First, measure and extract all the elements
+ entry_size = entry.byte_length()
+ entry_pos = None
+ next_node, count = block.read(b'>II')
+ if next_node:
+ entry_size += 4
+ pointers = []
+ entries = []
+ before = []
+ total = 0
+ for n in range(count):
+ pos = block.tell()
+ if next_node:
+ ptr = block.read(b'>I')[0]
+ pointers.append(ptr)
+ e = DSStoreEntry.read(block)
+ if entry_pos is None and e > entry:
+ entry_pos = n
+ entries.append(entry)
+ pointers.append(right_ptr)
+ before.append(total)
+ total += entry_size
+ entries.append(e)
+ before.append(total)
+ total += block.tell() - pos
+ if next_node:
+ pointers.append(next_node)
+ if entry_pos is None:
+ # The new entry sorts after every existing record
+ entry_pos = count
+ entries.append(entry)
+ pointers.append(right_ptr)
+ before.append(total)
+ total += entry_size
+ before.append(total)
+
+ pivot = self._split2([block, right_block],
+ entries, pointers, before,
+ bool(next_node))[0]
+
+ self._records += 1
+ self._nodes += 1
+ self._dirty = True
+
+ return (pivot, new_right)
+
+ # Allocate a new root node containing the element `pivot' and the pointers
+ # `left' and `right'
+ def _new_root(self, left, pivot, right):
+ new_root = self._store.allocate(self._page_size)
+ with self._get_block(new_root) as block:
+ block.write(b'>III', right, 1, left)
+ pivot.write(block)
+ self._rootnode = new_root
+ self._levels += 1
+ self._nodes += 1
+ self._dirty = True
+
+ # Insert an entry into an inner node; `path' is the path from the root
+ # to `node', not including `node' itself. `right_ptr' is the new node
+ # pointer (inserted to the RIGHT of `entry')
+ def _insert_inner(self, path, node, entry, right_ptr):
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+ insert_pos = None
+ insert_ndx = None
+ n = 0
+ while n < count:
+ pos = block.tell()
+ ptr = block.read(b'>I')[0]
+ e = DSStoreEntry.read(block)
+ if e == entry:
+ if n == count - 1:
+ right_ptr = next_node
+ next_node = ptr
+ block.seek(pos)
+ else:
+ right_ptr = block.read(b'>I')[0]
+ block.seek(pos + 4)
+ insert_pos = pos
+ insert_ndx = n
+ block.delete(e.byte_length() + 4)
+ count -= 1
+ self._records += 1
+ self._dirty = True
+ continue
+ elif insert_pos is None and e > entry:
+ insert_pos = pos
+ insert_ndx = n
+ n += 1
+ if insert_pos is None:
+ insert_pos = block.tell()
+ insert_ndx = count
+ remaining = self._page_size - block.tell()
+
+ if remaining < entry.byte_length() + 4:
+ pivot, new_right = self._split(node, entry, right_ptr)
+ if path:
+ self._insert_inner(path[:-1], path[-1], pivot, new_right)
+ else:
+ self._new_root(node, pivot, new_right)
+ else:
+ if insert_ndx == count:
+ block.seek(insert_pos)
+ block.write(b'>I', next_node)
+ entry.write(block)
+ next_node = right_ptr
+ else:
+ block.seek(insert_pos + 4)
+ entry.write(block, True)
+ block.insert('>I', right_ptr)
+ block.seek(0)
+ count += 1
+ block.write(b'>II', next_node, count)
+ self._records += 1
+ self._dirty = True
+
+ # Insert `entry' into the leaf node `node'
+ def _insert_leaf(self, path, node, entry):
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+ insert_pos = None
+ insert_ndx = None
+ n = 0
+ while n < count:
+ pos = block.tell()
+ e = DSStoreEntry.read(block)
+ if e == entry:
+ insert_pos = pos
+ insert_ndx = n
+ block.seek(pos)
+ block.delete(e.byte_length())
+ count -= 1
+ self._records += 1
+ self._dirty = True
+ continue
+ elif insert_pos is None and e > entry:
+ insert_pos = pos
+ insert_ndx = n
+ n += 1
+ if insert_pos is None:
+ insert_pos = block.tell()
+ insert_ndx = count
+ remaining = self._page_size - block.tell()
+
+ if remaining < entry.byte_length():
+ pivot, new_right = self._split(node, entry)
+ if path:
+ self._insert_inner(path[:-1], path[-1], pivot, new_right)
+ else:
+ self._new_root(node, pivot, new_right)
+ else:
+ block.seek(insert_pos)
+ entry.write(block, True)
+ block.seek(0)
+ count += 1
+ block.write(b'>II', next_node, count)
+ self._records += 1
+ self._dirty = True
+
+ def insert(self, entry):
+ """Insert ``entry`` (which should be a :class:`DSStoreEntry`)
+ into the B-Tree."""
+ path = []
+ node = self._rootnode
+ while True:
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+ if next_node:
+ for n in range(count):
+ ptr = block.read(b'>I')[0]
+ e = DSStoreEntry.read(block)
+ if entry < e:
+ next_node = ptr
+ break
+ elif entry == e:
+ # If we find an existing entry the same, replace it
+ self._insert_inner(path, node, entry, None)
+ return
+ path.append(node)
+ node = next_node
+ else:
+ self._insert_leaf(path, node, entry)
+ return
+
+ # Return usage information for the specified `node'
+ def _block_usage(self, node):
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+
+ for n in range(count):
+ if next_node:
+ ptr = block.read(b'>I')[0]
+ e = DSStoreEntry.read(block)
+
+ used = block.tell()
+
+ return (count, used)
+
+ # Splits entries across three blocks, returning two pivots
+ def _split3(self, blocks, entries, pointers, before, internal):
+ count = len(entries)
+
+ # Find the feasible splits
+ best_split = None
+ best_diff = None
+ total = before[count]
+ for n in range(1, count - 3):
+ left_size = 8 + before[n]
+ remaining = 16 + total - before[n + 1]
+
+ if left_size > self._page_size:
+ break
+ if remaining > 2 * self._page_size:
+ continue
+
+ for m in range(n + 2, count - 1):
+ mid_size = 8 + before[m] - before[n + 1]
+ right_size = 8 + total - before[m + 1]
+
+ if mid_size > self._page_size:
+ break
+ if right_size > self._page_size:
+ continue
+
+ diff = abs(left_size - mid_size) * abs(right_size - mid_size)
+
+ if best_split is None or diff < best_diff:
+ best_split = (n, m, count)
+ best_diff = diff
+
+ if best_split is None:
+ return None
+
+ # Write the nodes
+ prev_split = -1
+ for block, split in zip(blocks, best_split):
+ block.seek(0)
+ if internal:
+ next_node = pointers[split]
+ else:
+ next_node = 0
+ block.write(b'>II', next_node, split)
+
+ for n in range(prev_split + 1, split):
+ if internal:
+ block.write(b'>I', pointers[n])
+ entries[n].write(block)
+
+ block.zero_fill()
+
+ prev_split = split
+
+ return (entries[best_split[0]], entries[best_split[1]])
+
+ # Extract all of the entries from the specified list of `blocks',
+ # separating them by the specified `pivots'. Also computes the
+ # amount of space used before each entry.
+ def _extract(self, blocks, pivots):
+ pointers = []
+ entries = []
+ before = []
+ total = 0
+ ppivots = pivots + [None]
+ for b,p in zip(blocks, ppivots):
+ b.seek(0)
+ next_node, count = b.read(b'>II')
+ for n in range(count):
+ pos = b.tell()
+ if next_node:
+ ptr = b.read(b'>I')[0]
+ pointers.append(ptr)
+ e = DSStoreEntry.read(b)
+ entries.append(e)
+ before.append(total)
+ total += b.tell() - pos
+ if next_node:
+ pointers.append(next_node)
+ if p:
+ entries.append(p)
+ before.append(total)
+ total += p.byte_length()
+ if next_node:
+ total += 4
+ before.append(total)
+
+ return (entries, pointers, before)
+
+ # Rebalance the specified `node', whose path from the root is `path'.
+ def _rebalance(self, path, node):
+ # Can't rebalance the root
+ if not path:
+ return
+
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+
+ with self._get_block(path[-1]) as parent:
+ # Find the left and right siblings and respective pivots
+ parent_next, parent_count = parent.read(b'>II')
+ left_pos = None
+ left_node = None
+ left_pivot = None
+ node_pos = None
+ right_pos = None
+ right_node = None
+ right_pivot = None
+ prev_e = prev_ptr = prev_pos = None
+ for n in range(parent_count):
+ pos = parent.tell()
+ ptr = parent.read(b'>I')[0]
+ e = DSStoreEntry.read(parent)
+
+ if ptr == node:
+ node_pos = pos
+ right_pivot = e
+ left_pos = prev_pos
+ left_pivot = prev_e
+ left_node = prev_ptr
+ elif prev_ptr == node:
+ right_node = ptr
+ right_pos = pos
+ break
+
+ prev_e = e
+ prev_ptr = ptr
+ prev_pos = pos
+
+ if parent_next == node:
+ node_pos = parent.tell()
+ left_pos = prev_pos
+ left_pivot = prev_e
+ left_node = prev_ptr
+ elif right_node is None:
+ right_node = parent_next
+ right_pos = parent.tell()
+
+ parent_used = parent.tell()
+
+ if left_node and right_node:
+ with self._get_block(left_node) as left, \
+ self._get_block(right_node) as right:
+ blocks = [left, block, right]
+ pivots = [left_pivot, right_pivot]
+
+ entries, pointers, before = self._extract(blocks, pivots)
+
+ # If there's a chance that we could use two pages instead
+ # of three, go for it
+ pivots = self._split2(blocks, entries, pointers,
+ before, bool(next_node))
+ if pivots is None:
+ ptrs = [left_node, node, right_node]
+ pivots = self._split3(blocks, entries, pointers,
+ before, bool(next_node))
+ else:
+ if pivots:
+ ptrs = [left_node, node]
+ else:
+ ptrs = [left_node]
+ self._store.release(node)
+ self._nodes -= 1
+ node = left_node
+ self._store.release(right_node)
+ self._nodes -= 1
+ self._dirty = True
+
+ # Remove the pivots from the parent
+ with self._get_block(path[-1]) as parent:
+ if right_node == parent_next:
+ parent.seek(left_pos)
+ parent.delete(right_pos - left_pos)
+ parent_next = left_node
+ else:
+ parent.seek(left_pos + 4)
+ parent.delete(right_pos - left_pos)
+ parent.seek(0)
+ parent_count -= 2
+ parent.write(b'>II', parent_next, parent_count)
+ self._records -= 2
+
+ # Replace with those in pivots
+ for e,rp in zip(pivots, ptrs[1:]):
+ self._insert_inner(path[:-1], path[-1], e, rp)
+ elif left_node:
+ with self._get_block(left_node) as left:
+ blocks = [left, block]
+ pivots = [left_pivot]
+
+ entries, pointers, before = self._extract(blocks, pivots)
+
+ pivots = self._split2(blocks, entries, pointers,
+ before, bool(next_node))
+
+ # Remove the pivot from the parent
+ with self._get_block(path[-1]) as parent:
+ if node == parent_next:
+ parent.seek(left_pos)
+ parent.delete(node_pos - left_pos)
+ parent_next = left_node
+ else:
+ parent.seek(left_pos + 4)
+ parent.delete(node_pos - left_pos)
+ parent.seek(0)
+ parent_count -= 1
+ parent.write(b'>II', parent_next, parent_count)
+ self._records -= 1
+
+ # Replace the pivot
+ if pivots:
+ self._insert_inner(path[:-1], path[-1], pivots[0], node)
+ elif right_node:
+ with self._get_block(right_node) as right:
+ blocks = [block, right]
+ pivots = [right_pivot]
+
+ entries, pointers, before = self._extract(blocks, pivots)
+
+ pivots = self._split2(blocks, entries, pointers,
+ before, bool(next_node))
+
+ # Remove the pivot from the parent
+ with self._get_block(path[-1]) as parent:
+ if right_node == parent_next:
+ parent.seek(node_pos)
+ parent.delete(right_pos - node_pos)
+ parent_next = node
+ else:
+ parent.seek(node_pos + 4)
+ parent.delete(right_pos - node_pos)
+ parent.seek(0)
+ parent_count -= 1
+ parent.write(b'>II', parent_next, parent_count)
+ self._records -= 1
+
+ # Replace the pivot
+ if pivots:
+ self._insert_inner(path[:-1], path[-1], pivots[0],
+ right_node)
+
+ if len(path) == 1 and not parent_count:
+ self._store.release(path[-1])
+ self._nodes -= 1
+ self._dirty = True
+ self._rootnode = node
+ else:
+ count, used = self._block_usage(path[-1])
+
+ if used < self._page_size // 2:
+ self._rebalance(path[:-1], path[-1])
+
+ # Delete from the leaf node `node'. `filename_lc' has already been
+ # lower-cased.
+ def _delete_leaf(self, node, filename_lc, code):
+ found = False
+
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+
+ for n in range(count):
+ pos = block.tell()
+ e = DSStoreEntry.read(block)
+ if e.filename.lower() == filename_lc \
+ and (code is None or e.code == code):
+ block.seek(pos)
+ block.delete(e.byte_length())
+ found = True
+
+ # This does not affect the loop; THIS IS NOT A BUG
+ count -= 1
+
+ self._records -= 1
+ self._dirty = True
+
+ if found:
+ used = block.tell()
+
+ block.seek(0)
+ block.write(b'>II', next_node, count)
+
+ return used < self._page_size // 2
+ else:
+ return False
+
+ # Remove the largest entry from the subtree starting at `node' (with
+ # path from root `path'). Returns a tuple (rebalance, entry) where
+ # rebalance is either None if no rebalancing is required, or a
+ # (path, node) tuple giving the details of the node to rebalance.
+ def _take_largest(self, path, node):
+ path = list(path)
+ rebalance = None
+ while True:
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+
+ if next_node:
+ path.append(node)
+ node = next_node
+ continue
+
+ for n in range(count):
+ pos = block.tell()
+ e = DSStoreEntry.read(block)
+
+ count -= 1
+ block.seek(0)
+ block.write(b'>II', next_node, count)
+
+ if pos < self._page_size // 2:
+ rebalance = (path, node)
+ break
+
+ return rebalance, e
+
+ # Delete an entry from an inner node, `node'
+ def _delete_inner(self, path, node, filename_lc, code):
+ rebalance = False
+
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+
+ for n in range(count):
+ pos = block.tell()
+ ptr = block.read(b'>I')[0]
+ e = DSStoreEntry.read(block)
+ if e.filename.lower() == filename_lc \
+ and (code is None or e.code == code):
+ # Take the largest from the left subtree
+ rebalance, largest = self._take_largest(path, ptr)
+
+ # Delete this entry
+ if n == count - 1:
+ right_ptr = next_node
+ next_node = ptr
+ block.seek(pos)
+ else:
+ right_ptr = block.read(b'>I')[0]
+ block.seek(pos + 4)
+
+ block.delete(e.byte_length() + 4)
+
+ count -= 1
+ block.seek(0)
+ block.write(b'>II', next_node, count)
+
+ self._records -= 1
+ self._dirty = True
+
+ break
+
+ # Replace the pivot value
+ self._insert_inner(path, node, largest, right_ptr)
+
+ # Rebalance from the node we stole from
+ if rebalance:
+ self._rebalance(rebalance[0], rebalance[1])
+ return True
+ return False
+
+ def delete(self, filename, code):
+ """Delete an item, identified by ``filename`` and ``code``
+ from the B-Tree."""
+ if isinstance(filename, DSStoreEntry):
+ code = filename.code
+ filename = filename.filename
+
+ # If we're deleting *every* node for "filename", we must recurse
+ if code is None:
+ ###TODO: Fix this so we can do bulk deletes
+ raise ValueError('You must delete items individually. Sorry')
+
+ # Otherwise, we're deleting *one* specific node
+ filename_lc = filename.lower()
+ path = []
+ node = self._rootnode
+ while True:
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+ if next_node:
+ for n in range(count):
+ ptr = block.read(b'>I')[0]
+ e = DSStoreEntry.read(block)
+ e_lc = e.filename.lower()
+ if filename_lc < e_lc \
+ or (filename_lc == e_lc and code < e.code):
+ next_node = ptr
+ break
+ elif filename_lc == e_lc and code == e.code:
+ self._delete_inner(path, node, filename_lc, code)
+ return
+ path.append(node)
+ node = next_node
+ else:
+ if self._delete_leaf(node, filename_lc, code):
+ self._rebalance(path, node)
+ return
+
+ # Find implementation
+ def _find(self, node, filename_lc, code=None):
+ if code is not None and not isinstance(code, bytes):
+ code = code.encode('latin_1')
+ with self._get_block(node) as block:
+ next_node, count = block.read(b'>II')
+ if next_node:
+ for n in range(count):
+ ptr = block.read(b'>I')[0]
+ e = DSStoreEntry.read(block)
+ if filename_lc < e.filename.lower():
+ for e in self._find(ptr, filename_lc, code):
+ yield e
+ return
+ elif filename_lc == e.filename.lower():
+ if code is None or (code and code < e.code):
+ for e in self._find(ptr, filename_lc, code):
+ yield e
+ if code is None or code == e.code:
+ yield e
+ elif code < e.code:
+ return
+ for e in self._find(next_node, filename_lc, code):
+ yield e
+ else:
+ for n in range(count):
+ e = DSStoreEntry.read(block)
+ if filename_lc == e.filename.lower():
+ if code is None or code == e.code:
+ yield e
+ elif code < e.code:
+ return
+
+ def find(self, filename, code=None):
+ """Returns a generator that will iterate over matching entries in
+ the B-Tree."""
+ if isinstance(filename, DSStoreEntry):
+ code = filename.code
+ filename = filename.filename
+
+ filename_lc = filename.lower()
+
+ return self._find(self._rootnode, filename_lc, code)
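+ # e.g. (illustrative) list(store.find('bam', 'Iloc')) yields the
+ # single 'Iloc' entry for 'bam', while store.find('bam') iterates
+ # over every entry recorded for that filename.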
+
+ def __len__(self):
+ return self._records
+
+ def __iter__(self):
+ return self._traverse(self._rootnode)
+
+ class Partial(object):
+ """This is used to implement indexing."""
+ def __init__(self, store, filename):
+ self._store = store
+ self._filename = filename
+
+ def __getitem__(self, code):
+ if code is None:
+ raise KeyError('no such key - [%s][None]' % self._filename)
+
+ if not isinstance(code, bytes):
+ code = code.encode('latin_1')
+
+ try:
+ item = next(self._store.find(self._filename, code))
+ except StopIteration:
+ raise KeyError('no such key - [%s][%s]' % (self._filename,
+ code))
+
+ if not isinstance(item.type, (bytes, str, unicode)):
+ return item.value
+
+ return (item.type, item.value)
+
+ def __setitem__(self, code, value):
+ if code is None:
+ raise KeyError('bad key - [%s][None]' % self._filename)
+
+ if not isinstance(code, bytes):
+ code = code.encode('latin_1')
+
+ codec = codecs.get(code, None)
+ if codec:
+ entry_type = codec
+ entry_value = value
+ else:
+ entry_type = value[0]
+ entry_value = value[1]
+
+ self._store.insert(DSStoreEntry(self._filename, code,
+ entry_type, entry_value))
+
+ def __delitem__(self, code):
+ if code is None:
+ raise KeyError('no such key - [%s][None]' % self._filename)
+
+ self._store.delete(self._filename, code)
+
+ def __iter__(self):
+ for item in self._store.find(self._filename):
+ yield item
+
+ def __getitem__(self, filename):
+ return self.Partial(self, filename)
+
diff --git a/packages/electron-builder/vendor/mac_alias/__init__.py b/packages/electron-builder/vendor/mac_alias/__init__.py
new file mode 100644
index 00000000000..7eb31410735
--- /dev/null
+++ b/packages/electron-builder/vendor/mac_alias/__init__.py
@@ -0,0 +1,27 @@
+from .alias import *
+from .bookmark import *
+
+__all__ = [ 'ALIAS_KIND_FILE', 'ALIAS_KIND_FOLDER',
+ 'ALIAS_HFS_VOLUME_SIGNATURE',
+ 'ALIAS_FIXED_DISK', 'ALIAS_NETWORK_DISK', 'ALIAS_400KB_FLOPPY_DISK',
+ 'ALIAS_800KB_FLOPPY_DISK', 'ALIAS_1_44MB_FLOPPY_DISK',
+ 'ALIAS_EJECTABLE_DISK',
+ 'ALIAS_NO_CNID',
+ 'kBookmarkPath', 'kBookmarkCNIDPath', 'kBookmarkFileProperties',
+ 'kBookmarkFileName', 'kBookmarkFileID', 'kBookmarkFileCreationDate',
+ 'kBookmarkTOCPath', 'kBookmarkVolumePath',
+ 'kBookmarkVolumeURL', 'kBookmarkVolumeName', 'kBookmarkVolumeUUID',
+ 'kBookmarkVolumeSize', 'kBookmarkVolumeCreationDate',
+ 'kBookmarkVolumeProperties', 'kBookmarkContainingFolder',
+ 'kBookmarkUserName', 'kBookmarkUID', 'kBookmarkWasFileReference',
+ 'kBookmarkCreationOptions', 'kBookmarkURLLengths',
+ 'kBookmarkSecurityExtension',
+ 'AppleShareInfo',
+ 'VolumeInfo',
+ 'TargetInfo',
+ 'Alias',
+ 'Bookmark',
+ 'Data',
+ 'URL' ]
+
+
diff --git a/packages/electron-builder/vendor/mac_alias/alias.py b/packages/electron-builder/vendor/mac_alias/alias.py
new file mode 100644
index 00000000000..512d2221bcb
--- /dev/null
+++ b/packages/electron-builder/vendor/mac_alias/alias.py
@@ -0,0 +1,607 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+from __future__ import division
+
+import struct
+import datetime
+import io
+import re
+import os
+import os.path
+import stat
+import sys
+
+if sys.platform == 'darwin':
+ from . import osx
+
+try:
+ long
+except NameError:
+ long = int
+
+from .utils import *
+
+ALIAS_KIND_FILE = 0
+ALIAS_KIND_FOLDER = 1
+
+ALIAS_HFS_VOLUME_SIGNATURE = b'H+'
+
+ALIAS_FIXED_DISK = 0
+ALIAS_NETWORK_DISK = 1
+ALIAS_400KB_FLOPPY_DISK = 2
+ALIAS_800KB_FLOPPY_DISK = 3
+ALIAS_1_44MB_FLOPPY_DISK = 4
+ALIAS_EJECTABLE_DISK = 5
+
+ALIAS_NO_CNID = 0xffffffff
+
+def encode_utf8(s):
+ if isinstance(s, bytes):
+ return s
+ return s.encode('utf-8')
+
+def decode_utf8(s):
+ if isinstance(s, bytes):
+ return s.decode('utf-8')
+ return s
+
+class AppleShareInfo (object):
+ def __init__(self, zone=None, server=None, user=None):
+ #: The AppleShare zone
+ self.zone = zone
+ #: The AFP server
+ self.server = server
+ #: The username
+ self.user = user
+
+ def __repr__(self):
+ return 'AppleShareInfo(%r,%r,%r)' % (self.zone, self.server, self.user)
+
+class VolumeInfo (object):
+ def __init__(self, name, creation_date, fs_type, disk_type,
+ attribute_flags, fs_id, appleshare_info=None,
+ driver_name=None, posix_path=None, disk_image_alias=None,
+ dialup_info=None, network_mount_info=None):
+ #: The name of the volume on which the target resides
+ self.name = name
+
+ #: The creation date of the target's volume
+ self.creation_date = creation_date
+
+ #: The filesystem type (a two character code, e.g. ``b'H+'`` for HFS+)
+ self.fs_type = fs_type
+
+ #: The type of disk; should be one of
+ #:
+ #: * ALIAS_FIXED_DISK
+ #: * ALIAS_NETWORK_DISK
+ #: * ALIAS_400KB_FLOPPY_DISK
+ #: * ALIAS_800KB_FLOPPY_DISK
+ #: * ALIAS_1_44MB_FLOPPY_DISK
+ #: * ALIAS_EJECTABLE_DISK
+ self.disk_type = disk_type
+
+ #: Filesystem attribute flags (from HFS volume header)
+ self.attribute_flags = attribute_flags
+
+ #: Filesystem identifier
+ self.fs_id = fs_id
+
+ #: AppleShare information (for automatic remounting of network shares)
+ #: *(optional)*
+ self.appleshare_info = appleshare_info
+
+ #: Driver name (*probably* contains a disk driver name on older Macs)
+ #: *(optional)*
+ self.driver_name = driver_name
+
+ #: POSIX path of the mount point of the target's volume
+ #: *(optional)*
+ self.posix_path = posix_path
+
+ #: :class:`Alias` object pointing at the disk image on which the
+ #: target's volume resides *(optional)*
+ self.disk_image_alias = disk_image_alias
+
+ #: Dialup information (for automatic establishment of dialup connections)
+ self.dialup_info = dialup_info
+
+ #: Network mount information (for automatic remounting)
+ self.network_mount_info = network_mount_info
+
+ def __repr__(self):
+ args = ['name', 'creation_date', 'fs_type', 'disk_type',
+ 'attribute_flags', 'fs_id']
+ values = []
+ for a in args:
+ v = getattr(self, a)
+ values.append(repr(v))
+
+ kwargs = ['appleshare_info', 'driver_name', 'posix_path',
+ 'disk_image_alias', 'dialup_info', 'network_mount_info']
+ for a in kwargs:
+ v = getattr(self, a)
+ if v is not None:
+ values.append('%s=%r' % (a, v))
+ return 'VolumeInfo(%s)' % ','.join(values)
+
+class TargetInfo (object):
+ def __init__(self, kind, filename, folder_cnid, cnid, creation_date,
+ creator_code, type_code, levels_from=-1, levels_to=-1,
+ folder_name=None, cnid_path=None, carbon_path=None,
+ posix_path=None, user_home_prefix_len=None):
+ #: Either ALIAS_KIND_FILE or ALIAS_KIND_FOLDER
+ self.kind = kind
+
+ #: The filename of the target
+ self.filename = filename
+
+ #: The CNID (Catalog Node ID) of the target's containing folder;
+ #: CNIDs are similar to but different than traditional UNIX inode
+ #: numbers
+ self.folder_cnid = folder_cnid
+
+ #: The CNID (Catalog Node ID) of the target
+ self.cnid = cnid
+
+ #: The target's *creation* date.
+ self.creation_date = creation_date
+
+ #: The target's Mac creator code (a four-character binary string)
+ self.creator_code = creator_code
+
+ #: The target's Mac type code (a four-character binary string)
+ self.type_code = type_code
+
+ #: The depth of the alias? Always seems to be -1 on OS X.
+ self.levels_from = levels_from
+
+ #: The depth of the target? Always seems to be -1 on OS X.
+ self.levels_to = levels_to
+
+ #: The (POSIX) name of the target's containing folder. *(optional)*
+ self.folder_name = folder_name
+
+ #: The path from the volume root as a sequence of CNIDs. *(optional)*
+ self.cnid_path = cnid_path
+
+ #: The Carbon path of the target *(optional)*
+ self.carbon_path = carbon_path
+
+ #: The POSIX path of the target relative to the volume root. Note
+ #: that this may or may not have a leading '/' character, but it is
+ #: always relative to the containing volume. *(optional)*
+ self.posix_path = posix_path
+
+ #: If the path points into a user's home folder, the number of folders
+ #: deep that we go before we get to that home folder. *(optional)*
+ self.user_home_prefix_len = user_home_prefix_len
+
+ def __repr__(self):
+ args = ['kind', 'filename', 'folder_cnid', 'cnid', 'creation_date',
+ 'creator_code', 'type_code']
+ values = []
+ for a in args:
+ v = getattr(self, a)
+ values.append(repr(v))
+
+ if self.levels_from != -1:
+ values.append('levels_from=%r' % self.levels_from)
+ if self.levels_to != -1:
+ values.append('levels_to=%r' % self.levels_to)
+
+ kwargs = ['folder_name', 'cnid_path', 'carbon_path',
+ 'posix_path', 'user_home_prefix_len']
+ for a in kwargs:
+ v = getattr(self, a)
+ values.append('%s=%r' % (a, v))
+
+ return 'TargetInfo(%s)' % ','.join(values)
+
+TAG_CARBON_FOLDER_NAME = 0
+TAG_CNID_PATH = 1
+TAG_CARBON_PATH = 2
+TAG_APPLESHARE_ZONE = 3
+TAG_APPLESHARE_SERVER_NAME = 4
+TAG_APPLESHARE_USERNAME = 5
+TAG_DRIVER_NAME = 6
+TAG_NETWORK_MOUNT_INFO = 9
+TAG_DIALUP_INFO = 10
+TAG_UNICODE_FILENAME = 14
+TAG_UNICODE_VOLUME_NAME = 15
+TAG_HIGH_RES_VOLUME_CREATION_DATE = 16
+TAG_HIGH_RES_CREATION_DATE = 17
+TAG_POSIX_PATH = 18
+TAG_POSIX_PATH_TO_MOUNTPOINT = 19
+TAG_RECURSIVE_ALIAS_OF_DISK_IMAGE = 20
+TAG_USER_HOME_LENGTH_PREFIX = 21
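+# Each tagged field is serialized as struct.pack(b'>hh', tag, length)
+# followed by the value, padded to an even byte count; e.g.
+# (illustrative) a TAG_POSIX_PATH of b'/Applications' becomes
+# struct.pack(b'>hh', 18, 13) + b'/Applications' + b'\0'.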
+
+class Alias (object):
+ def __init__(self, appinfo=b'\0\0\0\0', version=2, volume=None,
+ target=None, extra=[]):
+ """Construct a new :class:`Alias` object with the specified
+ contents."""
+
+ #: Application specific information (four byte byte-string)
+ self.appinfo = appinfo
+
+ #: Version (we support only version 2)
+ self.version = version
+
+ #: A :class:`VolumeInfo` object describing the target's volume
+ self.volume = volume
+
+ #: A :class:`TargetInfo` object describing the target
+ self.target = target
+
+ #: A list of extra `(tag, value)` pairs
+ self.extra = list(extra)
+
+ @classmethod
+ def _from_fd(cls, b):
+ appinfo, recsize, version = struct.unpack(b'>4shh', b.read(8))
+
+ if recsize < 150:
+ raise ValueError('Incorrect alias length')
+
+ if version != 2:
+ raise ValueError('Unsupported alias version %u' % version)
+
+ kind, volname, voldate, fstype, disktype, \
+ folder_cnid, filename, cnid, crdate, creator_code, type_code, \
+ levels_from, levels_to, volattrs, volfsid, reserved = \
+ struct.unpack(b'>h28pI2shI64pII4s4shhI2s10s', b.read(142))
+
+ voldate = mac_epoch + datetime.timedelta(seconds=voldate)
+ crdate = mac_epoch + datetime.timedelta(seconds=crdate)
+
+ alias = Alias()
+ alias.appinfo = appinfo
+
+ alias.volume = VolumeInfo (volname.replace('/',':'),
+ voldate, fstype, disktype,
+ volattrs, volfsid)
+ alias.target = TargetInfo (kind, filename.replace('/',':'),
+ folder_cnid, cnid,
+ crdate, creator_code, type_code)
+ alias.target.levels_from = levels_from
+ alias.target.levels_to = levels_to
+
+ tag = struct.unpack(b'>h', b.read(2))[0]
+
+ while tag != -1:
+ length = struct.unpack(b'>h', b.read(2))[0]
+ value = b.read(length)
+ if length & 1:
+ b.read(1)
+
+ if tag == TAG_CARBON_FOLDER_NAME:
+ alias.target.folder_name = value.replace('/',':')
+ elif tag == TAG_CNID_PATH:
+ alias.target.cnid_path = struct.unpack(b'>%uI' % (length // 4),
+ value)
+ elif tag == TAG_CARBON_PATH:
+ alias.target.carbon_path = value
+ elif tag == TAG_APPLESHARE_ZONE:
+ if alias.volume.appleshare_info is None:
+ alias.volume.appleshare_info = AppleShareInfo()
+ alias.volume.appleshare_info.zone = value
+ elif tag == TAG_APPLESHARE_SERVER_NAME:
+ if alias.volume.appleshare_info is None:
+ alias.volume.appleshare_info = AppleShareInfo()
+ alias.volume.appleshare_info.server = value
+ elif tag == TAG_APPLESHARE_USERNAME:
+ if alias.volume.appleshare_info is None:
+ alias.volume.appleshare_info = AppleShareInfo()
+ alias.volume.appleshare_info.user = value
+ elif tag == TAG_DRIVER_NAME:
+ alias.volume.driver_name = value
+ elif tag == TAG_NETWORK_MOUNT_INFO:
+ alias.volume.network_mount_info = value
+ elif tag == TAG_DIALUP_INFO:
+ alias.volume.dialup_info = value
+ elif tag == TAG_UNICODE_FILENAME:
+ alias.target.filename = value[2:].decode('utf-16be')
+ elif tag == TAG_UNICODE_VOLUME_NAME:
+ alias.volume.name = value[2:].decode('utf-16be')
+ elif tag == TAG_HIGH_RES_VOLUME_CREATION_DATE:
+ seconds = struct.unpack(b'>Q', value)[0] / 65536.0
+ alias.volume.creation_date \
+ = mac_epoch + datetime.timedelta(seconds=seconds)
+ elif tag == TAG_HIGH_RES_CREATION_DATE:
+ seconds = struct.unpack(b'>Q', value)[0] / 65536.0
+ alias.target.creation_date \
+ = mac_epoch + datetime.timedelta(seconds=seconds)
+ elif tag == TAG_POSIX_PATH:
+ alias.target.posix_path = value
+ elif tag == TAG_POSIX_PATH_TO_MOUNTPOINT:
+ alias.volume.posix_path = value
+ elif tag == TAG_RECURSIVE_ALIAS_OF_DISK_IMAGE:
+ alias.volume.disk_image_alias = Alias.from_bytes(value)
+ elif tag == TAG_USER_HOME_LENGTH_PREFIX:
+ alias.target.user_home_prefix_len = struct.unpack(b'>h', value)[0]
+ else:
+ alias.extra.append((tag, value))
+
+ tag = struct.unpack(b'>h', b.read(2))[0]
+
+ return alias
+
+ @classmethod
+ def from_bytes(cls, bytes):
+ """Construct an :class:`Alias` object given binary Alias data."""
+ with io.BytesIO(bytes) as b:
+ return cls._from_fd(b)
+
+ @classmethod
+ def for_file(cls, path):
+ """Create an :class:`Alias` that points at the specified file."""
+ if sys.platform != 'darwin':
+ raise Exception('Not implemented (requires special support)')
+
+ path = encode_utf8(path)
+
+ a = Alias()
+
+ # Find the filesystem
+ st = osx.statfs(path)
+ vol_path = st.f_mntonname
+
+ # Grab its attributes
+ attrs = [osx.ATTR_CMN_CRTIME,
+ osx.ATTR_VOL_NAME,
+ 0, 0, 0]
+ volinfo = osx.getattrlist(vol_path, attrs, 0)
+
+ vol_crtime = volinfo[0]
+ vol_name = encode_utf8(volinfo[1])
+
+ # Also grab various attributes of the file
+ attrs = [(osx.ATTR_CMN_OBJTYPE
+ | osx.ATTR_CMN_CRTIME
+ | osx.ATTR_CMN_FNDRINFO
+ | osx.ATTR_CMN_FILEID
+ | osx.ATTR_CMN_PARENTID), 0, 0, 0, 0]
+ info = osx.getattrlist(path, attrs, osx.FSOPT_NOFOLLOW)
+
+ if info[0] == osx.VDIR:
+ kind = ALIAS_KIND_FOLDER
+ else:
+ kind = ALIAS_KIND_FILE
+
+ cnid = info[3]
+ folder_cnid = info[4]
+
+ dirname, filename = os.path.split(path)
+
+ if dirname == b'' or dirname == b'.':
+ dirname = os.getcwd()
+
+ foldername = os.path.basename(dirname)
+
+ creation_date = info[1]
+
+ if kind == ALIAS_KIND_FILE:
+ creator_code = struct.pack(b'I', info[2].fileInfo.fileCreator)
+ type_code = struct.pack(b'I', info[2].fileInfo.fileType)
+ else:
+ creator_code = b'\0\0\0\0'
+ type_code = b'\0\0\0\0'
+
+ a.target = TargetInfo(kind, filename, folder_cnid, cnid, creation_date,
+ creator_code, type_code)
+ a.volume = VolumeInfo(vol_name, vol_crtime, b'H+',
+ ALIAS_FIXED_DISK, 0, b'\0\0')
+
+ a.target.folder_name = foldername
+ a.volume.posix_path = vol_path
+
+ rel_path = os.path.relpath(path, vol_path)
+
+ # Leave off the initial '/' if vol_path is '/' (no idea why)
+ if vol_path == b'/':
+ a.target.posix_path = rel_path
+ else:
+ a.target.posix_path = b'/' + rel_path
+
+ # Construct the Carbon and CNID paths
+ carbon_path = []
+ cnid_path = []
+ head, tail = os.path.split(rel_path)
+ if not tail:
+ head, tail = os.path.split(head)
+ while head or tail:
+ if head:
+ attrs = [osx.ATTR_CMN_FILEID, 0, 0, 0, 0]
+ info = osx.getattrlist(os.path.join(vol_path, head), attrs, 0)
+ cnid_path.append(info[0])
+ carbon_tail = tail.replace(b':',b'/')
+ carbon_path.insert(0, carbon_tail)
+ head, tail = os.path.split(head)
+
+ carbon_path = vol_name + b':' + b':\0'.join(carbon_path)
+
+ a.target.carbon_path = carbon_path
+ a.target.cnid_path = cnid_path
+
+ return a
+
+ def _to_fd(self, b):
+ # We'll come back and fix the length when we're done
+ pos = b.tell()
+ b.write(struct.pack(b'>4shh', self.appinfo, 0, self.version))
+
+ carbon_volname = encode_utf8(self.volume.name).replace(b':',b'/')
+ carbon_filename = encode_utf8(self.target.filename).replace(b':',b'/')
+ voldate = (self.volume.creation_date - mac_epoch).total_seconds()
+ crdate = (self.target.creation_date - mac_epoch).total_seconds()
+
+ # NOTE: crdate should be in local time, but that's system dependent
+ # (so doing so is ridiculous, and nothing could rely on it).
+ b.write(struct.pack(b'>h28pI2shI64pII4s4shhI2s10s',
+ self.target.kind,
+ carbon_volname, int(voldate),
+ self.volume.fs_type,
+ self.volume.disk_type,
+ self.target.folder_cnid,
+ carbon_filename,
+ self.target.cnid,
+ int(crdate),
+ self.target.creator_code,
+ self.target.type_code,
+ self.target.levels_from,
+ self.target.levels_to,
+ self.volume.attribute_flags,
+ self.volume.fs_id,
+ b'\0'*10))
+
+ # Excuse the odd order; we're copying Finder
+ if self.target.folder_name:
+ carbon_foldername = encode_utf8(self.target.folder_name)\
+ .replace(b':',b'/')
+ b.write(struct.pack(b'>hh', TAG_CARBON_FOLDER_NAME,
+ len(carbon_foldername)))
+ b.write(carbon_foldername)
+ if len(carbon_foldername) & 1:
+ b.write(b'\0')
+
+ b.write(struct.pack(b'>hhQhhQ',
+ TAG_HIGH_RES_VOLUME_CREATION_DATE,
+ 8, long(voldate * 65536),
+ TAG_HIGH_RES_CREATION_DATE,
+ 8, long(crdate * 65536)))
+
+ if self.target.cnid_path:
+ cnid_path = struct.pack(b'>%uI' % len(self.target.cnid_path),
+ *self.target.cnid_path)
+ b.write(struct.pack(b'>hh', TAG_CNID_PATH,
+ len(cnid_path)))
+ b.write(cnid_path)
+
+ if self.target.carbon_path:
+ carbon_path = encode_utf8(self.target.carbon_path)
+ b.write(struct.pack(b'>hh', TAG_CARBON_PATH,
+ len(carbon_path)))
+ b.write(carbon_path)
+ if len(carbon_path) & 1:
+ b.write(b'\0')
+
+ if self.volume.appleshare_info:
+ ai = self.volume.appleshare_info
+ if ai.zone:
+ b.write(struct.pack(b'>hh', TAG_APPLESHARE_ZONE,
+ len(ai.zone)))
+ b.write(ai.zone)
+ if len(ai.zone) & 1:
+ b.write(b'\0')
+ if ai.server:
+ b.write(struct.pack(b'>hh', TAG_APPLESHARE_SERVER_NAME,
+ len(ai.server)))
+ b.write(ai.server)
+ if len(ai.server) & 1:
+ b.write(b'\0')
+ if ai.user:
+ b.write(struct.pack(b'>hh', TAG_APPLESHARE_USERNAME,
+ len(ai.user)))
+ b.write(ai.user)
+ if len(ai.user) & 1:
+ b.write(b'\0')
+
+ if self.volume.driver_name:
+ driver_name = encode_utf8(self.volume.driver_name)
+ b.write(struct.pack(b'>hh', TAG_DRIVER_NAME,
+ len(driver_name)))
+ b.write(driver_name)
+ if len(driver_name) & 1:
+ b.write(b'\0')
+
+ if self.volume.network_mount_info:
+ b.write(struct.pack(b'>hh', TAG_NETWORK_MOUNT_INFO,
+ len(self.volume.network_mount_info)))
+ b.write(self.volume.network_mount_info)
+ if len(self.volume.network_mount_info) & 1:
+ b.write(b'\0')
+
+ if self.volume.dialup_info:
+ b.write(struct.pack(b'>hh', TAG_DIALUP_INFO,
+ len(self.volume.dialup_info)))
+ b.write(self.volume.dialup_info)
+ if len(self.volume.dialup_info) & 1:
+ b.write(b'\0')
+
+ utf16 = decode_utf8(self.target.filename)\
+ .replace(':','/').encode('utf-16-be')
+ b.write(struct.pack(b'>hhh', TAG_UNICODE_FILENAME,
+ len(utf16) + 2,
+ len(utf16) // 2))
+ b.write(utf16)
+
+ utf16 = decode_utf8(self.volume.name)\
+ .replace(':','/').encode('utf-16-be')
+ b.write(struct.pack(b'>hhh', TAG_UNICODE_VOLUME_NAME,
+ len(utf16) + 2,
+ len(utf16) // 2))
+ b.write(utf16)
+
+ if self.target.posix_path:
+ posix_path = encode_utf8(self.target.posix_path)
+ b.write(struct.pack(b'>hh', TAG_POSIX_PATH,
+ len(posix_path)))
+ b.write(posix_path)
+ if len(posix_path) & 1:
+ b.write(b'\0')
+
+ if self.volume.posix_path:
+ posix_path = encode_utf8(self.volume.posix_path)
+ b.write(struct.pack(b'>hh', TAG_POSIX_PATH_TO_MOUNTPOINT,
+ len(posix_path)))
+ b.write(posix_path)
+ if len(posix_path) & 1:
+ b.write(b'\0')
+
+ if self.volume.disk_image_alias:
+ d = self.volume.disk_image_alias.to_bytes()
+ b.write(struct.pack(b'>hh', TAG_RECURSIVE_ALIAS_OF_DISK_IMAGE,
+ len(d)))
+ b.write(d)
+ if len(d) & 1:
+ b.write(b'\0')
+
+ if self.target.user_home_prefix_len is not None:
+ b.write(struct.pack(b'>hhh', TAG_USER_HOME_LENGTH_PREFIX,
+ 2, self.target.user_home_prefix_len))
+
+ for t,v in self.extra:
+ b.write(struct.pack(b'>hh', t, len(v)))
+ b.write(v)
+ if len(v) & 1:
+ b.write(b'\0')
+
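+        # A tag of -1 (with zero length) terminates the record list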
+ b.write(struct.pack(b'>hh', -1, 0))
+
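+        # Backpatch the total record length into the 16-bit size field at
+        # offset 4 of the header (written as a placeholder earlier)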
+ blen = b.tell() - pos
+ b.seek(pos + 4, os.SEEK_SET)
+ b.write(struct.pack(b'>h', blen))
+
+ def to_bytes(self):
+ """Returns the binary representation for this :class:`Alias`."""
+ with io.BytesIO() as b:
+ self._to_fd(b)
+ return b.getvalue()
+
+ def __str__(self):
+        return '<Alias target=%s>' % self.target.filename
+
+ def __repr__(self):
+ values = []
+ if self.appinfo != b'\0\0\0\0':
+ values.append('appinfo=%r' % self.appinfo)
+ if self.version != 2:
+ values.append('version=%r' % self.version)
+ if self.volume is not None:
+ values.append('volume=%r' % self.volume)
+ if self.target is not None:
+ values.append('target=%r' % self.target)
+ if self.extra:
+ values.append('extra=%r' % self.extra)
+ return 'Alias(%s)' % ','.join(values)
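+
+# Rough usage sketch (macOS only; Alias.for_file is defined elsewhere in
+# this module):
+#
+#     alias = Alias.for_file('/Volumes/MyApp/MyApp.app')
+#     record = alias.to_bytes()   # the binary alias record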
diff --git a/packages/electron-builder/vendor/mac_alias/bookmark.py b/packages/electron-builder/vendor/mac_alias/bookmark.py
new file mode 100644
index 00000000000..0de6b9404b6
--- /dev/null
+++ b/packages/electron-builder/vendor/mac_alias/bookmark.py
@@ -0,0 +1,665 @@
+# -*- coding: utf-8 -*-
+#
+# This file implements the Apple "bookmark" format, which is the replacement
+# for the old-fashioned alias format. The details of this format were
+# reverse engineered; some things are still not entirely clear.
+#
+from __future__ import unicode_literals, print_function
+
+import struct
+import uuid
+import datetime
+import os
+import sys
+import pprint
+
+try:
+ from urlparse import urljoin
+except ImportError:
+ from urllib.parse import urljoin
+
+if sys.platform == 'darwin':
+ from . import osx
+
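+# Python 2 spelling; redefined below for Python 3, where dicts have no
+# iteritems()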
+def iteritems(x):
+ return x.iteritems()
+
+try:
+ unicode
+except NameError:
+ unicode = str
+ long = int
+ xrange = range
+ def iteritems(x):
+ return x.items()
+
+from .utils import *
+
+BMK_DATA_TYPE_MASK = 0xffffff00
+BMK_DATA_SUBTYPE_MASK = 0x000000ff
+
+BMK_STRING = 0x0100
+BMK_DATA = 0x0200
+BMK_NUMBER = 0x0300
+BMK_DATE = 0x0400
+BMK_BOOLEAN = 0x0500
+BMK_ARRAY = 0x0600
+BMK_DICT = 0x0700
+BMK_UUID = 0x0800
+BMK_URL = 0x0900
+BMK_NULL = 0x0a00
+
+BMK_ST_ZERO = 0x0000
+BMK_ST_ONE = 0x0001
+
+BMK_BOOLEAN_ST_FALSE = 0x0000
+BMK_BOOLEAN_ST_TRUE = 0x0001
+
+# Subtypes for BMK_NUMBER are really CFNumberType values
+kCFNumberSInt8Type = 1
+kCFNumberSInt16Type = 2
+kCFNumberSInt32Type = 3
+kCFNumberSInt64Type = 4
+kCFNumberFloat32Type = 5
+kCFNumberFloat64Type = 6
+kCFNumberCharType = 7
+kCFNumberShortType = 8
+kCFNumberIntType = 9
+kCFNumberLongType = 10
+kCFNumberLongLongType = 11
+kCFNumberFloatType = 12
+kCFNumberDoubleType = 13
+kCFNumberCFIndexType = 14
+kCFNumberNSIntegerType = 15
+kCFNumberCGFloatType = 16
+
+# Resource property flags (from CFURLPriv.h)
+kCFURLResourceIsRegularFile = 0x00000001
+kCFURLResourceIsDirectory = 0x00000002
+kCFURLResourceIsSymbolicLink = 0x00000004
+kCFURLResourceIsVolume = 0x00000008
+kCFURLResourceIsPackage = 0x00000010
+kCFURLResourceIsSystemImmutable = 0x00000020
+kCFURLResourceIsUserImmutable = 0x00000040
+kCFURLResourceIsHidden = 0x00000080
+kCFURLResourceHasHiddenExtension = 0x00000100
+kCFURLResourceIsApplication = 0x00000200
+kCFURLResourceIsCompressed = 0x00000400
+kCFURLResourceIsSystemCompressed = 0x00000400
+kCFURLCanSetHiddenExtension = 0x00000800
+kCFURLResourceIsReadable = 0x00001000
+kCFURLResourceIsWriteable = 0x00002000
+kCFURLResourceIsExecutable = 0x00004000
+kCFURLIsAliasFile = 0x00008000
+kCFURLIsMountTrigger = 0x00010000
+
+# Volume property flags (from CFURLPriv.h)
+kCFURLVolumeIsLocal = 0x1 #
+kCFURLVolumeIsAutomount = 0x2 #
+kCFURLVolumeDontBrowse = 0x4 #
+kCFURLVolumeIsReadOnly = 0x8 #
+kCFURLVolumeIsQuarantined = 0x10
+kCFURLVolumeIsEjectable = 0x20 #
+kCFURLVolumeIsRemovable = 0x40 #
+kCFURLVolumeIsInternal = 0x80 #
+kCFURLVolumeIsExternal = 0x100 #
+kCFURLVolumeIsDiskImage = 0x200 #
+kCFURLVolumeIsFileVault = 0x400
+kCFURLVolumeIsLocaliDiskMirror = 0x800
+kCFURLVolumeIsiPod = 0x1000 #
+kCFURLVolumeIsiDisk = 0x2000
+kCFURLVolumeIsCD = 0x4000
+kCFURLVolumeIsDVD = 0x8000
+kCFURLVolumeIsDeviceFileSystem = 0x10000
+kCFURLVolumeSupportsPersistentIDs = 0x100000000
+kCFURLVolumeSupportsSearchFS = 0x200000000
+kCFURLVolumeSupportsExchange = 0x400000000
+# reserved 0x800000000
+kCFURLVolumeSupportsSymbolicLinks = 0x1000000000
+kCFURLVolumeSupportsDenyModes = 0x2000000000
+kCFURLVolumeSupportsCopyFile = 0x4000000000
+kCFURLVolumeSupportsReadDirAttr = 0x8000000000
+kCFURLVolumeSupportsJournaling = 0x10000000000
+kCFURLVolumeSupportsRename = 0x20000000000
+kCFURLVolumeSupportsFastStatFS = 0x40000000000
+kCFURLVolumeSupportsCaseSensitiveNames = 0x80000000000
+kCFURLVolumeSupportsCasePreservedNames = 0x100000000000
+kCFURLVolumeSupportsFLock = 0x200000000000
+kCFURLVolumeHasNoRootDirectoryTimes = 0x400000000000
+kCFURLVolumeSupportsExtendedSecurity = 0x800000000000
+kCFURLVolumeSupports2TBFileSize = 0x1000000000000
+kCFURLVolumeSupportsHardLinks = 0x2000000000000
+kCFURLVolumeSupportsMandatoryByteRangeLocks = 0x4000000000000
+kCFURLVolumeSupportsPathFromID = 0x8000000000000
+# reserved 0x10000000000000
+kCFURLVolumeIsJournaling = 0x20000000000000
+kCFURLVolumeSupportsSparseFiles = 0x40000000000000
+kCFURLVolumeSupportsZeroRuns = 0x80000000000000
+kCFURLVolumeSupportsVolumeSizes = 0x100000000000000
+kCFURLVolumeSupportsRemoteEvents = 0x200000000000000
+kCFURLVolumeSupportsHiddenFiles = 0x400000000000000
+kCFURLVolumeSupportsDecmpFSCompression = 0x800000000000000
+kCFURLVolumeHas64BitObjectIDs = 0x1000000000000000
+kCFURLVolumePropertyFlagsAll = 0xffffffffffffffff
+
+BMK_URL_ST_ABSOLUTE = 0x0001
+BMK_URL_ST_RELATIVE = 0x0002
+
+# Bookmark keys
+# = 0x1003
+kBookmarkPath = 0x1004 # Array of path components
+kBookmarkCNIDPath = 0x1005 # Array of CNIDs
+kBookmarkFileProperties = 0x1010 # (CFURL rp flags,
+ # CFURL rp flags asked for,
+ # 8 bytes NULL)
+kBookmarkFileName = 0x1020
+kBookmarkFileID = 0x1030
+kBookmarkFileCreationDate = 0x1040
+# = 0x1054 # ?
+# = 0x1055 # ?
+# = 0x1056 # ?
+# = 0x1101 # ?
+# = 0x1102 # ?
+kBookmarkTOCPath = 0x2000 # A list of (TOC id, ?) pairs
+kBookmarkVolumePath = 0x2002
+kBookmarkVolumeURL = 0x2005
+kBookmarkVolumeName = 0x2010
+kBookmarkVolumeUUID = 0x2011 # Stored (perversely) as a string
+kBookmarkVolumeSize = 0x2012
+kBookmarkVolumeCreationDate = 0x2013
+kBookmarkVolumeProperties = 0x2020 # (CFURL vp flags,
+ # CFURL vp flags asked for,
+ # 8 bytes NULL)
+kBookmarkVolumeIsRoot = 0x2030 # True if volume is FS root
+kBookmarkVolumeBookmark = 0x2040 # Embedded bookmark for disk image (TOC id)
+kBookmarkVolumeMountPoint = 0x2050 # A URL
+# = 0x2070
+kBookmarkContainingFolder = 0xc001 # Index of containing folder in path
+kBookmarkUserName = 0xc011 # User that created bookmark
+kBookmarkUID = 0xc012 # UID that created bookmark
+kBookmarkWasFileReference = 0xd001 # True if the URL was a file reference
+kBookmarkCreationOptions = 0xd010
+kBookmarkURLLengths = 0xe003 # See below
+# = 0xf017 # Localized name?
+# = 0xf022
+kBookmarkSecurityExtension = 0xf080
+# = 0xf081
+
+# kBookmarkURLLengths is an array that is set if the URL encoded by the
+# bookmark had a base URL; in that case, each entry is the length of the
+# base URL in question. Thus a URL
+#
+# file:///foo/bar/baz blam/blat.html
+#
+# will result in [3, 2], while the URL
+#
+# file:///foo bar/baz blam blat.html
+#
+# would result in [1, 2, 1, 1]
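+#
+# For illustration (a sketch in terms of the URL class below), the first
+# example corresponds to
+#
+#     URL(URL('file:///foo/bar/baz'), 'blam/blat.html')
+#
+# i.e. one base URL with three path components plus a relative part with
+# two, hence [3, 2]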
+
+
+class Data (object):
+ def __init__(self, bytedata=None):
+ #: The bytes, stored as a byte string
+ self.bytes = bytes(bytedata)
+
+ def __repr__(self):
+ return 'Data(%r)' % self.bytes
+
+class URL (object):
+ def __init__(self, base, rel=None):
+ if rel is not None:
+ #: The base URL, if any (a :class:`URL`)
+ self.base = base
+ #: The rest of the URL (a string)
+ self.relative = rel
+ else:
+ self.base = None
+ self.relative = base
+
+ @property
+ def absolute(self):
+ """Return an absolute URL."""
+ if self.base is None:
+ return self.relative
+ else:
+            base_abs = self.base.absolute
+            return urljoin(base_abs, self.relative)
+
+ def __repr__(self):
+ return 'URL(%r)' % self.absolute
+
+class Bookmark (object):
+ def __init__(self, tocs=None):
+ if tocs is None:
+ #: The TOCs for this Bookmark
+ self.tocs = []
+ else:
+ self.tocs = tocs
+
+ @classmethod
+ def _get_item(cls, data, hdrsize, offset):
+ offset += hdrsize
+ if offset > len(data) - 8:
+ raise ValueError('Offset out of range')
+
+        length,typecode = struct.unpack(b'<II', data[offset:offset+8])
+
+        if len(data) - offset < 8 + length:
+            raise ValueError('Data item truncated')
+
+        databytes = data[offset+8:offset+8+length]
+
+        dsubtype = typecode & BMK_DATA_SUBTYPE_MASK
+        dtype = typecode & BMK_DATA_TYPE_MASK
+
+        if dtype == BMK_STRING:
+            return databytes.decode('utf-8')
+        elif dtype == BMK_DATA:
+            return Data(databytes)
+        elif dtype == BMK_NUMBER:
+            if dsubtype == kCFNumberSInt8Type:
+                return ord(databytes[0])
+            elif dsubtype == kCFNumberSInt16Type:
+                return struct.unpack(b'<h', databytes)[0]
+            elif dsubtype == kCFNumberSInt32Type:
+                return struct.unpack(b'<i', databytes)[0]
+            elif dsubtype == kCFNumberSInt64Type:
+                return struct.unpack(b'<q', databytes)[0]
+            elif dsubtype == kCFNumberFloat32Type:
+                return struct.unpack(b'<f', databytes)[0]
+            elif dsubtype == kCFNumberFloat64Type:
+                return struct.unpack(b'<d', databytes)[0]
+        elif dtype == BMK_DATE:
+            # Dates really are stored as *big-endian* doubles; everything
+            # else is little-endian
+            secs = datetime.timedelta(seconds=struct.unpack(b'>d', databytes)[0])
+ return osx_epoch + secs
+ elif dtype == BMK_BOOLEAN:
+ if dsubtype == BMK_BOOLEAN_ST_TRUE:
+ return True
+ elif dsubtype == BMK_BOOLEAN_ST_FALSE:
+ return False
+ elif dtype == BMK_UUID:
+ return uuid.UUID(bytes=databytes)
+ elif dtype == BMK_URL:
+ if dsubtype == BMK_URL_ST_ABSOLUTE:
+ return URL(databytes.decode('utf-8'))
+ elif dsubtype == BMK_URL_ST_RELATIVE:
+                baseoff,reloff = struct.unpack(b'<II', databytes)
+                base = cls._get_item(data, hdrsize, baseoff)
+                rel = cls._get_item(data, hdrsize, reloff)
+                return URL(base, rel)
+        elif dtype == BMK_ARRAY:
+            result = []
+            for aoff in xrange(offset+8,offset+8+length,4):
+                eltoff, = struct.unpack(b'<I', data[aoff:aoff+4])
+                result.append(cls._get_item(data, hdrsize, eltoff))
+            return result
+        elif dtype == BMK_DICT:
+            result = {}
+            for eoff in xrange(offset+8,offset+8+length,8):
+                keyoff,valoff = struct.unpack(b'<II', data[eoff:eoff+8])
+                key = cls._get_item(data, hdrsize, keyoff)
+                val = cls._get_item(data, hdrsize, valoff)
+                result[key] = val
+            return result
+        elif dtype == BMK_NULL:
+            return None
+
+        print('Unknown data type %08x' % typecode)
+        return Data(data[offset:offset+length+8])
+
+    @classmethod
+    def from_bytes(cls, data):
+        """Create a :class:`Bookmark` given byte data."""
+        if len(data) < 16:
+            raise ValueError('Not a bookmark file (too short)')
+
+        if isinstance(data, bytearray):
+            data = bytes(data)
+
+        magic,size,dummy,hdrsize = struct.unpack(b'<4sIII', data[0:16])
+
+        if magic != b'book':
+            raise ValueError('Not a bookmark file (bad magic) %r' % magic)
+
+        if hdrsize < 16:
+            raise ValueError('Not a bookmark file (header size too short)')
+
+        if hdrsize > size:
+ raise ValueError('Not a bookmark file (header size too large)')
+
+ if size != len(data):
+ raise ValueError('Not a bookmark file (truncated)')
+
+        tocoffset, = struct.unpack(b'<I', data[hdrsize:hdrsize+4])
+
+        tocs = []
+
+        while tocoffset != 0:
+            tocbase = hdrsize + tocoffset
+            if tocoffset > size - hdrsize \
+              or size - tocbase < 20:
+ raise ValueError('TOC offset out of range')
+
+ tocsize,tocmagic,tocid,nexttoc,toccount \
+                = struct.unpack(b'<IIIII', data[tocbase:tocbase+20])
+
+            if tocmagic != 0xfffffffe:
+                break
+
+            tocsize += 8
+
+            if size - tocbase < tocsize:
+                raise ValueError('TOC truncated')
+
+            if tocsize < 12 * toccount:
+                raise ValueError('TOC entries overrun TOC size')
+
+            toc = {}
+            for n in xrange(0,toccount):
+                ebase = tocbase + 20 + 12 * n
+                eid,eoffset,edummy = struct.unpack(b'<III',
+                                                   data[ebase:ebase+12])
+
+                if eid & 0x80000000:
+                    eid = cls._get_item(data, hdrsize, eid & 0x7fffffff)
+
+                toc[eid] = (eoffset, edummy)
+
+            tocs.append((tocid, toc))
+
+            tocoffset = nexttoc
+
+        contents = []
+        for tocid,toc in tocs:
+            entries = []
+            for k,v in iteritems(toc):
+                if k == kBookmarkTOCPath:
+                    continue
+                entries.append((k, cls._get_item(data, hdrsize, v[0])))
+            contents.append((tocid, entries))
+
+        return cls(contents)
+
+    @classmethod
+    def _encode_item(cls, item, offset):
+        if item is True:
+            result = struct.pack(b'<II', 0, BMK_BOOLEAN | BMK_BOOLEAN_ST_TRUE)
+        elif item is False:
+            result = struct.pack(b'<II', 0, BMK_BOOLEAN | BMK_BOOLEAN_ST_FALSE)
+        elif isinstance(item, unicode):
+            encoded = item.encode('utf-8')
+            result = (struct.pack(b'<II', len(encoded),
+                                  BMK_STRING | BMK_ST_ONE) + encoded)
+        elif isinstance(item, bytes):
+            result = (struct.pack(b'<II', len(item),
+                                  BMK_STRING | BMK_ST_ONE) + item)
+        elif isinstance(item, Data):
+            result = (struct.pack(b'<II', len(item.bytes),
+                                  BMK_DATA | BMK_ST_ONE) + bytes(item.bytes))
+        elif isinstance(item, bytearray):
+            result = (struct.pack(b'<II', len(item),
+                                  BMK_DATA | BMK_ST_ONE) + bytes(item))
+        elif isinstance(item, int) or isinstance(item, long):
+            if item > -0x80000000 and item < 0x7fffffff:
+                result = struct.pack(b'<IIi', 4,
+                                     BMK_NUMBER | kCFNumberSInt32Type, item)
+            else:
+                result = struct.pack(b'<IIq', 8,
+                                     BMK_NUMBER | kCFNumberSInt64Type, item)
+        elif isinstance(item, float):
+            result = struct.pack(b'<IId', 8,
+                                 BMK_NUMBER | kCFNumberFloat64Type, item)
+        elif isinstance(item, datetime.datetime):
+            secs = item - osx_epoch
+            result = struct.pack(b'<II', 8, BMK_DATE | BMK_ST_ZERO) \
+                + struct.pack(b'>d', float(secs.total_seconds()))
+ elif isinstance(item, uuid.UUID):
+            result = struct.pack(b'<II', 16,
+                                 BMK_UUID | BMK_ST_ONE) + item.bytes
diff --git a/packages/electron-builder/vendor/mac_alias/osx.py b/packages/electron-builder/vendor/mac_alias/osx.py
new file mode 100644
--- /dev/null
+++ b/packages/electron-builder/vendor/mac_alias/osx.py
+def _decode_utf8_nul(sz):
+    nul = sz.find(b'\0')
+    if nul > -1:
+ sz = sz[:nul]
+ return sz.decode('utf-8')
+
+def _decode_attrlist_result(buf, attrs, options):
+ result = []
+
+ assert len(buf) >= 4
+ total_size = uint32_t.from_buffer(buf, 0).value
+ assert total_size <= len(buf)
+
+ offset = 4
+
+ # Common attributes
+ if attrs[0] & ATTR_CMN_RETURNED_ATTRS:
+ a = attribute_set_t.from_buffer(buf, offset)
+ result.append(a)
+ offset += sizeof (attribute_set_t)
+ if not (options & FSOPT_PACK_INVAL_ATTRS):
+ attrs = [a.commonattr, a.volattr, a.dirattr, a.fileattr, a.forkattr]
+ if attrs[0] & ATTR_CMN_NAME:
+ a = attrreference_t.from_buffer(buf, offset)
+ ofs = offset + a.attr_dataoffset
+ name = _decode_utf8_nul(buf[ofs:ofs+a.attr_length])
+ offset += sizeof (attrreference_t)
+ result.append(name)
+ if attrs[0] & ATTR_CMN_DEVID:
+ a = dev_t.from_buffer(buf, offset)
+ offset += sizeof(dev_t)
+ result.append(a.value)
+ if attrs[0] & ATTR_CMN_FSID:
+ a = fsid_t.from_buffer(buf, offset)
+ offset += sizeof(fsid_t)
+ result.append(a)
+ if attrs[0] & ATTR_CMN_OBJTYPE:
+ a = fsobj_type_t.from_buffer(buf, offset)
+ offset += sizeof(fsobj_type_t)
+ result.append(a.value)
+ if attrs[0] & ATTR_CMN_OBJTAG:
+ a = fsobj_tag_t.from_buffer(buf, offset)
+ offset += sizeof(fsobj_tag_t)
+ result.append(a.value)
+ if attrs[0] & ATTR_CMN_OBJID:
+ a = fsobj_id_t.from_buffer(buf, offset)
+ offset += sizeof(fsobj_id_t)
+ result.append(a)
+ if attrs[0] & ATTR_CMN_OBJPERMANENTID:
+ a = fsobj_id_t.from_buffer(buf, offset)
+ offset += sizeof(fsobj_id_t)
+ result.append(a)
+ if attrs[0] & ATTR_CMN_PAROBJID:
+ a = fsobj_id_t.from_buffer(buf, offset)
+ offset += sizeof(fsobj_id_t)
+ result.append(a)
+ if attrs[0] & ATTR_CMN_SCRIPT:
+ a = text_encoding_t.from_buffer(buf, offset)
+ offset += sizeof(text_encoding_t)
+ result.append(a.value)
+ if attrs[0] & ATTR_CMN_CRTIME:
+ a = timespec.from_buffer(buf, offset)
+ offset += sizeof(timespec)
+ result.append(_datetime_from_timespec(a))
+ if attrs[0] & ATTR_CMN_MODTIME:
+ a = timespec.from_buffer(buf, offset)
+ offset += sizeof(timespec)
+ result.append(_datetime_from_timespec(a))
+ if attrs[0] & ATTR_CMN_CHGTIME:
+ a = timespec.from_buffer(buf, offset)
+ offset += sizeof(timespec)
+ result.append(_datetime_from_timespec(a))
+ if attrs[0] & ATTR_CMN_ACCTIME:
+ a = timespec.from_buffer(buf, offset)
+ offset += sizeof(timespec)
+ result.append(_datetime_from_timespec(a))
+ if attrs[0] & ATTR_CMN_BKUPTIME:
+ a = timespec.from_buffer(buf, offset)
+ offset += sizeof(timespec)
+ result.append(_datetime_from_timespec(a))
+ if attrs[0] & ATTR_CMN_FNDRINFO:
+ a = FinderInfo.from_buffer(buf, offset)
+ offset += sizeof(FinderInfo)
+ result.append(a)
+ if attrs[0] & ATTR_CMN_OWNERID:
+ a = uid_t.from_buffer(buf, offset)
+ offset += sizeof(uid_t)
+ result.append(a.value)
+ if attrs[0] & ATTR_CMN_GRPID:
+ a = gid_t.from_buffer(buf, offset)
+ offset += sizeof(gid_t)
+ result.append(a.value)
+ if attrs[0] & ATTR_CMN_ACCESSMASK:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[0] & ATTR_CMN_FLAGS:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[0] & ATTR_CMN_USERACCESS:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[0] & ATTR_CMN_EXTENDED_SECURITY:
+ a = attrreference_t.from_buffer(buf, offset)
+ ofs = offset + a.attr_dataoffset
+ offset += sizeof(attrreference_t)
+ ec = uint32_t.from_buffer(buf, ofs + 36).value
+ class kauth_acl(Structure):
+ _fields_ = [('acl_entrycount', c_uint),
+ ('acl_flags', c_uint),
+ ('acl_ace', kauth_ace * ec)]
+ class kauth_filesec(Structure):
+ _fields_ = [('fsec_magic', c_uint),
+ ('fsec_owner', guid_t),
+ ('fsec_group', guid_t),
+ ('fsec_acl', kauth_acl)]
+ a = kauth_filesec.from_buffer(buf, ofs)
+ result.append(a)
+ if attrs[0] & ATTR_CMN_UUID:
+ result.append(uuid.UUID(bytes=buf[offset:offset+16]))
+ offset += sizeof(guid_t)
+ if attrs[0] & ATTR_CMN_GRPUUID:
+ result.append(uuid.UUID(bytes=buf[offset:offset+16]))
+ offset += sizeof(guid_t)
+ if attrs[0] & ATTR_CMN_FILEID:
+ a = uint64_t.from_buffer(buf, offset)
+ offset += sizeof(uint64_t)
+ result.append(a.value)
+ if attrs[0] & ATTR_CMN_PARENTID:
+ a = uint64_t.from_buffer(buf, offset)
+ offset += sizeof(uint64_t)
+ result.append(a.value)
+ if attrs[0] & ATTR_CMN_FULLPATH:
+ a = attrreference_t.from_buffer(buf, offset)
+ ofs = offset + a.attr_dataoffset
+ path = _decode_utf8_nul(buf[ofs:ofs+a.attr_length])
+ offset += sizeof (attrreference_t)
+ result.append(path)
+ if attrs[0] & ATTR_CMN_ADDEDTIME:
+ a = timespec.from_buffer(buf, offset)
+ offset += sizeof(timespec)
+ result.append(_datetime_from_timespec(a))
+
+ # Volume attributes
+ if attrs[1] & ATTR_VOL_FSTYPE:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_SIGNATURE:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_SIZE:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_SPACEFREE:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_SPACEAVAIL:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_MINALLOCATION:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_ALLOCATIONCLUMP:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_IOBLOCKSIZE:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_OBJCOUNT:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_FILECOUNT:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_DIRCOUNT:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_MAXOBJCOUNT:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_MOUNTPOINT:
+ a = attrreference_t.from_buffer(buf, offset)
+ ofs = offset + a.attr_dataoffset
+ path = _decode_utf8_nul(buf[ofs:ofs+a.attr_length])
+ offset += sizeof (attrreference_t)
+ result.append(path)
+ if attrs[1] & ATTR_VOL_NAME:
+ a = attrreference_t.from_buffer(buf, offset)
+ ofs = offset + a.attr_dataoffset
+ name = _decode_utf8_nul(buf[ofs:ofs+a.attr_length])
+ offset += sizeof (attrreference_t)
+ result.append(name)
+ if attrs[1] & ATTR_VOL_MOUNTFLAGS:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_MOUNTEDDEVICE:
+ a = attrreference_t.from_buffer(buf, offset)
+ ofs = offset + a.attr_dataoffset
+ path = _decode_utf8_nul(buf[ofs:ofs+a.attr_length])
+ offset += sizeof (attrreference_t)
+ result.append(path)
+ if attrs[1] & ATTR_VOL_ENCODINGSUSED:
+ a = c_ulonglong.from_buffer(buf, offset)
+ offset += sizeof(c_ulonglong)
+ result.append(a.value)
+ if attrs[1] & ATTR_VOL_CAPABILITIES:
+ a = vol_capabilities_attr_t.from_buffer(buf, offset)
+ offset += sizeof(vol_capabilities_attr_t)
+ result.append(a)
+ if attrs[1] & ATTR_VOL_UUID:
+ result.append(uuid.UUID(bytes=buf[offset:offset+16]))
+ offset += sizeof(uuid_t)
+ if attrs[1] & ATTR_VOL_ATTRIBUTES:
+ a = vol_attributes_attr_t.from_buffer(buf, offset)
+ offset += sizeof(vol_attributes_attr_t)
+ result.append(a)
+
+ # Directory attributes
+ if attrs[2] & ATTR_DIR_LINKCOUNT:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[2] & ATTR_DIR_ENTRYCOUNT:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[2] & ATTR_DIR_MOUNTSTATUS:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+
+ # File attributes
+ if attrs[3] & ATTR_FILE_LINKCOUNT:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_TOTALSIZE:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_ALLOCSIZE:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_IOBLOCKSIZE:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_CLUMPSIZE:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_DEVTYPE:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_FILETYPE:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_FORKCOUNT:
+ a = uint32_t.from_buffer(buf, offset)
+ offset += sizeof(uint32_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_DATALENGTH:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_DATAALLOCSIZE:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_DATAEXTENTS:
+ a = extentrecord.from_buffer(buf, offset)
+ offset += sizeof(extentrecord)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_RSRCLENGTH:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_RSRCALLOCSIZE:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[3] & ATTR_FILE_RSRCEXTENTS:
+ a = extentrecord.from_buffer(buf, offset)
+ offset += sizeof(extentrecord)
+ result.append(a.value)
+
+ # Fork attributes
+ if attrs[4] & ATTR_FORK_TOTALSIZE:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+ if attrs[4] & ATTR_FORK_ALLOCSIZE:
+ a = off_t.from_buffer(buf, offset)
+ offset += sizeof(off_t)
+ result.append(a.value)
+
+ return result
+
+# Sadly, ctypes.get_errno() seems not to work
+__error = libc.__error
+__error.restype = POINTER(c_int)
+
+def _get_errno():
+ return __error().contents.value
+
+def getattrlist(path, attrs, options):
+ if not isinstance(path, bytes):
+ path = path.encode('utf-8')
+ attrs = list(attrs)
+ if attrs[1]:
+ attrs[1] |= ATTR_VOL_INFO
+ alist = attrlist(bitmapcount=5,
+ commonattr=attrs[0],
+ volattr=attrs[1],
+ dirattr=attrs[2],
+ fileattr=attrs[3],
+ forkattr=attrs[4])
+
+ bufsize = _attrbuf_size(attrs)
+ buf = create_string_buffer(bufsize)
+
+ ret = _getattrlist(path, byref(alist), buf, bufsize,
+ options | FSOPT_REPORT_FULLSIZE)
+
+ if ret < 0:
+ err = _get_errno()
+ raise OSError(err, os.strerror(err), path)
+
+ return _decode_attrlist_result(buf, attrs, options)
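+
+# Rough usage sketch: fetch the creation date and volume name for the
+# root volume (attrs are the five bitmasks in the order used above):
+#
+#     crtime, volname = getattrlist('/', (ATTR_CMN_CRTIME, ATTR_VOL_NAME,
+#                                         0, 0, 0), 0)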
+
+def fgetattrlist(fd, attrs, options):
+ if hasattr(fd, 'fileno'):
+ fd = fd.fileno()
+ attrs = list(attrs)
+ if attrs[1]:
+ attrs[1] |= ATTR_VOL_INFO
+ alist = attrlist(bitmapcount=5,
+ commonattr=attrs[0],
+ volattr=attrs[1],
+ dirattr=attrs[2],
+ fileattr=attrs[3],
+ forkattr=attrs[4])
+
+ bufsize = _attrbuf_size(attrs)
+ buf = create_string_buffer(bufsize)
+
+ ret = _fgetattrlist(fd, byref(alist), buf, bufsize,
+ options | FSOPT_REPORT_FULLSIZE)
+
+ if ret < 0:
+ err = _get_errno()
+ raise OSError(err, os.strerror(err))
+
+ return _decode_attrlist_result(buf, attrs, options)
+
+def statfs(path):
+ if not isinstance(path, bytes):
+ path = path.encode('utf-8')
+ result = struct_statfs()
+ ret = _statfs(path, byref(result))
+ if ret < 0:
+ err = _get_errno()
+ raise OSError(err, os.strerror(err), path)
+ return result
+
+def fstatfs(fd):
+ if hasattr(fd, 'fileno'):
+ fd = fd.fileno()
+ result = struct_statfs()
+ ret = _fstatfs(fd, byref(result))
+ if ret < 0:
+ err = _get_errno()
+ raise OSError(err, os.strerror(err))
+ return result
diff --git a/packages/electron-builder/vendor/mac_alias/utils.py b/packages/electron-builder/vendor/mac_alias/utils.py
new file mode 100644
index 00000000000..6a7d0a1212f
--- /dev/null
+++ b/packages/electron-builder/vendor/mac_alias/utils.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import datetime
+
+ZERO = datetime.timedelta(0)
+class UTC (datetime.tzinfo):
+ def utcoffset(self, dt):
+ return ZERO
+ def dst(self, dt):
+ return ZERO
+ def tzname(self, dt):
+ return 'UTC'
+
+utc = UTC()
+mac_epoch = datetime.datetime(1904,1,1,0,0,0,0,utc)
+unix_epoch = datetime.datetime(1970,1,1,0,0,0,0,utc)
+osx_epoch = datetime.datetime(2001,1,1,0,0,0,0,utc)
diff --git a/test/out/mac/__snapshots__/dmgTest.js.snap b/test/out/mac/__snapshots__/dmgTest.js.snap
index 1c07539c96d..c7dd19ee034 100644
--- a/test/out/mac/__snapshots__/dmgTest.js.snap
+++ b/test/out/mac/__snapshots__/dmgTest.js.snap
@@ -102,7 +102,20 @@ Object {
}
`;
-exports[`no build directory 1`] = `undefined`;
+exports[`no build directory 1`] = `
+Array [
+ Object {
+ "x": 130,
+ "y": 220,
+ },
+ Object {
+ "path": "/Applications",
+ "type": "link",
+ "x": 410,
+ "y": 220,
+ },
+]
+`;
exports[`no build directory 2`] = `
Object {
diff --git a/test/src/helpers/CheckingPackager.ts b/test/src/helpers/CheckingPackager.ts
index 597f5ece203..06df039f382 100644
--- a/test/src/helpers/CheckingPackager.ts
+++ b/test/src/helpers/CheckingPackager.ts
@@ -2,7 +2,7 @@ import { Arch, MacOptions, Packager, Target } from "electron-builder"
import SquirrelWindowsTarget from "electron-builder-squirrel-windows"
import { Identity } from "electron-builder/out/codeSign"
import MacPackager from "electron-builder/out/macPackager"
-import { DmgTarget } from "electron-builder/out/targets/dmg"
+import { DmgTarget } from "electron-builder/out/targets/dmg/dmg"
import { AsyncTaskManager } from "electron-builder/out/util/asyncTaskManager"
import { SignOptions } from "electron-builder/out/windowsCodeSign"
import { WinPackager } from "electron-builder/out/winPackager"
diff --git a/test/src/mac/dmgTest.ts b/test/src/mac/dmgTest.ts
index 72f4311f7bc..bfc1cb95a95 100644
--- a/test/src/mac/dmgTest.ts
+++ b/test/src/mac/dmgTest.ts
@@ -2,7 +2,7 @@ import BluebirdPromise from "bluebird-lst"
import { Platform } from "electron-builder"
import { copyFile } from "electron-builder-util/out/fs"
import { PlatformPackager } from "electron-builder/out/platformPackager"
-import { attachAndExecute } from "electron-builder/out/targets/dmg"
+import { attachAndExecute } from "electron-builder/out/targets/dmg/dmgUtil"
import { remove, writeFile } from "fs-extra-p"
import * as path from "path"
import { assertThat } from "../helpers/fileAssert"
@@ -63,16 +63,16 @@ test.ifMac("no Applications link", () => {
publish: null,
productName: "NoApplicationsLink",
dmg: {
- "contents": [
+ contents: [
{
- "x": 110,
- "y": 150
+ x: 110,
+ y: 150
},
{
- "x": 410,
- "y": 440,
- "type": "link",
- "path": "/Applications/TextEdit.app"
+ x: 410,
+ y: 440,
+ type: "link",
+ path: "/Applications/TextEdit.app"
}
],
},
@@ -101,7 +101,7 @@ test.ifMac("unset dmg icon", app({
}
}
}, {
- packed: (context) => {
+ packed: context => {
return attachAndExecute(path.join(context.outDir, "Test ß No Volume Icon-1.1.0.dmg"), false, () => {
return BluebirdPromise.all([
assertThat(path.join("/Volumes/Test ß No Volume Icon 1.1.0/.background/background.tiff")).isFile(),
@@ -123,7 +123,7 @@ test.ifMac("no background", app({
}
}
}, {
- packed: (context) => {
+ packed: context => {
return attachAndExecute(path.join(context.outDir, "NoBackground-1.1.0.dmg"), false, () => {
return assertThat(path.join("/Volumes/NoBackground 1.1.0/.background")).doesNotExist()
})
@@ -158,6 +158,7 @@ test.ifAll.ifMac("multi language license", app({
return BluebirdPromise.all([
writeFile(path.join(projectDir, "build", "license_en.txt"), "Hi"),
writeFile(path.join(projectDir, "build", "license_ru.txt"), "Привет"),
+ writeFile(path.join(projectDir, "build", "license_de.txt"), "Hallo, Grünwald"),
])
},
}))
diff --git a/yarn.lock b/yarn.lock
index a71cefb0b76..eed72a32f02 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -291,9 +291,9 @@ asynckit@^0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
-aws-sdk@^2.101.0:
- version "2.101.0"
- resolved "https://registry.yarnpkg.com/aws-sdk/-/aws-sdk-2.101.0.tgz#1252c364b840eba1ae0b8cd8c1accf78bfa14d96"
+aws-sdk@^2.102.0:
+ version "2.102.0"
+ resolved "https://registry.yarnpkg.com/aws-sdk/-/aws-sdk-2.102.0.tgz#70e52a8eff63de4c68e1d08c5c37db5561324340"
dependencies:
buffer "4.9.1"
crypto-browserify "1.0.9"
@@ -1567,6 +1567,10 @@ iconv-lite@0.4.13:
version "0.4.13"
resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.13.tgz#1f88aba4ab0b1508e8312acc39345f36e992e2f2"
+iconv-lite@^0.4.18:
+ version "0.4.18"
+ resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.18.tgz#23d8656b16aae6742ac29732ea8f0336a4789cf2"
+
ieee754@^1.1.4:
version "1.1.8"
resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.1.8.tgz#be33d40ac10ef1926701f6f08a2d86fbfd1ad3e4"
@@ -3704,6 +3708,10 @@ write-file-atomic@^2.0.0:
imurmurhash "^0.1.4"
signal-exit "^3.0.2"
+x-mac-cyrillic@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/x-mac-cyrillic/-/x-mac-cyrillic-1.0.0.tgz#43f111038ab51d766d0beb4ce5d283c1826f9bc9"
+
xdg-basedir@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-3.0.0.tgz#496b2cc109eca8dbacfe2dc72b603c17c5870ad4"