diff --git a/_shared/lib/adm-zip/LICENSE b/_shared/lib/adm-zip/LICENSE
new file mode 100644
index 00000000..f748c3de
--- /dev/null
+++ b/_shared/lib/adm-zip/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2012 Another-D-Mention Software and other contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/_shared/lib/adm-zip/README.md b/_shared/lib/adm-zip/README.md
new file mode 100644
index 00000000..60d7a12b
--- /dev/null
+++ b/_shared/lib/adm-zip/README.md
@@ -0,0 +1,65 @@
+# ADM-ZIP for NodeJS with added support for electron original-fs
+
+ADM-ZIP is a pure JavaScript implementation of zip compression and decompression for [NodeJS](https://nodejs.org/).
+
+# Installation
+
+With [npm](https://www.npmjs.com/) do:
+
+    $ npm install adm-zip
+
+## What is it good for?
+
+The library allows you to:
+
+- decompress zip files directly to disk or into in-memory buffers
+- compress files and store them to disk in .zip format or in compressed buffers
+- update, add, or delete files in an existing .zip
+
+# Dependencies
+
+ADM-ZIP does not depend on any other NodeJS libraries.
+
+# Examples
+
+## Basic usage
+
+```javascript
+var AdmZip = require("adm-zip");
+
+// reading archives
+var zip = new AdmZip("./my_file.zip");
+var zipEntries = zip.getEntries(); // an array of ZipEntry records
+
+zipEntries.forEach(function (zipEntry) {
+    console.log(zipEntry.toString()); // outputs zip entry information
+    if (zipEntry.entryName == "my_file.txt") {
+        console.log(zipEntry.getData().toString("utf8"));
+    }
+});
+// outputs the content of some_folder/my_file.txt
+console.log(zip.readAsText("some_folder/my_file.txt"));
+// extracts the specified file to the specified location
+zip.extractEntryTo(/*entry name*/ "some_folder/my_file.txt", /*target path*/ "/home/me/tempfolder", /*maintainEntryPath*/ false, /*overwrite*/ true);
+// extracts everything
+zip.extractAllTo(/*target path*/ "/home/me/zipcontent/", /*overwrite*/ true);
+
+// creating archives
+var zip = new AdmZip();
+
+// add file directly
+var content = "inner content of the file";
+zip.addFile("test.txt", Buffer.from(content, "utf8"), "entry comment goes here");
+// add local file
+zip.addLocalFile("/home/me/some_picture.png");
+// get everything as a buffer
+var willSendthis = zip.toBuffer();
+// or write everything to disk
+zip.writeZip(/*target file name*/ "/home/me/files.zip");
+
+// ... more examples in the wiki
+```
+
+For more detailed information please check out the [wiki](https://github.com/cthackers/adm-zip/wiki).
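+
+## In-memory round trip
+
+Because the constructor also accepts a Buffer, an archive can be built, serialized, and re-read without ever touching the disk. The following is a minimal sketch using only the calls shown above; the entry name `notes/hello.txt` is an arbitrary example:
+
+```javascript
+var AdmZip = require("adm-zip");
+
+// build an archive entirely in memory
+var mem = new AdmZip();
+mem.addFile("notes/hello.txt", Buffer.from("hello zip", "utf8"));
+
+// serialize to a Buffer and load it back without writing to disk
+var reloaded = new AdmZip(mem.toBuffer());
+console.log(reloaded.readAsText("notes/hello.txt")); // -> "hello zip"
+```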
+
+[![Build Status](https://travis-ci.org/cthackers/adm-zip.svg?branch=master)](https://travis-ci.org/cthackers/adm-zip)
diff --git a/_shared/lib/adm-zip/adm-zip.js b/_shared/lib/adm-zip/adm-zip.js
new file mode 100644
index 00000000..e1f1ce57
--- /dev/null
+++ b/_shared/lib/adm-zip/adm-zip.js
@@ -0,0 +1,786 @@
+const Utils = require("./util");
+const pth = require("path");
+const ZipEntry = require("./zipEntry");
+const ZipFile = require("./zipFile");
+
+const get_Bool = (val, def) => (typeof val === "boolean" ? val : def);
+const get_Str = (val, def) => (typeof val === "string" ? val : def);
+
+const defaultOptions = {
+    // option "noSort" : if true it disables files sorting
+    noSort: false,
+    // read entries during load (initial loading may be slower)
+    readEntries: false,
+    // default method is none
+    method: Utils.Constants.NONE,
+    // file system
+    fs: null
+};
+
+module.exports = function (/**String*/ input, /** object */ options) {
+    let inBuffer = null;
+
+    // create options object based on the defaults, allowing them to be overwritten
+    const opts = Object.assign(Object.create(null), defaultOptions);
+
+    // test input variable
+    if (input && "object" === typeof input) {
+        // if the value is not a Buffer, accept it as an options object
+        if (!(input instanceof Uint8Array)) {
+            Object.assign(opts, input);
+            input = opts.input ? opts.input : undefined;
+            if (opts.input) delete opts.input;
+        }
+
+        // if input is buffer
+        if (Buffer.isBuffer(input)) {
+            inBuffer = input;
+            opts.method = Utils.Constants.BUFFER;
+            input = undefined;
+        }
+    }
+
+    // assign options
+    Object.assign(opts, options);
+
+    // instantiate utils filesystem
+    const filetools = new Utils(opts);
+
+    // if input is a file name, read its content
+    if (input && "string" === typeof input) {
+        // load zip file
+        if (filetools.fs.existsSync(input)) {
+            opts.method = Utils.Constants.FILE;
+            opts.filename = input;
+            inBuffer = filetools.fs.readFileSync(input);
+        } else {
+            throw new Error(Utils.Errors.INVALID_FILENAME);
+        }
+    }
+
+    // create the internal ZipFile object
+    const _zip = new ZipFile(inBuffer, opts);
+
+    const { canonical, sanitize } = Utils;
+
+    function getEntry(/**Object*/ entry) {
+        if (entry && _zip) {
+            var item;
+            // If entry was given as a file name
+            if (typeof entry === "string") item = _zip.getEntry(entry);
+            // if entry was given as a ZipEntry object
+            if (typeof entry === "object" && typeof entry.entryName !== "undefined" && typeof entry.header !== "undefined") item = _zip.getEntry(entry.entryName);
+
+            if (item) {
+                return item;
+            }
+        }
+        return null;
+    }
+
+    function fixPath(zipPath) {
+        const { join, normalize, sep } = pth.posix;
+        // convert windows file separators and normalize
+        return join(".", normalize(sep + zipPath.split("\\").join(sep) + sep));
+    }
+
+    return {
+        /**
+         * Extracts the given entry from the archive and returns the content as a Buffer object
+         * @param entry ZipEntry object or String with the full path of the entry
+         *
+         * @return Buffer or Null in case of error
+         */
+        readFile: function (/**Object*/ entry, /*String, Buffer*/ pass) {
+            var item = getEntry(entry);
+            return (item && item.getData(pass)) || null;
+        },
+
+        /**
+         * Asynchronous readFile
+         * @param entry ZipEntry object or String with the full path of the entry
+         * @param callback
+         *
+         * @return Buffer or Null in case of error
+         */
+        readFileAsync: function (/**Object*/ entry, /**Function*/ callback) {
+            var item = getEntry(entry);
+            if (item) {
+                item.getDataAsync(callback);
+            } else {
+                callback(null, "getEntry failed for:" + entry);
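+                // Note: adm-zip async callbacks receive the payload first and the
+                // error second - the reverse of Node's usual (err, data) order.
+                // An illustrative caller (hypothetical file name) therefore reads:
+                //     zip.readFileAsync("folder/file.txt", function (data, err) {
+                //         if (err) return console.error(err);
+                //         // data is a Buffer with the decompressed content
+                //     });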
} + }, + + /** + * Extracts the given entry from the archive and returns the content as plain text in the given encoding + * @param entry ZipEntry object or String with the full path of the entry + * @param encoding Optional. If no encoding is specified utf8 is used + * + * @return String + */ + readAsText: function (/**Object*/ entry, /**String=*/ encoding) { + var item = getEntry(entry); + if (item) { + var data = item.getData(); + if (data && data.length) { + return data.toString(encoding || "utf8"); + } + } + return ""; + }, + + /** + * Asynchronous readAsText + * @param entry ZipEntry object or String with the full path of the entry + * @param callback + * @param encoding Optional. If no encoding is specified utf8 is used + * + * @return String + */ + readAsTextAsync: function (/**Object*/ entry, /**Function*/ callback, /**String=*/ encoding) { + var item = getEntry(entry); + if (item) { + item.getDataAsync(function (data, err) { + if (err) { + callback(data, err); + return; + } + + if (data && data.length) { + callback(data.toString(encoding || "utf8")); + } else { + callback(""); + } + }); + } else { + callback(""); + } + }, + + /** + * Remove the entry from the file or the entry and all it's nested directories and files if the given entry is a directory + * + * @param entry + */ + deleteFile: function (/**Object*/ entry) { + // @TODO: test deleteFile + var item = getEntry(entry); + if (item) { + _zip.deleteEntry(item.entryName); + } + }, + + /** + * Adds a comment to the zip. The zip must be rewritten after adding the comment. + * + * @param comment + */ + addZipComment: function (/**String*/ comment) { + // @TODO: test addZipComment + _zip.comment = comment; + }, + + /** + * Returns the zip comment + * + * @return String + */ + getZipComment: function () { + return _zip.comment || ""; + }, + + /** + * Adds a comment to a specified zipEntry. The zip must be rewritten after adding the comment + * The comment cannot exceed 65535 characters in length + * + * @param entry + * @param comment + */ + addZipEntryComment: function (/**Object*/ entry, /**String*/ comment) { + var item = getEntry(entry); + if (item) { + item.comment = comment; + } + }, + + /** + * Returns the comment of the specified entry + * + * @param entry + * @return String + */ + getZipEntryComment: function (/**Object*/ entry) { + var item = getEntry(entry); + if (item) { + return item.comment || ""; + } + return ""; + }, + + /** + * Updates the content of an existing entry inside the archive. The zip must be rewritten after updating the content + * + * @param entry + * @param content + */ + updateFile: function (/**Object*/ entry, /**Buffer*/ content) { + var item = getEntry(entry); + if (item) { + item.setData(content); + } + }, + + /** + * Adds a file from the disk to the archive + * + * @param localPath File to add to zip + * @param zipPath Optional path inside the zip + * @param zipName Optional name for the file + */ + addLocalFile: function (/**String*/ localPath, /**String=*/ zipPath, /**String=*/ zipName, /**String*/ comment) { + if (filetools.fs.existsSync(localPath)) { + // fix ZipPath + zipPath = zipPath ? fixPath(zipPath) : ""; + + // p - local file name + var p = localPath.split("\\").join("/").split("/").pop(); + + // add file name into zippath + zipPath += zipName ? 
zipName : p; + + // read file attributes + const _attr = filetools.fs.statSync(localPath); + + // add file into zip file + this.addFile(zipPath, filetools.fs.readFileSync(localPath), comment, _attr); + } else { + throw new Error(Utils.Errors.FILE_NOT_FOUND.replace("%s", localPath)); + } + }, + + /** + * Adds a local directory and all its nested files and directories to the archive + * + * @param localPath + * @param zipPath optional path inside zip + * @param filter optional RegExp or Function if files match will + * be included. + * @param {number | object} attr - number as unix file permissions, object as filesystem Stats object + */ + addLocalFolder: function (/**String*/ localPath, /**String=*/ zipPath, /**=RegExp|Function*/ filter, /**=number|object*/ attr) { + // Prepare filter + if (filter instanceof RegExp) { + // if filter is RegExp wrap it + filter = (function (rx) { + return function (filename) { + return rx.test(filename); + }; + })(filter); + } else if ("function" !== typeof filter) { + // if filter is not function we will replace it + filter = function () { + return true; + }; + } + + // fix ZipPath + zipPath = zipPath ? fixPath(zipPath) : ""; + + // normalize the path first + localPath = pth.normalize(localPath); + + if (filetools.fs.existsSync(localPath)) { + const items = filetools.findFiles(localPath); + const self = this; + + if (items.length) { + items.forEach(function (filepath) { + var p = pth.relative(localPath, filepath).split("\\").join("/"); //windows fix + if (filter(p)) { + var stats = filetools.fs.statSync(filepath); + if (stats.isFile()) { + self.addFile(zipPath + p, filetools.fs.readFileSync(filepath), "", attr ? attr : stats); + } else { + self.addFile(zipPath + p + "/", Buffer.alloc(0), "", attr ? attr : stats); + } + } + }); + } + } else { + throw new Error(Utils.Errors.FILE_NOT_FOUND.replace("%s", localPath)); + } + }, + + /** + * Asynchronous addLocalFile + * @param localPath + * @param callback + * @param zipPath optional path inside zip + * @param filter optional RegExp or Function if files match will + * be included. + */ + addLocalFolderAsync: function (/*String*/ localPath, /*Function*/ callback, /*String*/ zipPath, /*RegExp|Function*/ filter) { + if (filter instanceof RegExp) { + filter = (function (rx) { + return function (filename) { + return rx.test(filename); + }; + })(filter); + } else if ("function" !== typeof filter) { + filter = function () { + return true; + }; + } + + // fix ZipPath + zipPath = zipPath ? 
fixPath(zipPath) : ""; + + // normalize the path first + localPath = pth.normalize(localPath); + + var self = this; + filetools.fs.open(localPath, "r", function (err) { + if (err && err.code === "ENOENT") { + callback(undefined, Utils.Errors.FILE_NOT_FOUND.replace("%s", localPath)); + } else if (err) { + callback(undefined, err); + } else { + var items = filetools.findFiles(localPath); + var i = -1; + + var next = function () { + i += 1; + if (i < items.length) { + var filepath = items[i]; + var p = pth.relative(localPath, filepath).split("\\").join("/"); //windows fix + p = p + .normalize("NFD") + .replace(/[\u0300-\u036f]/g, "") + .replace(/[^\x20-\x7E]/g, ""); // accent fix + if (filter(p)) { + filetools.fs.stat(filepath, function (er0, stats) { + if (er0) callback(undefined, er0); + if (stats.isFile()) { + filetools.fs.readFile(filepath, function (er1, data) { + if (er1) { + callback(undefined, er1); + } else { + self.addFile(zipPath + p, data, "", stats); + next(); + } + }); + } else { + self.addFile(zipPath + p + "/", Buffer.alloc(0), "", stats); + next(); + } + }); + } else { + process.nextTick(() => { + next(); + }); + } + } else { + callback(true, undefined); + } + }; + + next(); + } + }); + }, + + /** + * + * @param {string} localPath - path where files will be extracted + * @param {object} props - optional properties + * @param {string} props.zipPath - optional path inside zip + * @param {regexp, function} props.filter - RegExp or Function if files match will be included. + */ + addLocalFolderPromise: function (/*String*/ localPath, /* object */ props) { + return new Promise((resolve, reject) => { + const { filter, zipPath } = Object.assign({}, props); + this.addLocalFolderAsync( + localPath, + (done, err) => { + if (err) reject(err); + if (done) resolve(this); + }, + zipPath, + filter + ); + }); + }, + + /** + * Allows you to create a entry (file or directory) in the zip file. + * If you want to create a directory the entryName must end in / and a null buffer should be provided. + * Comment and attributes are optional + * + * @param {string} entryName + * @param {Buffer | string} content - file content as buffer or utf8 coded string + * @param {string} comment - file comment + * @param {number | object} attr - number as unix file permissions, object as filesystem Stats object + */ + addFile: function (/**String*/ entryName, /**Buffer*/ content, /**String*/ comment, /**Number*/ attr) { + let entry = getEntry(entryName); + const update = entry != null; + + // prepare new entry + if (!update) { + entry = new ZipEntry(); + entry.entryName = entryName; + } + entry.comment = comment || ""; + + const isStat = "object" === typeof attr && attr instanceof filetools.fs.Stats; + + // last modification time from file stats + if (isStat) { + entry.header.time = attr.mtime; + } + + // Set file attribute + var fileattr = entry.isDirectory ? 0x10 : 0; // (MS-DOS directory flag) + + // extended attributes field for Unix + // set file type either S_IFDIR / S_IFREG + let unix = entry.isDirectory ? 0x4000 : 0x8000; + + if (isStat) { + // File attributes from file stats + unix |= 0xfff & attr.mode; + } else if ("number" === typeof attr) { + // attr from given attr values + unix |= 0xfff & attr; + } else { + // Default values: + unix |= entry.isDirectory ? 
0o755 : 0o644; // permissions (drwxr-xr-x) or (-rw-r--r--)
+            }
+
+            fileattr = (fileattr | (unix << 16)) >>> 0; // add attributes
+
+            entry.attr = fileattr;
+
+            entry.setData(content);
+            if (!update) _zip.setEntry(entry);
+        },
+
+        /**
+         * Returns an array of ZipEntry objects representing the files and folders inside the archive
+         *
+         * @return Array
+         */
+        getEntries: function () {
+            return _zip ? _zip.entries : [];
+        },
+
+        /**
+         * Returns a ZipEntry object representing the file or folder specified by ``name``.
+         *
+         * @param name
+         * @return ZipEntry
+         */
+        getEntry: function (/**String*/ name) {
+            return getEntry(name);
+        },
+
+        getEntryCount: function () {
+            return _zip.getEntryCount();
+        },
+
+        forEach: function (callback) {
+            return _zip.forEach(callback);
+        },
+
+        /**
+         * Extracts the given entry to the given targetPath
+         * If the entry is a directory inside the archive, the entire directory and its subdirectories will be extracted
+         *
+         * @param entry ZipEntry object or String with the full path of the entry
+         * @param targetPath Target folder where to write the file
+         * @param maintainEntryPath If maintainEntryPath is true and the entry is inside a folder, the entry folder
+         *                          will be created in targetPath as well. Default is TRUE
+         * @param overwrite If the file already exists at the target path, the file will be overwritten if this is true.
+         *                  Default is FALSE
+         * @param keepOriginalPermission The file will be set as the permission from the entry if this is true.
+         *                  Default is FALSE
+         * @param outFileName String If set will override the filename of the extracted file (Only works if the entry is a file)
+         *
+         * @return Boolean
+         */
+        extractEntryTo: function (
+            /**Object*/ entry,
+            /**String*/ targetPath,
+            /**Boolean*/ maintainEntryPath,
+            /**Boolean*/ overwrite,
+            /**Boolean*/ keepOriginalPermission,
+            /**String**/ outFileName
+        ) {
+            overwrite = get_Bool(overwrite, false);
+            keepOriginalPermission = get_Bool(keepOriginalPermission, false);
+            maintainEntryPath = get_Bool(maintainEntryPath, true);
+            // allow outFileName to be passed in the keepOriginalPermission slot when it is a string
+            outFileName = get_Str(outFileName, get_Str(keepOriginalPermission, undefined));
+
+            var item = getEntry(entry);
+            if (!item) {
+                throw new Error(Utils.Errors.NO_ENTRY);
+            }
+
+            var entryName = canonical(item.entryName);
+
+            var target = sanitize(targetPath, outFileName && !item.isDirectory ? outFileName : maintainEntryPath ? entryName : pth.basename(entryName));
+
+            if (item.isDirectory) {
+                var children = _zip.getEntryChildren(item);
+                children.forEach(function (child) {
+                    if (child.isDirectory) return;
+                    var content = child.getData();
+                    if (!content) {
+                        throw new Error(Utils.Errors.CANT_EXTRACT_FILE);
+                    }
+                    var name = canonical(child.entryName);
+                    var childName = sanitize(targetPath, maintainEntryPath ? name : pth.basename(name));
+                    // The reverse operation for attr depends on the addFile() method
+                    const fileAttr = keepOriginalPermission ? child.header.fileAttr : undefined;
+                    filetools.writeFileTo(childName, content, overwrite, fileAttr);
+                });
+                return true;
+            }
+
+            var content = item.getData();
+            if (!content) throw new Error(Utils.Errors.CANT_EXTRACT_FILE);
+
+            if (filetools.fs.existsSync(target) && !overwrite) {
+                throw new Error(Utils.Errors.CANT_OVERRIDE);
+            }
+            // The reverse operation for attr depends on the addFile() method
+            const fileAttr = keepOriginalPermission ?
item.header.fileAttr : undefined; // use the resolved entry, since `entry` may be a plain string
+            filetools.writeFileTo(target, content, overwrite, fileAttr);
+
+            return true;
+        },
+
+        /**
+         * Test the archive
+         *
+         */
+        test: function (pass) {
+            if (!_zip) {
+                return false;
+            }
+
+            // for..in over the entries array yields indexes, so resolve each entry first
+            for (var pos in _zip.entries) {
+                try {
+                    var item = _zip.entries[pos];
+                    if (item.isDirectory) {
+                        continue;
+                    }
+                    var content = item.getData(pass);
+                    if (!content) {
+                        return false;
+                    }
+                } catch (err) {
+                    return false;
+                }
+            }
+            return true;
+        },
+
+        /**
+         * Extracts the entire archive to the given location
+         *
+         * @param targetPath Target location
+         * @param overwrite If the file already exists at the target path, the file will be overwritten if this is true.
+         *                  Default is FALSE
+         * @param keepOriginalPermission The file will be set as the permission from the entry if this is true.
+         *                  Default is FALSE
+         */
+        extractAllTo: function (/**String*/ targetPath, /**Boolean*/ overwrite, /**Boolean*/ keepOriginalPermission, /*String, Buffer*/ pass) {
+            overwrite = get_Bool(overwrite, false);
+            // allow the password to be passed as the third argument
+            pass = get_Str(keepOriginalPermission, pass);
+            keepOriginalPermission = get_Bool(keepOriginalPermission, false);
+            if (!_zip) {
+                throw new Error(Utils.Errors.NO_ZIP);
+            }
+            _zip.entries.forEach(function (entry) {
+                var entryName = sanitize(targetPath, canonical(entry.entryName.toString()));
+                if (entry.isDirectory) {
+                    filetools.makeDir(entryName);
+                    return;
+                }
+                var content = entry.getData(pass);
+                if (!content) {
+                    throw new Error(Utils.Errors.CANT_EXTRACT_FILE);
+                }
+                // The reverse operation for attr depends on the addFile() method
+                const fileAttr = keepOriginalPermission ? entry.header.fileAttr : undefined;
+                filetools.writeFileTo(entryName, content, overwrite, fileAttr);
+                try {
+                    filetools.fs.utimesSync(entryName, entry.header.time, entry.header.time);
+                } catch (err) {
+                    throw new Error(Utils.Errors.CANT_EXTRACT_FILE);
+                }
+            });
+        },
+
+        /**
+         * Asynchronous extractAllTo
+         *
+         * @param targetPath Target location
+         * @param overwrite If the file already exists at the target path, the file will be overwritten if this is true.
+         *                  Default is FALSE
+         * @param keepOriginalPermission The file will be set as the permission from the entry if this is true.
+         *                  Default is FALSE
+         * @param callback The callback will be executed when all entries are extracted successfully or when an error occurs.
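+         *
+         * @example
+         * // illustrative sketch, assuming `zip` is an AdmZip instance opened earlier:
+         * // extract everything, overwrite existing files, keep default permissions
+         * zip.extractAllToAsync("/home/me/zipcontent/", true, false, function (err) {
+         *     if (err) {
+         *         console.error(err);
+         *     } else {
+         *         console.log("archive fully extracted");
+         *     }
+         * });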
+ */ + extractAllToAsync: function (/**String*/ targetPath, /**Boolean*/ overwrite, /**Boolean*/ keepOriginalPermission, /**Function*/ callback) { + overwrite = get_Bool(overwrite, false); + if (typeof keepOriginalPermission === "function" && !callback) callback = keepOriginalPermission; + keepOriginalPermission = get_Bool(keepOriginalPermission, false); + if (!callback) { + callback = function (err) { + throw new Error(err); + }; + } + if (!_zip) { + callback(new Error(Utils.Errors.NO_ZIP)); + return; + } + + targetPath = pth.resolve(targetPath); + // convert entryName to + const getPath = (entry) => sanitize(targetPath, pth.normalize(canonical(entry.entryName.toString()))); + const getError = (msg, file) => new Error(msg + ': "' + file + '"'); + + // separate directories from files + const dirEntries = []; + const fileEntries = new Set(); + _zip.entries.forEach((e) => { + if (e.isDirectory) { + dirEntries.push(e); + } else { + fileEntries.add(e); + } + }); + + // Create directory entries first synchronously + // this prevents race condition and assures folders are there before writing files + for (const entry of dirEntries) { + const dirPath = getPath(entry); + // The reverse operation for attr depend on method addFile() + const dirAttr = keepOriginalPermission ? entry.header.fileAttr : undefined; + try { + filetools.makeDir(dirPath); + if (dirAttr) filetools.fs.chmodSync(dirPath, dirAttr); + // in unix timestamp will change if files are later added to folder, but still + filetools.fs.utimesSync(dirPath, entry.header.time, entry.header.time); + } catch (er) { + callback(getError("Unable to create folder", dirPath)); + } + } + + // callback wrapper, for some house keeping + const done = () => { + if (fileEntries.size === 0) { + callback(); + } + }; + + // Extract file entries asynchronously + for (const entry of fileEntries.values()) { + const entryName = pth.normalize(canonical(entry.entryName.toString())); + const filePath = sanitize(targetPath, entryName); + entry.getDataAsync(function (content, err_1) { + if (err_1) { + callback(new Error(err_1)); + return; + } + if (!content) { + callback(new Error(Utils.Errors.CANT_EXTRACT_FILE)); + } else { + // The reverse operation for attr depend on method addFile() + const fileAttr = keepOriginalPermission ? 
entry.header.fileAttr : undefined; + filetools.writeFileToAsync(filePath, content, overwrite, fileAttr, function (succ) { + if (!succ) { + callback(getError("Unable to write file", filePath)); + return; + } + filetools.fs.utimes(filePath, entry.header.time, entry.header.time, function (err_2) { + if (err_2) { + callback(getError("Unable to set times", filePath)); + return; + } + fileEntries.delete(entry); + // call the callback if it was last entry + done(); + }); + }); + } + }); + } + // call the callback if fileEntries was empty + done(); + }, + + /** + * Writes the newly created zip file to disk at the specified location or if a zip was opened and no ``targetFileName`` is provided, it will overwrite the opened zip + * + * @param targetFileName + * @param callback + */ + writeZip: function (/**String*/ targetFileName, /**Function*/ callback) { + if (arguments.length === 1) { + if (typeof targetFileName === "function") { + callback = targetFileName; + targetFileName = ""; + } + } + + if (!targetFileName && opts.filename) { + targetFileName = opts.filename; + } + if (!targetFileName) return; + + var zipData = _zip.compressToBuffer(); + if (zipData) { + var ok = filetools.writeFileTo(targetFileName, zipData, true); + if (typeof callback === "function") callback(!ok ? new Error("failed") : null, ""); + } + }, + + writeZipPromise: function (/**String*/ targetFileName, /* object */ props) { + const { overwrite, perm } = Object.assign({ overwrite: true }, props); + + return new Promise((resolve, reject) => { + // find file name + if (!targetFileName && opts.filename) targetFileName = opts.filename; + if (!targetFileName) reject("ADM-ZIP: ZIP File Name Missing"); + + this.toBufferPromise().then((zipData) => { + const ret = (done) => (done ? resolve(done) : reject("ADM-ZIP: Wasn't able to write zip file")); + filetools.writeFileToAsync(targetFileName, zipData, overwrite, perm, ret); + }, reject); + }); + }, + + toBufferPromise: function () { + return new Promise((resolve, reject) => { + _zip.toAsyncBuffer(resolve, reject); + }); + }, + + /** + * Returns the content of the entire zip file as a Buffer object + * + * @return Buffer + */ + toBuffer: function (/**Function=*/ onSuccess, /**Function=*/ onFail, /**Function=*/ onItemStart, /**Function=*/ onItemEnd) { + this.valueOf = 2; + if (typeof onSuccess === "function") { + _zip.toAsyncBuffer(onSuccess, onFail, onItemStart, onItemEnd); + return null; + } + return _zip.compressToBuffer(); + } + }; +}; diff --git a/_shared/lib/adm-zip/headers/entryHeader.js b/_shared/lib/adm-zip/headers/entryHeader.js new file mode 100644 index 00000000..572b9a74 --- /dev/null +++ b/_shared/lib/adm-zip/headers/entryHeader.js @@ -0,0 +1,338 @@ +var Utils = require("../util"), + Constants = Utils.Constants; + +/* The central directory file header */ +module.exports = function () { + var _verMade = 20, // v2.0 + _version = 10, // v1.0 + _flags = 0, + _method = 0, + _time = 0, + _crc = 0, + _compressedSize = 0, + _size = 0, + _fnameLen = 0, + _extraLen = 0, + _comLen = 0, + _diskStart = 0, + _inattr = 0, + _attr = 0, + _offset = 0; + + _verMade |= Utils.isWin ? 0x0a00 : 0x0300; + + // Set EFS flag since filename and comment fields are all by default encoded using UTF-8. 
// Without it, file names may appear corrupted in other applications when they use unicode characters
+    _flags |= Constants.FLG_EFS;
+
+    var _dataHeader = {};
+
+    function setTime(val) {
+        val = new Date(val);
+        _time =
+            (((val.getFullYear() - 1980) & 0x7f) << 25) | // b09-16 years from 1980
+            ((val.getMonth() + 1) << 21) | // b05-08 month
+            (val.getDate() << 16) | // b00-04 day
+            // 2 bytes time
+            (val.getHours() << 11) | // b11-15 hour
+            (val.getMinutes() << 5) | // b05-10 minute
+            (val.getSeconds() >> 1); // b00-04 seconds divided by 2
+    }
+
+    setTime(+new Date());
+
+    return {
+        get made() {
+            return _verMade;
+        },
+        set made(val) {
+            _verMade = val;
+        },
+
+        get version() {
+            return _version;
+        },
+        set version(val) {
+            _version = val;
+        },
+
+        get flags() {
+            return _flags;
+        },
+        set flags(val) {
+            _flags = val;
+        },
+
+        get method() {
+            return _method;
+        },
+        set method(val) {
+            switch (val) {
+                case Constants.STORED:
+                    this.version = 10;
+                    break; // STORED entries only need v1.0 to extract; without the break they fell through to v2.0
+                case Constants.DEFLATED:
+                default:
+                    this.version = 20;
+            }
+            _method = val;
+        },
+
+        get time() {
+            return new Date(((_time >> 25) & 0x7f) + 1980, ((_time >> 21) & 0x0f) - 1, (_time >> 16) & 0x1f, (_time >> 11) & 0x1f, (_time >> 5) & 0x3f, (_time & 0x1f) << 1);
+        },
+        set time(val) {
+            setTime(val);
+        },
+
+        get crc() {
+            return _crc;
+        },
+        set crc(val) {
+            _crc = Math.max(0, val) >>> 0;
+        },
+
+        get compressedSize() {
+            return _compressedSize;
+        },
+        set compressedSize(val) {
+            _compressedSize = Math.max(0, val) >>> 0;
+        },
+
+        get size() {
+            return _size;
+        },
+        set size(val) {
+            _size = Math.max(0, val) >>> 0;
+        },
+
+        get fileNameLength() {
+            return _fnameLen;
+        },
+        set fileNameLength(val) {
+            _fnameLen = val;
+        },
+
+        get extraLength() {
+            return _extraLen;
+        },
+        set extraLength(val) {
+            _extraLen = val;
+        },
+
+        get commentLength() {
+            return _comLen;
+        },
+        set commentLength(val) {
+            _comLen = val;
+        },
+
+        get diskNumStart() {
+            return _diskStart;
+        },
+        set diskNumStart(val) {
+            _diskStart = Math.max(0, val) >>> 0;
+        },
+
+        get inAttr() {
+            return _inattr;
+        },
+        set inAttr(val) {
+            _inattr = Math.max(0, val) >>> 0;
+        },
+
+        get attr() {
+            return _attr;
+        },
+        set attr(val) {
+            _attr = Math.max(0, val) >>> 0;
+        },
+
+        // get Unix file permissions
+        get fileAttr() {
+            return _attr ?
(((_attr >>> 0) | 0) >> 16) & 0xfff : 0; + }, + + get offset() { + return _offset; + }, + set offset(val) { + _offset = Math.max(0, val) >>> 0; + }, + + get encripted() { + return (_flags & 1) === 1; + }, + + get entryHeaderSize() { + return Constants.CENHDR + _fnameLen + _extraLen + _comLen; + }, + + get realDataOffset() { + return _offset + Constants.LOCHDR + _dataHeader.fnameLen + _dataHeader.extraLen; + }, + + get dataHeader() { + return _dataHeader; + }, + + loadDataHeaderFromBinary: function (/*Buffer*/ input) { + var data = input.slice(_offset, _offset + Constants.LOCHDR); + // 30 bytes and should start with "PK\003\004" + if (data.readUInt32LE(0) !== Constants.LOCSIG) { + throw new Error(Utils.Errors.INVALID_LOC); + } + _dataHeader = { + // version needed to extract + version: data.readUInt16LE(Constants.LOCVER), + // general purpose bit flag + flags: data.readUInt16LE(Constants.LOCFLG), + // compression method + method: data.readUInt16LE(Constants.LOCHOW), + // modification time (2 bytes time, 2 bytes date) + time: data.readUInt32LE(Constants.LOCTIM), + // uncompressed file crc-32 value + crc: data.readUInt32LE(Constants.LOCCRC), + // compressed size + compressedSize: data.readUInt32LE(Constants.LOCSIZ), + // uncompressed size + size: data.readUInt32LE(Constants.LOCLEN), + // filename length + fnameLen: data.readUInt16LE(Constants.LOCNAM), + // extra field length + extraLen: data.readUInt16LE(Constants.LOCEXT) + }; + }, + + loadFromBinary: function (/*Buffer*/ data) { + // data should be 46 bytes and start with "PK 01 02" + if (data.length !== Constants.CENHDR || data.readUInt32LE(0) !== Constants.CENSIG) { + throw new Error(Utils.Errors.INVALID_CEN); + } + // version made by + _verMade = data.readUInt16LE(Constants.CENVEM); + // version needed to extract + _version = data.readUInt16LE(Constants.CENVER); + // encrypt, decrypt flags + _flags = data.readUInt16LE(Constants.CENFLG); + // compression method + _method = data.readUInt16LE(Constants.CENHOW); + // modification time (2 bytes time, 2 bytes date) + _time = data.readUInt32LE(Constants.CENTIM); + // uncompressed file crc-32 value + _crc = data.readUInt32LE(Constants.CENCRC); + // compressed size + _compressedSize = data.readUInt32LE(Constants.CENSIZ); + // uncompressed size + _size = data.readUInt32LE(Constants.CENLEN); + // filename length + _fnameLen = data.readUInt16LE(Constants.CENNAM); + // extra field length + _extraLen = data.readUInt16LE(Constants.CENEXT); + // file comment length + _comLen = data.readUInt16LE(Constants.CENCOM); + // volume number start + _diskStart = data.readUInt16LE(Constants.CENDSK); + // internal file attributes + _inattr = data.readUInt16LE(Constants.CENATT); + // external file attributes + _attr = data.readUInt32LE(Constants.CENATX); + // LOC header offset + _offset = data.readUInt32LE(Constants.CENOFF); + }, + + dataHeaderToBinary: function () { + // LOC header size (30 bytes) + var data = Buffer.alloc(Constants.LOCHDR); + // "PK\003\004" + data.writeUInt32LE(Constants.LOCSIG, 0); + // version needed to extract + data.writeUInt16LE(_version, Constants.LOCVER); + // general purpose bit flag + data.writeUInt16LE(_flags, Constants.LOCFLG); + // compression method + data.writeUInt16LE(_method, Constants.LOCHOW); + // modification time (2 bytes time, 2 bytes date) + data.writeUInt32LE(_time, Constants.LOCTIM); + // uncompressed file crc-32 value + data.writeUInt32LE(_crc, Constants.LOCCRC); + // compressed size + data.writeUInt32LE(_compressedSize, Constants.LOCSIZ); + // uncompressed size + 
data.writeUInt32LE(_size, Constants.LOCLEN); + // filename length + data.writeUInt16LE(_fnameLen, Constants.LOCNAM); + // extra field length + data.writeUInt16LE(_extraLen, Constants.LOCEXT); + return data; + }, + + entryHeaderToBinary: function () { + // CEN header size (46 bytes) + var data = Buffer.alloc(Constants.CENHDR + _fnameLen + _extraLen + _comLen); + // "PK\001\002" + data.writeUInt32LE(Constants.CENSIG, 0); + // version made by + data.writeUInt16LE(_verMade, Constants.CENVEM); + // version needed to extract + data.writeUInt16LE(_version, Constants.CENVER); + // encrypt, decrypt flags + data.writeUInt16LE(_flags, Constants.CENFLG); + // compression method + data.writeUInt16LE(_method, Constants.CENHOW); + // modification time (2 bytes time, 2 bytes date) + data.writeUInt32LE(_time, Constants.CENTIM); + // uncompressed file crc-32 value + data.writeUInt32LE(_crc, Constants.CENCRC); + // compressed size + data.writeUInt32LE(_compressedSize, Constants.CENSIZ); + // uncompressed size + data.writeUInt32LE(_size, Constants.CENLEN); + // filename length + data.writeUInt16LE(_fnameLen, Constants.CENNAM); + // extra field length + data.writeUInt16LE(_extraLen, Constants.CENEXT); + // file comment length + data.writeUInt16LE(_comLen, Constants.CENCOM); + // volume number start + data.writeUInt16LE(_diskStart, Constants.CENDSK); + // internal file attributes + data.writeUInt16LE(_inattr, Constants.CENATT); + // external file attributes + data.writeUInt32LE(_attr, Constants.CENATX); + // LOC header offset + data.writeUInt32LE(_offset, Constants.CENOFF); + // fill all with + data.fill(0x00, Constants.CENHDR); + return data; + }, + + toJSON: function () { + const bytes = function (nr) { + return nr + " bytes"; + }; + + return { + made: _verMade, + version: _version, + flags: _flags, + method: Utils.methodToString(_method), + time: this.time, + crc: "0x" + _crc.toString(16).toUpperCase(), + compressedSize: bytes(_compressedSize), + size: bytes(_size), + fileNameLength: bytes(_fnameLen), + extraLength: bytes(_extraLen), + commentLength: bytes(_comLen), + diskNumStart: _diskStart, + inAttr: _inattr, + attr: _attr, + offset: _offset, + entryHeaderSize: bytes(Constants.CENHDR + _fnameLen + _extraLen + _comLen) + }; + }, + + toString: function () { + return JSON.stringify(this.toJSON(), null, "\t"); + } + }; +}; diff --git a/_shared/lib/adm-zip/headers/index.js b/_shared/lib/adm-zip/headers/index.js new file mode 100644 index 00000000..b54a7222 --- /dev/null +++ b/_shared/lib/adm-zip/headers/index.js @@ -0,0 +1,2 @@ +exports.EntryHeader = require("./entryHeader"); +exports.MainHeader = require("./mainHeader"); diff --git a/_shared/lib/adm-zip/headers/mainHeader.js b/_shared/lib/adm-zip/headers/mainHeader.js new file mode 100644 index 00000000..dcea01dc --- /dev/null +++ b/_shared/lib/adm-zip/headers/mainHeader.js @@ -0,0 +1,130 @@ +var Utils = require("../util"), + Constants = Utils.Constants; + +/* The entries in the end of central directory */ +module.exports = function () { + var _volumeEntries = 0, + _totalEntries = 0, + _size = 0, + _offset = 0, + _commentLength = 0; + + return { + get diskEntries() { + return _volumeEntries; + }, + set diskEntries(/*Number*/ val) { + _volumeEntries = _totalEntries = val; + }, + + get totalEntries() { + return _totalEntries; + }, + set totalEntries(/*Number*/ val) { + _totalEntries = _volumeEntries = val; + }, + + get size() { + return _size; + }, + set size(/*Number*/ val) { + _size = val; + }, + + get offset() { + return _offset; + }, + set offset(/*Number*/ 
val) {
+            _offset = val;
+        },
+
+        get commentLength() {
+            return _commentLength;
+        },
+        set commentLength(/*Number*/ val) {
+            _commentLength = val;
+        },
+
+        get mainHeaderSize() {
+            return Constants.ENDHDR + _commentLength;
+        },
+
+        loadFromBinary: function (/*Buffer*/ data) {
+            // data should be 22 bytes and start with "PK 05 06"
+            // or be 56+ bytes and start with "PK 06 06" for Zip64
+            if (
+                (data.length !== Constants.ENDHDR || data.readUInt32LE(0) !== Constants.ENDSIG) &&
+                (data.length < Constants.ZIP64HDR || data.readUInt32LE(0) !== Constants.ZIP64SIG)
+            ) {
+                throw new Error(Utils.Errors.INVALID_END);
+            }
+
+            if (data.readUInt32LE(0) === Constants.ENDSIG) {
+                // number of entries on this volume
+                _volumeEntries = data.readUInt16LE(Constants.ENDSUB);
+                // total number of entries
+                _totalEntries = data.readUInt16LE(Constants.ENDTOT);
+                // central directory size in bytes
+                _size = data.readUInt32LE(Constants.ENDSIZ);
+                // offset of first CEN header
+                _offset = data.readUInt32LE(Constants.ENDOFF);
+                // zip file comment length
+                _commentLength = data.readUInt16LE(Constants.ENDCOM);
+            } else {
+                // number of entries on this volume
+                _volumeEntries = Utils.readBigUInt64LE(data, Constants.ZIP64SUB);
+                // total number of entries
+                _totalEntries = Utils.readBigUInt64LE(data, Constants.ZIP64TOT);
+                // central directory size in bytes
+                _size = Utils.readBigUInt64LE(data, Constants.ZIP64SIZE);
+                // offset of first CEN header
+                _offset = Utils.readBigUInt64LE(data, Constants.ZIP64OFF);
+
+                _commentLength = 0;
+            }
+        },
+
+        toBinary: function () {
+            var b = Buffer.alloc(Constants.ENDHDR + _commentLength);
+            // "PK 05 06" signature
+            b.writeUInt32LE(Constants.ENDSIG, 0);
+            b.writeUInt32LE(0, 4);
+            // number of entries on this volume
+            b.writeUInt16LE(_volumeEntries, Constants.ENDSUB);
+            // total number of entries
+            b.writeUInt16LE(_totalEntries, Constants.ENDTOT);
+            // central directory size in bytes
+            b.writeUInt32LE(_size, Constants.ENDSIZ);
+            // offset of first CEN header
+            b.writeUInt32LE(_offset, Constants.ENDOFF);
+            // zip file comment length
+            b.writeUInt16LE(_commentLength, Constants.ENDCOM);
+            // fill comment memory with spaces so no garbage is left there
+            b.fill(" ", Constants.ENDHDR);
+
+            return b;
+        },
+
+        toJSON: function () {
+            // creates 0x0000 style output
+            const offset = function (nr, len) {
+                let offs = nr.toString(16).toUpperCase();
+                while (offs.length < len) offs = "0" + offs;
+                return "0x" + offs;
+            };
+
+            return {
+                diskEntries: _volumeEntries,
+                totalEntries: _totalEntries,
+                size: _size + " bytes",
+                offset: offset(_offset, 4),
+                commentLength: _commentLength
+            };
+        },
+
+        toString: function () {
+            return JSON.stringify(this.toJSON(), null, "\t");
+        }
+    };
+};
diff --git a/_shared/lib/adm-zip/methods/deflater.js b/_shared/lib/adm-zip/methods/deflater.js
new file mode 100644
index 00000000..992de8f1
--- /dev/null
+++ b/_shared/lib/adm-zip/methods/deflater.js
@@ -0,0 +1,33 @@
+module.exports = function (/*Buffer*/ inbuf) {
+    var zlib = require("zlib");
+
+    var opts = { chunkSize: (parseInt(inbuf.length / 1024) + 1) * 1024 };
+
+    return {
+        deflate: function () {
+            return zlib.deflateRawSync(inbuf, opts);
+        },
+
+        deflateAsync: function (/*Function*/ callback) {
+            var tmp = zlib.createDeflateRaw(opts),
+                parts = [],
+                total = 0;
+            tmp.on("data", function (data) {
+                parts.push(data);
+                total += data.length;
+            });
+            tmp.on("end", function () {
+                var buf = Buffer.alloc(total),
+                    written = 0;
+                buf.fill(0);
+                for (var i = 0; i <
parts.length; i++) { + var part = parts[i]; + part.copy(buf, written); + written += part.length; + } + callback && callback(buf); + }); + tmp.end(inbuf); + } + }; +}; diff --git a/_shared/lib/adm-zip/methods/index.js b/_shared/lib/adm-zip/methods/index.js new file mode 100644 index 00000000..5285677f --- /dev/null +++ b/_shared/lib/adm-zip/methods/index.js @@ -0,0 +1,3 @@ +exports.Deflater = require("./deflater"); +exports.Inflater = require("./inflater"); +exports.ZipCrypto = require("./zipcrypto"); diff --git a/_shared/lib/adm-zip/methods/inflater.js b/_shared/lib/adm-zip/methods/inflater.js new file mode 100644 index 00000000..3ed0d589 --- /dev/null +++ b/_shared/lib/adm-zip/methods/inflater.js @@ -0,0 +1,31 @@ +module.exports = function (/*Buffer*/ inbuf) { + var zlib = require("zlib"); + + return { + inflate: function () { + return zlib.inflateRawSync(inbuf); + }, + + inflateAsync: function (/*Function*/ callback) { + var tmp = zlib.createInflateRaw(), + parts = [], + total = 0; + tmp.on("data", function (data) { + parts.push(data); + total += data.length; + }); + tmp.on("end", function () { + var buf = Buffer.alloc(total), + written = 0; + buf.fill(0); + for (var i = 0; i < parts.length; i++) { + var part = parts[i]; + part.copy(buf, written); + written += part.length; + } + callback && callback(buf); + }); + tmp.end(inbuf); + } + }; +}; diff --git a/_shared/lib/adm-zip/methods/zipcrypto.js b/_shared/lib/adm-zip/methods/zipcrypto.js new file mode 100644 index 00000000..701b5ce4 --- /dev/null +++ b/_shared/lib/adm-zip/methods/zipcrypto.js @@ -0,0 +1,170 @@ +"use strict"; + +// node crypt, we use it for generate salt +// eslint-disable-next-line node/no-unsupported-features/node-builtins +const { randomFillSync } = require("crypto"); + +// generate CRC32 lookup table +const crctable = new Uint32Array(256).map((t, crc) => { + for (let j = 0; j < 8; j++) { + if (0 !== (crc & 1)) { + crc = (crc >>> 1) ^ 0xedb88320; + } else { + crc >>>= 1; + } + } + return crc >>> 0; +}); + +// C-style uInt32 Multiply (discards higher bits, when JS multiply discards lower bits) +const uMul = (a, b) => Math.imul(a, b) >>> 0; + +// crc32 byte single update (actually same function is part of utils.crc32 function :) ) +const crc32update = (pCrc32, bval) => { + return crctable[(pCrc32 ^ bval) & 0xff] ^ (pCrc32 >>> 8); +}; + +// function for generating salt for encrytion header +const genSalt = () => { + if ("function" === typeof randomFillSync) { + return randomFillSync(Buffer.alloc(12)); + } else { + // fallback if function is not defined + return genSalt.node(); + } +}; + +// salt generation with node random function (mainly as fallback) +genSalt.node = () => { + const salt = Buffer.alloc(12); + const len = salt.length; + for (let i = 0; i < len; i++) salt[i] = (Math.random() * 256) & 0xff; + return salt; +}; + +// general config +const config = { + genSalt +}; + +// Class Initkeys handles same basic ops with keys +function Initkeys(pw) { + const pass = Buffer.isBuffer(pw) ? 
pw : Buffer.from(pw); + this.keys = new Uint32Array([0x12345678, 0x23456789, 0x34567890]); + for (let i = 0; i < pass.length; i++) { + this.updateKeys(pass[i]); + } +} + +Initkeys.prototype.updateKeys = function (byteValue) { + const keys = this.keys; + keys[0] = crc32update(keys[0], byteValue); + keys[1] += keys[0] & 0xff; + keys[1] = uMul(keys[1], 134775813) + 1; + keys[2] = crc32update(keys[2], keys[1] >>> 24); + return byteValue; +}; + +Initkeys.prototype.next = function () { + const k = (this.keys[2] | 2) >>> 0; // key + return (uMul(k, k ^ 1) >> 8) & 0xff; // decode +}; + +function make_decrypter(/*Buffer*/ pwd) { + // 1. Stage initialize key + const keys = new Initkeys(pwd); + + // return decrypter function + return function (/*Buffer*/ data) { + // result - we create new Buffer for results + const result = Buffer.alloc(data.length); + let pos = 0; + // process input data + for (let c of data) { + //c ^= keys.next(); + //result[pos++] = c; // decode & Save Value + result[pos++] = keys.updateKeys(c ^ keys.next()); // update keys with decoded byte + } + return result; + }; +} + +function make_encrypter(/*Buffer*/ pwd) { + // 1. Stage initialize key + const keys = new Initkeys(pwd); + + // return encrypting function, result and pos is here so we dont have to merge buffers later + return function (/*Buffer*/ data, /*Buffer*/ result, /* Number */ pos = 0) { + // result - we create new Buffer for results + if (!result) result = Buffer.alloc(data.length); + // process input data + for (let c of data) { + const k = keys.next(); // save key byte + result[pos++] = c ^ k; // save val + keys.updateKeys(c); // update keys with decoded byte + } + return result; + }; +} + +function decrypt(/*Buffer*/ data, /*Object*/ header, /*String, Buffer*/ pwd) { + if (!data || !Buffer.isBuffer(data) || data.length < 12) { + return Buffer.alloc(0); + } + + // 1. We Initialize and generate decrypting function + const decrypter = make_decrypter(pwd); + + // 2. decrypt salt what is always 12 bytes and is a part of file content + const salt = decrypter(data.slice(0, 12)); + + // 3. does password meet expectations + if (salt[11] !== header.crc >>> 24) { + throw "ADM-ZIP: Wrong Password"; + } + + // 4. decode content + return decrypter(data.slice(12)); +} + +// lets add way to populate salt, NOT RECOMMENDED for production but maybe useful for testing general functionality +function _salter(data) { + if (Buffer.isBuffer(data) && data.length >= 12) { + // be aware - currently salting buffer data is modified + config.genSalt = function () { + return data.slice(0, 12); + }; + } else if (data === "node") { + // test salt generation with node random function + config.genSalt = genSalt.node; + } else { + // if value is not acceptable config gets reset. + config.genSalt = genSalt; + } +} + +function encrypt(/*Buffer*/ data, /*Object*/ header, /*String, Buffer*/ pwd, /*Boolean*/ oldlike = false) { + // 1. test data if data is not Buffer we make buffer from it + if (data == null) data = Buffer.alloc(0); + // if data is not buffer be make buffer from it + if (!Buffer.isBuffer(data)) data = Buffer.from(data.toString()); + + // 2. We Initialize and generate encrypting function + const encrypter = make_encrypter(pwd); + + // 3. generate salt (12-bytes of random data) + const salt = config.genSalt(); + salt[11] = (header.crc >>> 24) & 0xff; + + // old implementations (before PKZip 2.04g) used two byte check + if (oldlike) salt[10] = (header.crc >>> 16) & 0xff; + + // 4. 
create output + const result = Buffer.alloc(data.length + 12); + encrypter(salt, result); + + // finally encode content + return encrypter(data, result, 12); +} + +module.exports = { decrypt, encrypt, _salter }; diff --git a/_shared/lib/adm-zip/util/constants.js b/_shared/lib/adm-zip/util/constants.js new file mode 100644 index 00000000..119954ba --- /dev/null +++ b/_shared/lib/adm-zip/util/constants.js @@ -0,0 +1,142 @@ +module.exports = { + /* The local file header */ + LOCHDR : 30, // LOC header size + LOCSIG : 0x04034b50, // "PK\003\004" + LOCVER : 4, // version needed to extract + LOCFLG : 6, // general purpose bit flag + LOCHOW : 8, // compression method + LOCTIM : 10, // modification time (2 bytes time, 2 bytes date) + LOCCRC : 14, // uncompressed file crc-32 value + LOCSIZ : 18, // compressed size + LOCLEN : 22, // uncompressed size + LOCNAM : 26, // filename length + LOCEXT : 28, // extra field length + + /* The Data descriptor */ + EXTSIG : 0x08074b50, // "PK\007\008" + EXTHDR : 16, // EXT header size + EXTCRC : 4, // uncompressed file crc-32 value + EXTSIZ : 8, // compressed size + EXTLEN : 12, // uncompressed size + + /* The central directory file header */ + CENHDR : 46, // CEN header size + CENSIG : 0x02014b50, // "PK\001\002" + CENVEM : 4, // version made by + CENVER : 6, // version needed to extract + CENFLG : 8, // encrypt, decrypt flags + CENHOW : 10, // compression method + CENTIM : 12, // modification time (2 bytes time, 2 bytes date) + CENCRC : 16, // uncompressed file crc-32 value + CENSIZ : 20, // compressed size + CENLEN : 24, // uncompressed size + CENNAM : 28, // filename length + CENEXT : 30, // extra field length + CENCOM : 32, // file comment length + CENDSK : 34, // volume number start + CENATT : 36, // internal file attributes + CENATX : 38, // external file attributes (host system dependent) + CENOFF : 42, // LOC header offset + + /* The entries in the end of central directory */ + ENDHDR : 22, // END header size + ENDSIG : 0x06054b50, // "PK\005\006" + ENDSUB : 8, // number of entries on this disk + ENDTOT : 10, // total number of entries + ENDSIZ : 12, // central directory size in bytes + ENDOFF : 16, // offset of first CEN header + ENDCOM : 20, // zip file comment length + + END64HDR : 20, // zip64 END header size + END64SIG : 0x07064b50, // zip64 Locator signature, "PK\006\007" + END64START : 4, // number of the disk with the start of the zip64 + END64OFF : 8, // relative offset of the zip64 end of central directory + END64NUMDISKS : 16, // total number of disks + + ZIP64SIG : 0x06064b50, // zip64 signature, "PK\006\006" + ZIP64HDR : 56, // zip64 record minimum size + ZIP64LEAD : 12, // leading bytes at the start of the record, not counted by the value stored in ZIP64SIZE + ZIP64SIZE : 4, // zip64 size of the central directory record + ZIP64VEM : 12, // zip64 version made by + ZIP64VER : 14, // zip64 version needed to extract + ZIP64DSK : 16, // zip64 number of this disk + ZIP64DSKDIR : 20, // number of the disk with the start of the record directory + ZIP64SUB : 24, // number of entries on this disk + ZIP64TOT : 32, // total number of entries + ZIP64SIZB : 40, // zip64 central directory size in bytes + ZIP64OFF : 48, // offset of start of central directory with respect to the starting disk number + ZIP64EXTRA : 56, // extensible data sector + + /* Compression methods */ + STORED : 0, // no compression + SHRUNK : 1, // shrunk + REDUCED1 : 2, // reduced with compression factor 1 + REDUCED2 : 3, // reduced with compression factor 2 + REDUCED3 : 4, // 
reduced with compression factor 3 + REDUCED4 : 5, // reduced with compression factor 4 + IMPLODED : 6, // imploded + // 7 reserved for Tokenizing compression algorithm + DEFLATED : 8, // deflated + ENHANCED_DEFLATED: 9, // enhanced deflated + PKWARE : 10,// PKWare DCL imploded + // 11 reserved by PKWARE + BZIP2 : 12, // compressed using BZIP2 + // 13 reserved by PKWARE + LZMA : 14, // LZMA + // 15-17 reserved by PKWARE + IBM_TERSE : 18, // compressed using IBM TERSE + IBM_LZ77 : 19, // IBM LZ77 z + AES_ENCRYPT : 99, // WinZIP AES encryption method + + /* General purpose bit flag */ + // values can obtained with expression 2**bitnr + FLG_ENC : 1, // Bit 0: encrypted file + FLG_COMP1 : 2, // Bit 1, compression option + FLG_COMP2 : 4, // Bit 2, compression option + FLG_DESC : 8, // Bit 3, data descriptor + FLG_ENH : 16, // Bit 4, enhanced deflating + FLG_PATCH : 32, // Bit 5, indicates that the file is compressed patched data. + FLG_STR : 64, // Bit 6, strong encryption (patented) + // Bits 7-10: Currently unused. + FLG_EFS : 2048, // Bit 11: Language encoding flag (EFS) + // Bit 12: Reserved by PKWARE for enhanced compression. + // Bit 13: encrypted the Central Directory (patented). + // Bits 14-15: Reserved by PKWARE. + FLG_MSK : 4096, // mask header values + + /* Load type */ + FILE : 2, + BUFFER : 1, + NONE : 0, + + /* 4.5 Extensible data fields */ + EF_ID : 0, + EF_SIZE : 2, + + /* Header IDs */ + ID_ZIP64 : 0x0001, + ID_AVINFO : 0x0007, + ID_PFS : 0x0008, + ID_OS2 : 0x0009, + ID_NTFS : 0x000a, + ID_OPENVMS : 0x000c, + ID_UNIX : 0x000d, + ID_FORK : 0x000e, + ID_PATCH : 0x000f, + ID_X509_PKCS7 : 0x0014, + ID_X509_CERTID_F : 0x0015, + ID_X509_CERTID_C : 0x0016, + ID_STRONGENC : 0x0017, + ID_RECORD_MGT : 0x0018, + ID_X509_PKCS7_RL : 0x0019, + ID_IBM1 : 0x0065, + ID_IBM2 : 0x0066, + ID_POSZIP : 0x4690, + + EF_ZIP64_OR_32 : 0xffffffff, + EF_ZIP64_OR_16 : 0xffff, + EF_ZIP64_SUNCOMP : 0, + EF_ZIP64_SCOMP : 8, + EF_ZIP64_RHO : 16, + EF_ZIP64_DSN : 24 +}; diff --git a/_shared/lib/adm-zip/util/errors.js b/_shared/lib/adm-zip/util/errors.js new file mode 100644 index 00000000..dde469bc --- /dev/null +++ b/_shared/lib/adm-zip/util/errors.js @@ -0,0 +1,35 @@ +module.exports = { + /* Header error messages */ + INVALID_LOC: "Invalid LOC header (bad signature)", + INVALID_CEN: "Invalid CEN header (bad signature)", + INVALID_END: "Invalid END header (bad signature)", + + /* ZipEntry error messages*/ + NO_DATA: "Nothing to decompress", + BAD_CRC: "CRC32 checksum failed", + FILE_IN_THE_WAY: "There is a file in the way: %s", + UNKNOWN_METHOD: "Invalid/unsupported compression method", + + /* Inflater error messages */ + AVAIL_DATA: "inflate::Available inflate data did not terminate", + INVALID_DISTANCE: "inflate::Invalid literal/length or distance code in fixed or dynamic block", + TO_MANY_CODES: "inflate::Dynamic block code description: too many length or distance codes", + INVALID_REPEAT_LEN: "inflate::Dynamic block code description: repeat more than specified lengths", + INVALID_REPEAT_FIRST: "inflate::Dynamic block code description: repeat lengths with no first length", + INCOMPLETE_CODES: "inflate::Dynamic block code description: code lengths codes incomplete", + INVALID_DYN_DISTANCE: "inflate::Dynamic block code description: invalid distance code lengths", + INVALID_CODES_LEN: "inflate::Dynamic block code description: invalid literal/length code lengths", + INVALID_STORE_BLOCK: "inflate::Stored block length did not match one's complement", + INVALID_BLOCK_TYPE: "inflate::Invalid block type (type == 
3)", + + /* ADM-ZIP error messages */ + CANT_EXTRACT_FILE: "Could not extract the file", + CANT_OVERRIDE: "Target file already exists", + NO_ZIP: "No zip file was loaded", + NO_ENTRY: "Entry doesn't exist", + DIRECTORY_CONTENT_ERROR: "A directory cannot have content", + FILE_NOT_FOUND: "File not found: %s", + NOT_IMPLEMENTED: "Not implemented", + INVALID_FILENAME: "Invalid filename", + INVALID_FORMAT: "Invalid or unsupported zip format. No END header found" +}; diff --git a/_shared/lib/adm-zip/util/fattr.js b/_shared/lib/adm-zip/util/fattr.js new file mode 100644 index 00000000..163e2e52 --- /dev/null +++ b/_shared/lib/adm-zip/util/fattr.js @@ -0,0 +1,79 @@ +const fs = require("./fileSystem").require(); +const pth = require("path"); + +fs.existsSync = fs.existsSync || pth.existsSync; + +module.exports = function (/*String*/ path) { + var _path = path || "", + _obj = newAttr(), + _stat = null; + + function newAttr() { + return { + directory: false, + readonly: false, + hidden: false, + executable: false, + mtime: 0, + atime: 0 + }; + } + + if (_path && fs.existsSync(_path)) { + _stat = fs.statSync(_path); + _obj.directory = _stat.isDirectory(); + _obj.mtime = _stat.mtime; + _obj.atime = _stat.atime; + _obj.executable = (0o111 & _stat.mode) !== 0; // file is executable who ever har right not just owner + _obj.readonly = (0o200 & _stat.mode) === 0; // readonly if owner has no write right + _obj.hidden = pth.basename(_path)[0] === "."; + } else { + console.warn("Invalid path: " + _path); + } + + return { + get directory() { + return _obj.directory; + }, + + get readOnly() { + return _obj.readonly; + }, + + get hidden() { + return _obj.hidden; + }, + + get mtime() { + return _obj.mtime; + }, + + get atime() { + return _obj.atime; + }, + + get executable() { + return _obj.executable; + }, + + decodeAttributes: function () {}, + + encodeAttributes: function () {}, + + toJSON: function () { + return { + path: _path, + isDirectory: _obj.directory, + isReadOnly: _obj.readonly, + isHidden: _obj.hidden, + isExecutable: _obj.executable, + mTime: _obj.mtime, + aTime: _obj.atime + }; + }, + + toString: function () { + return JSON.stringify(this.toJSON(), null, "\t"); + } + }; +}; diff --git a/_shared/lib/adm-zip/util/fileSystem.js b/_shared/lib/adm-zip/util/fileSystem.js new file mode 100644 index 00000000..dee5c2af --- /dev/null +++ b/_shared/lib/adm-zip/util/fileSystem.js @@ -0,0 +1,11 @@ +exports.require = function () { + if (typeof process === "object" && process.versions && process.versions["electron"]) { + try { + const originalFs = require("original-fs"); + if (Object.keys(originalFs).length > 0) { + return originalFs; + } + } catch (e) {} + } + return require("fs"); +}; diff --git a/_shared/lib/adm-zip/util/index.js b/_shared/lib/adm-zip/util/index.js new file mode 100644 index 00000000..6790df40 --- /dev/null +++ b/_shared/lib/adm-zip/util/index.js @@ -0,0 +1,4 @@ +module.exports = require("./utils"); +module.exports.Constants = require("./constants"); +module.exports.Errors = require("./errors"); +module.exports.FileAttr = require("./fattr"); diff --git a/_shared/lib/adm-zip/util/utils.js b/_shared/lib/adm-zip/util/utils.js new file mode 100644 index 00000000..9d732ba6 --- /dev/null +++ b/_shared/lib/adm-zip/util/utils.js @@ -0,0 +1,247 @@ +const fsystem = require("./fileSystem").require(); +const pth = require("path"); +const Constants = require("./constants"); +const Errors = require("./errors"); +const isWin = typeof process === "object" && "win32" === process.platform; + +const is_Obj = 
(obj) => obj && typeof obj === "object"; + +// generate CRC32 lookup table +const crcTable = new Uint32Array(256).map((t, c) => { + for (let k = 0; k < 8; k++) { + if ((c & 1) !== 0) { + c = 0xedb88320 ^ (c >>> 1); + } else { + c >>>= 1; + } + } + return c >>> 0; +}); + +// UTILS functions + +function Utils(opts) { + this.sep = pth.sep; + this.fs = fsystem; + + if (is_Obj(opts)) { + // custom filesystem + if (is_Obj(opts.fs) && typeof opts.fs.statSync === "function") { + this.fs = opts.fs; + } + } +} + +module.exports = Utils; + +// INSTANCED functions + +Utils.prototype.makeDir = function (/*String*/ folder) { + const self = this; + + // Sync - make directories tree + function mkdirSync(/*String*/ fpath) { + let resolvedPath = fpath.split(self.sep)[0]; + fpath.split(self.sep).forEach(function (name) { + if (!name || name.substr(-1, 1) === ":") return; + resolvedPath += self.sep + name; + var stat; + try { + stat = self.fs.statSync(resolvedPath); + } catch (e) { + self.fs.mkdirSync(resolvedPath); + } + if (stat && stat.isFile()) throw Errors.FILE_IN_THE_WAY.replace("%s", resolvedPath); + }); + } + + mkdirSync(folder); +}; + +Utils.prototype.writeFileTo = function (/*String*/ path, /*Buffer*/ content, /*Boolean*/ overwrite, /*Number*/ attr) { + const self = this; + if (self.fs.existsSync(path)) { + if (!overwrite) return false; // cannot overwrite + + var stat = self.fs.statSync(path); + if (stat.isDirectory()) { + return false; + } + } + var folder = pth.dirname(path); + if (!self.fs.existsSync(folder)) { + self.makeDir(folder); + } + + var fd; + try { + fd = self.fs.openSync(path, "w", 438); // 0666 + } catch (e) { + self.fs.chmodSync(path, 438); + fd = self.fs.openSync(path, "w", 438); + } + if (fd) { + try { + self.fs.writeSync(fd, content, 0, content.length, 0); + } finally { + self.fs.closeSync(fd); + } + } + self.fs.chmodSync(path, attr || 438); + return true; +}; + +Utils.prototype.writeFileToAsync = function (/*String*/ path, /*Buffer*/ content, /*Boolean*/ overwrite, /*Number*/ attr, /*Function*/ callback) { + if (typeof attr === "function") { + callback = attr; + attr = undefined; + } + + const self = this; + + self.fs.exists(path, function (exist) { + if (exist && !overwrite) return callback(false); + + self.fs.stat(path, function (err, stat) { + if (exist && stat.isDirectory()) { + return callback(false); + } + + var folder = pth.dirname(path); + self.fs.exists(folder, function (exists) { + if (!exists) self.makeDir(folder); + + self.fs.open(path, "w", 438, function (err, fd) { + if (err) { + self.fs.chmod(path, 438, function () { + self.fs.open(path, "w", 438, function (err, fd) { + self.fs.write(fd, content, 0, content.length, 0, function () { + self.fs.close(fd, function () { + self.fs.chmod(path, attr || 438, function () { + callback(true); + }); + }); + }); + }); + }); + } else if (fd) { + self.fs.write(fd, content, 0, content.length, 0, function () { + self.fs.close(fd, function () { + self.fs.chmod(path, attr || 438, function () { + callback(true); + }); + }); + }); + } else { + self.fs.chmod(path, attr || 438, function () { + callback(true); + }); + } + }); + }); + }); + }); +}; + +Utils.prototype.findFiles = function (/*String*/ path) { + const self = this; + + function findSync(/*String*/ dir, /*RegExp*/ pattern, /*Boolean*/ recursive) { + if (typeof pattern === "boolean") { + recursive = pattern; + pattern = undefined; + } + let files = []; + self.fs.readdirSync(dir).forEach(function (file) { + var path = pth.join(dir, file); + + if 
(self.fs.statSync(path).isDirectory() && recursive) files = files.concat(findSync(path, pattern, recursive)); + + if (!pattern || pattern.test(path)) { + files.push(pth.normalize(path) + (self.fs.statSync(path).isDirectory() ? self.sep : "")); + } + }); + return files; + } + + return findSync(path, undefined, true); +}; + +Utils.prototype.getAttributes = function () {}; + +Utils.prototype.setAttributes = function () {}; + +// STATIC functions + +// crc32 single update (it is part of crc32) +Utils.crc32update = function (crc, byte) { + return crcTable[(crc ^ byte) & 0xff] ^ (crc >>> 8); +}; + +Utils.crc32 = function (buf) { + if (typeof buf === "string") { + buf = Buffer.from(buf, "utf8"); + } + // Generate crcTable + if (!crcTable.length) genCRCTable(); + + let len = buf.length; + let crc = ~0; + for (let off = 0; off < len; ) crc = Utils.crc32update(crc, buf[off++]); + // xor and cast as uint32 number + return ~crc >>> 0; +}; + +Utils.methodToString = function (/*Number*/ method) { + switch (method) { + case Constants.STORED: + return "STORED (" + method + ")"; + case Constants.DEFLATED: + return "DEFLATED (" + method + ")"; + default: + return "UNSUPPORTED (" + method + ")"; + } +}; + +// removes ".." style path elements +Utils.canonical = function (/*string*/ path) { + if (!path) return ""; + // trick normalize think path is absolute + var safeSuffix = pth.posix.normalize("/" + path.split("\\").join("/")); + return pth.join(".", safeSuffix); +}; + +// make abolute paths taking prefix as root folder +Utils.sanitize = function (/*string*/ prefix, /*string*/ name) { + prefix = pth.resolve(pth.normalize(prefix)); + var parts = name.split("/"); + for (var i = 0, l = parts.length; i < l; i++) { + var path = pth.normalize(pth.join(prefix, parts.slice(i, l).join(pth.sep))); + if (path.indexOf(prefix) === 0) { + return path; + } + } + return pth.normalize(pth.join(prefix, pth.basename(name))); +}; + +// converts buffer, Uint8Array, string types to buffer +Utils.toBuffer = function toBuffer(/*buffer, Uint8Array, string*/ input) { + if (Buffer.isBuffer(input)) { + return input; + } else if (input instanceof Uint8Array) { + return Buffer.from(input); + } else { + // expect string all other values are invalid and return empty buffer + return typeof input === "string" ? 
Buffer.from(input, "utf8") : Buffer.alloc(0); + } +}; + +Utils.readBigUInt64LE = function (/*Buffer*/ buffer, /*int*/ index) { + var slice = Buffer.from(buffer.slice(index, index + 8)); + slice.swap64(); + + return parseInt(`0x${slice.toString("hex")}`); +}; + +Utils.isWin = isWin; // Do we have windows system +Utils.crcTable = crcTable; diff --git a/_shared/lib/adm-zip/zipEntry.js b/_shared/lib/adm-zip/zipEntry.js new file mode 100644 index 00000000..8c3053b5 --- /dev/null +++ b/_shared/lib/adm-zip/zipEntry.js @@ -0,0 +1,333 @@ +var Utils = require("./util"), + Headers = require("./headers"), + Constants = Utils.Constants, + Methods = require("./methods"); + +module.exports = function (/*Buffer*/ input) { + var _entryHeader = new Headers.EntryHeader(), + _entryName = Buffer.alloc(0), + _comment = Buffer.alloc(0), + _isDirectory = false, + uncompressedData = null, + _extra = Buffer.alloc(0); + + function getCompressedDataFromZip() { + if (!input || !Buffer.isBuffer(input)) { + return Buffer.alloc(0); + } + _entryHeader.loadDataHeaderFromBinary(input); + return input.slice(_entryHeader.realDataOffset, _entryHeader.realDataOffset + _entryHeader.compressedSize); + } + + function crc32OK(data) { + // if bit 3 (0x08) of the general-purpose flags field is set, then the CRC-32 and file sizes are not known when the header is written + if ((_entryHeader.flags & 0x8) !== 0x8) { + if (Utils.crc32(data) !== _entryHeader.dataHeader.crc) { + return false; + } + } else { + // @TODO: load and check data descriptor header + // The fields in the local header are filled with zero, and the CRC-32 and size are appended in a 12-byte structure + // (optionally preceded by a 4-byte signature) immediately after the compressed data: + } + return true; + } + + function decompress(/*Boolean*/ async, /*Function*/ callback, /*String, Buffer*/ pass) { + if (typeof callback === "undefined" && typeof async === "string") { + pass = async; + async = void 0; + } + if (_isDirectory) { + if (async && callback) { + callback(Buffer.alloc(0), Utils.Errors.DIRECTORY_CONTENT_ERROR); //si added error. + } + return Buffer.alloc(0); + } + + var compressedData = getCompressedDataFromZip(); + + if (compressedData.length === 0) { + // File is empty, nothing to decompress. + if (async && callback) callback(compressedData); + return compressedData; + } + + if (_entryHeader.encripted) { + if ("string" !== typeof pass && !Buffer.isBuffer(pass)) { + throw new Error("ADM-ZIP: Incompatible password parameter"); + } + compressedData = Methods.ZipCrypto.decrypt(compressedData, _entryHeader, pass); + } + + var data = Buffer.alloc(_entryHeader.size); + + switch (_entryHeader.method) { + case Utils.Constants.STORED: + compressedData.copy(data); + if (!crc32OK(data)) { + if (async && callback) callback(data, Utils.Errors.BAD_CRC); //si added error + throw new Error(Utils.Errors.BAD_CRC); + } else { + //si added otherwise did not seem to return data. 
+ if (async && callback) callback(data); + return data; + } + case Utils.Constants.DEFLATED: + var inflater = new Methods.Inflater(compressedData); + if (!async) { + const result = inflater.inflate(data); + result.copy(data, 0); + if (!crc32OK(data)) { + throw new Error(Utils.Errors.BAD_CRC + " " + _entryName.toString()); + } + return data; + } else { + inflater.inflateAsync(function (result) { + result.copy(result, 0); + if (callback) { + if (!crc32OK(result)) { + callback(result, Utils.Errors.BAD_CRC); //si added error + } else { + callback(result); + } + } + }); + } + break; + default: + if (async && callback) callback(Buffer.alloc(0), Utils.Errors.UNKNOWN_METHOD); + throw new Error(Utils.Errors.UNKNOWN_METHOD); + } + } + + function compress(/*Boolean*/ async, /*Function*/ callback) { + if ((!uncompressedData || !uncompressedData.length) && Buffer.isBuffer(input)) { + // no data set or the data wasn't changed to require recompression + if (async && callback) callback(getCompressedDataFromZip()); + return getCompressedDataFromZip(); + } + + if (uncompressedData.length && !_isDirectory) { + var compressedData; + // Local file header + switch (_entryHeader.method) { + case Utils.Constants.STORED: + _entryHeader.compressedSize = _entryHeader.size; + + compressedData = Buffer.alloc(uncompressedData.length); + uncompressedData.copy(compressedData); + + if (async && callback) callback(compressedData); + return compressedData; + default: + case Utils.Constants.DEFLATED: + var deflater = new Methods.Deflater(uncompressedData); + if (!async) { + var deflated = deflater.deflate(); + _entryHeader.compressedSize = deflated.length; + return deflated; + } else { + deflater.deflateAsync(function (data) { + compressedData = Buffer.alloc(data.length); + _entryHeader.compressedSize = data.length; + data.copy(compressedData); + callback && callback(compressedData); + }); + } + deflater = null; + break; + } + } else if (async && callback) { + callback(Buffer.alloc(0)); + } else { + return Buffer.alloc(0); + } + } + + function readUInt64LE(buffer, offset) { + return (buffer.readUInt32LE(offset + 4) << 4) + buffer.readUInt32LE(offset); + } + + function parseExtra(data) { + var offset = 0; + var signature, size, part; + while (offset < data.length) { + signature = data.readUInt16LE(offset); + offset += 2; + size = data.readUInt16LE(offset); + offset += 2; + part = data.slice(offset, offset + size); + offset += size; + if (Constants.ID_ZIP64 === signature) { + parseZip64ExtendedInformation(part); + } + } + } + + //Override header field values with values from the ZIP64 extra field + function parseZip64ExtendedInformation(data) { + var size, compressedSize, offset, diskNumStart; + + if (data.length >= Constants.EF_ZIP64_SCOMP) { + size = readUInt64LE(data, Constants.EF_ZIP64_SUNCOMP); + if (_entryHeader.size === Constants.EF_ZIP64_OR_32) { + _entryHeader.size = size; + } + } + if (data.length >= Constants.EF_ZIP64_RHO) { + compressedSize = readUInt64LE(data, Constants.EF_ZIP64_SCOMP); + if (_entryHeader.compressedSize === Constants.EF_ZIP64_OR_32) { + _entryHeader.compressedSize = compressedSize; + } + } + if (data.length >= Constants.EF_ZIP64_DSN) { + offset = readUInt64LE(data, Constants.EF_ZIP64_RHO); + if (_entryHeader.offset === Constants.EF_ZIP64_OR_32) { + _entryHeader.offset = offset; + } + } + if (data.length >= Constants.EF_ZIP64_DSN + 4) { + diskNumStart = data.readUInt32LE(Constants.EF_ZIP64_DSN); + if (_entryHeader.diskNumStart === Constants.EF_ZIP64_OR_16) { + _entryHeader.diskNumStart = 
diskNumStart; + } + } + } + + return { + get entryName() { + return _entryName.toString(); + }, + get rawEntryName() { + return _entryName; + }, + set entryName(val) { + _entryName = Utils.toBuffer(val); + var lastChar = _entryName[_entryName.length - 1]; + _isDirectory = lastChar === 47 || lastChar === 92; + _entryHeader.fileNameLength = _entryName.length; + }, + + get extra() { + return _extra; + }, + set extra(val) { + _extra = val; + _entryHeader.extraLength = val.length; + parseExtra(val); + }, + + get comment() { + return _comment.toString(); + }, + set comment(val) { + _comment = Utils.toBuffer(val); + _entryHeader.commentLength = _comment.length; + }, + + get name() { + var n = _entryName.toString(); + return _isDirectory + ? n + .substr(n.length - 1) + .split("/") + .pop() + : n.split("/").pop(); + }, + get isDirectory() { + return _isDirectory; + }, + + getCompressedData: function () { + return compress(false, null); + }, + + getCompressedDataAsync: function (/*Function*/ callback) { + compress(true, callback); + }, + + setData: function (value) { + uncompressedData = Utils.toBuffer(value); + if (!_isDirectory && uncompressedData.length) { + _entryHeader.size = uncompressedData.length; + _entryHeader.method = Utils.Constants.DEFLATED; + _entryHeader.crc = Utils.crc32(value); + _entryHeader.changed = true; + } else { + // folders and blank files should be stored + _entryHeader.method = Utils.Constants.STORED; + } + }, + + getData: function (pass) { + if (_entryHeader.changed) { + return uncompressedData; + } else { + return decompress(false, null, pass); + } + }, + + getDataAsync: function (/*Function*/ callback, pass) { + if (_entryHeader.changed) { + callback(uncompressedData); + } else { + decompress(true, callback, pass); + } + }, + + set attr(attr) { + _entryHeader.attr = attr; + }, + get attr() { + return _entryHeader.attr; + }, + + set header(/*Buffer*/ data) { + _entryHeader.loadFromBinary(data); + }, + + get header() { + return _entryHeader; + }, + + packHeader: function () { + // 1. create header (buffer) + var header = _entryHeader.entryHeaderToBinary(); + var addpos = Utils.Constants.CENHDR; + // 2. add file name + _entryName.copy(header, addpos); + addpos += _entryName.length; + // 3. add extra data + if (_entryHeader.extraLength) { + _extra.copy(header, addpos); + addpos += _entryHeader.extraLength; + } + // 4. 
add file comment + if (_entryHeader.commentLength) { + _comment.copy(header, addpos); + } + return header; + }, + + toJSON: function () { + const bytes = function (nr) { + return "<" + ((nr && nr.length + " bytes buffer") || "null") + ">"; + }; + + return { + entryName: this.entryName, + name: this.name, + comment: this.comment, + isDirectory: this.isDirectory, + header: _entryHeader.toJSON(), + compressedData: bytes(input), + data: bytes(uncompressedData) + }; + }, + + toString: function () { + return JSON.stringify(this.toJSON(), null, "\t"); + } + }; +}; diff --git a/_shared/lib/adm-zip/zipFile.js b/_shared/lib/adm-zip/zipFile.js new file mode 100644 index 00000000..997226a2 --- /dev/null +++ b/_shared/lib/adm-zip/zipFile.js @@ -0,0 +1,384 @@ +const ZipEntry = require("./zipEntry"); +const Headers = require("./headers"); +const Utils = require("./util"); + +module.exports = function (/*Buffer|null*/ inBuffer, /** object */ options) { + var entryList = [], + entryTable = {}, + _comment = Buffer.alloc(0), + mainHeader = new Headers.MainHeader(), + loadedEntries = false; + + // assign options + const opts = Object.assign(Object.create(null), options); + + const { noSort } = opts; + + if (inBuffer) { + // is a memory buffer + readMainHeader(opts.readEntries); + } else { + // none. is a new file + loadedEntries = true; + } + + function iterateEntries(callback) { + const totalEntries = mainHeader.diskEntries; // total number of entries + let index = mainHeader.offset; // offset of first CEN header + + for (let i = 0; i < totalEntries; i++) { + let tmp = index; + const entry = new ZipEntry(inBuffer); + + entry.header = inBuffer.slice(tmp, (tmp += Utils.Constants.CENHDR)); + entry.entryName = inBuffer.slice(tmp, (tmp += entry.header.fileNameLength)); + + index += entry.header.entryHeaderSize; + + callback(entry); + } + } + + function readEntries() { + loadedEntries = true; + entryTable = {}; + entryList = new Array(mainHeader.diskEntries); // total number of entries + var index = mainHeader.offset; // offset of first CEN header + for (var i = 0; i < entryList.length; i++) { + var tmp = index, + entry = new ZipEntry(inBuffer); + entry.header = inBuffer.slice(tmp, (tmp += Utils.Constants.CENHDR)); + + entry.entryName = inBuffer.slice(tmp, (tmp += entry.header.fileNameLength)); + + if (entry.header.extraLength) { + entry.extra = inBuffer.slice(tmp, (tmp += entry.header.extraLength)); + } + + if (entry.header.commentLength) entry.comment = inBuffer.slice(tmp, tmp + entry.header.commentLength); + + index += entry.header.entryHeaderSize; + + entryList[i] = entry; + entryTable[entry.entryName] = entry; + } + } + + function readMainHeader(/*Boolean*/ readNow) { + var i = inBuffer.length - Utils.Constants.ENDHDR, // END header size + max = Math.max(0, i - 0xffff), // 0xFFFF is the max zip file comment length + n = max, + endStart = inBuffer.length, + endOffset = -1, // Start offset of the END header + commentEnd = 0; + + for (i; i >= n; i--) { + if (inBuffer[i] !== 0x50) continue; // quick check that the byte is 'P' + if (inBuffer.readUInt32LE(i) === Utils.Constants.ENDSIG) { + // "PK\005\006" + endOffset = i; + commentEnd = i; + endStart = i + Utils.Constants.ENDHDR; + // We already found a regular signature, let's look just a bit further to check if there's any zip64 signature + n = i - Utils.Constants.END64HDR; + continue; + } + + if (inBuffer.readUInt32LE(i) === Utils.Constants.END64SIG) { + // Found a zip64 signature, let's continue reading the whole zip64 record + n = max; + continue; + } + + if 
(inBuffer.readUInt32LE(i) === Utils.Constants.ZIP64SIG) { + // Found the zip64 record, let's determine it's size + endOffset = i; + endStart = i + Utils.readBigUInt64LE(inBuffer, i + Utils.Constants.ZIP64SIZE) + Utils.Constants.ZIP64LEAD; + break; + } + } + + if (!~endOffset) throw new Error(Utils.Errors.INVALID_FORMAT); + + mainHeader.loadFromBinary(inBuffer.slice(endOffset, endStart)); + if (mainHeader.commentLength) { + _comment = inBuffer.slice(commentEnd + Utils.Constants.ENDHDR); + } + if (readNow) readEntries(); + } + + function sortEntries() { + if (entryList.length > 1 && !noSort) { + entryList.sort((a, b) => a.entryName.toLowerCase().localeCompare(b.entryName.toLowerCase())); + } + } + + return { + /** + * Returns an array of ZipEntry objects existent in the current opened archive + * @return Array + */ + get entries() { + if (!loadedEntries) { + readEntries(); + } + return entryList; + }, + + /** + * Archive comment + * @return {String} + */ + get comment() { + return _comment.toString(); + }, + set comment(val) { + _comment = Utils.toBuffer(val); + mainHeader.commentLength = _comment.length; + }, + + getEntryCount: function () { + if (!loadedEntries) { + return mainHeader.diskEntries; + } + + return entryList.length; + }, + + forEach: function (callback) { + if (!loadedEntries) { + iterateEntries(callback); + return; + } + + entryList.forEach(callback); + }, + + /** + * Returns a reference to the entry with the given name or null if entry is inexistent + * + * @param entryName + * @return ZipEntry + */ + getEntry: function (/*String*/ entryName) { + if (!loadedEntries) { + readEntries(); + } + return entryTable[entryName] || null; + }, + + /** + * Adds the given entry to the entry list + * + * @param entry + */ + setEntry: function (/*ZipEntry*/ entry) { + if (!loadedEntries) { + readEntries(); + } + entryList.push(entry); + entryTable[entry.entryName] = entry; + mainHeader.totalEntries = entryList.length; + }, + + /** + * Removes the entry with the given name from the entry list. + * + * If the entry is a directory, then all nested files and directories will be removed + * @param entryName + */ + deleteEntry: function (/*String*/ entryName) { + if (!loadedEntries) { + readEntries(); + } + var entry = entryTable[entryName]; + if (entry && entry.isDirectory) { + var _self = this; + this.getEntryChildren(entry).forEach(function (child) { + if (child.entryName !== entryName) { + _self.deleteEntry(child.entryName); + } + }); + } + entryList.splice(entryList.indexOf(entry), 1); + delete entryTable[entryName]; + mainHeader.totalEntries = entryList.length; + }, + + /** + * Iterates and returns all nested files and directories of the given entry + * + * @param entry + * @return Array + */ + getEntryChildren: function (/*ZipEntry*/ entry) { + if (!loadedEntries) { + readEntries(); + } + if (entry && entry.isDirectory) { + const list = []; + const name = entry.entryName; + const len = name.length; + + entryList.forEach(function (zipEntry) { + if (zipEntry.entryName.substr(0, len) === name) { + list.push(zipEntry); + } + }); + return list; + } + return []; + }, + + /** + * Returns the zip file + * + * @return Buffer + */ + compressToBuffer: function () { + if (!loadedEntries) { + readEntries(); + } + sortEntries(); + + const dataBlock = []; + const entryHeaders = []; + let totalSize = 0; + let dindex = 0; + + mainHeader.size = 0; + mainHeader.offset = 0; + + for (const entry of entryList) { + // compress data and set local and entry header accordingly. 
Reason why is called first + const compressedData = entry.getCompressedData(); + // 1. construct data header + entry.header.offset = dindex; + const dataHeader = entry.header.dataHeaderToBinary(); + const entryNameLen = entry.rawEntryName.length; + // 1.2. postheader - data after data header + const postHeader = Buffer.alloc(entryNameLen + entry.extra.length); + entry.rawEntryName.copy(postHeader, 0); + postHeader.copy(entry.extra, entryNameLen); + + // 2. offsets + const dataLength = dataHeader.length + postHeader.length + compressedData.length; + dindex += dataLength; + + // 3. store values in sequence + dataBlock.push(dataHeader); + dataBlock.push(postHeader); + dataBlock.push(compressedData); + + // 4. construct entry header + const entryHeader = entry.packHeader(); + entryHeaders.push(entryHeader); + // 5. update main header + mainHeader.size += entryHeader.length; + totalSize += dataLength + entryHeader.length; + } + + totalSize += mainHeader.mainHeaderSize; // also includes zip file comment length + // point to end of data and beginning of central directory first record + mainHeader.offset = dindex; + + dindex = 0; + const outBuffer = Buffer.alloc(totalSize); + // write data blocks + for (const content of dataBlock) { + content.copy(outBuffer, dindex); + dindex += content.length; + } + + // write central directory entries + for (const content of entryHeaders) { + content.copy(outBuffer, dindex); + dindex += content.length; + } + + // write main header + const mh = mainHeader.toBinary(); + if (_comment) { + _comment.copy(mh, Utils.Constants.ENDHDR); // add zip file comment + } + mh.copy(outBuffer, dindex); + + return outBuffer; + }, + + toAsyncBuffer: function (/*Function*/ onSuccess, /*Function*/ onFail, /*Function*/ onItemStart, /*Function*/ onItemEnd) { + try { + if (!loadedEntries) { + readEntries(); + } + sortEntries(); + + const dataBlock = []; + const entryHeaders = []; + let totalSize = 0; + let dindex = 0; + + mainHeader.size = 0; + mainHeader.offset = 0; + + const compress2Buffer = function (entryLists) { + if (entryLists.length) { + const entry = entryLists.pop(); + const name = entry.entryName + entry.extra.toString(); + if (onItemStart) onItemStart(name); + entry.getCompressedDataAsync(function (compressedData) { + if (onItemEnd) onItemEnd(name); + + entry.header.offset = dindex; + // data header + const dataHeader = entry.header.dataHeaderToBinary(); + const postHeader = Buffer.alloc(name.length, name); + const dataLength = dataHeader.length + postHeader.length + compressedData.length; + + dindex += dataLength; + + dataBlock.push(dataHeader); + dataBlock.push(postHeader); + dataBlock.push(compressedData); + + const entryHeader = entry.packHeader(); + entryHeaders.push(entryHeader); + mainHeader.size += entryHeader.length; + totalSize += dataLength + entryHeader.length; + + compress2Buffer(entryLists); + }); + } else { + totalSize += mainHeader.mainHeaderSize; // also includes zip file comment length + // point to end of data and beginning of central directory first record + mainHeader.offset = dindex; + + dindex = 0; + const outBuffer = Buffer.alloc(totalSize); + dataBlock.forEach(function (content) { + content.copy(outBuffer, dindex); // write data blocks + dindex += content.length; + }); + entryHeaders.forEach(function (content) { + content.copy(outBuffer, dindex); // write central directory entries + dindex += content.length; + }); + + const mh = mainHeader.toBinary(); + if (_comment) { + _comment.copy(mh, Utils.Constants.ENDHDR); // add zip file comment + } + + 
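+ // dindex now sits just past the data blocks and central directory records, which is where the END (EOCD) record belongs.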
mh.copy(outBuffer, dindex); // write main header + + onSuccess(outBuffer); + } + }; + + compress2Buffer(entryList); + } catch (e) { + onFail(e); + } + } + }; +}; diff --git a/_shared/lib/node-fetch/@types/index.d.ts b/_shared/lib/node-fetch/@types/index.d.ts new file mode 100644 index 00000000..274ca03a --- /dev/null +++ b/_shared/lib/node-fetch/@types/index.d.ts @@ -0,0 +1,219 @@ +/// + +import {RequestOptions} from 'http'; +import {FormData} from 'formdata-polyfill/esm.min.js'; +import { + Blob, + blobFrom, + blobFromSync, + File, + fileFrom, + fileFromSync +} from 'fetch-blob/from.js'; + +type AbortSignal = { + readonly aborted: boolean; + + addEventListener: (type: 'abort', listener: (this: AbortSignal) => void) => void; + removeEventListener: (type: 'abort', listener: (this: AbortSignal) => void) => void; +}; + +export type HeadersInit = Headers | Record | Iterable | Iterable>; + +export { + FormData, + Blob, + blobFrom, + blobFromSync, + File, + fileFrom, + fileFromSync +}; + +/** + * This Fetch API interface allows you to perform various actions on HTTP request and response headers. + * These actions include retrieving, setting, adding to, and removing. + * A Headers object has an associated header list, which is initially empty and consists of zero or more name and value pairs. + * You can add to this using methods like append() (see Examples.) + * In all methods of this interface, header names are matched by case-insensitive byte sequence. + * */ +export class Headers { + constructor(init?: HeadersInit); + + append(name: string, value: string): void; + delete(name: string): void; + get(name: string): string | null; + has(name: string): boolean; + set(name: string, value: string): void; + forEach( + callbackfn: (value: string, key: string, parent: Headers) => void, + thisArg?: any + ): void; + + [Symbol.iterator](): IterableIterator<[string, string]>; + /** + * Returns an iterator allowing to go through all key/value pairs contained in this object. + */ + entries(): IterableIterator<[string, string]>; + /** + * Returns an iterator allowing to go through all keys of the key/value pairs contained in this object. + */ + keys(): IterableIterator; + /** + * Returns an iterator allowing to go through all values of the key/value pairs contained in this object. + */ + values(): IterableIterator; + + /** Node-fetch extension */ + raw(): Record; +} + +export interface RequestInit { + /** + * A BodyInit object or null to set request's body. + */ + body?: BodyInit | null; + /** + * A Headers object, an object literal, or an array of two-item arrays to set request's headers. + */ + headers?: HeadersInit; + /** + * A string to set request's method. + */ + method?: string; + /** + * A string indicating whether request follows redirects, results in an error upon encountering a redirect, or returns the redirect (in an opaque fashion). Sets request's redirect. + */ + redirect?: RequestRedirect; + /** + * An AbortSignal to set request's signal. + */ + signal?: AbortSignal | null; + /** + * A string whose value is a same-origin URL, "about:client", or the empty string, to set request’s referrer. + */ + referrer?: string; + /** + * A referrer policy to set request’s referrerPolicy. 
+ */ + referrerPolicy?: ReferrerPolicy; + + // Node-fetch extensions to the whatwg/fetch spec + agent?: RequestOptions['agent'] | ((parsedUrl: URL) => RequestOptions['agent']); + compress?: boolean; + counter?: number; + follow?: number; + hostname?: string; + port?: number; + protocol?: string; + size?: number; + highWaterMark?: number; + insecureHTTPParser?: boolean; +} + +export interface ResponseInit { + headers?: HeadersInit; + status?: number; + statusText?: string; +} + +export type BodyInit = + | Blob + | Buffer + | URLSearchParams + | FormData + | NodeJS.ReadableStream + | string; +declare class BodyMixin { + constructor(body?: BodyInit, options?: {size?: number}); + + readonly body: NodeJS.ReadableStream | null; + readonly bodyUsed: boolean; + readonly size: number; + + /** @deprecated Use `body.arrayBuffer()` instead. */ + buffer(): Promise; + arrayBuffer(): Promise; + formData(): Promise; + blob(): Promise; + json(): Promise; + text(): Promise; +} + +// `Body` must not be exported as a class since it's not exported from the JavaScript code. +export interface Body extends Pick {} + +export type RequestRedirect = 'error' | 'follow' | 'manual'; +export type ReferrerPolicy = '' | 'no-referrer' | 'no-referrer-when-downgrade' | 'same-origin' | 'origin' | 'strict-origin' | 'origin-when-cross-origin' | 'strict-origin-when-cross-origin' | 'unsafe-url'; +export type RequestInfo = string | Request; +export class Request extends BodyMixin { + constructor(input: URL | RequestInfo, init?: RequestInit); + + /** + * Returns a Headers object consisting of the headers associated with request. Note that headers added in the network layer by the user agent will not be accounted for in this object, e.g., the "Host" header. + */ + readonly headers: Headers; + /** + * Returns request's HTTP method, which is "GET" by default. + */ + readonly method: string; + /** + * Returns the redirect mode associated with request, which is a string indicating how redirects for the request will be handled during fetching. A request will follow redirects by default. + */ + readonly redirect: RequestRedirect; + /** + * Returns the signal associated with request, which is an AbortSignal object indicating whether or not request has been aborted, and its abort event handler. + */ + readonly signal: AbortSignal; + /** + * Returns the URL of request as a string. + */ + readonly url: string; + /** + * A string whose value is a same-origin URL, "about:client", or the empty string, to set request’s referrer. + */ + readonly referrer: string; + /** + * A referrer policy to set request’s referrerPolicy. 
+ */ + readonly referrerPolicy: ReferrerPolicy; + clone(): Request; +} + +type ResponseType = 'basic' | 'cors' | 'default' | 'error' | 'opaque' | 'opaqueredirect'; + +export class Response extends BodyMixin { + constructor(body?: BodyInit | null, init?: ResponseInit); + + readonly headers: Headers; + readonly ok: boolean; + readonly redirected: boolean; + readonly status: number; + readonly statusText: string; + readonly type: ResponseType; + readonly url: string; + clone(): Response; + + static error(): Response; + static redirect(url: string, status?: number): Response; + static json(data: any, init?: ResponseInit): Response; +} + +export class FetchError extends Error { + constructor(message: string, type: string, systemError?: Record); + + name: 'FetchError'; + [Symbol.toStringTag]: 'FetchError'; + type: string; + code?: string; + errno?: string; +} + +export class AbortError extends Error { + type: string; + name: 'AbortError'; + [Symbol.toStringTag]: 'AbortError'; +} + +export function isRedirect(code: number): boolean; +export default function fetch(url: URL | RequestInfo, init?: RequestInit): Promise; diff --git a/_shared/lib/node-fetch/LICENSE.md b/_shared/lib/node-fetch/LICENSE.md new file mode 100644 index 00000000..41ca1b6e --- /dev/null +++ b/_shared/lib/node-fetch/LICENSE.md @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 - 2020 Node Fetch Team + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/_shared/lib/node-fetch/README.md b/_shared/lib/node-fetch/README.md new file mode 100644 index 00000000..badc2b19 --- /dev/null +++ b/_shared/lib/node-fetch/README.md @@ -0,0 +1,872 @@ +
+# Node Fetch
+
+A light-weight module that brings Fetch API to Node.js.
+
+[badges: build status · coverage status · current version · install size · Mentioned in Awesome Node.js · Discord]
+
+Consider supporting us on our Open Collective:
+
+[Open Collective]
+ +--- + +**You might be looking for the [v2 docs](https://github.com/node-fetch/node-fetch/tree/2.x#readme)** + + + +- [Motivation](#motivation) +- [Features](#features) +- [Difference from client-side fetch](#difference-from-client-side-fetch) +- [Installation](#installation) +- [Loading and configuring the module](#loading-and-configuring-the-module) +- [Upgrading](#upgrading) +- [Common Usage](#common-usage) + - [Plain text or HTML](#plain-text-or-html) + - [JSON](#json) + - [Simple Post](#simple-post) + - [Post with JSON](#post-with-json) + - [Post with form parameters](#post-with-form-parameters) + - [Handling exceptions](#handling-exceptions) + - [Handling client and server errors](#handling-client-and-server-errors) + - [Handling cookies](#handling-cookies) +- [Advanced Usage](#advanced-usage) + - [Streams](#streams) + - [Accessing Headers and other Metadata](#accessing-headers-and-other-metadata) + - [Extract Set-Cookie Header](#extract-set-cookie-header) + - [Post data using a file](#post-data-using-a-file) + - [Request cancellation with AbortSignal](#request-cancellation-with-abortsignal) +- [API](#api) + - [fetch(url[, options])](#fetchurl-options) + - [Options](#options) + - [Default Headers](#default-headers) + - [Custom Agent](#custom-agent) + - [Custom highWaterMark](#custom-highwatermark) + - [Insecure HTTP Parser](#insecure-http-parser) + - [Class: Request](#class-request) + - [new Request(input[, options])](#new-requestinput-options) + - [Class: Response](#class-response) + - [new Response([body[, options]])](#new-responsebody-options) + - [response.ok](#responseok) + - [response.redirected](#responseredirected) + - [response.type](#responsetype) + - [Class: Headers](#class-headers) + - [new Headers([init])](#new-headersinit) + - [Interface: Body](#interface-body) + - [body.body](#bodybody) + - [body.bodyUsed](#bodybodyused) + - [body.arrayBuffer()](#bodyarraybuffer) + - [body.blob()](#bodyblob) + - [body.formData()](#formdata) + - [body.json()](#bodyjson) + - [body.text()](#bodytext) + - [Class: FetchError](#class-fetcherror) + - [Class: AbortError](#class-aborterror) +- [TypeScript](#typescript) +- [Acknowledgement](#acknowledgement) +- [Team](#team) + - [Former](#former) +- [License](#license) + + + +## Motivation + +Instead of implementing `XMLHttpRequest` in Node.js to run browser-specific [Fetch polyfill](https://github.com/github/fetch), why not go from native `http` to `fetch` API directly? Hence, `node-fetch`, minimal code for a `window.fetch` compatible API on Node.js runtime. + +See Jason Miller's [isomorphic-unfetch](https://www.npmjs.com/package/isomorphic-unfetch) or Leonardo Quixada's [cross-fetch](https://github.com/lquixada/cross-fetch) for isomorphic usage (exports `node-fetch` for server-side, `whatwg-fetch` for client-side). + +## Features + +- Stay consistent with `window.fetch` API. +- Make conscious trade-off when following [WHATWG fetch spec][whatwg-fetch] and [stream spec](https://streams.spec.whatwg.org/) implementation details, document known differences. +- Use native promise and async functions. +- Use native Node streams for body, on both request and response. +- Decode content encoding (gzip/deflate/brotli) properly, and convert string output (such as `res.text()` and `res.json()`) to UTF-8 automatically. +- Useful extensions such as redirect limit, response size limit, [explicit errors][error-handling.md] for troubleshooting. 
+ +## Difference from client-side fetch + +- See known differences: + - [As of v3.x](docs/v3-LIMITS.md) + - [As of v2.x](docs/v2-LIMITS.md) +- If you happen to use a missing feature that `window.fetch` offers, feel free to open an issue. +- Pull requests are welcomed too! + +## Installation + +Current stable release (`3.x`) requires at least Node.js 12.20.0. + +```sh +npm install node-fetch +``` + +## Loading and configuring the module + +### ES Modules (ESM) + +```js +import fetch from 'node-fetch'; +``` + +### CommonJS + +`node-fetch` from v3 is an ESM-only module - you are not able to import it with `require()`. + +If you cannot switch to ESM, please use v2 which remains compatible with CommonJS. Critical bug fixes will continue to be published for v2. + +```sh +npm install node-fetch@2 +``` + +Alternatively, you can use the async `import()` function from CommonJS to load `node-fetch` asynchronously: + +```js +// mod.cjs +const fetch = (...args) => import('node-fetch').then(({default: fetch}) => fetch(...args)); +``` + +### Providing global access + +To use `fetch()` without importing it, you can patch the `global` object in node: + +```js +// fetch-polyfill.js +import fetch, { + Blob, + blobFrom, + blobFromSync, + File, + fileFrom, + fileFromSync, + FormData, + Headers, + Request, + Response, +} from 'node-fetch' + +if (!globalThis.fetch) { + globalThis.fetch = fetch + globalThis.Headers = Headers + globalThis.Request = Request + globalThis.Response = Response +} + +// index.js +import './fetch-polyfill' + +// ... +``` + +## Upgrading + +Using an old version of node-fetch? Check out the following files: + +- [2.x to 3.x upgrade guide](docs/v3-UPGRADE-GUIDE.md) +- [1.x to 2.x upgrade guide](docs/v2-UPGRADE-GUIDE.md) +- [Changelog](https://github.com/node-fetch/node-fetch/releases) + +## Common Usage + +NOTE: The documentation below is up-to-date with `3.x` releases, if you are using an older version, please check how to [upgrade](#upgrading). + +### Plain text or HTML + +```js +import fetch from 'node-fetch'; + +const response = await fetch('https://github.com/'); +const body = await response.text(); + +console.log(body); +``` + +### JSON + +```js +import fetch from 'node-fetch'; + +const response = await fetch('https://api.github.com/users/github'); +const data = await response.json(); + +console.log(data); +``` + +### Simple Post + +```js +import fetch from 'node-fetch'; + +const response = await fetch('https://httpbin.org/post', {method: 'POST', body: 'a=1'}); +const data = await response.json(); + +console.log(data); +``` + +### Post with JSON + +```js +import fetch from 'node-fetch'; + +const body = {a: 1}; + +const response = await fetch('https://httpbin.org/post', { + method: 'post', + body: JSON.stringify(body), + headers: {'Content-Type': 'application/json'} +}); +const data = await response.json(); + +console.log(data); +``` + +### Post with form parameters + +`URLSearchParams` is available on the global object in Node.js as of v10.0.0. See [official documentation](https://nodejs.org/api/url.html#url_class_urlsearchparams) for more usage methods. 
+ +NOTE: The `Content-Type` header is only set automatically to `x-www-form-urlencoded` when an instance of `URLSearchParams` is given as such: + +```js +import fetch from 'node-fetch'; + +const params = new URLSearchParams(); +params.append('a', 1); + +const response = await fetch('https://httpbin.org/post', {method: 'POST', body: params}); +const data = await response.json(); + +console.log(data); +``` + +### Handling exceptions + +NOTE: 3xx-5xx responses are _NOT_ exceptions, and should be handled in `then()`, see the next section. + +Wrapping the fetch function into a `try/catch` block will catch _all_ exceptions, such as errors originating from node core libraries, like network errors, and operational errors which are instances of FetchError. See the [error handling document][error-handling.md] for more details. + +```js +import fetch from 'node-fetch'; + +try { + await fetch('https://domain.invalid/'); +} catch (error) { + console.log(error); +} +``` + +### Handling client and server errors + +It is common to create a helper function to check that the response contains no client (4xx) or server (5xx) error responses: + +```js +import fetch from 'node-fetch'; + +class HTTPResponseError extends Error { + constructor(response) { + super(`HTTP Error Response: ${response.status} ${response.statusText}`); + this.response = response; + } +} + +const checkStatus = response => { + if (response.ok) { + // response.status >= 200 && response.status < 300 + return response; + } else { + throw new HTTPResponseError(response); + } +} + +const response = await fetch('https://httpbin.org/status/400'); + +try { + checkStatus(response); +} catch (error) { + console.error(error); + + const errorBody = await error.response.text(); + console.error(`Error body: ${errorBody}`); +} +``` + +### Handling cookies + +Cookies are not stored by default. However, cookies can be extracted and passed by manipulating request and response headers. See [Extract Set-Cookie Header](#extract-set-cookie-header) for details. + +## Advanced Usage + +### Streams + +The "Node.js way" is to use streams when possible. You can pipe `res.body` to another stream. This example uses [stream.pipeline](https://nodejs.org/api/stream.html#stream_stream_pipeline_streams_callback) to attach stream error handlers and wait for the download to complete. + +```js +import {createWriteStream} from 'node:fs'; +import {pipeline} from 'node:stream'; +import {promisify} from 'node:util' +import fetch from 'node-fetch'; + +const streamPipeline = promisify(pipeline); + +const response = await fetch('https://github.githubassets.com/images/modules/logos_page/Octocat.png'); + +if (!response.ok) throw new Error(`unexpected response ${response.statusText}`); + +await streamPipeline(response.body, createWriteStream('./octocat.png')); +``` + +In Node.js 14 you can also use async iterators to read `body`; however, be careful to catch +errors -- the longer a response runs, the more likely it is to encounter an error. + +```js +import fetch from 'node-fetch'; + +const response = await fetch('https://httpbin.org/stream/3'); + +try { + for await (const chunk of response.body) { + console.dir(JSON.parse(chunk.toString())); + } +} catch (err) { + console.error(err.stack); +} +``` + +In Node.js 12 you can also use async iterators to read `body`; however, async iterators with streams +did not mature until Node.js 14, so you need to do some extra work to ensure you handle errors +directly from the stream and wait on it response to fully close. 
+ +```js +import fetch from 'node-fetch'; + +const read = async body => { + let error; + body.on('error', err => { + error = err; + }); + + for await (const chunk of body) { + console.dir(JSON.parse(chunk.toString())); + } + + return new Promise((resolve, reject) => { + body.on('close', () => { + error ? reject(error) : resolve(); + }); + }); +}; + +try { + const response = await fetch('https://httpbin.org/stream/3'); + await read(response.body); +} catch (err) { + console.error(err.stack); +} +``` + +### Accessing Headers and other Metadata + +```js +import fetch from 'node-fetch'; + +const response = await fetch('https://github.com/'); + +console.log(response.ok); +console.log(response.status); +console.log(response.statusText); +console.log(response.headers.raw()); +console.log(response.headers.get('content-type')); +``` + +### Extract Set-Cookie Header + +Unlike browsers, you can access raw `Set-Cookie` headers manually using `Headers.raw()`. This is a `node-fetch` only API. + +```js +import fetch from 'node-fetch'; + +const response = await fetch('https://example.com'); + +// Returns an array of values, instead of a string of comma-separated values +console.log(response.headers.raw()['set-cookie']); +``` + +### Post data using a file + +```js +import fetch, { + Blob, + blobFrom, + blobFromSync, + File, + fileFrom, + fileFromSync, +} from 'node-fetch' + +const mimetype = 'text/plain' +const blob = fileFromSync('./input.txt', mimetype) +const url = 'https://httpbin.org/post' + +const response = await fetch(url, { method: 'POST', body: blob }) +const data = await response.json() + +console.log(data) +``` + +node-fetch comes with a spec-compliant [FormData] implementations for posting +multipart/form-data payloads + +```js +import fetch, { FormData, File, fileFrom } from 'node-fetch' + +const httpbin = 'https://httpbin.org/post' +const formData = new FormData() +const binary = new Uint8Array([ 97, 98, 99 ]) +const abc = new File([binary], 'abc.txt', { type: 'text/plain' }) + +formData.set('greeting', 'Hello, world!') +formData.set('file-upload', abc, 'new name.txt') + +const response = await fetch(httpbin, { method: 'POST', body: formData }) +const data = await response.json() + +console.log(data) +``` + +If you for some reason need to post a stream coming from any arbitrary place, +then you can append a [Blob] or a [File] look-a-like item. + +The minimum requirement is that it has: +1. A `Symbol.toStringTag` getter or property that is either `Blob` or `File` +2. A known size. +3. And either a `stream()` method or a `arrayBuffer()` method that returns a ArrayBuffer. + +The `stream()` must return any async iterable object as long as it yields Uint8Array (or Buffer) +so Node.Readable streams and whatwg streams works just fine. + +```js +formData.append('upload', { + [Symbol.toStringTag]: 'Blob', + size: 3, + *stream() { + yield new Uint8Array([97, 98, 99]) + }, + arrayBuffer() { + return new Uint8Array([97, 98, 99]).buffer + } +}, 'abc.txt') +``` + +### Request cancellation with AbortSignal + +You may cancel requests with `AbortController`. A suggested implementation is [`abort-controller`](https://www.npmjs.com/package/abort-controller). 
+ +An example of timing out a request after 150ms could be achieved as the following: + +```js +import fetch, { AbortError } from 'node-fetch'; + +// AbortController was added in node v14.17.0 globally +const AbortController = globalThis.AbortController || await import('abort-controller') + +const controller = new AbortController(); +const timeout = setTimeout(() => { + controller.abort(); +}, 150); + +try { + const response = await fetch('https://example.com', {signal: controller.signal}); + const data = await response.json(); +} catch (error) { + if (error instanceof AbortError) { + console.log('request was aborted'); + } +} finally { + clearTimeout(timeout); +} +``` + +See [test cases](https://github.com/node-fetch/node-fetch/blob/master/test/) for more examples. + +## API + +### fetch(url[, options]) + +- `url` A string representing the URL for fetching +- `options` [Options](#fetch-options) for the HTTP(S) request +- Returns: Promise<[Response](#class-response)> + +Perform an HTTP(S) fetch. + +`url` should be an absolute URL, such as `https://example.com/`. A path-relative URL (`/file/under/root`) or protocol-relative URL (`//can-be-http-or-https.com/`) will result in a rejected `Promise`. + + + +### Options + +The default values are shown after each option key. + +```js +{ + // These properties are part of the Fetch Standard + method: 'GET', + headers: {}, // Request headers. format is the identical to that accepted by the Headers constructor (see below) + body: null, // Request body. can be null, or a Node.js Readable stream + redirect: 'follow', // Set to `manual` to extract redirect headers, `error` to reject redirect + signal: null, // Pass an instance of AbortSignal to optionally abort requests + + // The following properties are node-fetch extensions + follow: 20, // maximum redirect count. 0 to not follow redirect + compress: true, // support gzip/deflate content encoding. false to disable + size: 0, // maximum response body size in bytes. 0 to disable + agent: null, // http(s).Agent instance or function that returns an instance (see below) + highWaterMark: 16384, // the maximum number of bytes to store in the internal buffer before ceasing to read from the underlying resource. + insecureHTTPParser: false // Use an insecure HTTP parser that accepts invalid HTTP headers when `true`. +} +``` + +#### Default Headers + +If no values are set, the following request headers will be sent automatically: + +| Header | Value | +| ------------------- | ------------------------------------------------------ | +| `Accept-Encoding` | `gzip, deflate, br` (when `options.compress === true`) | +| `Accept` | `*/*` | +| `Content-Length` | _(automatically calculated, if possible)_ | +| `Host` | _(host and port information from the target URI)_ | +| `Transfer-Encoding` | `chunked` _(when `req.body` is a stream)_ | +| `User-Agent` | `node-fetch` | + + +Note: when `body` is a `Stream`, `Content-Length` is not set automatically. + +#### Custom Agent + +The `agent` option allows you to specify networking related options which are out of the scope of Fetch, including and not limited to the following: + +- Support self-signed certificate +- Use only IPv4 or IPv6 +- Custom DNS Lookup + +See [`http.Agent`](https://nodejs.org/api/http.html#http_new_agent_options) for more information. + +If no agent is specified, the default agent provided by Node.js is used. 
Note that [this changed in Node.js 19](https://github.com/nodejs/node/blob/4267b92604ad78584244488e7f7508a690cb80d0/lib/_http_agent.js#L564) to have `keepalive` true by default. If you wish to enable `keepalive` in an earlier version of Node.js, you can override the agent as per the following code sample.
+
+In addition, the `agent` option accepts a function that returns an `http(s).Agent` instance given the current [URL](https://nodejs.org/api/url.html); this is useful during a redirection chain across HTTP and HTTPS protocols.
+
+```js
+import http from 'node:http';
+import https from 'node:https';
+
+const httpAgent = new http.Agent({
+	keepAlive: true
+});
+const httpsAgent = new https.Agent({
+	keepAlive: true
+});
+
+const options = {
+	agent: function(_parsedURL) {
+		if (_parsedURL.protocol == 'http:') {
+			return httpAgent;
+		} else {
+			return httpsAgent;
+		}
+	}
+};
+```
+
+#### Custom highWaterMark
+
+Streams in Node.js have a smaller internal buffer size (16 kB, aka `highWaterMark`) than client-side browsers (>1 MB, not consistent across browsers). Because of that, when you are writing an isomorphic app and using `res.clone()`, it will hang with large responses in Node.js.
+
+The recommended way to fix this problem is to resolve the cloned response in parallel:
+
+```js
+import fetch from 'node-fetch';
+
+const response = await fetch('https://example.com');
+const r1 = response.clone();
+
+const results = await Promise.all([response.json(), r1.text()]);
+
+console.log(results[0]);
+console.log(results[1]);
+```
+
+If for some reason you don't like the solution above, since `3.x` you are able to modify the `highWaterMark` option:
+
+```js
+import fetch from 'node-fetch';
+
+const response = await fetch('https://example.com', {
+	// About 1MB
+	highWaterMark: 1024 * 1024
+});
+
+const result = await response.clone().arrayBuffer();
+console.dir(result);
+```
+
+#### Insecure HTTP Parser
+
+Passed through to the `insecureHTTPParser` option on http(s).request. See [`http.request`](https://nodejs.org/api/http.html#http_http_request_url_options_callback) for more information.
+
+#### Manual Redirect
+
+The `redirect: 'manual'` option for node-fetch is different from the browser & specification, which
+results in an [opaque-redirect filtered response](https://fetch.spec.whatwg.org/#concept-filtered-response-opaque-redirect).
+node-fetch gives you the typical [basic filtered response](https://fetch.spec.whatwg.org/#concept-filtered-response-basic) instead.
+
+```js
+import fetch from 'node-fetch';
+
+const response = await fetch('https://httpbin.org/status/301', { redirect: 'manual' });
+
+if (response.status === 301 || response.status === 302) {
+	const locationURL = new URL(response.headers.get('location'), response.url);
+	const response2 = await fetch(locationURL, { redirect: 'manual' });
+	console.dir(response2);
+}
+```
+
+### Class: Request
+
+An HTTP(S) request containing information about URL, method, headers, and the body. This class implements the [Body](#iface-body) interface.
+
+Due to the nature of Node.js, the following properties are not implemented at this moment:
+
+- `type`
+- `destination`
+- `mode`
+- `credentials`
+- `cache`
+- `integrity`
+- `keepalive`
+
+The following node-fetch extension properties are provided:
+
+- `follow`
+- `compress`
+- `counter`
+- `agent`
+- `highWaterMark`
+
+See [options](#fetch-options) for exact meaning of these extensions.
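+
+For illustration, a minimal sketch of passing the extensions alongside the spec-defined fields (the values here are arbitrary):
+
+```js
+import {Request} from 'node-fetch';
+
+const request = new Request('https://example.com/data.json', {
+	method: 'GET',
+	follow: 5, // stop after 5 redirects instead of the default 20
+	compress: true, // request gzip/deflate/br and decode it transparently
+	highWaterMark: 1024 * 1024 // buffer up to ~1MB of the body stream
+});
+
+console.log(request.method, request.url);
+```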
+ +#### new Request(input[, options]) + +_(spec-compliant)_ + +- `input` A string representing a URL, or another `Request` (which will be cloned) +- `options` [Options](#fetch-options) for the HTTP(S) request + +Constructs a new `Request` object. The constructor is identical to that in the [browser](https://developer.mozilla.org/en-US/docs/Web/API/Request/Request). + +In most cases, directly `fetch(url, options)` is simpler than creating a `Request` object. + + + +### Class: Response + +An HTTP(S) response. This class implements the [Body](#iface-body) interface. + +The following properties are not implemented in node-fetch at this moment: + +- `trailer` + +#### new Response([body[, options]]) + +_(spec-compliant)_ + +- `body` A `String` or [`Readable` stream][node-readable] +- `options` A [`ResponseInit`][response-init] options dictionary + +Constructs a new `Response` object. The constructor is identical to that in the [browser](https://developer.mozilla.org/en-US/docs/Web/API/Response/Response). + +Because Node.js does not implement service workers (for which this class was designed), one rarely has to construct a `Response` directly. + +#### response.ok + +_(spec-compliant)_ + +Convenience property representing if the request ended normally. Will evaluate to true if the response status was greater than or equal to 200 but smaller than 300. + +#### response.redirected + +_(spec-compliant)_ + +Convenience property representing if the request has been redirected at least once. Will evaluate to true if the internal redirect counter is greater than 0. + +#### response.type + +_(deviation from spec)_ + +Convenience property representing the response's type. node-fetch only supports `'default'` and `'error'` and does not make use of [filtered responses](https://fetch.spec.whatwg.org/#concept-filtered-response). + + + +### Class: Headers + +This class allows manipulating and iterating over a set of HTTP headers. All methods specified in the [Fetch Standard][whatwg-fetch] are implemented. + +#### new Headers([init]) + +_(spec-compliant)_ + +- `init` Optional argument to pre-fill the `Headers` object + +Construct a new `Headers` object. `init` can be either `null`, a `Headers` object, an key-value map object or any iterable object. + +```js +// Example adapted from https://fetch.spec.whatwg.org/#example-headers-class +import {Headers} from 'node-fetch'; + +const meta = { + 'Content-Type': 'text/xml' +}; +const headers = new Headers(meta); + +// The above is equivalent to +const meta = [['Content-Type', 'text/xml']]; +const headers = new Headers(meta); + +// You can in fact use any iterable objects, like a Map or even another Headers +const meta = new Map(); +meta.set('Content-Type', 'text/xml'); +const headers = new Headers(meta); +const copyOfHeaders = new Headers(headers); +``` + + + +### Interface: Body + +`Body` is an abstract interface with methods that are applicable to both `Request` and `Response` classes. + +#### body.body + +_(deviation from spec)_ + +- Node.js [`Readable` stream][node-readable] + +Data are encapsulated in the `Body` object. Note that while the [Fetch Standard][whatwg-fetch] requires the property to always be a WHATWG `ReadableStream`, in node-fetch it is a Node.js [`Readable` stream][node-readable]. + +#### body.bodyUsed + +_(spec-compliant)_ + +- `Boolean` + +A boolean property for if this body has been consumed. Per the specs, a consumed body cannot be used again. 
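+
+A minimal sketch of the flag in action (httpbin.org stands in for any endpoint):
+
+```js
+import fetch from 'node-fetch';
+
+const response = await fetch('https://httpbin.org/get');
+console.log(response.bodyUsed); // false
+
+await response.text();
+console.log(response.bodyUsed); // true
+
+// A second read rejects, since a consumed body cannot be reused.
+await response.text().catch(error => console.error(error.message));
+```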
+
+#### body.arrayBuffer()
+
+#### body.formData()
+
+#### body.blob()
+
+#### body.json()
+
+#### body.text()
+
+`fetch` comes with methods to parse `multipart/form-data` payloads as well as
+`x-www-form-urlencoded` bodies using `.formData()`. This comes from the idea that
+a Service Worker can intercept such messages before they are sent to the server
+and alter them. It is also useful for anybody building a server, as you can use
+it to parse & consume payloads, as the code example below shows.
+
+Code example
+
+```js
+import http from 'node:http'
+import { Response } from 'node-fetch'
+
+http.createServer(async function (req, res) {
+  const formData = await new Response(req, {
+    headers: req.headers // Pass along the boundary value
+  }).formData()
+  const allFields = [...formData]
+
+  const file = formData.get('uploaded-files')
+  const arrayBuffer = await file.arrayBuffer()
+  const text = await file.text()
+  const whatwgReadableStream = file.stream()
+
+  // Other ways to consume the request; note that a request body can only be
+  // consumed once, so use only one of these per request:
+  // const json = await new Response(req).json()
+  // const text = await new Response(req).text()
+  // const arrayBuffer = await new Response(req).arrayBuffer()
+  // const blob = await new Response(req, {
+  //   headers: req.headers // So that `type` inherits `Content-Type`
+  // }).blob()
+
+  res.end()
+}).listen(3000) // port chosen for illustration
+```
+
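+
+As a complement, a minimal sketch of a client producing a `multipart/form-data` body that the server above could parse (the endpoint and field names are placeholders):
+
+```js
+import fetch, {FormData, File} from 'node-fetch';
+
+const form = new FormData();
+form.set('greeting', 'Hello, world!');
+form.set('uploaded-files', new File(['file content'], 'hello.txt', {type: 'text/plain'}));
+
+// node-fetch derives the multipart boundary and Content-Type header automatically
+await fetch('http://localhost:3000/', {method: 'POST', body: form});
+```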
+ + + +### Class: FetchError + +_(node-fetch extension)_ + +An operational error in the fetching process. See [ERROR-HANDLING.md][] for more info. + + + +### Class: AbortError + +_(node-fetch extension)_ + +An Error thrown when the request is aborted in response to an `AbortSignal`'s `abort` event. It has a `name` property of `AbortError`. See [ERROR-HANDLING.MD][] for more info. + +## TypeScript + +**Since `3.x` types are bundled with `node-fetch`, so you don't need to install any additional packages.** + +For older versions please use the type definitions from [DefinitelyTyped](https://github.com/DefinitelyTyped/DefinitelyTyped): + +```sh +npm install --save-dev @types/node-fetch@2.x +``` + +## Acknowledgement + +Thanks to [github/fetch](https://github.com/github/fetch) for providing a solid implementation reference. + +## Team + +| [![David Frank](https://github.com/bitinn.png?size=100)](https://github.com/bitinn) | [![Jimmy Wärting](https://github.com/jimmywarting.png?size=100)](https://github.com/jimmywarting) | [![Antoni Kepinski](https://github.com/xxczaki.png?size=100)](https://github.com/xxczaki) | [![Richie Bendall](https://github.com/Richienb.png?size=100)](https://github.com/Richienb) | [![Gregor Martynus](https://github.com/gr2m.png?size=100)](https://github.com/gr2m) | +| ----------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------- | +| [David Frank](https://bitinn.net/) | [Jimmy Wärting](https://jimmy.warting.se/) | [Antoni Kepinski](https://kepinski.ch) | [Richie Bendall](https://www.richie-bendall.ml/) | [Gregor Martynus](https://twitter.com/gr2m) | + +###### Former + +- [Timothy Gu](https://github.com/timothygu) +- [Jared Kantrowitz](https://github.com/jkantr) + +## License + +[MIT](LICENSE.md) + +[whatwg-fetch]: https://fetch.spec.whatwg.org/ +[response-init]: https://fetch.spec.whatwg.org/#responseinit +[node-readable]: https://nodejs.org/api/stream.html#stream_readable_streams +[mdn-headers]: https://developer.mozilla.org/en-US/docs/Web/API/Headers +[error-handling.md]: https://github.com/node-fetch/node-fetch/blob/master/docs/ERROR-HANDLING.md +[FormData]: https://developer.mozilla.org/en-US/docs/Web/API/FormData +[Blob]: https://developer.mozilla.org/en-US/docs/Web/API/Blob +[File]: https://developer.mozilla.org/en-US/docs/Web/API/File diff --git a/_shared/lib/node-fetch/src/body.js b/_shared/lib/node-fetch/src/body.js new file mode 100644 index 00000000..714e27ec --- /dev/null +++ b/_shared/lib/node-fetch/src/body.js @@ -0,0 +1,397 @@ + +/** + * Body.js + * + * Body interface provides common methods for Request and Response + */ + +import Stream, {PassThrough} from 'node:stream'; +import {types, deprecate, promisify} from 'node:util'; +import {Buffer} from 'node:buffer'; + +import Blob from 'fetch-blob'; +import {FormData, formDataToBlob} from 'formdata-polyfill/esm.min.js'; + +import {FetchError} from './errors/fetch-error.js'; +import {FetchBaseError} from './errors/base.js'; +import {isBlob, isURLSearchParameters} from './utils/is.js'; + +const pipeline = promisify(Stream.pipeline); +const INTERNALS = Symbol('Body internals'); + +/** + * Body mixin + * + * Ref: 
https://fetch.spec.whatwg.org/#body + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +export default class Body { + constructor(body, { + size = 0 + } = {}) { + let boundary = null; + + if (body === null) { + // Body is undefined or null + body = null; + } else if (isURLSearchParameters(body)) { + // Body is a URLSearchParams + body = Buffer.from(body.toString()); + } else if (isBlob(body)) { + // Body is blob + } else if (Buffer.isBuffer(body)) { + // Body is Buffer + } else if (types.isAnyArrayBuffer(body)) { + // Body is ArrayBuffer + body = Buffer.from(body); + } else if (ArrayBuffer.isView(body)) { + // Body is ArrayBufferView + body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); + } else if (body instanceof Stream) { + // Body is stream + } else if (body instanceof FormData) { + // Body is FormData + body = formDataToBlob(body); + boundary = body.type.split('=')[1]; + } else { + // None of the above + // coerce to string then buffer + body = Buffer.from(String(body)); + } + + let stream = body; + + if (Buffer.isBuffer(body)) { + stream = Stream.Readable.from(body); + } else if (isBlob(body)) { + stream = Stream.Readable.from(body.stream()); + } + + this[INTERNALS] = { + body, + stream, + boundary, + disturbed: false, + error: null + }; + this.size = size; + + if (body instanceof Stream) { + body.on('error', error_ => { + const error = error_ instanceof FetchBaseError ? + error_ : + new FetchError(`Invalid response body while trying to fetch ${this.url}: ${error_.message}`, 'system', error_); + this[INTERNALS].error = error; + }); + } + } + + get body() { + return this[INTERNALS].stream; + } + + get bodyUsed() { + return this[INTERNALS].disturbed; + } + + /** + * Decode response as ArrayBuffer + * + * @return Promise + */ + async arrayBuffer() { + const {buffer, byteOffset, byteLength} = await consumeBody(this); + return buffer.slice(byteOffset, byteOffset + byteLength); + } + + async formData() { + const ct = this.headers.get('content-type'); + + if (ct.startsWith('application/x-www-form-urlencoded')) { + const formData = new FormData(); + const parameters = new URLSearchParams(await this.text()); + + for (const [name, value] of parameters) { + formData.append(name, value); + } + + return formData; + } + + const {toFormData} = await import('./utils/multipart-parser.js'); + return toFormData(this.body, ct); + } + + /** + * Return raw response as Blob + * + * @return Promise + */ + async blob() { + const ct = (this.headers && this.headers.get('content-type')) || (this[INTERNALS].body && this[INTERNALS].body.type) || ''; + const buf = await this.arrayBuffer(); + + return new Blob([buf], { + type: ct + }); + } + + /** + * Decode response as json + * + * @return Promise + */ + async json() { + const text = await this.text(); + return JSON.parse(text); + } + + /** + * Decode response as text + * + * @return Promise + */ + async text() { + const buffer = await consumeBody(this); + return new TextDecoder().decode(buffer); + } + + /** + * Decode response as buffer (non-spec api) + * + * @return Promise + */ + buffer() { + return consumeBody(this); + } +} + +Body.prototype.buffer = deprecate(Body.prototype.buffer, 'Please use \'response.arrayBuffer()\' instead of \'response.buffer()\'', 'node-fetch#buffer'); + +// In browsers, all properties are enumerable. 
+Object.defineProperties(Body.prototype, { + body: {enumerable: true}, + bodyUsed: {enumerable: true}, + arrayBuffer: {enumerable: true}, + blob: {enumerable: true}, + json: {enumerable: true}, + text: {enumerable: true}, + data: {get: deprecate(() => {}, + 'data doesn\'t exist, use json(), text(), arrayBuffer(), or body instead', + 'https://github.com/node-fetch/node-fetch/issues/1000 (response)')} +}); + +/** + * Consume and convert an entire Body to a Buffer. + * + * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body + * + * @return Promise + */ +async function consumeBody(data) { + if (data[INTERNALS].disturbed) { + throw new TypeError(`body used already for: ${data.url}`); + } + + data[INTERNALS].disturbed = true; + + if (data[INTERNALS].error) { + throw data[INTERNALS].error; + } + + const {body} = data; + + // Body is null + if (body === null) { + return Buffer.alloc(0); + } + + /* c8 ignore next 3 */ + if (!(body instanceof Stream)) { + return Buffer.alloc(0); + } + + // Body is stream + // get ready to actually consume the body + const accum = []; + let accumBytes = 0; + + try { + for await (const chunk of body) { + if (data.size > 0 && accumBytes + chunk.length > data.size) { + const error = new FetchError(`content size at ${data.url} over limit: ${data.size}`, 'max-size'); + body.destroy(error); + throw error; + } + + accumBytes += chunk.length; + accum.push(chunk); + } + } catch (error) { + const error_ = error instanceof FetchBaseError ? error : new FetchError(`Invalid response body while trying to fetch ${data.url}: ${error.message}`, 'system', error); + throw error_; + } + + if (body.readableEnded === true || body._readableState.ended === true) { + try { + if (accum.every(c => typeof c === 'string')) { + return Buffer.from(accum.join('')); + } + + return Buffer.concat(accum, accumBytes); + } catch (error) { + throw new FetchError(`Could not create Buffer from response body for ${data.url}: ${error.message}`, 'system', error); + } + } else { + throw new FetchError(`Premature close of server response while trying to fetch ${data.url}`); + } +} + +/** + * Clone body given Res/Req instance + * + * @param Mixed instance Response or Request instance + * @param String highWaterMark highWaterMark for both PassThrough body streams + * @return Mixed + */ +export const clone = (instance, highWaterMark) => { + let p1; + let p2; + let {body} = instance[INTERNALS]; + + // Don't allow cloning a used body + if (instance.bodyUsed) { + throw new Error('cannot clone body after it is used'); + } + + // Check that body is a stream and not form-data object + // note: we can't clone the form-data object without having it as a dependency + if ((body instanceof Stream) && (typeof body.getBoundary !== 'function')) { + // Tee instance body + p1 = new PassThrough({highWaterMark}); + p2 = new PassThrough({highWaterMark}); + body.pipe(p1); + body.pipe(p2); + // Set instance body to teed body and return the other teed body + instance[INTERNALS].stream = p1; + body = p2; + } + + return body; +}; + +const getNonSpecFormDataBoundary = deprecate( + body => body.getBoundary(), + 'form-data doesn\'t follow the spec and requires special treatment. Use alternative package', + 'https://github.com/node-fetch/node-fetch/issues/1167' +); + +/** + * Performs the operation "extract a `Content-Type` value from |object|" as + * specified in the specification: + * https://fetch.spec.whatwg.org/#concept-bodyinit-extract + * + * This function assumes that instance.body is present. 
+ * + * @param {any} body Any options.body input + * @returns {string | null} + */ +export const extractContentType = (body, request) => { + // Body is null or undefined + if (body === null) { + return null; + } + + // Body is string + if (typeof body === 'string') { + return 'text/plain;charset=UTF-8'; + } + + // Body is a URLSearchParams + if (isURLSearchParameters(body)) { + return 'application/x-www-form-urlencoded;charset=UTF-8'; + } + + // Body is blob + if (isBlob(body)) { + return body.type || null; + } + + // Body is a Buffer (Buffer, ArrayBuffer or ArrayBufferView) + if (Buffer.isBuffer(body) || types.isAnyArrayBuffer(body) || ArrayBuffer.isView(body)) { + return null; + } + + if (body instanceof FormData) { + return `multipart/form-data; boundary=${request[INTERNALS].boundary}`; + } + + // Detect form data input from form-data module + if (body && typeof body.getBoundary === 'function') { + return `multipart/form-data;boundary=${getNonSpecFormDataBoundary(body)}`; + } + + // Body is stream - can't really do much about this + if (body instanceof Stream) { + return null; + } + + // Body constructor defaults other things to string + return 'text/plain;charset=UTF-8'; +}; + +/** + * The Fetch Standard treats this as if "total bytes" is a property on the body. + * For us, we have to explicitly get it with a function. + * + * ref: https://fetch.spec.whatwg.org/#concept-body-total-bytes + * + * @param {any} obj.body Body object from the Body instance. + * @returns {number | null} + */ +export const getTotalBytes = request => { + const {body} = request[INTERNALS]; + + // Body is null or undefined + if (body === null) { + return 0; + } + + // Body is Blob + if (isBlob(body)) { + return body.size; + } + + // Body is Buffer + if (Buffer.isBuffer(body)) { + return body.length; + } + + // Detect form data input from form-data module + if (body && typeof body.getLengthSync === 'function') { + return body.hasKnownLength && body.hasKnownLength() ? body.getLengthSync() : null; + } + + // Body is stream + return null; +}; + +/** + * Write a Body to a Node.js WritableStream (e.g. http.Request) object. + * + * @param {Stream.Writable} dest The stream to write to. + * @param obj.body Body object from the Body instance. 
+ * @returns {Promise} + */ +export const writeToStream = async (dest, {body}) => { + if (body === null) { + // Body is null + dest.end(); + } else { + // Body is stream + await pipeline(body, dest); + } +}; diff --git a/_shared/lib/node-fetch/src/errors/abort-error.js b/_shared/lib/node-fetch/src/errors/abort-error.js new file mode 100644 index 00000000..0b62f1cd --- /dev/null +++ b/_shared/lib/node-fetch/src/errors/abort-error.js @@ -0,0 +1,10 @@ +import {FetchBaseError} from './base.js'; + +/** + * AbortError interface for cancelled requests + */ +export class AbortError extends FetchBaseError { + constructor(message, type = 'aborted') { + super(message, type); + } +} diff --git a/_shared/lib/node-fetch/src/errors/base.js b/_shared/lib/node-fetch/src/errors/base.js new file mode 100644 index 00000000..4e66e1bf --- /dev/null +++ b/_shared/lib/node-fetch/src/errors/base.js @@ -0,0 +1,17 @@ +export class FetchBaseError extends Error { + constructor(message, type) { + super(message); + // Hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); + + this.type = type; + } + + get name() { + return this.constructor.name; + } + + get [Symbol.toStringTag]() { + return this.constructor.name; + } +} diff --git a/_shared/lib/node-fetch/src/errors/fetch-error.js b/_shared/lib/node-fetch/src/errors/fetch-error.js new file mode 100644 index 00000000..f7ae5cc4 --- /dev/null +++ b/_shared/lib/node-fetch/src/errors/fetch-error.js @@ -0,0 +1,26 @@ + +import {FetchBaseError} from './base.js'; + +/** + * @typedef {{ address?: string, code: string, dest?: string, errno: number, info?: object, message: string, path?: string, port?: number, syscall: string}} SystemError +*/ + +/** + * FetchError interface for operational errors + */ +export class FetchError extends FetchBaseError { + /** + * @param {string} message - Error message for human + * @param {string} [type] - Error type for machine + * @param {SystemError} [systemError] - For Node.js system error + */ + constructor(message, type, systemError) { + super(message, type); + // When err.type is `system`, err.erroredSysCall contains system error and err.code contains system error code + if (systemError) { + // eslint-disable-next-line no-multi-assign + this.code = this.errno = systemError.code; + this.erroredSysCall = systemError.syscall; + } + } +} diff --git a/_shared/lib/node-fetch/src/headers.js b/_shared/lib/node-fetch/src/headers.js new file mode 100644 index 00000000..cd694558 --- /dev/null +++ b/_shared/lib/node-fetch/src/headers.js @@ -0,0 +1,267 @@ +/** + * Headers.js + * + * Headers class offers convenient helpers + */ + +import {types} from 'node:util'; +import http from 'node:http'; + +/* c8 ignore next 9 */ +const validateHeaderName = typeof http.validateHeaderName === 'function' ? + http.validateHeaderName : + name => { + if (!/^[\^`\-\w!#$%&'*+.|~]+$/.test(name)) { + const error = new TypeError(`Header name must be a valid HTTP token [${name}]`); + Object.defineProperty(error, 'code', {value: 'ERR_INVALID_HTTP_TOKEN'}); + throw error; + } + }; + +/* c8 ignore next 9 */ +const validateHeaderValue = typeof http.validateHeaderValue === 'function' ? 
+ http.validateHeaderValue : + (name, value) => { + if (/[^\t\u0020-\u007E\u0080-\u00FF]/.test(value)) { + const error = new TypeError(`Invalid character in header content ["${name}"]`); + Object.defineProperty(error, 'code', {value: 'ERR_INVALID_CHAR'}); + throw error; + } + }; + +/** + * @typedef {Headers | Record | Iterable | Iterable>} HeadersInit + */ + +/** + * This Fetch API interface allows you to perform various actions on HTTP request and response headers. + * These actions include retrieving, setting, adding to, and removing. + * A Headers object has an associated header list, which is initially empty and consists of zero or more name and value pairs. + * You can add to this using methods like append() (see Examples.) + * In all methods of this interface, header names are matched by case-insensitive byte sequence. + * + */ +export default class Headers extends URLSearchParams { + /** + * Headers class + * + * @constructor + * @param {HeadersInit} [init] - Response headers + */ + constructor(init) { + // Validate and normalize init object in [name, value(s)][] + /** @type {string[][]} */ + let result = []; + if (init instanceof Headers) { + const raw = init.raw(); + for (const [name, values] of Object.entries(raw)) { + result.push(...values.map(value => [name, value])); + } + } else if (init == null) { // eslint-disable-line no-eq-null, eqeqeq + // No op + } else if (typeof init === 'object' && !types.isBoxedPrimitive(init)) { + const method = init[Symbol.iterator]; + // eslint-disable-next-line no-eq-null, eqeqeq + if (method == null) { + // Record + result.push(...Object.entries(init)); + } else { + if (typeof method !== 'function') { + throw new TypeError('Header pairs must be iterable'); + } + + // Sequence> + // Note: per spec we have to first exhaust the lists then process them + result = [...init] + .map(pair => { + if ( + typeof pair !== 'object' || types.isBoxedPrimitive(pair) + ) { + throw new TypeError('Each header pair must be an iterable object'); + } + + return [...pair]; + }).map(pair => { + if (pair.length !== 2) { + throw new TypeError('Each header pair must be a name/value tuple'); + } + + return [...pair]; + }); + } + } else { + throw new TypeError('Failed to construct \'Headers\': The provided value is not of type \'(sequence> or record)'); + } + + // Validate and lowercase + result = + result.length > 0 ? 
+ result.map(([name, value]) => { + validateHeaderName(name); + validateHeaderValue(name, String(value)); + return [String(name).toLowerCase(), String(value)]; + }) : + undefined; + + super(result); + + // Returning a Proxy that will lowercase key names, validate parameters and sort keys + // eslint-disable-next-line no-constructor-return + return new Proxy(this, { + get(target, p, receiver) { + switch (p) { + case 'append': + case 'set': + return (name, value) => { + validateHeaderName(name); + validateHeaderValue(name, String(value)); + return URLSearchParams.prototype[p].call( + target, + String(name).toLowerCase(), + String(value) + ); + }; + + case 'delete': + case 'has': + case 'getAll': + return name => { + validateHeaderName(name); + return URLSearchParams.prototype[p].call( + target, + String(name).toLowerCase() + ); + }; + + case 'keys': + return () => { + target.sort(); + return new Set(URLSearchParams.prototype.keys.call(target)).keys(); + }; + + default: + return Reflect.get(target, p, receiver); + } + } + }); + /* c8 ignore next */ + } + + get [Symbol.toStringTag]() { + return this.constructor.name; + } + + toString() { + return Object.prototype.toString.call(this); + } + + get(name) { + const values = this.getAll(name); + if (values.length === 0) { + return null; + } + + let value = values.join(', '); + if (/^content-encoding$/i.test(name)) { + value = value.toLowerCase(); + } + + return value; + } + + forEach(callback, thisArg = undefined) { + for (const name of this.keys()) { + Reflect.apply(callback, thisArg, [this.get(name), name, this]); + } + } + + * values() { + for (const name of this.keys()) { + yield this.get(name); + } + } + + /** + * @type {() => IterableIterator<[string, string]>} + */ + * entries() { + for (const name of this.keys()) { + yield [name, this.get(name)]; + } + } + + [Symbol.iterator]() { + return this.entries(); + } + + /** + * Node-fetch non-spec method + * returning all headers and their values as array + * @returns {Record} + */ + raw() { + return [...this.keys()].reduce((result, key) => { + result[key] = this.getAll(key); + return result; + }, {}); + } + + /** + * For better console.log(headers) and also to convert Headers into Node.js Request compatible format + */ + [Symbol.for('nodejs.util.inspect.custom')]() { + return [...this.keys()].reduce((result, key) => { + const values = this.getAll(key); + // Http.request() only supports string as Host header. + // This hack makes specifying custom Host header possible. + if (key === 'host') { + result[key] = values[0]; + } else { + result[key] = values.length > 1 ? values : values[0]; + } + + return result; + }, {}); + } +} + +/** + * Re-shaping object for Web IDL tests + * Only need to do it for overridden methods + */ +Object.defineProperties( + Headers.prototype, + ['get', 'entries', 'forEach', 'values'].reduce((result, property) => { + result[property] = {enumerable: true}; + return result; + }, {}) +); + +/** + * Create a Headers object from an http.IncomingMessage.rawHeaders, ignoring those that do + * not conform to HTTP grammar productions. 
+ * @param {import('http').IncomingMessage['rawHeaders']} headers + */ +export function fromRawHeaders(headers = []) { + return new Headers( + headers + // Split into pairs + .reduce((result, value, index, array) => { + if (index % 2 === 0) { + result.push(array.slice(index, index + 2)); + } + + return result; + }, []) + .filter(([name, value]) => { + try { + validateHeaderName(name); + validateHeaderValue(name, String(value)); + return true; + } catch { + return false; + } + }) + + ); +} diff --git a/_shared/lib/node-fetch/src/index.js b/_shared/lib/node-fetch/src/index.js new file mode 100644 index 00000000..7c4aee87 --- /dev/null +++ b/_shared/lib/node-fetch/src/index.js @@ -0,0 +1,417 @@ +/** + * Index.js + * + * a request API compatible with window.fetch + * + * All spec algorithm step numbers are based on https://fetch.spec.whatwg.org/commit-snapshots/ae716822cb3a61843226cd090eefc6589446c1d2/. + */ + +import http from 'node:http'; +import https from 'node:https'; +import zlib from 'node:zlib'; +import Stream, {PassThrough, pipeline as pump} from 'node:stream'; +import {Buffer} from 'node:buffer'; + +import dataUriToBuffer from 'data-uri-to-buffer'; + +import {writeToStream, clone} from './body.js'; +import Response from './response.js'; +import Headers, {fromRawHeaders} from './headers.js'; +import Request, {getNodeRequestOptions} from './request.js'; +import {FetchError} from './errors/fetch-error.js'; +import {AbortError} from './errors/abort-error.js'; +import {isRedirect} from './utils/is-redirect.js'; +import {FormData} from 'formdata-polyfill/esm.min.js'; +import {isDomainOrSubdomain, isSameProtocol} from './utils/is.js'; +import {parseReferrerPolicyFromHeader} from './utils/referrer.js'; +import { + Blob, + File, + fileFromSync, + fileFrom, + blobFromSync, + blobFrom +} from 'fetch-blob/from.js'; + +export {FormData, Headers, Request, Response, FetchError, AbortError, isRedirect}; +export {Blob, File, fileFromSync, fileFrom, blobFromSync, blobFrom}; + +const supportedSchemas = new Set(['data:', 'http:', 'https:']); + +/** + * Fetch function + * + * @param {string | URL | import('./request').default} url - Absolute url or Request instance + * @param {*} [options_] - Fetch options + * @return {Promise} + */ +export default async function fetch(url, options_) { + return new Promise((resolve, reject) => { + // Build request object + const request = new Request(url, options_); + const {parsedURL, options} = getNodeRequestOptions(request); + if (!supportedSchemas.has(parsedURL.protocol)) { + throw new TypeError(`node-fetch cannot load ${url}. URL scheme "${parsedURL.protocol.replace(/:$/, '')}" is not supported.`); + } + + if (parsedURL.protocol === 'data:') { + const data = dataUriToBuffer(request.url); + const response = new Response(data, {headers: {'Content-Type': data.typeFull}}); + resolve(response); + return; + } + + // Wrap http.request into fetch + const send = (parsedURL.protocol === 'https:' ? 
https : http).request; + const {signal} = request; + let response = null; + + const abort = () => { + const error = new AbortError('The operation was aborted.'); + reject(error); + if (request.body && request.body instanceof Stream.Readable) { + request.body.destroy(error); + } + + if (!response || !response.body) { + return; + } + + response.body.emit('error', error); + }; + + if (signal && signal.aborted) { + abort(); + return; + } + + const abortAndFinalize = () => { + abort(); + finalize(); + }; + + // Send request + const request_ = send(parsedURL.toString(), options); + + if (signal) { + signal.addEventListener('abort', abortAndFinalize); + } + + const finalize = () => { + request_.abort(); + if (signal) { + signal.removeEventListener('abort', abortAndFinalize); + } + }; + + request_.on('error', error => { + reject(new FetchError(`request to ${request.url} failed, reason: ${error.message}`, 'system', error)); + finalize(); + }); + + fixResponseChunkedTransferBadEnding(request_, error => { + if (response && response.body) { + response.body.destroy(error); + } + }); + + /* c8 ignore next 18 */ + if (process.version < 'v14') { + // Before Node.js 14, pipeline() does not fully support async iterators and does not always + // properly handle when the socket close/end events are out of order. + request_.on('socket', s => { + let endedWithEventsCount; + s.prependListener('end', () => { + endedWithEventsCount = s._eventsCount; + }); + s.prependListener('close', hadError => { + // if end happened before close but the socket didn't emit an error, do it now + if (response && endedWithEventsCount < s._eventsCount && !hadError) { + const error = new Error('Premature close'); + error.code = 'ERR_STREAM_PREMATURE_CLOSE'; + response.body.emit('error', error); + } + }); + }); + } + + request_.on('response', response_ => { + request_.setTimeout(0); + const headers = fromRawHeaders(response_.rawHeaders); + + // HTTP fetch step 5 + if (isRedirect(response_.statusCode)) { + // HTTP fetch step 5.2 + const location = headers.get('Location'); + + // HTTP fetch step 5.3 + let locationURL = null; + try { + locationURL = location === null ? null : new URL(location, request.url); + } catch { + // error here can only be invalid URL in Location: header + // do not throw when options.redirect == manual + // let the user extract the errorneous redirect URL + if (request.redirect !== 'manual') { + reject(new FetchError(`uri requested responds with an invalid redirect URL: ${location}`, 'invalid-redirect')); + finalize(); + return; + } + } + + // HTTP fetch step 5.5 + switch (request.redirect) { + case 'error': + reject(new FetchError(`uri requested responds with a redirect, redirect mode is set to error: ${request.url}`, 'no-redirect')); + finalize(); + return; + case 'manual': + // Nothing to do + break; + case 'follow': { + // HTTP-redirect fetch step 2 + if (locationURL === null) { + break; + } + + // HTTP-redirect fetch step 5 + if (request.counter >= request.follow) { + reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 6 (counter increment) + // Create a new Request object. 
+ const requestOptions = { + headers: new Headers(request.headers), + follow: request.follow, + counter: request.counter + 1, + agent: request.agent, + compress: request.compress, + method: request.method, + body: clone(request), + signal: request.signal, + size: request.size, + referrer: request.referrer, + referrerPolicy: request.referrerPolicy + }; + + // when forwarding sensitive headers like "Authorization", + // "WWW-Authenticate", and "Cookie" to untrusted targets, + // headers will be ignored when following a redirect to a domain + // that is not a subdomain match or exact match of the initial domain. + // For example, a redirect from "foo.com" to either "foo.com" or "sub.foo.com" + // will forward the sensitive headers, but a redirect to "bar.com" will not. + // headers will also be ignored when following a redirect to a domain using + // a different protocol. For example, a redirect from "https://foo.com" to "http://foo.com" + // will not forward the sensitive headers + if (!isDomainOrSubdomain(request.url, locationURL) || !isSameProtocol(request.url, locationURL)) { + for (const name of ['authorization', 'www-authenticate', 'cookie', 'cookie2']) { + requestOptions.headers.delete(name); + } + } + + // HTTP-redirect fetch step 9 + if (response_.statusCode !== 303 && request.body && options_.body instanceof Stream.Readable) { + reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 11 + if (response_.statusCode === 303 || ((response_.statusCode === 301 || response_.statusCode === 302) && request.method === 'POST')) { + requestOptions.method = 'GET'; + requestOptions.body = undefined; + requestOptions.headers.delete('content-length'); + } + + // HTTP-redirect fetch step 14 + const responseReferrerPolicy = parseReferrerPolicyFromHeader(headers); + if (responseReferrerPolicy) { + requestOptions.referrerPolicy = responseReferrerPolicy; + } + + // HTTP-redirect fetch step 15 + resolve(fetch(new Request(locationURL, requestOptions))); + finalize(); + return; + } + + default: + return reject(new TypeError(`Redirect option '${request.redirect}' is not a valid value of RequestRedirect`)); + } + } + + // Prepare response + if (signal) { + response_.once('end', () => { + signal.removeEventListener('abort', abortAndFinalize); + }); + } + + let body = pump(response_, new PassThrough(), error => { + if (error) { + reject(error); + } + }); + // see https://github.com/nodejs/node/pull/29376 + /* c8 ignore next 3 */ + if (process.version < 'v12.10') { + response_.on('aborted', abortAndFinalize); + } + + const responseOptions = { + url: request.url, + status: response_.statusCode, + statusText: response_.statusMessage, + headers, + size: request.size, + counter: request.counter, + highWaterMark: request.highWaterMark + }; + + // HTTP-network fetch step 12.1.1.3 + const codings = headers.get('Content-Encoding'); + + // HTTP-network fetch step 12.1.1.4: handle content codings + + // in following scenarios we ignore compression support + // 1. compression support is disabled + // 2. HEAD request + // 3. no Content-Encoding header + // 4. no content response (204) + // 5. 
content not modified response (304) + if (!request.compress || request.method === 'HEAD' || codings === null || response_.statusCode === 204 || response_.statusCode === 304) { + response = new Response(body, responseOptions); + resolve(response); + return; + } + + // For Node v6+ + // Be less strict when decoding compressed responses, since sometimes + // servers send slightly invalid responses that are still accepted + // by common browsers. + // Always using Z_SYNC_FLUSH is what cURL does. + const zlibOptions = { + flush: zlib.Z_SYNC_FLUSH, + finishFlush: zlib.Z_SYNC_FLUSH + }; + + // For gzip + if (codings === 'gzip' || codings === 'x-gzip') { + body = pump(body, zlib.createGunzip(zlibOptions), error => { + if (error) { + reject(error); + } + }); + response = new Response(body, responseOptions); + resolve(response); + return; + } + + // For deflate + if (codings === 'deflate' || codings === 'x-deflate') { + // Handle the infamous raw deflate response from old servers + // a hack for old IIS and Apache servers + const raw = pump(response_, new PassThrough(), error => { + if (error) { + reject(error); + } + }); + raw.once('data', chunk => { + // See http://stackoverflow.com/questions/37519828 + if ((chunk[0] & 0x0F) === 0x08) { + body = pump(body, zlib.createInflate(), error => { + if (error) { + reject(error); + } + }); + } else { + body = pump(body, zlib.createInflateRaw(), error => { + if (error) { + reject(error); + } + }); + } + + response = new Response(body, responseOptions); + resolve(response); + }); + raw.once('end', () => { + // Some old IIS servers return zero-length OK deflate responses, so + // 'data' is never emitted. See https://github.com/node-fetch/node-fetch/pull/903 + if (!response) { + response = new Response(body, responseOptions); + resolve(response); + } + }); + return; + } + + // For br + if (codings === 'br') { + body = pump(body, zlib.createBrotliDecompress(), error => { + if (error) { + reject(error); + } + }); + response = new Response(body, responseOptions); + resolve(response); + return; + } + + // Otherwise, use response as-is + response = new Response(body, responseOptions); + resolve(response); + }); + + // eslint-disable-next-line promise/prefer-await-to-then + writeToStream(request_, request).catch(reject); + }); +} + +function fixResponseChunkedTransferBadEnding(request, errorCallback) { + const LAST_CHUNK = Buffer.from('0\r\n\r\n'); + + let isChunkedTransfer = false; + let properLastChunkReceived = false; + let previousChunk; + + request.on('response', response => { + const {headers} = response; + isChunkedTransfer = headers['transfer-encoding'] === 'chunked' && !headers['content-length']; + }); + + request.on('socket', socket => { + const onSocketClose = () => { + if (isChunkedTransfer && !properLastChunkReceived) { + const error = new Error('Premature close'); + error.code = 'ERR_STREAM_PREMATURE_CLOSE'; + errorCallback(error); + } + }; + + const onData = buf => { + properLastChunkReceived = Buffer.compare(buf.slice(-5), LAST_CHUNK) === 0; + + // Sometimes final 0-length chunk and end of message code are in separate packets + if (!properLastChunkReceived && previousChunk) { + properLastChunkReceived = ( + Buffer.compare(previousChunk.slice(-3), LAST_CHUNK.slice(0, 3)) === 0 && + Buffer.compare(buf.slice(-2), LAST_CHUNK.slice(3)) === 0 + ); + } + + previousChunk = buf; + }; + + socket.prependListener('close', onSocketClose); + socket.on('data', onData); + + request.on('close', () => { + socket.removeListener('close', onSocketClose); + 
socket.removeListener('data', onData); + }); + }); +} diff --git a/_shared/lib/node-fetch/src/request.js b/_shared/lib/node-fetch/src/request.js new file mode 100644 index 00000000..af2ebc8e --- /dev/null +++ b/_shared/lib/node-fetch/src/request.js @@ -0,0 +1,313 @@ +/** + * Request.js + * + * Request class contains server only options + * + * All spec algorithm step numbers are based on https://fetch.spec.whatwg.org/commit-snapshots/ae716822cb3a61843226cd090eefc6589446c1d2/. + */ + +import {format as formatUrl} from 'node:url'; +import {deprecate} from 'node:util'; +import Headers from './headers.js'; +import Body, {clone, extractContentType, getTotalBytes} from './body.js'; +import {isAbortSignal} from './utils/is.js'; +import {getSearch} from './utils/get-search.js'; +import { + validateReferrerPolicy, determineRequestsReferrer, DEFAULT_REFERRER_POLICY +} from './utils/referrer.js'; + +const INTERNALS = Symbol('Request internals'); + +/** + * Check if `obj` is an instance of Request. + * + * @param {*} object + * @return {boolean} + */ +const isRequest = object => { + return ( + typeof object === 'object' && + typeof object[INTERNALS] === 'object' + ); +}; + +const doBadDataWarn = deprecate(() => {}, + '.data is not a valid RequestInit property, use .body instead', + 'https://github.com/node-fetch/node-fetch/issues/1000 (request)'); + +/** + * Request class + * + * Ref: https://fetch.spec.whatwg.org/#request-class + * + * @param Mixed input Url or Request instance + * @param Object init Custom options + * @return Void + */ +export default class Request extends Body { + constructor(input, init = {}) { + let parsedURL; + + // Normalize input and force URL to be encoded as UTF-8 (https://github.com/node-fetch/node-fetch/issues/245) + if (isRequest(input)) { + parsedURL = new URL(input.url); + } else { + parsedURL = new URL(input); + input = {}; + } + + if (parsedURL.username !== '' || parsedURL.password !== '') { + throw new TypeError(`${parsedURL} is an url with embedded credentials.`); + } + + let method = init.method || input.method || 'GET'; + if (/^(delete|get|head|options|post|put)$/i.test(method)) { + method = method.toUpperCase(); + } + + if (!isRequest(init) && 'data' in init) { + doBadDataWarn(); + } + + // eslint-disable-next-line no-eq-null, eqeqeq + if ((init.body != null || (isRequest(input) && input.body !== null)) && + (method === 'GET' || method === 'HEAD')) { + throw new TypeError('Request with GET/HEAD method cannot have body'); + } + + const inputBody = init.body ? + init.body : + (isRequest(input) && input.body !== null ? + clone(input) : + null); + + super(inputBody, { + size: init.size || input.size || 0 + }); + + const headers = new Headers(init.headers || input.headers || {}); + + if (inputBody !== null && !headers.has('Content-Type')) { + const contentType = extractContentType(inputBody, this); + if (contentType) { + headers.set('Content-Type', contentType); + } + } + + let signal = isRequest(input) ? + input.signal : + null; + if ('signal' in init) { + signal = init.signal; + } + + // eslint-disable-next-line no-eq-null, eqeqeq + if (signal != null && !isAbortSignal(signal)) { + throw new TypeError('Expected signal to be an instanceof AbortSignal or EventTarget'); + } + + // §5.4, Request constructor steps, step 15.1 + // eslint-disable-next-line no-eq-null, eqeqeq + let referrer = init.referrer == null ? 
input.referrer : init.referrer; + if (referrer === '') { + // §5.4, Request constructor steps, step 15.2 + referrer = 'no-referrer'; + } else if (referrer) { + // §5.4, Request constructor steps, step 15.3.1, 15.3.2 + const parsedReferrer = new URL(referrer); + // §5.4, Request constructor steps, step 15.3.3, 15.3.4 + referrer = /^about:(\/\/)?client$/.test(parsedReferrer) ? 'client' : parsedReferrer; + } else { + referrer = undefined; + } + + this[INTERNALS] = { + method, + redirect: init.redirect || input.redirect || 'follow', + headers, + parsedURL, + signal, + referrer + }; + + // Node-fetch-only options + this.follow = init.follow === undefined ? (input.follow === undefined ? 20 : input.follow) : init.follow; + this.compress = init.compress === undefined ? (input.compress === undefined ? true : input.compress) : init.compress; + this.counter = init.counter || input.counter || 0; + this.agent = init.agent || input.agent; + this.highWaterMark = init.highWaterMark || input.highWaterMark || 16384; + this.insecureHTTPParser = init.insecureHTTPParser || input.insecureHTTPParser || false; + + // §5.4, Request constructor steps, step 16. + // Default is empty string per https://fetch.spec.whatwg.org/#concept-request-referrer-policy + this.referrerPolicy = init.referrerPolicy || input.referrerPolicy || ''; + } + + /** @returns {string} */ + get method() { + return this[INTERNALS].method; + } + + /** @returns {string} */ + get url() { + return formatUrl(this[INTERNALS].parsedURL); + } + + /** @returns {Headers} */ + get headers() { + return this[INTERNALS].headers; + } + + get redirect() { + return this[INTERNALS].redirect; + } + + /** @returns {AbortSignal} */ + get signal() { + return this[INTERNALS].signal; + } + + // https://fetch.spec.whatwg.org/#dom-request-referrer + get referrer() { + if (this[INTERNALS].referrer === 'no-referrer') { + return ''; + } + + if (this[INTERNALS].referrer === 'client') { + return 'about:client'; + } + + if (this[INTERNALS].referrer) { + return this[INTERNALS].referrer.toString(); + } + + return undefined; + } + + get referrerPolicy() { + return this[INTERNALS].referrerPolicy; + } + + set referrerPolicy(referrerPolicy) { + this[INTERNALS].referrerPolicy = validateReferrerPolicy(referrerPolicy); + } + + /** + * Clone this request + * + * @return Request + */ + clone() { + return new Request(this); + } + + get [Symbol.toStringTag]() { + return 'Request'; + } +} + +Object.defineProperties(Request.prototype, { + method: {enumerable: true}, + url: {enumerable: true}, + headers: {enumerable: true}, + redirect: {enumerable: true}, + clone: {enumerable: true}, + signal: {enumerable: true}, + referrer: {enumerable: true}, + referrerPolicy: {enumerable: true} +}); + +/** + * Convert a Request to Node.js http request options. 
+ * + * @param {Request} request - A Request instance + * @return The options object to be passed to http.request + */ +export const getNodeRequestOptions = request => { + const {parsedURL} = request[INTERNALS]; + const headers = new Headers(request[INTERNALS].headers); + + // Fetch step 1.3 + if (!headers.has('Accept')) { + headers.set('Accept', '*/*'); + } + + // HTTP-network-or-cache fetch steps 2.4-2.7 + let contentLengthValue = null; + if (request.body === null && /^(post|put)$/i.test(request.method)) { + contentLengthValue = '0'; + } + + if (request.body !== null) { + const totalBytes = getTotalBytes(request); + // Set Content-Length if totalBytes is a number (that is not NaN) + if (typeof totalBytes === 'number' && !Number.isNaN(totalBytes)) { + contentLengthValue = String(totalBytes); + } + } + + if (contentLengthValue) { + headers.set('Content-Length', contentLengthValue); + } + + // 4.1. Main fetch, step 2.6 + // > If request's referrer policy is the empty string, then set request's referrer policy to the + // > default referrer policy. + if (request.referrerPolicy === '') { + request.referrerPolicy = DEFAULT_REFERRER_POLICY; + } + + // 4.1. Main fetch, step 2.7 + // > If request's referrer is not "no-referrer", set request's referrer to the result of invoking + // > determine request's referrer. + if (request.referrer && request.referrer !== 'no-referrer') { + request[INTERNALS].referrer = determineRequestsReferrer(request); + } else { + request[INTERNALS].referrer = 'no-referrer'; + } + + // 4.5. HTTP-network-or-cache fetch, step 6.9 + // > If httpRequest's referrer is a URL, then append `Referer`/httpRequest's referrer, serialized + // > and isomorphic encoded, to httpRequest's header list. + if (request[INTERNALS].referrer instanceof URL) { + headers.set('Referer', request.referrer); + } + + // HTTP-network-or-cache fetch step 2.11 + if (!headers.has('User-Agent')) { + headers.set('User-Agent', 'node-fetch'); + } + + // HTTP-network-or-cache fetch step 2.15 + if (request.compress && !headers.has('Accept-Encoding')) { + headers.set('Accept-Encoding', 'gzip, deflate, br'); + } + + let {agent} = request; + if (typeof agent === 'function') { + agent = agent(parsedURL); + } + + // HTTP-network fetch step 4.2 + // chunked encoding is handled by Node.js + + const search = getSearch(parsedURL); + + // Pass the full URL directly to request(), but overwrite the following + // options: + const options = { + // Overwrite search to retain trailing ? 
(issue #776) + path: parsedURL.pathname + search, + // The following options are not expressed in the URL + method: request.method, + headers: headers[Symbol.for('nodejs.util.inspect.custom')](), + insecureHTTPParser: request.insecureHTTPParser, + agent + }; + + return { + /** @type {URL} */ + parsedURL, + options + }; +}; diff --git a/_shared/lib/node-fetch/src/response.js b/_shared/lib/node-fetch/src/response.js new file mode 100644 index 00000000..9806c0cb --- /dev/null +++ b/_shared/lib/node-fetch/src/response.js @@ -0,0 +1,160 @@ +/** + * Response.js + * + * Response class provides content decoding + */ + +import Headers from './headers.js'; +import Body, {clone, extractContentType} from './body.js'; +import {isRedirect} from './utils/is-redirect.js'; + +const INTERNALS = Symbol('Response internals'); + +/** + * Response class + * + * Ref: https://fetch.spec.whatwg.org/#response-class + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +export default class Response extends Body { + constructor(body = null, options = {}) { + super(body, options); + + // eslint-disable-next-line no-eq-null, eqeqeq, no-negated-condition + const status = options.status != null ? options.status : 200; + + const headers = new Headers(options.headers); + + if (body !== null && !headers.has('Content-Type')) { + const contentType = extractContentType(body, this); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + this[INTERNALS] = { + type: 'default', + url: options.url, + status, + statusText: options.statusText || '', + headers, + counter: options.counter, + highWaterMark: options.highWaterMark + }; + } + + get type() { + return this[INTERNALS].type; + } + + get url() { + return this[INTERNALS].url || ''; + } + + get status() { + return this[INTERNALS].status; + } + + /** + * Convenience property representing if the request ended normally + */ + get ok() { + return this[INTERNALS].status >= 200 && this[INTERNALS].status < 300; + } + + get redirected() { + return this[INTERNALS].counter > 0; + } + + get statusText() { + return this[INTERNALS].statusText; + } + + get headers() { + return this[INTERNALS].headers; + } + + get highWaterMark() { + return this[INTERNALS].highWaterMark; + } + + /** + * Clone this response + * + * @return Response + */ + clone() { + return new Response(clone(this, this.highWaterMark), { + type: this.type, + url: this.url, + status: this.status, + statusText: this.statusText, + headers: this.headers, + ok: this.ok, + redirected: this.redirected, + size: this.size, + highWaterMark: this.highWaterMark + }); + } + + /** + * @param {string} url The URL that the new response is to originate from. + * @param {number} status An optional status code for the response (e.g., 302.) + * @returns {Response} A Response object. 
+ */ + static redirect(url, status = 302) { + if (!isRedirect(status)) { + throw new RangeError('Failed to execute "redirect" on "response": Invalid status code'); + } + + return new Response(null, { + headers: { + location: new URL(url).toString() + }, + status + }); + } + + static error() { + const response = new Response(null, {status: 0, statusText: ''}); + response[INTERNALS].type = 'error'; + return response; + } + + static json(data = undefined, init = {}) { + const body = JSON.stringify(data); + + if (body === undefined) { + throw new TypeError('data is not JSON serializable'); + } + + const headers = new Headers(init && init.headers); + + if (!headers.has('content-type')) { + headers.set('content-type', 'application/json'); + } + + return new Response(body, { + ...init, + headers + }); + } + + get [Symbol.toStringTag]() { + return 'Response'; + } +} + +Object.defineProperties(Response.prototype, { + type: {enumerable: true}, + url: {enumerable: true}, + status: {enumerable: true}, + ok: {enumerable: true}, + redirected: {enumerable: true}, + statusText: {enumerable: true}, + headers: {enumerable: true}, + clone: {enumerable: true} +}); diff --git a/_shared/lib/node-fetch/src/utils/get-search.js b/_shared/lib/node-fetch/src/utils/get-search.js new file mode 100644 index 00000000..d067e7c7 --- /dev/null +++ b/_shared/lib/node-fetch/src/utils/get-search.js @@ -0,0 +1,9 @@ +export const getSearch = parsedURL => { + if (parsedURL.search) { + return parsedURL.search; + } + + const lastOffset = parsedURL.href.length - 1; + const hash = parsedURL.hash || (parsedURL.href[lastOffset] === '#' ? '#' : ''); + return parsedURL.href[lastOffset - hash.length] === '?' ? '?' : ''; +}; diff --git a/_shared/lib/node-fetch/src/utils/is-redirect.js b/_shared/lib/node-fetch/src/utils/is-redirect.js new file mode 100644 index 00000000..d1347f00 --- /dev/null +++ b/_shared/lib/node-fetch/src/utils/is-redirect.js @@ -0,0 +1,11 @@ +const redirectStatus = new Set([301, 302, 303, 307, 308]); + +/** + * Redirect code matching + * + * @param {number} code - Status code + * @return {boolean} + */ +export const isRedirect = code => { + return redirectStatus.has(code); +}; diff --git a/_shared/lib/node-fetch/src/utils/is.js b/_shared/lib/node-fetch/src/utils/is.js new file mode 100644 index 00000000..f9e467e4 --- /dev/null +++ b/_shared/lib/node-fetch/src/utils/is.js @@ -0,0 +1,87 @@ +/** + * Is.js + * + * Object type checks. 
+ */ + +const NAME = Symbol.toStringTag; + +/** + * Check if `obj` is a URLSearchParams object + * ref: https://github.com/node-fetch/node-fetch/issues/296#issuecomment-307598143 + * @param {*} object - Object to check for + * @return {boolean} + */ +export const isURLSearchParameters = object => { + return ( + typeof object === 'object' && + typeof object.append === 'function' && + typeof object.delete === 'function' && + typeof object.get === 'function' && + typeof object.getAll === 'function' && + typeof object.has === 'function' && + typeof object.set === 'function' && + typeof object.sort === 'function' && + object[NAME] === 'URLSearchParams' + ); +}; + +/** + * Check if `object` is a W3C `Blob` object (which `File` inherits from) + * @param {*} object - Object to check for + * @return {boolean} + */ +export const isBlob = object => { + return ( + object && + typeof object === 'object' && + typeof object.arrayBuffer === 'function' && + typeof object.type === 'string' && + typeof object.stream === 'function' && + typeof object.constructor === 'function' && + /^(Blob|File)$/.test(object[NAME]) + ); +}; + +/** + * Check if `obj` is an instance of AbortSignal. + * @param {*} object - Object to check for + * @return {boolean} + */ +export const isAbortSignal = object => { + return ( + typeof object === 'object' && ( + object[NAME] === 'AbortSignal' || + object[NAME] === 'EventTarget' + ) + ); +}; + +/** + * isDomainOrSubdomain reports whether sub is a subdomain (or exact match) of + * the parent domain. + * + * Both domains must already be in canonical form. + * @param {string|URL} original + * @param {string|URL} destination + */ +export const isDomainOrSubdomain = (destination, original) => { + const orig = new URL(original).hostname; + const dest = new URL(destination).hostname; + + return orig === dest || orig.endsWith(`.${dest}`); +}; + +/** + * isSameProtocol reports whether the two provided URLs use the same protocol. + * + * Both domains must already be in canonical form. 
+ * @param {string|URL} original + * @param {string|URL} destination + */ +export const isSameProtocol = (destination, original) => { + const orig = new URL(original).protocol; + const dest = new URL(destination).protocol; + + return orig === dest; +}; diff --git a/_shared/lib/node-fetch/src/utils/multipart-parser.js b/_shared/lib/node-fetch/src/utils/multipart-parser.js new file mode 100644 index 00000000..5ad06f98 --- /dev/null +++ b/_shared/lib/node-fetch/src/utils/multipart-parser.js @@ -0,0 +1,432 @@ +import {File} from 'fetch-blob/from.js'; +import {FormData} from 'formdata-polyfill/esm.min.js'; + +let s = 0; +const S = { + START_BOUNDARY: s++, + HEADER_FIELD_START: s++, + HEADER_FIELD: s++, + HEADER_VALUE_START: s++, + HEADER_VALUE: s++, + HEADER_VALUE_ALMOST_DONE: s++, + HEADERS_ALMOST_DONE: s++, + PART_DATA_START: s++, + PART_DATA: s++, + END: s++ +}; + +let f = 1; +const F = { + PART_BOUNDARY: f, + LAST_BOUNDARY: f *= 2 +}; + +const LF = 10; +const CR = 13; +const SPACE = 32; +const HYPHEN = 45; +const COLON = 58; +const A = 97; +const Z = 122; + +const lower = c => c | 0x20; + +const noop = () => {}; + +class MultipartParser { + /** + * @param {string} boundary + */ + constructor(boundary) { + this.index = 0; + this.flags = 0; + + this.onHeaderEnd = noop; + this.onHeaderField = noop; + this.onHeadersEnd = noop; + this.onHeaderValue = noop; + this.onPartBegin = noop; + this.onPartData = noop; + this.onPartEnd = noop; + + this.boundaryChars = {}; + + boundary = '\r\n--' + boundary; + const ui8a = new Uint8Array(boundary.length); + for (let i = 0; i < boundary.length; i++) { + ui8a[i] = boundary.charCodeAt(i); + this.boundaryChars[ui8a[i]] = true; + } + + this.boundary = ui8a; + this.lookbehind = new Uint8Array(this.boundary.length + 8); + this.state = S.START_BOUNDARY; + } + + /** + * @param {Uint8Array} data + */ + write(data) { + let i = 0; + const length_ = data.length; + let previousIndex = this.index; + let {lookbehind, boundary, boundaryChars, index, state, flags} = this; + const boundaryLength = this.boundary.length; + const boundaryEnd = boundaryLength - 1; + const bufferLength = data.length; + let c; + let cl; + + const mark = name => { + this[name + 'Mark'] = i; + }; + + const clear = name => { + delete this[name + 'Mark']; + }; + + const callback = (callbackSymbol, start, end, ui8a) => { + if (start === undefined || start !== end) { + this[callbackSymbol](ui8a && ui8a.subarray(start, end)); + } + }; + + const dataCallback = (name, clear) => { + const markSymbol = name + 'Mark'; + if (!(markSymbol in this)) { + return; + } + + if (clear) { + callback(name, this[markSymbol], i, data); + delete this[markSymbol]; + } else { + callback(name, this[markSymbol], data.length, data); + this[markSymbol] = 0; + } + }; + + for (i = 0; i < length_; i++) { + c = data[i]; + + switch (state) { + case S.START_BOUNDARY: + if (index === boundary.length - 2) { + if (c === HYPHEN) { + flags |= F.LAST_BOUNDARY; + } else if (c !== CR) { + return; + } + + index++; + break; + } else if (index - 1 === boundary.length - 2) { + if (flags & F.LAST_BOUNDARY && c === HYPHEN) { + state = S.END; + flags = 0; + } else if (!(flags & F.LAST_BOUNDARY) && c === LF) { + index = 0; + callback('onPartBegin'); + state = S.HEADER_FIELD_START; + } else { + return; + } + + break; + } + + if (c !== boundary[index + 2]) { + index = -2; + } + + if (c === boundary[index + 2]) { + index++; + } + + break; + case S.HEADER_FIELD_START: + state = S.HEADER_FIELD; + mark('onHeaderField'); + index = 0; + // falls through + 
+				case S.HEADER_FIELD:
+					if (c === CR) {
+						clear('onHeaderField');
+						state = S.HEADERS_ALMOST_DONE;
+						break;
+					}
+
+					index++;
+					if (c === HYPHEN) {
+						break;
+					}
+
+					if (c === COLON) {
+						if (index === 1) {
+							// empty header field
+							return;
+						}
+
+						dataCallback('onHeaderField', true);
+						state = S.HEADER_VALUE_START;
+						break;
+					}
+
+					cl = lower(c);
+					if (cl < A || cl > Z) {
+						return;
+					}
+
+					break;
+				case S.HEADER_VALUE_START:
+					if (c === SPACE) {
+						break;
+					}
+
+					mark('onHeaderValue');
+					state = S.HEADER_VALUE;
+					// falls through
+				case S.HEADER_VALUE:
+					if (c === CR) {
+						dataCallback('onHeaderValue', true);
+						callback('onHeaderEnd');
+						state = S.HEADER_VALUE_ALMOST_DONE;
+					}
+
+					break;
+				case S.HEADER_VALUE_ALMOST_DONE:
+					if (c !== LF) {
+						return;
+					}
+
+					state = S.HEADER_FIELD_START;
+					break;
+				case S.HEADERS_ALMOST_DONE:
+					if (c !== LF) {
+						return;
+					}
+
+					callback('onHeadersEnd');
+					state = S.PART_DATA_START;
+					break;
+				case S.PART_DATA_START:
+					state = S.PART_DATA;
+					mark('onPartData');
+					// falls through
+				case S.PART_DATA:
+					previousIndex = index;
+
+					if (index === 0) {
+						// Boyer-Moore-derived algorithm to safely skip non-boundary data
+						i += boundaryEnd;
+						while (i < bufferLength && !(data[i] in boundaryChars)) {
+							i += boundaryLength;
+						}
+
+						i -= boundaryEnd;
+						c = data[i];
+					}
+
+					if (index < boundary.length) {
+						if (boundary[index] === c) {
+							if (index === 0) {
+								dataCallback('onPartData', true);
+							}
+
+							index++;
+						} else {
+							index = 0;
+						}
+					} else if (index === boundary.length) {
+						index++;
+						if (c === CR) {
+							// CR = part boundary
+							flags |= F.PART_BOUNDARY;
+						} else if (c === HYPHEN) {
+							// HYPHEN = end boundary
+							flags |= F.LAST_BOUNDARY;
+						} else {
+							index = 0;
+						}
+					} else if (index - 1 === boundary.length) {
+						if (flags & F.PART_BOUNDARY) {
+							index = 0;
+							if (c === LF) {
+								// unset the PART_BOUNDARY flag
+								flags &= ~F.PART_BOUNDARY;
+								callback('onPartEnd');
+								callback('onPartBegin');
+								state = S.HEADER_FIELD_START;
+								break;
+							}
+						} else if (flags & F.LAST_BOUNDARY) {
+							if (c === HYPHEN) {
+								callback('onPartEnd');
+								state = S.END;
+								flags = 0;
+							} else {
+								index = 0;
+							}
+						} else {
+							index = 0;
+						}
+					}
+
+					if (index > 0) {
+						// when matching a possible boundary, keep a lookbehind reference
+						// in case it turns out to be a false lead
+						lookbehind[index - 1] = c;
+					} else if (previousIndex > 0) {
+						// if our boundary turned out to be rubbish, the captured lookbehind
+						// belongs to partData
+						const _lookbehind = new Uint8Array(lookbehind.buffer, lookbehind.byteOffset, lookbehind.byteLength);
+						callback('onPartData', 0, previousIndex, _lookbehind);
+						previousIndex = 0;
+						mark('onPartData');
+
+						// reconsider the current character even though it interrupted the
+						// sequence; it could be the beginning of a new sequence
+						i--;
+					}
+
+					break;
+				case S.END:
+					break;
+				default:
+					throw new Error(`Unexpected state entered: ${state}`);
+			}
+		}
+
+		dataCallback('onHeaderField');
+		dataCallback('onHeaderValue');
+		dataCallback('onPartData');
+
+		// Update properties for the next call
+		this.index = index;
+		this.state = state;
+		this.flags = flags;
+	}
+
+	end() {
+		if ((this.state === S.HEADER_FIELD_START && this.index === 0) ||
+			(this.state === S.PART_DATA && this.index === this.boundary.length)) {
+			this.onPartEnd();
+		} else if (this.state !== S.END) {
+			throw new Error('MultipartParser.end(): stream ended unexpectedly');
+		}
+	}
+}
+
+function _fileName(headerValue) {
+	// matches either a quoted-string or a token (RFC 2616 section 19.5.1)
+	const m = headerValue.match(/\bfilename=("(.*?)"|([^()<>@,;:\\"/[\]?={}\s\t]+))($|;\s)/i);
+	if (!m) {
+		return;
+	}
+
+	const match = m[2] || m[3] || '';
+	let filename = match.slice(match.lastIndexOf('\\') + 1);
+	filename = filename.replace(/%22/g, '"');
+	filename = filename.replace(/&#(\d{4});/g, (m, code) => {
+		return String.fromCharCode(code);
+	});
+	return filename;
+}
+
+export async function toFormData(Body, ct) {
+	if (!/multipart/i.test(ct)) {
+		throw new TypeError('Failed to fetch');
+	}
+
+	const m = ct.match(/boundary=(?:"([^"]+)"|([^;]+))/i);
+
+	if (!m) {
+		throw new TypeError('no or bad content-type header, no multipart boundary');
+	}
+
+	const parser = new MultipartParser(m[1] || m[2]);
+
+	let headerField;
+	let headerValue;
+	let entryValue;
+	let entryName;
+	let contentType;
+	let filename;
+	const entryChunks = [];
+	const formData = new FormData();
+
+	const onPartData = ui8a => {
+		entryValue += decoder.decode(ui8a, {stream: true});
+	};
+
+	const appendToFile = ui8a => {
+		entryChunks.push(ui8a);
+	};
+
+	const appendFileToFormData = () => {
+		const file = new File(entryChunks, filename, {type: contentType});
+		formData.append(entryName, file);
+	};
+
+	const appendEntryToFormData = () => {
+		formData.append(entryName, entryValue);
+	};
+
+	const decoder = new TextDecoder('utf-8');
+	decoder.decode();
+
+	parser.onPartBegin = function () {
+		parser.onPartData = onPartData;
+		parser.onPartEnd = appendEntryToFormData;
+
+		headerField = '';
+		headerValue = '';
+		entryValue = '';
+		entryName = '';
+		contentType = '';
+		filename = null;
+		entryChunks.length = 0;
+	};
+
+	parser.onHeaderField = function (ui8a) {
+		headerField += decoder.decode(ui8a, {stream: true});
+	};
+
+	parser.onHeaderValue = function (ui8a) {
+		headerValue += decoder.decode(ui8a, {stream: true});
+	};
+
+	parser.onHeaderEnd = function () {
+		headerValue += decoder.decode();
+		headerField = headerField.toLowerCase();
+
+		if (headerField === 'content-disposition') {
+			// matches either a quoted-string or a token (RFC 2616 section 19.5.1)
+			const m = headerValue.match(/\bname=("([^"]*)"|([^()<>@,;:\\"/[\]?={}\s\t]+))/i);
+
+			if (m) {
+				entryName = m[2] || m[3] || '';
+			}
+
+			filename = _fileName(headerValue);
+
+			if (filename) {
+				parser.onPartData = appendToFile;
+				parser.onPartEnd = appendFileToFormData;
+			}
+		} else if (headerField === 'content-type') {
+			contentType = headerValue;
+		}
+
+		headerValue = '';
+		headerField = '';
+	};
+
+	for await (const chunk of Body) {
+		parser.write(chunk);
+	}
+
+	parser.end();
+
+	return formData;
+}
diff --git a/_shared/lib/node-fetch/src/utils/referrer.js b/_shared/lib/node-fetch/src/utils/referrer.js
new file mode 100644
index 00000000..6741f2fc
--- /dev/null
+++ b/_shared/lib/node-fetch/src/utils/referrer.js
@@ -0,0 +1,340 @@
+import {isIP} from 'node:net';
+
+/**
+ * @external URL
+ * @see {@link https://developer.mozilla.org/en-US/docs/Web/API/URL|URL}
+ */
+
+/**
+ * @module utils/referrer
+ * @private
+ */
+
+/**
+ * @see {@link https://w3c.github.io/webappsec-referrer-policy/#strip-url|Referrer Policy §8.4. Strip url for use as a referrer}
+ * @param {string} url
+ * @param {boolean} [originOnly=false]
+ */
+export function stripURLForUseAsAReferrer(url, originOnly = false) {
+	// 1. If url is null, return no referrer.
+	if (url == null) { // eslint-disable-line no-eq-null, eqeqeq
+		return 'no-referrer';
+	}
+
+	url = new URL(url);
+
+	// 2. If url's scheme is a local scheme, then return no referrer.
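+	// Per the Fetch standard, the local schemes are "about", "blob" and "data";
+	// file: is handled separately below, in isOriginPotentiallyTrustworthy.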
+	if (/^(about|blob|data):$/.test(url.protocol)) {
+		return 'no-referrer';
+	}
+
+	// 3. Set url's username to the empty string.
+	url.username = '';
+
+	// 4. Set url's password to null.
+	// Note: `null` appears to be a mistake as this actually results in the password being `"null"`.
+	url.password = '';
+
+	// 5. Set url's fragment to null.
+	// Note: `null` appears to be a mistake as this actually results in the fragment being `"#null"`.
+	url.hash = '';
+
+	// 6. If the origin-only flag is true, then:
+	if (originOnly) {
+		// 6.1. Set url's path to null.
+		// Note: `null` appears to be a mistake as this actually results in the path being `"/null"`.
+		url.pathname = '';
+
+		// 6.2. Set url's query to null.
+		// Note: `null` appears to be a mistake as this actually results in the query being `"?null"`.
+		url.search = '';
+	}
+
+	// 7. Return url.
+	return url;
+}
+
+/**
+ * @see {@link https://w3c.github.io/webappsec-referrer-policy/#enumdef-referrerpolicy|enum ReferrerPolicy}
+ */
+export const ReferrerPolicy = new Set([
+	'',
+	'no-referrer',
+	'no-referrer-when-downgrade',
+	'same-origin',
+	'origin',
+	'strict-origin',
+	'origin-when-cross-origin',
+	'strict-origin-when-cross-origin',
+	'unsafe-url'
+]);
+
+/**
+ * @see {@link https://w3c.github.io/webappsec-referrer-policy/#default-referrer-policy|default referrer policy}
+ */
+export const DEFAULT_REFERRER_POLICY = 'strict-origin-when-cross-origin';
+
+/**
+ * @see {@link https://w3c.github.io/webappsec-referrer-policy/#referrer-policies|Referrer Policy §3. Referrer Policies}
+ * @param {string} referrerPolicy
+ * @returns {string} referrerPolicy
+ */
+export function validateReferrerPolicy(referrerPolicy) {
+	if (!ReferrerPolicy.has(referrerPolicy)) {
+		throw new TypeError(`Invalid referrerPolicy: ${referrerPolicy}`);
+	}
+
+	return referrerPolicy;
+}
+
+/**
+ * @see {@link https://w3c.github.io/webappsec-secure-contexts/#is-origin-trustworthy|Referrer Policy §3.2. Is origin potentially trustworthy?}
+ * @param {external:URL} url
+ * @returns `true`: "Potentially Trustworthy", `false`: "Not Trustworthy"
+ */
+export function isOriginPotentiallyTrustworthy(url) {
+	// 1. If origin is an opaque origin, return "Not Trustworthy".
+	// Not applicable
+
+	// 2. Assert: origin is a tuple origin.
+	// Not for implementations
+
+	// 3. If origin's scheme is either "https" or "wss", return "Potentially Trustworthy".
+	if (/^(http|ws)s:$/.test(url.protocol)) {
+		return true;
+	}
+
+	// 4. If origin's host component matches one of the CIDR notations 127.0.0.0/8 or ::1/128 [RFC4632], return "Potentially Trustworthy".
+	const hostIp = url.host.replace(/(^\[)|(]$)/g, '');
+	const hostIPVersion = isIP(hostIp);
+
+	if (hostIPVersion === 4 && /^127\./.test(hostIp)) {
+		return true;
+	}
+
+	if (hostIPVersion === 6 && /^(((0+:){7})|(::(0+:){0,6}))0*1$/.test(hostIp)) {
+		return true;
+	}
+
+	// 5. If origin's host component is "localhost" or falls within ".localhost", and the user agent conforms to the name resolution rules in [let-localhost-be-localhost], return "Potentially Trustworthy".
+	// We are returning FALSE here because we cannot ensure conformance to
+	// let-localhost-be-localhost (https://tools.ietf.org/html/draft-west-let-localhost-be-localhost)
+	if (url.host === 'localhost' || url.host.endsWith('.localhost')) {
+		return false;
+	}
+
+	// 6. If origin's scheme component is file, return "Potentially Trustworthy".
+	if (url.protocol === 'file:') {
+		return true;
+	}
+
+	// 7. If origin's scheme component is one which the user agent considers to be authenticated, return "Potentially Trustworthy".
+	// Not supported
+
+	// 8. If origin has been configured as a trustworthy origin, return "Potentially Trustworthy".
+	// Not supported
+
+	// 9. Return "Not Trustworthy".
+	return false;
+}
+
+/**
+ * @see {@link https://w3c.github.io/webappsec-secure-contexts/#is-url-trustworthy|Referrer Policy §3.3. Is url potentially trustworthy?}
+ * @param {external:URL} url
+ * @returns `true`: "Potentially Trustworthy", `false`: "Not Trustworthy"
+ */
+export function isUrlPotentiallyTrustworthy(url) {
+	// 1. If url is "about:blank" or "about:srcdoc", return "Potentially Trustworthy".
+	if (/^about:(blank|srcdoc)$/.test(url)) {
+		return true;
+	}
+
+	// 2. If url's scheme is "data", return "Potentially Trustworthy".
+	if (url.protocol === 'data:') {
+		return true;
+	}
+
+	// Note: The origin of blob: and filesystem: URLs is the origin of the context in which they were
+	// created. Therefore, blobs created in a trustworthy origin will themselves be potentially
+	// trustworthy.
+	if (/^(blob|filesystem):$/.test(url.protocol)) {
+		return true;
+	}
+
+	// 3. Return the result of executing §3.2 Is origin potentially trustworthy? on url's origin.
+	return isOriginPotentiallyTrustworthy(url);
+}
+
+/**
+ * Modifies the referrerURL to enforce any extra security policy considerations.
+ * @see {@link https://w3c.github.io/webappsec-referrer-policy/#determine-requests-referrer|Referrer Policy §8.3. Determine request's Referrer}, step 7
+ * @callback module:utils/referrer~referrerURLCallback
+ * @param {external:URL} referrerURL
+ * @returns {external:URL} modified referrerURL
+ */
+
+/**
+ * Modifies the referrerOrigin to enforce any extra security policy considerations.
+ * @see {@link https://w3c.github.io/webappsec-referrer-policy/#determine-requests-referrer|Referrer Policy §8.3. Determine request's Referrer}, step 7
+ * @callback module:utils/referrer~referrerOriginCallback
+ * @param {external:URL} referrerOrigin
+ * @returns {external:URL} modified referrerOrigin
+ */
+
+/**
+ * @see {@link https://w3c.github.io/webappsec-referrer-policy/#determine-requests-referrer|Referrer Policy §8.3. Determine request's Referrer}
+ * @param {Request} request
+ * @param {object} o
+ * @param {module:utils/referrer~referrerURLCallback} o.referrerURLCallback
+ * @param {module:utils/referrer~referrerOriginCallback} o.referrerOriginCallback
+ * @returns {external:URL} Request's referrer
+ */
+export function determineRequestsReferrer(request, {referrerURLCallback, referrerOriginCallback} = {}) {
+	// There are 2 notes in the specification about invalid pre-conditions. We return null, here, for
+	// these cases:
+	// > Note: If request's referrer is "no-referrer", Fetch will not call into this algorithm.
+	// > Note: If request's referrer policy is the empty string, Fetch will not call into this
+	// > algorithm.
+	if (request.referrer === 'no-referrer' || request.referrerPolicy === '') {
+		return null;
+	}
+
+	// 1. Let policy be request's associated referrer policy.
+	const policy = request.referrerPolicy;
+
+	// 2. Let environment be request's client.
+	// Not applicable to Node.js
+
+	// 3. Switch on request's referrer:
+	if (request.referrer === 'about:client') {
+		return 'no-referrer';
+	}
+
+	// "a URL": Let referrerSource be request's referrer.
+	const referrerSource = request.referrer;
+
+	// 4. Let request's referrerURL be the result of stripping referrerSource for use as a referrer.
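+	// Stripping (§8.4 above) drops credentials and the fragment; the
+	// origin-only variant below additionally drops the path and query.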
+	let referrerURL = stripURLForUseAsAReferrer(referrerSource);
+
+	// 5. Let referrerOrigin be the result of stripping referrerSource for use as a referrer, with the
+	//    origin-only flag set to true.
+	let referrerOrigin = stripURLForUseAsAReferrer(referrerSource, true);
+
+	// 6. If the result of serializing referrerURL is a string whose length is greater than 4096, set
+	//    referrerURL to referrerOrigin.
+	if (referrerURL.toString().length > 4096) {
+		referrerURL = referrerOrigin;
+	}
+
+	// 7. The user agent MAY alter referrerURL or referrerOrigin at this point to enforce arbitrary
+	//    policy considerations in the interests of minimizing data leakage. For example, the user
+	//    agent could strip the URL down to an origin, modify its host, replace it with an empty
+	//    string, etc.
+	if (referrerURLCallback) {
+		referrerURL = referrerURLCallback(referrerURL);
+	}
+
+	if (referrerOriginCallback) {
+		referrerOrigin = referrerOriginCallback(referrerOrigin);
+	}
+
+	// 8. Execute the statements corresponding to the value of policy:
+	const currentURL = new URL(request.url);
+
+	switch (policy) {
+		case 'no-referrer':
+			return 'no-referrer';
+
+		case 'origin':
+			return referrerOrigin;
+
+		case 'unsafe-url':
+			return referrerURL;
+
+		case 'strict-origin':
+			// 1. If referrerURL is a potentially trustworthy URL and request's current URL is not a
+			//    potentially trustworthy URL, then return no referrer.
+			if (isUrlPotentiallyTrustworthy(referrerURL) && !isUrlPotentiallyTrustworthy(currentURL)) {
+				return 'no-referrer';
+			}
+
+			// 2. Return referrerOrigin.
+			return referrerOrigin.toString();
+
+		case 'strict-origin-when-cross-origin':
+			// 1. If the origin of referrerURL and the origin of request's current URL are the same, then
+			//    return referrerURL.
+			if (referrerURL.origin === currentURL.origin) {
+				return referrerURL;
+			}
+
+			// 2. If referrerURL is a potentially trustworthy URL and request's current URL is not a
+			//    potentially trustworthy URL, then return no referrer.
+			if (isUrlPotentiallyTrustworthy(referrerURL) && !isUrlPotentiallyTrustworthy(currentURL)) {
+				return 'no-referrer';
+			}
+
+			// 3. Return referrerOrigin.
+			return referrerOrigin;
+
+		case 'same-origin':
+			// 1. If the origin of referrerURL and the origin of request's current URL are the same, then
+			//    return referrerURL.
+			if (referrerURL.origin === currentURL.origin) {
+				return referrerURL;
+			}
+
+			// 2. Return no referrer.
+			return 'no-referrer';
+
+		case 'origin-when-cross-origin':
+			// 1. If the origin of referrerURL and the origin of request's current URL are the same, then
+			//    return referrerURL.
+			if (referrerURL.origin === currentURL.origin) {
+				return referrerURL;
+			}
+
+			// 2. Return referrerOrigin.
+			return referrerOrigin;
+
+		case 'no-referrer-when-downgrade':
+			// 1. If referrerURL is a potentially trustworthy URL and request's current URL is not a
+			//    potentially trustworthy URL, then return no referrer.
+			if (isUrlPotentiallyTrustworthy(referrerURL) && !isUrlPotentiallyTrustworthy(currentURL)) {
+				return 'no-referrer';
+			}
+
+			// 2. Return referrerURL.
+			return referrerURL;
+
+		default:
+			throw new TypeError(`Invalid referrerPolicy: ${policy}`);
+	}
+}
+
+/**
+ * @see {@link https://w3c.github.io/webappsec-referrer-policy/#parse-referrer-policy-from-header|Referrer Policy §8.1. Parse a referrer policy from a Referrer-Policy header}
+ * @param {Headers} headers Response headers
+ * @returns {string} policy
+ */
+export function parseReferrerPolicyFromHeader(headers) {
+	// 1. Let policy-tokens be the result of extracting header list values given `Referrer-Policy`
+	//    and response’s header list.
+	const policyTokens = (headers.get('referrer-policy') || '').split(/[,\s]+/);
+
+	// 2. Let policy be the empty string.
+	let policy = '';
+
+	// 3. For each token in policy-tokens, if token is a referrer policy and token is not the empty
+	//    string, then set policy to token.
+	// Note: This algorithm loops over multiple policy values to allow deployment of new policy
+	//    values with fallbacks for older user agents, as described in § 11.1 Unknown Policy Values.
+	for (const token of policyTokens) {
+		if (token && ReferrerPolicy.has(token)) {
+			policy = token;
+		}
+	}
+
+	// 4. Return policy.
+	return policy;
+}
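+
+// Usage sketch (illustrative only; `response` and `request` below are
+// assumptions: a WHATWG `Headers`-bearing response and a Request-like object
+// exposing `url`, `referrer` and `referrerPolicy`):
+//
+//   const headerPolicy = parseReferrerPolicyFromHeader(response.headers);
+//   request.referrerPolicy =
+//       validateReferrerPolicy(headerPolicy || DEFAULT_REFERRER_POLICY);
+//   const referrer = determineRequestsReferrer(request);
+//   // `referrer` is now null, the string 'no-referrer', or a stripped URL
+//   // suitable for serializing into a `Referer` request header.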