2025-05-09 05:30:08 +02:00
parent 7bb10e7df4
commit 73367bad9e
5322 changed files with 1266973 additions and 313 deletions


@@ -0,0 +1,440 @@
// Partial port of python's argparse module, version 3.9.0 (only wrap and fill functions):
// https://github.com/python/cpython/blob/v3.9.0b4/Lib/textwrap.py
'use strict'
/*
* Text wrapping and filling.
*/
// Copyright (C) 1999-2001 Gregory P. Ward.
// Copyright (C) 2002, 2003 Python Software Foundation.
// Copyright (C) 2020 argparse.js authors
// Originally written by Greg Ward <gward@python.net>
// Hardcode the recognized whitespace characters to the US-ASCII
// whitespace characters. The main reason for doing this is that
// some Unicode spaces (like \u00a0) are non-breaking whitespaces.
//
// This less funky little regex just splits on recognized spaces. E.g.
// "Hello there -- you goof-ball, use the -b option!"
// splits into
// Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
const wordsep_simple_re = /([\t\n\x0b\x0c\r ]+)/
class TextWrapper {
/*
* Object for wrapping/filling text. The public interface consists of
* the wrap() and fill() methods; the other methods are just there for
* subclasses to override in order to tweak the default behaviour.
* If you want to completely replace the main wrapping algorithm,
* you'll probably have to override _wrap_chunks().
*
* Several instance attributes control various aspects of wrapping:
* width (default: 70)
* the maximum width of wrapped lines (unless break_long_words
* is false)
* initial_indent (default: "")
* string that will be prepended to the first line of wrapped
* output. Counts towards the line's width.
* subsequent_indent (default: "")
* string that will be prepended to all lines save the first
* of wrapped output; also counts towards each line's width.
* expand_tabs (default: true)
* Expand tabs in input text to spaces before further processing.
* Each tab will become 0 .. 'tabsize' spaces, depending on its position
* in its line. If false, each tab is treated as a single character.
* tabsize (default: 8)
* Expand tabs in input text to 0 .. 'tabsize' spaces, unless
* 'expand_tabs' is false.
* replace_whitespace (default: true)
* Replace all whitespace characters in the input text by spaces
* after tab expansion. Note that if expand_tabs is false and
* replace_whitespace is true, every tab will be converted to a
* single space!
* fix_sentence_endings (default: false)
* Ensure that sentence-ending punctuation is always followed
* by two spaces. Off by default because the algorithm is
* (unavoidably) imperfect.
* break_long_words (default: true)
* Break words longer than 'width'. If false, those words will not
* be broken, and some lines might be longer than 'width'.
* break_on_hyphens (default: true)
* Allow breaking hyphenated words. If true, wrapping will occur
* preferably on whitespace and right after hyphens in
* compound words.
* drop_whitespace (default: true)
* Drop leading and trailing whitespace from lines.
* max_lines (default: undefined)
* Truncate wrapped lines.
* placeholder (default: ' [...]')
* Append to the last line of truncated text.
*/
constructor(options = {}) {
let {
width = 70,
initial_indent = '',
subsequent_indent = '',
expand_tabs = true,
replace_whitespace = true,
fix_sentence_endings = false,
break_long_words = true,
drop_whitespace = true,
break_on_hyphens = true,
tabsize = 8,
max_lines = undefined,
placeholder = ' [...]'
} = options
this.width = width
this.initial_indent = initial_indent
this.subsequent_indent = subsequent_indent
this.expand_tabs = expand_tabs
this.replace_whitespace = replace_whitespace
this.fix_sentence_endings = fix_sentence_endings
this.break_long_words = break_long_words
this.drop_whitespace = drop_whitespace
this.break_on_hyphens = break_on_hyphens
this.tabsize = tabsize
this.max_lines = max_lines
this.placeholder = placeholder
}
// -- Private methods -----------------------------------------------
// (possibly useful for subclasses to override)
_munge_whitespace(text) {
/*
* _munge_whitespace(text : string) -> string
*
* Munge whitespace in text: expand tabs and convert all other
* whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz"
* becomes " foo bar baz".
*/
if (this.expand_tabs) {
text = text.replace(/\t/g, ' '.repeat(this.tabsize)) // not strictly correct in js
}
if (this.replace_whitespace) {
text = text.replace(/[\t\n\x0b\x0c\r]/g, ' ')
}
return text
}
_split(text) {
/*
* _split(text : string) -> [string]
*
* Split the text to wrap into indivisible chunks. Chunks are
* not quite the same as words; see _wrap_chunks() for full
* details. As an example, the text
* Look, goof-ball -- use the -b option!
* breaks into the following chunks:
* 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
* 'use', ' ', 'the', ' ', '-b', ' ', 'option!'
* if break_on_hyphens is True, or in:
* 'Look,', ' ', 'goof-ball', ' ', '--', ' ',
* 'use', ' ', 'the', ' ', '-b', ' ', 'option!'
* otherwise.
*/
let chunks = text.split(wordsep_simple_re)
chunks = chunks.filter(Boolean)
return chunks
}
_handle_long_word(reversed_chunks, cur_line, cur_len, width) {
/*
* _handle_long_word(chunks : [string],
* cur_line : [string],
* cur_len : int, width : int)
*
* Handle a chunk of text (most likely a word, not whitespace) that
* is too long to fit in any line.
*/
// Figure out when indent is larger than the specified width, and make
// sure at least one character is stripped off on every pass
let space_left
if (width < 1) {
space_left = 1
} else {
space_left = width - cur_len
}
// If we're allowed to break long words, then do so: put as much
// of the next chunk onto the current line as will fit.
if (this.break_long_words) {
cur_line.push(reversed_chunks[reversed_chunks.length - 1].slice(0, space_left))
reversed_chunks[reversed_chunks.length - 1] = reversed_chunks[reversed_chunks.length - 1].slice(space_left)
// Otherwise, we have to preserve the long word intact. Only add
// it to the current line if there's nothing already there --
// that minimizes how much we violate the width constraint.
} else if (cur_line.length === 0) {
cur_line.push(reversed_chunks.pop())
}
// If we're not allowed to break long words, and there's already
// text on the current line, do nothing. Next time through the
// main loop of _wrap_chunks(), we'll wind up here again, but
// cur_len will be zero, so the next line will be entirely
// devoted to the long word that we can't handle right now.
}
_wrap_chunks(chunks) {
/*
* _wrap_chunks(chunks : [string]) -> [string]
*
* Wrap a sequence of text chunks and return a list of lines of
* length 'self.width' or less. (If 'break_long_words' is false,
* some lines may be longer than this.) Chunks correspond roughly
* to words and the whitespace between them: each chunk is
* indivisible (modulo 'break_long_words'), but a line break can
* come between any two chunks. Chunks should not have internal
* whitespace; ie. a chunk is either all whitespace or a "word".
* Whitespace chunks will be removed from the beginning and end of
* lines, but apart from that whitespace is preserved.
*/
let lines = []
let indent
if (this.width <= 0) {
throw Error(`invalid width ${this.width} (must be > 0)`)
}
if (this.max_lines !== undefined) {
if (this.max_lines > 1) {
indent = this.subsequent_indent
} else {
indent = this.initial_indent
}
if (indent.length + this.placeholder.trimStart().length > this.width) {
throw Error('placeholder too large for max width')
}
}
// Arrange in reverse order so items can be efficiently popped
// from a stack of chunks.
chunks = chunks.reverse()
while (chunks.length > 0) {
// Start the list of chunks that will make up the current line.
// cur_len is just the length of all the chunks in cur_line.
let cur_line = []
let cur_len = 0
// Figure out which static string will prefix this line.
let indent
if (lines.length > 0) {
indent = this.subsequent_indent
} else {
indent = this.initial_indent
}
// Maximum width for this line.
let width = this.width - indent.length
// First chunk on line is whitespace -- drop it, unless this
// is the very beginning of the text (ie. no lines started yet).
if (this.drop_whitespace && chunks[chunks.length - 1].trim() === '' && lines.length > 0) {
chunks.pop()
}
while (chunks.length > 0) {
let l = chunks[chunks.length - 1].length
// Can at least squeeze this chunk onto the current line.
if (cur_len + l <= width) {
cur_line.push(chunks.pop())
cur_len += l
// Nope, this line is full.
} else {
break
}
}
// The current line is full, and the next chunk is too big to
// fit on *any* line (not just this one).
if (chunks.length && chunks[chunks.length - 1].length > width) {
this._handle_long_word(chunks, cur_line, cur_len, width)
cur_len = cur_line.map(l => l.length).reduce((a, b) => a + b, 0)
}
// If the last chunk on this line is all whitespace, drop it.
if (this.drop_whitespace && cur_line.length > 0 && cur_line[cur_line.length - 1].trim() === '') {
cur_len -= cur_line[cur_line.length - 1].length
cur_line.pop()
}
if (cur_line.length > 0) {
if (this.max_lines === undefined ||
lines.length + 1 < this.max_lines ||
(chunks.length === 0 ||
this.drop_whitespace &&
chunks.length === 1 &&
!chunks[0].trim()) && cur_len <= width) {
// Convert current line back to a string and store it in
// list of all lines (return value).
lines.push(indent + cur_line.join(''))
} else {
let had_break = false
while (cur_line.length > 0) {
if (cur_line[cur_line.length - 1].trim() &&
cur_len + this.placeholder.length <= width) {
cur_line.push(this.placeholder)
lines.push(indent + cur_line.join(''))
had_break = true
break
}
cur_len -= cur_line[cur_line.length - 1].length
cur_line.pop()
}
if (!had_break) {
if (lines.length > 0) {
let prev_line = lines[lines.length - 1].trimEnd()
if (prev_line.length + this.placeholder.length <=
this.width) {
lines[lines.length - 1] = prev_line + this.placeholder
break
}
}
lines.push(indent + this.placeholder.trimStart())
}
break
}
}
}
return lines
}
_split_chunks(text) {
text = this._munge_whitespace(text)
return this._split(text)
}
// -- Public interface ----------------------------------------------
wrap(text) {
/*
* wrap(text : string) -> [string]
*
* Reformat the single paragraph in 'text' so it fits in lines of
* no more than 'self.width' columns, and return a list of wrapped
* lines. Tabs in 'text' are expanded with string.expandtabs(),
* and all other whitespace characters (including newline) are
* converted to space.
*/
let chunks = this._split_chunks(text)
// not implemented in js
//if (this.fix_sentence_endings) {
// this._fix_sentence_endings(chunks)
//}
return this._wrap_chunks(chunks)
}
fill(text) {
/*
* fill(text : string) -> string
*
* Reformat the single paragraph in 'text' to fit in lines of no
* more than 'self.width' columns, and return a new string
* containing the entire wrapped paragraph.
*/
return this.wrap(text).join('\n')
}
}
// -- Convenience interface ---------------------------------------------
function wrap(text, options = {}) {
/*
* Wrap a single paragraph of text, returning a list of wrapped lines.
*
* Reformat the single paragraph in 'text' so it fits in lines of no
* more than 'width' columns, and return a list of wrapped lines. By
* default, tabs in 'text' are expanded with string.expandtabs(), and
* all other whitespace characters (including newline) are converted to
* space. See TextWrapper class for available keyword args to customize
* wrapping behaviour.
*/
let { width = 70, ...kwargs } = options
let w = new TextWrapper(Object.assign({ width }, kwargs))
return w.wrap(text)
}
function fill(text, options = {}) {
/*
* Fill a single paragraph of text, returning a new string.
*
* Reformat the single paragraph in 'text' to fit in lines of no more
* than 'width' columns, and return a new string containing the entire
* wrapped paragraph. As with wrap(), tabs are expanded and other
* whitespace characters converted to space. See TextWrapper class for
* available keyword args to customize wrapping behaviour.
*/
let { width = 70, ...kwargs } = options
let w = new TextWrapper(Object.assign({ width }, kwargs))
return w.fill(text)
}
// -- Loosely related functionality -------------------------------------
let _whitespace_only_re = /^[ \t]+$/mg
let _leading_whitespace_re = /(^[ \t]*)(?:[^ \t\n])/mg
function dedent(text) {
/*
* Remove any common leading whitespace from every line in `text`.
*
* This can be used to make triple-quoted strings line up with the left
* edge of the display, while still presenting them in the source code
* in indented form.
*
* Note that tabs and spaces are both treated as whitespace, but they
* are not equal: the lines " hello" and "\\thello" are
* considered to have no common leading whitespace.
*
* Entirely blank lines are normalized to a newline character.
*/
// Look for the longest leading string of spaces and tabs common to
// all lines.
let margin = undefined
text = text.replace(_whitespace_only_re, '')
let indents = text.match(_leading_whitespace_re) || []
for (let indent of indents) {
indent = indent.slice(0, -1)
if (margin === undefined) {
margin = indent
// Current line more deeply indented than previous winner:
// no change (previous winner is still on top).
} else if (indent.startsWith(margin)) {
// pass
// Current line consistent with and no deeper than previous winner:
// it's the new winner.
} else if (margin.startsWith(indent)) {
margin = indent
// Find the largest common whitespace between current line and previous
// winner.
} else {
for (let i = 0; i < margin.length && i < indent.length; i++) {
if (margin[i] !== indent[i]) {
margin = margin.slice(0, i)
break
}
}
}
}
if (margin) {
text = text.replace(new RegExp('^' + margin, 'mg'), '')
}
return text
}
module.exports = { wrap, fill, dedent }
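A quick usage sketch of the exported helpers (the require path below is an assumption for illustration; this module backs argparse's help-text formatting):

```ts
// Usage sketch — the relative path is assumed, not part of the module above.
const { wrap, fill, dedent } = require('./textwrap')

wrap('The quick brown fox jumps over the lazy dog', { width: 20 })
// -> [ 'The quick brown fox', 'jumps over the lazy', 'dog' ]

fill('The quick brown fox jumps over the lazy dog', {
  width: 20,
  initial_indent: '  ',
  subsequent_indent: '    '
})
// -> '  The quick brown\n    fox jumps over\n    the lazy dog'

// dedent() removes the common leading whitespace but keeps relative indentation.
dedent('    first line\n      second line\n')
// -> 'first line\n  second line\n'
```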


@@ -0,0 +1,39 @@
import type { AllContext, RouteById } from './routeInfo'
import type { AnyRouter } from './router'
import type { Expand, StrictOrFrom } from './utils'
export interface UseRouteContextBaseOptions<
TRouter extends AnyRouter,
TFrom,
TStrict extends boolean,
TSelected,
> {
select?: (
search: ResolveUseRouteContext<TRouter, TFrom, TStrict>,
) => TSelected
}
export type UseRouteContextOptions<
TRouter extends AnyRouter,
TFrom extends string | undefined,
TStrict extends boolean,
TSelected,
> = StrictOrFrom<TRouter, TFrom, TStrict> &
UseRouteContextBaseOptions<TRouter, TFrom, TStrict, TSelected>
export type ResolveUseRouteContext<
TRouter extends AnyRouter,
TFrom,
TStrict extends boolean,
> = TStrict extends false
? AllContext<TRouter['routeTree']>
: Expand<RouteById<TRouter['routeTree'], TFrom>['types']['allContext']>
export type UseRouteContextResult<
TRouter extends AnyRouter,
TFrom,
TStrict extends boolean,
TSelected,
> = unknown extends TSelected
? ResolveUseRouteContext<TRouter, TFrom, TStrict>
: TSelected
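The `unknown extends TSelected` conditional above is the usual trick for detecting an omitted selector; a minimal, self-contained illustration with hypothetical names:

```ts
// Hypothetical reduction of the pattern used by UseRouteContextResult:
// with no selector type supplied, the resolved context type is returned;
// a concrete TSelected wins otherwise.
type Result<TContext, TSelected = unknown> =
  unknown extends TSelected ? TContext : TSelected

type WithoutSelector = Result<{ userId: string }>       // { userId: string }
type WithSelector = Result<{ userId: string }, string>  // string
```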


@@ -0,0 +1 @@
export declare function shallow<T>(valueA: T, valueB: T): boolean;
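Assuming the conventional shallow-equality semantics this signature suggests (top-level entries compared with `Object.is`, no deep recursion), usage looks roughly like this — an illustrative sketch, not documentation of the actual implementation:

```ts
// Illustrative only; the real behaviour is defined by the implementing module.
shallow({ a: 1, b: 2 }, { a: 1, b: 2 })    // true  — same top-level entries
shallow({ a: { b: 1 } }, { a: { b: 1 } })  // false — nested objects differ by reference
shallow([1, 2, 3], [1, 2, 3])              // true  — arrays compared index by index
```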


@@ -0,0 +1,224 @@
/**
* @fileoverview Rule to flag unnecessary bind calls
* @author Bence Dányi <bence@danyi.me>
*/
"use strict";
//------------------------------------------------------------------------------
// Requirements
//------------------------------------------------------------------------------
const astUtils = require("./utils/ast-utils");
//------------------------------------------------------------------------------
// Helpers
//------------------------------------------------------------------------------
const SIDE_EFFECT_FREE_NODE_TYPES = new Set([
"Literal",
"Identifier",
"ThisExpression",
"FunctionExpression",
]);
//------------------------------------------------------------------------------
// Rule Definition
//------------------------------------------------------------------------------
/** @type {import('../shared/types').Rule} */
module.exports = {
meta: {
type: "suggestion",
docs: {
description: "Disallow unnecessary calls to `.bind()`",
recommended: false,
url: "https://eslint.org/docs/latest/rules/no-extra-bind",
},
schema: [],
fixable: "code",
messages: {
unexpected: "The function binding is unnecessary.",
},
},
create(context) {
const sourceCode = context.sourceCode;
let scopeInfo = null;
/**
* Checks if a node is free of side effects.
*
* This check is stricter than it needs to be, in order to keep the implementation simple.
* @param {ASTNode} node A node to check.
* @returns {boolean} True if the node is known to be side-effect free, false otherwise.
*/
function isSideEffectFree(node) {
return SIDE_EFFECT_FREE_NODE_TYPES.has(node.type);
}
/**
* Reports a given function node.
* @param {ASTNode} node A node to report. This is a FunctionExpression or
* an ArrowFunctionExpression.
* @returns {void}
*/
function report(node) {
const memberNode = node.parent;
const callNode =
memberNode.parent.type === "ChainExpression"
? memberNode.parent.parent
: memberNode.parent;
context.report({
node: callNode,
messageId: "unexpected",
loc: memberNode.property.loc,
fix(fixer) {
if (!isSideEffectFree(callNode.arguments[0])) {
return null;
}
/*
* The list of the first/last token pair of a removal range.
* This is two parts because closing parentheses may exist between the method name and arguments.
* E.g. `(function(){}.bind ) (obj)`
* ^^^^^ ^^^^^ < removal ranges
* E.g. `(function(){}?.['bind'] ) ?.(obj)`
* ^^^^^^^^^^ ^^^^^^^ < removal ranges
*/
const tokenPairs = [
[
// `.`, `?.`, or `[` token.
sourceCode.getTokenAfter(
memberNode.object,
astUtils.isNotClosingParenToken,
),
// property name or `]` token.
sourceCode.getLastToken(memberNode),
],
[
// `?.` or `(` token of arguments.
sourceCode.getTokenAfter(
memberNode,
astUtils.isNotClosingParenToken,
),
// `)` token of arguments.
sourceCode.getLastToken(callNode),
],
];
const firstTokenToRemove = tokenPairs[0][0];
const lastTokenToRemove = tokenPairs[1][1];
if (
sourceCode.commentsExistBetween(
firstTokenToRemove,
lastTokenToRemove,
)
) {
return null;
}
return tokenPairs.map(([start, end]) =>
fixer.removeRange([start.range[0], end.range[1]]),
);
},
});
}
/**
* Checks whether or not a given function node is the callee of `.bind()`
* method.
*
* e.g. `(function() {}.bind(foo))`
* @param {ASTNode} node A node to report. This is a FunctionExpression or
* an ArrowFunctionExpression.
* @returns {boolean} `true` if the node is the callee of `.bind()` method.
*/
function isCalleeOfBindMethod(node) {
if (!astUtils.isSpecificMemberAccess(node.parent, null, "bind")) {
return false;
}
// The node of `*.bind` member access.
const bindNode =
node.parent.parent.type === "ChainExpression"
? node.parent.parent
: node.parent;
return (
bindNode.parent.type === "CallExpression" &&
bindNode.parent.callee === bindNode &&
bindNode.parent.arguments.length === 1 &&
bindNode.parent.arguments[0].type !== "SpreadElement"
);
}
/**
* Adds a scope information object to the stack.
* @param {ASTNode} node A node to add. This node is a FunctionExpression
* or a FunctionDeclaration node.
* @returns {void}
*/
function enterFunction(node) {
scopeInfo = {
isBound: isCalleeOfBindMethod(node),
thisFound: false,
upper: scopeInfo,
};
}
/**
* Removes the scope information object from the top of the stack.
* At the same time, this reports the function node if the function has
* `.bind()` and the `this` keywords found.
* @param {ASTNode} node A node to remove. This node is a
* FunctionExpression or a FunctionDeclaration node.
* @returns {void}
*/
function exitFunction(node) {
if (scopeInfo.isBound && !scopeInfo.thisFound) {
report(node);
}
scopeInfo = scopeInfo.upper;
}
/**
* Reports a given arrow function if the function is callee of `.bind()`
* method.
* @param {ASTNode} node A node to report. This node is an
* ArrowFunctionExpression.
* @returns {void}
*/
function exitArrowFunction(node) {
if (isCalleeOfBindMethod(node)) {
report(node);
}
}
/**
* Set the mark as the `this` keyword was found in this scope.
* @returns {void}
*/
function markAsThisFound() {
if (scopeInfo) {
scopeInfo.thisFound = true;
}
}
return {
"ArrowFunctionExpression:exit": exitArrowFunction,
FunctionDeclaration: enterFunction,
"FunctionDeclaration:exit": exitFunction,
FunctionExpression: enterFunction,
"FunctionExpression:exit": exitFunction,
ThisExpression: markAsThisFound,
};
},
};
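For orientation, a small sketch of what this rule reports and leaves alone, per the logic above (report when a bound function never references `this`, or when an arrow function is bound; the fixer only removes the call when its argument is side-effect free):

```ts
const user = { name: 'Ada' }

// Reported: the callback never uses `this`, so `.bind(user)` is a no-op.
// The fixer strips `.bind(user)` because `user` is a side-effect-free Identifier.
const shout = function () {
  return 'hello!'
}.bind(user)

// Reported: arrow functions ignore `.bind()` entirely.
const getName = (() => user.name).bind(user)

// Not reported: the function actually reads `this`, so the binding matters.
const describe = function (this: { name: string }) {
  return 'name: ' + this.name
}.bind(user)
```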


@@ -0,0 +1,186 @@
export class Buffer extends Uint8Array {
length: number
write(string: string, offset?: number, length?: number, encoding?: string): number;
toString(encoding?: string, start?: number, end?: number): string;
toJSON(): { type: 'Buffer', data: any[] };
equals(otherBuffer: Buffer): boolean;
compare(otherBuffer: Buffer, targetStart?: number, targetEnd?: number, sourceStart?: number, sourceEnd?: number): number;
copy(targetBuffer: Buffer, targetStart?: number, sourceStart?: number, sourceEnd?: number): number;
slice(start?: number, end?: number): Buffer;
writeUIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number;
writeUIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number;
writeIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number;
writeIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number;
readUIntLE(offset: number, byteLength: number, noAssert?: boolean): number;
readUIntBE(offset: number, byteLength: number, noAssert?: boolean): number;
readIntLE(offset: number, byteLength: number, noAssert?: boolean): number;
readIntBE(offset: number, byteLength: number, noAssert?: boolean): number;
readUInt8(offset: number, noAssert?: boolean): number;
readUInt16LE(offset: number, noAssert?: boolean): number;
readUInt16BE(offset: number, noAssert?: boolean): number;
readUInt32LE(offset: number, noAssert?: boolean): number;
readUInt32BE(offset: number, noAssert?: boolean): number;
readInt8(offset: number, noAssert?: boolean): number;
readInt16LE(offset: number, noAssert?: boolean): number;
readInt16BE(offset: number, noAssert?: boolean): number;
readInt32LE(offset: number, noAssert?: boolean): number;
readInt32BE(offset: number, noAssert?: boolean): number;
readFloatLE(offset: number, noAssert?: boolean): number;
readFloatBE(offset: number, noAssert?: boolean): number;
readDoubleLE(offset: number, noAssert?: boolean): number;
readDoubleBE(offset: number, noAssert?: boolean): number;
reverse(): this;
swap16(): Buffer;
swap32(): Buffer;
swap64(): Buffer;
writeUInt8(value: number, offset: number, noAssert?: boolean): number;
writeUInt16LE(value: number, offset: number, noAssert?: boolean): number;
writeUInt16BE(value: number, offset: number, noAssert?: boolean): number;
writeUInt32LE(value: number, offset: number, noAssert?: boolean): number;
writeUInt32BE(value: number, offset: number, noAssert?: boolean): number;
writeInt8(value: number, offset: number, noAssert?: boolean): number;
writeInt16LE(value: number, offset: number, noAssert?: boolean): number;
writeInt16BE(value: number, offset: number, noAssert?: boolean): number;
writeInt32LE(value: number, offset: number, noAssert?: boolean): number;
writeInt32BE(value: number, offset: number, noAssert?: boolean): number;
writeFloatLE(value: number, offset: number, noAssert?: boolean): number;
writeFloatBE(value: number, offset: number, noAssert?: boolean): number;
writeDoubleLE(value: number, offset: number, noAssert?: boolean): number;
writeDoubleBE(value: number, offset: number, noAssert?: boolean): number;
fill(value: any, offset?: number, end?: number): this;
indexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number;
lastIndexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number;
includes(value: string | number | Buffer, byteOffset?: number, encoding?: string): boolean;
/**
* Allocates a new buffer containing the given {str}.
*
* @param str String to store in buffer.
* @param encoding encoding to use, optional. Default is 'utf8'
*/
constructor (str: string, encoding?: string);
/**
* Allocates a new buffer of {size} octets.
*
* @param size count of octets to allocate.
*/
constructor (size: number);
/**
* Allocates a new buffer containing the given {array} of octets.
*
* @param array The octets to store.
*/
constructor (array: Uint8Array);
/**
* Produces a Buffer backed by the same allocated memory as
* the given {ArrayBuffer}.
*
*
* @param arrayBuffer The ArrayBuffer with which to share memory.
*/
constructor (arrayBuffer: ArrayBuffer);
/**
* Allocates a new buffer containing the given {array} of octets.
*
* @param array The octets to store.
*/
constructor (array: any[]);
/**
* Copies the passed {buffer} data onto a new {Buffer} instance.
*
* @param buffer The buffer to copy.
*/
constructor (buffer: Buffer);
prototype: Buffer;
/**
* Allocates a new Buffer using an {array} of octets.
*
* @param array
*/
static from(array: any[]): Buffer;
/**
* When passed a reference to the .buffer property of a TypedArray instance,
* the newly created Buffer will share the same allocated memory as the TypedArray.
* The optional {byteOffset} and {length} arguments specify a memory range
* within the {arrayBuffer} that will be shared by the Buffer.
*
* @param arrayBuffer The .buffer property of a TypedArray or a new ArrayBuffer()
* @param byteOffset
* @param length
*/
static from(arrayBuffer: ArrayBuffer, byteOffset?: number, length?: number): Buffer;
/**
* Copies the passed {buffer} data onto a new Buffer instance.
*
* @param buffer
*/
static from(buffer: Buffer | Uint8Array): Buffer;
/**
* Creates a new Buffer containing the given JavaScript string {str}.
* If provided, the {encoding} parameter identifies the character encoding.
* If not provided, {encoding} defaults to 'utf8'.
*
* @param str
*/
static from(str: string, encoding?: string): Buffer;
/**
* Returns true if {obj} is a Buffer
*
* @param obj object to test.
*/
static isBuffer(obj: any): obj is Buffer;
/**
* Returns true if {encoding} is a valid encoding argument.
* Valid string encodings in Node 0.12: 'ascii'|'utf8'|'utf16le'|'ucs2'(alias of 'utf16le')|'base64'|'binary'(deprecated)|'hex'
*
* @param encoding string to test.
*/
static isEncoding(encoding: string): boolean;
/**
* Gives the actual byte length of a string. encoding defaults to 'utf8'.
* This is not the same as String.prototype.length since that returns the number of characters in a string.
*
* @param string string to test.
* @param encoding encoding used to evaluate (defaults to 'utf8')
*/
static byteLength(string: string, encoding?: string): number;
/**
* Returns a buffer which is the result of concatenating all the buffers in the list together.
*
* If the list has no items, or if the totalLength is 0, then it returns a zero-length buffer.
* If the list has exactly one item, then the first item of the list is returned.
* If the list has more than one item, then a new Buffer is created.
*
* @param list An array of Buffer objects to concatenate
* @param totalLength Total length of the buffers when concatenated.
* If totalLength is not provided, it is read from the buffers in the list. However, this adds an additional loop to the function, so it is faster to provide the length explicitly.
*/
static concat(list: Buffer[], totalLength?: number): Buffer;
/**
* The same as buf1.compare(buf2).
*/
static compare(buf1: Buffer, buf2: Buffer): number;
/**
* Allocates a new buffer of {size} octets.
*
* @param size count of octets to allocate.
* @param fill if specified, buffer will be initialized by calling buf.fill(fill).
* If parameter is omitted, buffer will be filled with zeros.
* @param encoding encoding used for call to buf.fill while initializing
*/
static alloc(size: number, fill?: string | Buffer | number, encoding?: string): Buffer;
/**
* Allocates a new buffer of {size} octets, leaving memory not initialized, so the contents
* of the newly created Buffer are unknown and may contain sensitive data.
*
* @param size count of octets to allocate
*/
static allocUnsafe(size: number): Buffer;
/**
* Allocates a new non-pooled buffer of {size} octets, leaving memory not initialized, so the contents
* of the newly created Buffer are unknown and may contain sensitive data.
*
* @param size count of octets to allocate
*/
static allocUnsafeSlow(size: number): Buffer;
}
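A brief usage sketch of the static methods declared above, following standard Node.js `Buffer` semantics:

```ts
import { Buffer } from 'buffer'

// byteLength() counts bytes, not characters.
Buffer.byteLength('héllo', 'utf8')  // 6 — five characters, six bytes

// concat() joins buffers; passing totalLength skips the extra length-summing loop.
const hello = Buffer.from('héllo', 'utf8')
const joined = Buffer.concat([hello, Buffer.from(' world')], hello.length + 6)
joined.toString('utf8')             // 'héllo world'

// alloc() zero-fills; allocUnsafe() does not, so overwrite it before reading.
const zeroed = Buffer.alloc(4)
const scratch = Buffer.allocUnsafe(4)
scratch.writeUInt32BE(0xdeadbeef, 0)
```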


@@ -0,0 +1 @@
{"version":3,"names":["_index","require","_index2","prependToMemberExpression","member","prepend","isSuper","object","Error","memberExpression"],"sources":["../../src/modifications/prependToMemberExpression.ts"],"sourcesContent":["import { memberExpression } from \"../builders/generated/index.ts\";\nimport { isSuper } from \"../index.ts\";\nimport type * as t from \"../index.ts\";\n\n/**\n * Prepend a node to a member expression.\n */\nexport default function prependToMemberExpression<\n T extends Pick<t.MemberExpression, \"object\" | \"property\">,\n>(member: T, prepend: t.MemberExpression[\"object\"]): T {\n if (isSuper(member.object)) {\n throw new Error(\n \"Cannot prepend node to super property access (`super.foo`).\",\n );\n }\n member.object = memberExpression(prepend, member.object);\n\n return member;\n}\n"],"mappings":";;;;;;AAAA,IAAAA,MAAA,GAAAC,OAAA;AACA,IAAAC,OAAA,GAAAD,OAAA;AAMe,SAASE,yBAAyBA,CAE/CC,MAAS,EAAEC,OAAqC,EAAK;EACrD,IAAI,IAAAC,eAAO,EAACF,MAAM,CAACG,MAAM,CAAC,EAAE;IAC1B,MAAM,IAAIC,KAAK,CACb,6DACF,CAAC;EACH;EACAJ,MAAM,CAACG,MAAM,GAAG,IAAAE,uBAAgB,EAACJ,OAAO,EAAED,MAAM,CAACG,MAAM,CAAC;EAExD,OAAOH,MAAM;AACf","ignoreList":[]}


@@ -0,0 +1,50 @@
/**
* @fileoverview Interpolate keys from an object into a string with {{ }} markers.
* @author Jed Fox
*/
"use strict";
//------------------------------------------------------------------------------
// Public Interface
//------------------------------------------------------------------------------
/**
* Returns a global expression matching placeholders in messages.
* @returns {RegExp} Global regular expression matching placeholders
*/
function getPlaceholderMatcher() {
return /\{\{([^{}]+?)\}\}/gu;
}
/**
* Replaces {{ placeholders }} in the message with the provided data.
* Does not replace placeholders not available in the data.
* @param {string} text Original message with potential placeholders
* @param {Record<string, string>} data Map of placeholder name to its value
* @returns {string} Message with replaced placeholders
*/
function interpolate(text, data) {
if (!data) {
return text;
}
const matcher = getPlaceholderMatcher();
// Substitution content for any {{ }} markers.
return text.replace(matcher, (fullMatch, termWithWhitespace) => {
const term = termWithWhitespace.trim();
if (term in data) {
return data[term];
}
// Preserve old behavior: If parameter name not provided, don't replace it.
return fullMatch;
});
}
module.exports = {
getPlaceholderMatcher,
interpolate,
};
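For example, given the two helpers above (the require path is an assumption for illustration):

```ts
const { interpolate, getPlaceholderMatcher } = require('./interpolate')

interpolate('Expected {{ expected }} but found {{ actual }}.', {
  expected: 'a string',
  actual: 'a number',
})
// -> 'Expected a string but found a number.'

// Placeholders with no matching key are left untouched.
interpolate('Unknown option {{ name }}.', {})
// -> 'Unknown option {{ name }}.'

// The matcher is global, so prefer matchAll() over a stateful exec() loop.
const matches = [...'{{ a }} and {{ b }}'.matchAll(getPlaceholderMatcher())]
matches.length // 2
```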


@@ -0,0 +1,62 @@
{{# def.definitions }}
{{# def.errors }}
{{# def.setupKeyword }}
{{# def.$data }}
{{? ($schema || $isData) && it.opts.uniqueItems !== false }}
{{? $isData }}
var {{=$valid}};
if ({{=$schemaValue}} === false || {{=$schemaValue}} === undefined)
{{=$valid}} = true;
else if (typeof {{=$schemaValue}} != 'boolean')
{{=$valid}} = false;
else {
{{?}}
var i = {{=$data}}.length
, {{=$valid}} = true
, j;
if (i > 1) {
{{
var $itemType = it.schema.items && it.schema.items.type
, $typeIsArray = Array.isArray($itemType);
}}
{{? !$itemType || $itemType == 'object' || $itemType == 'array' ||
($typeIsArray && ($itemType.indexOf('object') >= 0 || $itemType.indexOf('array') >= 0)) }}
outer:
for (;i--;) {
for (j = i; j--;) {
if (equal({{=$data}}[i], {{=$data}}[j])) {
{{=$valid}} = false;
break outer;
}
}
}
{{??}}
var itemIndices = {}, item;
for (;i--;) {
var item = {{=$data}}[i];
{{ var $method = 'checkDataType' + ($typeIsArray ? 's' : ''); }}
if ({{= it.util[$method]($itemType, 'item', it.opts.strictNumbers, true) }}) continue;
{{? $typeIsArray}}
if (typeof item == 'string') item = '"' + item;
{{?}}
if (typeof itemIndices[item] == 'number') {
{{=$valid}} = false;
j = itemIndices[item];
break;
}
itemIndices[item] = i;
}
{{?}}
}
{{? $isData }} } {{?}}
if (!{{=$valid}}) {
{{# def.error:'uniqueItems' }}
} {{? $breakOnError }} else { {{?}}
{{??}}
{{? $breakOnError }} if (true) { {{?}}
{{?}}
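For orientation, a rough standalone sketch of the scalar fast path this template generates (not Ajv's actual output): each value is keyed into an index map, and strings are prefixed with a quote when several scalar types are allowed so that `1` and `'1'` do not collide as object keys; the general case instead falls back to the pairwise `equal()` loop above.

```ts
// Rough sketch of the scalar fast path; illustrative only.
function uniqueScalars(data: Array<string | number | boolean | null>): boolean {
  const itemIndices: Record<string, number> = {}
  for (let i = data.length; i--; ) {
    let item: string | number | boolean | null = data[i]
    // Mirror the template's `item = '"' + item` trick for mixed scalar types.
    if (typeof item === 'string') item = '"' + item
    const key = String(item)
    if (typeof itemIndices[key] === 'number') return false // duplicate found
    itemIndices[key] = i
  }
  return true
}

uniqueScalars([1, '1', true]) // true  — the quote prefix keeps 1 and '1' apart
uniqueScalars([2, 3, 2])      // false
```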


@@ -0,0 +1,163 @@
# prebuild-install
> **A command line tool to easily install prebuilt binaries for multiple versions of Node.js & Electron on a specific platform.**
> By default it downloads prebuilt binaries from a GitHub release.
[![npm](https://img.shields.io/npm/v/prebuild-install.svg)](https://www.npmjs.com/package/prebuild-install)
![Node version](https://img.shields.io/node/v/prebuild-install.svg)
[![Test](https://img.shields.io/github/workflow/status/prebuild/prebuild-install/Test?label=test)](https://github.com/prebuild/prebuild-install/actions/workflows/test.yml)
[![Standard](https://img.shields.io/badge/standard-informational?logo=javascript&logoColor=fff)](https://standardjs.com)
[![Common Changelog](https://common-changelog.org/badge.svg)](https://common-changelog.org)
## Note
**Instead of [`prebuild`](https://github.com/prebuild/prebuild) paired with [`prebuild-install`](https://github.com/prebuild/prebuild-install), we recommend [`prebuildify`](https://github.com/prebuild/prebuildify) paired with [`node-gyp-build`](https://github.com/prebuild/node-gyp-build).**
With `prebuildify`, all prebuilt binaries are shipped inside the package that is published to npm, which means there's no need for a separate download step like you find in `prebuild`. The irony of this approach is that it is faster to download all prebuilt binaries for every platform when they are bundled than it is to download a single prebuilt binary in an install script.
Upsides:
1. No extra download step, making it more reliable and faster to install.
2. Supports changing runtime versions locally and using the same install between Node.js and Electron. Reinstalling or rebuilding is not necessary, as all prebuilt binaries are in the npm tarball and the correct one is simply picked on runtime.
3. The `node-gyp-build` runtime dependency is dependency-free and will remain so out of principle, because introducing dependencies would negate the shorter install time.
4. Prebuilt binaries work even if npm install scripts are disabled.
5. The npm package checksum covers prebuilt binaries too.
Downsides:
1. The installed npm package is larger on disk. Using [Node-API](https://nodejs.org/api/n-api.html) alleviates this because Node-API binaries are runtime-agnostic and forward-compatible.
2. Publishing is mildly more complicated, because `npm publish` must be done after compiling and fetching prebuilt binaries (typically in CI).
## Usage
Use [`prebuild`](https://github.com/prebuild/prebuild) to create and upload prebuilt binaries. Then change your package.json install script to:
```json
{
"scripts": {
"install": "prebuild-install || node-gyp rebuild"
}
}
```
When a consumer then installs your package with npm thus triggering the above install script, `prebuild-install` will download a suitable prebuilt binary, or exit with a non-zero exit code if there is none, which triggers `node-gyp rebuild` in order to build from source.
Options (see below) can be passed to `prebuild-install` like so:
```json
{
"scripts": {
"install": "prebuild-install -r napi || node-gyp rebuild"
}
}
```
### Help
```
prebuild-install [options]
--download -d [url] (download prebuilds, no url means github)
--target -t version (version to install for)
--runtime -r runtime (Node runtime [node, napi or electron] to build or install for, default is node)
--path -p path (make a prebuild-install here)
--token -T gh-token (github token for private repos)
--arch arch (target CPU architecture, see Node OS module docs, default is current arch)
--platform platform (target platform, see Node OS module docs, default is current platform)
--tag-prefix <prefix> (github tag prefix, default is "v")
--build-from-source (skip prebuild download)
--verbose (log verbosely)
--libc (use provided libc rather than system default)
--debug (set Debug or Release configuration)
--version (print prebuild-install version and exit)
```
When `prebuild-install` is run via an `npm` script, the options `--build-from-source`, `--debug`, `--download`, `--target`, `--runtime`, `--arch`, `--platform` and `--libc` may be passed through via arguments given to the `npm` command.
Alternatively, you can set the environment variables `npm_config_build_from_source=true`, `npm_config_platform`, `npm_config_arch`, `npm_config_target`, `npm_config_runtime` and `npm_config_libc`.
### Libc
On non-glibc Linux platforms, the libc name is appended to the platform name; for example, musl-based environments are reported as `linuxmusl`. If `--libc=glibc` is passed as an option, the libc part is dropped and the platform is reported as plain `linux`. This can be used, for example, to build cross-platform packages on Alpine Linux.
### Private Repositories
`prebuild-install` supports downloading prebuilds from private GitHub repositories using the `-T <github-token>` option:
```
$ prebuild-install -T <github-token>
```
If you don't want to pass the token on the command line, you can put it in `~/.prebuild-installrc`:
```
token=<github-token>
```
Alternatively you can specify it in the `prebuild-install_token` environment variable.
Note that using a GitHub token means the API is used to resolve the correct release, so you are subject to the [GitHub rate limit](https://developer.github.com/v3/rate_limit/).
### Create GitHub Token
To create a token:
- Go to [this page](https://github.com/settings/tokens)
- Click the `Generate new token` button
- Give the token a name and click the `Generate token` button, see below
![prebuild-token](https://cloud.githubusercontent.com/assets/13285808/20844584/d0b85268-b8c0-11e6-8b08-2b19522165a9.png)
The default scopes should be fine.
### Custom binaries
The end user can override the binary download location through environment variables in their .npmrc file.
The variable needs to match the pattern `% your package name %_binary_host` or `% your package name %_binary_host_mirror`. For example:
```
leveldown_binary_host=http://overriden-host.com/overriden-path
```
Note that the package version subpath and file name will still be appended.
So if you are installing `leveldown@1.2.3` the resulting url will be:
```
http://overriden-host.com/overriden-path/v1.2.3/leveldown-v1.2.3-node-v57-win32-x64.tar.gz
```
#### Local prebuilds
If you want to use prebuilds from your local filesystem, you can use the `% your package name %_local_prebuilds` .npmrc variable to set a path to the folder containing prebuilds. For example:
```
leveldown_local_prebuilds=/path/to/prebuilds
```
This option will look directly in that folder for bundles created with `prebuild`, for example:
```
/path/to/prebuilds/leveldown-v1.2.3-node-v57-win32-x64.tar.gz
```
Non-absolute paths resolve relative to the directory of the package invoking prebuild-install, e.g. for nested dependencies.
### Cache
All prebuilt binaries are cached to minimize traffic: `prebuild-install` first looks for a binary in the cache and only downloads one if nothing suitable is found there. Depending on the environment, the cache folder is determined in the following order:
- `${npm_config_cache}/_prebuilds`
- `${APP_DATA}/npm-cache/_prebuilds`
- `${HOME}/.npm/_prebuilds`
## Install
With [npm](https://npmjs.org) do:
```
npm install prebuild-install
```
## License
[MIT](./LICENSE)


@@ -0,0 +1,116 @@
var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
if (ar || !(i in from)) {
if (!ar) ar = Array.prototype.slice.call(from, 0, i);
ar[i] = from[i];
}
}
return to.concat(ar || Array.prototype.slice.call(from));
};
// As defined on the list of supported events: https://reactjs.org/docs/events.html
export var clipboardEvents = ['onCopy', 'onCut', 'onPaste'];
export var compositionEvents = [
'onCompositionEnd',
'onCompositionStart',
'onCompositionUpdate',
];
export var focusEvents = ['onFocus', 'onBlur'];
export var formEvents = ['onInput', 'onInvalid', 'onReset', 'onSubmit'];
export var imageEvents = ['onLoad', 'onError'];
export var keyboardEvents = ['onKeyDown', 'onKeyPress', 'onKeyUp'];
export var mediaEvents = [
'onAbort',
'onCanPlay',
'onCanPlayThrough',
'onDurationChange',
'onEmptied',
'onEncrypted',
'onEnded',
'onError',
'onLoadedData',
'onLoadedMetadata',
'onLoadStart',
'onPause',
'onPlay',
'onPlaying',
'onProgress',
'onRateChange',
'onSeeked',
'onSeeking',
'onStalled',
'onSuspend',
'onTimeUpdate',
'onVolumeChange',
'onWaiting',
];
export var mouseEvents = [
'onClick',
'onContextMenu',
'onDoubleClick',
'onMouseDown',
'onMouseEnter',
'onMouseLeave',
'onMouseMove',
'onMouseOut',
'onMouseOver',
'onMouseUp',
];
export var dragEvents = [
'onDrag',
'onDragEnd',
'onDragEnter',
'onDragExit',
'onDragLeave',
'onDragOver',
'onDragStart',
'onDrop',
];
export var selectionEvents = ['onSelect'];
export var touchEvents = ['onTouchCancel', 'onTouchEnd', 'onTouchMove', 'onTouchStart'];
export var pointerEvents = [
'onPointerDown',
'onPointerMove',
'onPointerUp',
'onPointerCancel',
'onGotPointerCapture',
'onLostPointerCapture',
'onPointerEnter',
'onPointerLeave',
'onPointerOver',
'onPointerOut',
];
export var uiEvents = ['onScroll'];
export var wheelEvents = ['onWheel'];
export var animationEvents = [
'onAnimationStart',
'onAnimationEnd',
'onAnimationIteration',
];
export var transitionEvents = ['onTransitionEnd'];
export var otherEvents = ['onToggle'];
export var changeEvents = ['onChange'];
export var allEvents = __spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray(__spreadArray([], clipboardEvents, true), compositionEvents, true), focusEvents, true), formEvents, true), imageEvents, true), keyboardEvents, true), mediaEvents, true), mouseEvents, true), dragEvents, true), selectionEvents, true), touchEvents, true), pointerEvents, true), uiEvents, true), wheelEvents, true), animationEvents, true), transitionEvents, true), changeEvents, true), otherEvents, true);
/**
* Returns an object with on-event callback props curried with provided args.
* @param {Object} props Props passed to a component.
* @param {Function=} getArgs A function that returns the argument(s) with which
* on-event callbacks shall be curried.
*/
export default function makeEventProps(props, getArgs) {
var eventProps = {};
allEvents.forEach(function (eventName) {
var eventHandler = props[eventName];
if (!eventHandler) {
return;
}
if (getArgs) {
eventProps[eventName] = (function (event) {
return eventHandler(event, getArgs(eventName));
});
}
else {
eventProps[eventName] = eventHandler;
}
});
return eventProps;
}
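A short usage sketch of the default export above (the import specifier assumes this is the published `make-event-props` package):

```ts
import makeEventProps from 'make-event-props'

const props = {
  className: 'pdf-page', // non-event props are ignored
  onClick: (event: MouseEvent, page: number) => {
    console.log('clicked page', page, 'via', event.type)
  },
}

// Every recognized on* handler is curried with whatever getArgs returns;
// getArgs also receives the event name, so different events can get different args.
const eventProps = makeEventProps(props, () => 5)

// eventProps.onClick(domEvent) now calls props.onClick(domEvent, 5);
// handlers missing from `props` are simply absent from the result.
```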


@@ -0,0 +1,5 @@
"use strict";
Object.defineProperty(exports, Symbol.toStringTag, { value: "Module" });
const preloadWarning = "Error preloading route! ☝️";
exports.preloadWarning = preloadWarning;
//# sourceMappingURL=link.cjs.map


@@ -0,0 +1,19 @@
# @babel/plugin-transform-react-jsx-source
> Add a __source prop to all JSX Elements
See our website [@babel/plugin-transform-react-jsx-source](https://babeljs.io/docs/babel-plugin-transform-react-jsx-source) for more information.
## Install
Using npm:
```sh
npm install --save-dev @babel/plugin-transform-react-jsx-source
```
or using yarn:
```sh
yarn add @babel/plugin-transform-react-jsx-source --dev
```
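As a rough illustration (the file name and positions below are hypothetical, and the exact output shape depends on your Babel setup), the plugin annotates each JSX element with a `__source` prop recording where it was written:

```tsx
// Input (assume this sits in src/App.js):
const before = <h1>Hello</h1>;

// Roughly what development builds see after the transform:
const after = (
  <h1 __source={{ fileName: 'src/App.js', lineNumber: 2, columnNumber: 15 }}>
    Hello
  </h1>
);
```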


@@ -0,0 +1,15 @@
import type { SourceMapInput } from '@jridgewell/trace-mapping';
export type { SourceMapSegment, DecodedSourceMap, EncodedSourceMap, } from '@jridgewell/trace-mapping';
export type { SourceMapInput };
export declare type LoaderContext = {
readonly importer: string;
readonly depth: number;
source: string;
content: string | null | undefined;
ignore: boolean | undefined;
};
export declare type SourceMapLoader = (file: string, ctx: LoaderContext) => SourceMapInput | null | undefined | void;
export declare type Options = {
excludeContent?: boolean;
decodedMappings?: boolean;
};
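A minimal sketch of a loader matching the `SourceMapLoader` shape above (the import specifier assumes these types are re-exported from the package root, which may differ):

```ts
import type { LoaderContext, SourceMapInput, SourceMapLoader } from '@ampproject/remapping'

// Hypothetical lookup table of already-parsed maps, keyed by file name.
const maps = new Map<string, SourceMapInput>()

const loader: SourceMapLoader = (file: string, ctx: LoaderContext) => {
  if (ctx.depth > 2) return null              // stop tracing past an arbitrary depth
  ctx.ignore = file.includes('node_modules')  // skip vendored sources (illustrative)
  return maps.get(file) ?? null               // null/undefined ends this branch
}
```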


@@ -0,0 +1,68 @@
#!/usr/bin/env node
"use strict";
var __getOwnPropNames = Object.getOwnPropertyNames;
var __commonJS = function(cb, mod) {
return function __require() {
return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = { exports: {} }).exports, mod), mod.exports;
};
};
// node_modules/semver-compare/index.js
var require_semver_compare = __commonJS({
"node_modules/semver-compare/index.js": function(exports2, module2) {
module2.exports = function cmp(a, b) {
var pa = a.split(".");
var pb = b.split(".");
for (var i = 0; i < 3; i++) {
var na = Number(pa[i]);
var nb = Number(pb[i]);
if (na > nb) return 1;
if (nb > na) return -1;
if (!isNaN(na) && isNaN(nb)) return 1;
if (isNaN(na) && !isNaN(nb)) return -1;
}
return 0;
};
}
});
// node_modules/please-upgrade-node/index.js
var require_please_upgrade_node = __commonJS({
"node_modules/please-upgrade-node/index.js": function(exports2, module2) {
var semverCompare = require_semver_compare();
module2.exports = function pleaseUpgradeNode2(pkg, opts) {
var opts = opts || {};
var requiredVersion = pkg.engines.node.replace(">=", "");
var currentVersion = process.version.replace("v", "");
if (semverCompare(currentVersion, requiredVersion) === -1) {
if (opts.message) {
console.error(opts.message(requiredVersion));
} else {
console.error(
pkg.name + " requires at least version " + requiredVersion + " of Node, please upgrade"
);
}
if (opts.hasOwnProperty("exitCode")) {
process.exit(opts.exitCode);
} else {
process.exit(1);
}
}
};
}
});
// bin/prettier.cjs
var nodeModule = require("module");
if (typeof nodeModule.enableCompileCache === "function") {
nodeModule.enableCompileCache();
}
var pleaseUpgradeNode = require_please_upgrade_node();
var packageJson = require("../package.json");
pleaseUpgradeNode(packageJson);
function runCli(cli) {
return cli.run();
}
var dynamicImport = new Function("module", "return import(module)");
var promise = dynamicImport("../internal/cli.mjs").then(runCli);
module.exports.__promise = promise;
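For reference, a usage sketch of the bundled `please-upgrade-node` helper as defined above: it reads `pkg.name` and `pkg.engines.node`, prints a message, and exits when the running Node is older than required (the standalone package name is used here for illustration):

```ts
const pleaseUpgradeNode = require('please-upgrade-node')

// Stand-in for a parsed package.json with an engines field.
const pkg = { name: 'my-cli', engines: { node: '>=14.0.0' } }

pleaseUpgradeNode(pkg, {
  message: (required: string) => `my-cli requires Node ${required} or newer`, // optional
  exitCode: 2,                                                                // optional, default 1
})
```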