FIN INIT
This commit is contained in:
29
node_modules/sucrase/dist/util/elideImportEquals.js
generated
vendored
Normal file
29
node_modules/sucrase/dist/util/elideImportEquals.js
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true});var _types = require('../parser/tokenizer/types');
|
||||
|
||||
|
||||
function elideImportEquals(tokens) {
|
||||
// import
|
||||
tokens.removeInitialToken();
|
||||
// name
|
||||
tokens.removeToken();
|
||||
// =
|
||||
tokens.removeToken();
|
||||
// name or require
|
||||
tokens.removeToken();
|
||||
// Handle either `import A = require('A')` or `import A = B.C.D`.
|
||||
if (tokens.matches1(_types.TokenType.parenL)) {
|
||||
// (
|
||||
tokens.removeToken();
|
||||
// path string
|
||||
tokens.removeToken();
|
||||
// )
|
||||
tokens.removeToken();
|
||||
} else {
|
||||
while (tokens.matches1(_types.TokenType.dot)) {
|
||||
// .
|
||||
tokens.removeToken();
|
||||
// name
|
||||
tokens.removeToken();
|
||||
}
|
||||
}
|
||||
} exports.default = elideImportEquals;
|
74
node_modules/sucrase/dist/util/formatTokens.js
generated
vendored
Normal file
74
node_modules/sucrase/dist/util/formatTokens.js
generated
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }var _linesandcolumns = require('lines-and-columns'); var _linesandcolumns2 = _interopRequireDefault(_linesandcolumns);
|
||||
|
||||
|
||||
var _types = require('../parser/tokenizer/types');
|
||||
|
||||
/**
 * Render a human-readable, column-aligned table of all tokens for debugging.
 *
 * Columns are: source location range, token type label, the (truncated) raw
 * source text, then one column per remaining own key of the token objects and
 * one per remaining own key of the token-type object. Returns "" when there
 * are no tokens.
 */
function formatTokens(code, tokens) {
  if (tokens.length === 0) {
    return "";
  }

  // Extra token columns: every own key except the ones already shown in the
  // fixed columns (type/value/start/end/loc).
  const tokenKeys = Object.keys(tokens[0]).filter(
    (k) => k !== "type" && k !== "value" && k !== "start" && k !== "end" && k !== "loc",
  );
  // Extra type columns: every own key of the type object except label/keyword.
  const typeKeys = Object.keys(tokens[0].type).filter((k) => k !== "label" && k !== "keyword");

  const headings = ["Location", "Label", "Raw", ...tokenKeys, ...typeKeys];

  const lines = new (0, _linesandcolumns2.default)(code);
  const rows = [headings, ...tokens.map(getTokenComponents)];
  // Column widths: the widest cell in each column across header and all rows.
  const padding = headings.map(() => 0);
  for (const components of rows) {
    for (let i = 0; i < components.length; i++) {
      padding[i] = Math.max(padding[i], components[i].length);
    }
  }
  return rows
    .map((components) => components.map((component, i) => component.padEnd(padding[i])).join(" "))
    .join("\n");

  // Build the cell strings for one token, in the same order as `headings`.
  function getTokenComponents(token) {
    const raw = code.slice(token.start, token.end);
    return [
      formatRange(token.start, token.end),
      _types.formatTokenType.call(void 0, token.type),
      truncate(String(raw), 14),
      // @ts-ignore: Intentional dynamic access by key.
      ...tokenKeys.map((key) => formatValue(token[key], key)),
      // @ts-ignore: Intentional dynamic access by key.
      ...typeKeys.map((key) => formatValue(token.type[key], key)),
    ];
  }

  // `true` renders as the column name, false/null as blank, anything else via
  // String(); this keeps boolean flag columns readable.
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  function formatValue(value, key) {
    if (value === true) {
      return key;
    } else if (value === false || value === null) {
      return "";
    } else {
      return String(value);
    }
  }

  function formatRange(start, end) {
    return `${formatPos(start)}-${formatPos(end)}`;
  }

  // Convert a 0-based code index to a 1-based "line:column" string.
  function formatPos(pos) {
    const location = lines.locationForIndex(pos);
    if (!location) {
      return "Unknown";
    } else {
      return `${location.line + 1}:${location.column + 1}`;
    }
  }
} exports.default = formatTokens;
|
||||
|
||||
/**
 * Truncate `s` to at most `length` characters, replacing the tail with "..."
 * when it is too long. Strings that already fit are returned unchanged.
 */
function truncate(s, length) {
  return s.length > length ? `${s.slice(0, length - 3)}...` : s;
}
|
352
node_modules/sucrase/dist/util/getClassInfo.js
generated
vendored
Normal file
352
node_modules/sucrase/dist/util/getClassInfo.js
generated
vendored
Normal file
@ -0,0 +1,352 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true});
|
||||
|
||||
var _keywords = require('../parser/tokenizer/keywords');
|
||||
var _types = require('../parser/tokenizer/types');
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
 * Get information about the class fields for this class, given a token processor pointing to the
 * open-brace at the start of the class.
 *
 * Scans the class body (restoring the token position afterwards via a
 * snapshot) and collects:
 * - header info (expression-ness, name, superclass presence),
 * - `this.x = x` initializer statements for TS constructor parameter
 *   properties, plus the token index where they should be inserted,
 * - claimed `__init`/`__initStatic` helper names for field initializers,
 * - field token ranges and token ranges to remove from the output.
 *
 * When `disableESTransforms` is true, field/initializer info is returned
 * empty, but header info, constructor info, and removal ranges (for `declare`/
 * `abstract` fields and stray semicolons are excluded then) are still computed.
 */
function getClassInfo(
  rootTransformer,
  tokens,
  nameManager,
  disableESTransforms,
) {
  const snapshot = tokens.snapshot();

  const headerInfo = processClassHeader(tokens);

  let constructorInitializerStatements = [];
  const instanceInitializerNames = [];
  const staticInitializerNames = [];
  let constructorInsertPos = null;
  const fields = [];
  const rangesToRemove = [];

  const classContextId = tokens.currentToken().contextId;
  if (classContextId == null) {
    throw new Error("Expected non-null class context ID on class open-brace.");
  }

  tokens.nextToken();
  // Walk every element of the class body until the matching close-brace.
  while (!tokens.matchesContextIdAndLabel(_types.TokenType.braceR, classContextId)) {
    if (tokens.matchesContextual(_keywords.ContextualKeyword._constructor) && !tokens.currentToken().isType) {
      ({constructorInitializerStatements, constructorInsertPos} = processConstructor(tokens));
    } else if (tokens.matches1(_types.TokenType.semi)) {
      // Stray semicolons are removed (only when the class transform is on).
      if (!disableESTransforms) {
        rangesToRemove.push({start: tokens.currentIndex(), end: tokens.currentIndex() + 1});
      }
      tokens.nextToken();
    } else if (tokens.currentToken().isType) {
      tokens.nextToken();
    } else {
      // Either a method or a field. Skip to the identifier part.
      const statementStartIndex = tokens.currentIndex();
      let isStatic = false;
      let isESPrivate = false;
      let isDeclareOrAbstract = false;
      while (isAccessModifier(tokens.currentToken())) {
        if (tokens.matches1(_types.TokenType._static)) {
          isStatic = true;
        }
        if (tokens.matches1(_types.TokenType.hash)) {
          isESPrivate = true;
        }
        if (tokens.matches1(_types.TokenType._declare) || tokens.matches1(_types.TokenType._abstract)) {
          isDeclareOrAbstract = true;
        }
        tokens.nextToken();
      }
      if (isStatic && tokens.matches1(_types.TokenType.braceL)) {
        // This is a static block, so don't process it in any special way.
        skipToNextClassElement(tokens, classContextId);
        continue;
      }
      if (isESPrivate) {
        // Sucrase doesn't attempt to transpile private fields; just leave them as-is.
        skipToNextClassElement(tokens, classContextId);
        continue;
      }
      // `constructor` can also appear after modifiers (e.g. `public constructor`).
      if (
        tokens.matchesContextual(_keywords.ContextualKeyword._constructor) &&
        !tokens.currentToken().isType
      ) {
        ({constructorInitializerStatements, constructorInsertPos} = processConstructor(tokens));
        continue;
      }

      const nameStartIndex = tokens.currentIndex();
      skipFieldName(tokens);
      if (tokens.matches1(_types.TokenType.lessThan) || tokens.matches1(_types.TokenType.parenL)) {
        // This is a method, so nothing to process.
        skipToNextClassElement(tokens, classContextId);
        continue;
      }
      // There might be a type annotation that we need to skip.
      while (tokens.currentToken().isType) {
        tokens.nextToken();
      }
      if (tokens.matches1(_types.TokenType.eq)) {
        const equalsIndex = tokens.currentIndex();
        // This is an initializer, so we need to wrap in an initializer method.
        const valueEnd = tokens.currentToken().rhsEndIndex;
        if (valueEnd == null) {
          throw new Error("Expected rhsEndIndex on class field assignment.");
        }
        tokens.nextToken();
        // Run the main transformer over the initializer expression tokens.
        while (tokens.currentIndex() < valueEnd) {
          rootTransformer.processToken();
        }
        let initializerName;
        if (isStatic) {
          initializerName = nameManager.claimFreeName("__initStatic");
          staticInitializerNames.push(initializerName);
        } else {
          initializerName = nameManager.claimFreeName("__init");
          instanceInitializerNames.push(initializerName);
        }
        // Fields start at the name, so `static x = 1;` has a field range of `x = 1;`.
        fields.push({
          initializerName,
          equalsIndex,
          start: nameStartIndex,
          end: tokens.currentIndex(),
        });
      } else if (!disableESTransforms || isDeclareOrAbstract) {
        // This is a regular field declaration, like `x;`. With the class transform enabled, we just
        // remove the line so that no output is produced. With the class transform disabled, we
        // usually want to preserve the declaration (but still strip types), but if the `declare`
        // or `abstract` keyword is specified, we should remove the line to avoid initializing the
        // value to undefined.
        rangesToRemove.push({start: statementStartIndex, end: tokens.currentIndex()});
      }
    }
  }

  tokens.restoreToSnapshot(snapshot);
  if (disableESTransforms) {
    // With ES transforms disabled, we don't want to transform regular class
    // field declarations, and we don't need to do any additional tricks to
    // reference the constructor for static init, but we still need to transform
    // TypeScript field initializers defined as constructor parameters and we
    // still need to remove `declare` fields. For now, we run the same code
    // path but omit any field information, as if the class had no field
    // declarations. In the future, when we fully drop the class fields
    // transform, we can simplify this code significantly.
    return {
      headerInfo,
      constructorInitializerStatements,
      instanceInitializerNames: [],
      staticInitializerNames: [],
      constructorInsertPos,
      fields: [],
      rangesToRemove,
    };
  } else {
    return {
      headerInfo,
      constructorInitializerStatements,
      instanceInitializerNames,
      staticInitializerNames,
      constructorInsertPos,
      fields,
      rangesToRemove,
    };
  }
} exports.default = getClassInfo;
|
||||
|
||||
/**
 * Move the token processor to the next method/field in the class.
 *
 * Seek forward until a token carrying the class's context ID is reached (the
 * start of the next element, or the closing curly brace), then back up over
 * any access modifiers so we land on the element's first token.
 */
function skipToNextClassElement(tokens, classContextId) {
  do {
    tokens.nextToken();
  } while (tokens.currentToken().contextId !== classContextId);
  while (isAccessModifier(tokens.tokenAtRelativeIndex(-1))) {
    tokens.previousToken();
  }
}
|
||||
|
||||
/**
 * Read the class header starting at the `class` token, advancing the token
 * processor to the class body's open-brace.
 *
 * Returns whether the class is an expression, its name (or null if
 * anonymous), and whether it extends a superclass.
 */
function processClassHeader(tokens) {
  const {contextId, isExpression} = tokens.currentToken();
  if (contextId == null) {
    throw new Error("Expected context ID on class token.");
  }
  if (isExpression == null) {
    throw new Error("Expected isExpression on class token.");
  }
  tokens.nextToken();
  const className = tokens.matches1(_types.TokenType.name) ? tokens.identifierName() : null;

  let hasSuperclass = false;
  while (!tokens.matchesContextIdAndLabel(_types.TokenType.braceL, contextId)) {
    // A real superclass always comes with a non-type `extends` token. Type
    // parameters and `implements` clauses consist only of type tokens, so a
    // declaration like `class A<B extends C> {` must *not* count as having a
    // superclass.
    if (tokens.matches1(_types.TokenType._extends) && !tokens.currentToken().isType) {
      hasSuperclass = true;
    }
    tokens.nextToken();
  }
  return {isExpression, className, hasSuperclass};
}
|
||||
|
||||
/**
 * Extract useful information out of a constructor, starting at the "constructor" name.
 *
 * Returns the `this.x = x` statements implied by TS parameter properties
 * (parameters preceded by access modifiers) and the token index where
 * initializer code should be inserted: just inside the body's open-brace, or
 * just after the `super(...)` call if there is one. Advances the token
 * processor past the entire constructor.
 */
function processConstructor(tokens) {
  const constructorInitializerStatements = [];

  tokens.nextToken();
  const constructorContextId = tokens.currentToken().contextId;
  if (constructorContextId == null) {
    throw new Error("Expected context ID on open-paren starting constructor params.");
  }
  // Advance through parameters looking for access modifiers.
  while (!tokens.matchesContextIdAndLabel(_types.TokenType.parenR, constructorContextId)) {
    if (tokens.currentToken().contextId === constructorContextId) {
      // Current token is an open paren or comma just before a param, so check
      // that param for access modifiers.
      tokens.nextToken();
      if (isAccessModifier(tokens.currentToken())) {
        tokens.nextToken();
        // Skip any further modifiers (e.g. `public readonly`).
        while (isAccessModifier(tokens.currentToken())) {
          tokens.nextToken();
        }
        const token = tokens.currentToken();
        if (token.type !== _types.TokenType.name) {
          throw new Error("Expected identifier after access modifiers in constructor arg.");
        }
        const name = tokens.identifierNameForToken(token);
        constructorInitializerStatements.push(`this.${name} = ${name}`);
      }
    } else {
      tokens.nextToken();
    }
  }
  // )
  tokens.nextToken();
  // Constructor type annotations are invalid, but skip them anyway since
  // they're easy to skip.
  while (tokens.currentToken().isType) {
    tokens.nextToken();
  }
  // Default insertion point: right after the body's open-brace.
  let constructorInsertPos = tokens.currentIndex();

  // Advance through body looking for a super call.
  let foundSuperCall = false;
  while (!tokens.matchesContextIdAndLabel(_types.TokenType.braceR, constructorContextId)) {
    if (!foundSuperCall && tokens.matches2(_types.TokenType._super, _types.TokenType.parenL)) {
      tokens.nextToken();
      const superCallContextId = tokens.currentToken().contextId;
      if (superCallContextId == null) {
        throw new Error("Expected a context ID on the super call");
      }
      while (!tokens.matchesContextIdAndLabel(_types.TokenType.parenR, superCallContextId)) {
        tokens.nextToken();
      }
      // Insert initializers after `super(...)` so `this` is valid; only the
      // first super call is considered.
      constructorInsertPos = tokens.currentIndex();
      foundSuperCall = true;
    }
    tokens.nextToken();
  }
  // }
  tokens.nextToken();

  return {constructorInitializerStatements, constructorInsertPos};
}
|
||||
|
||||
/**
 * Determine if this is any token that can go before the name in a
 * method/field: TS visibility/readonly/override/abstract/declare modifiers,
 * `static`, getter/setter/async keywords, generator star, `#` private prefix,
 * or variance sigils (`+`/`-`).
 */
function isAccessModifier(token) {
  switch (token.type) {
    case _types.TokenType._async:
    case _types.TokenType._get:
    case _types.TokenType._set:
    case _types.TokenType.plus:
    case _types.TokenType.minus:
    case _types.TokenType._readonly:
    case _types.TokenType._static:
    case _types.TokenType._public:
    case _types.TokenType._private:
    case _types.TokenType._protected:
    case _types.TokenType._override:
    case _types.TokenType._abstract:
    case _types.TokenType.star:
    case _types.TokenType._declare:
    case _types.TokenType.hash:
      return true;
    default:
      return false;
  }
}
|
||||
|
||||
/**
 * Advance past a method/field name: either a single identifier token or a
 * computed name in square brackets.
 */
function skipFieldName(tokens) {
  if (!tokens.matches1(_types.TokenType.bracketL)) {
    // Plain identifier name: just one token.
    tokens.nextToken();
    return;
  }
  // Computed name: skip to the matching close-bracket, then step past it.
  const classContextId = tokens.currentToken().contextId;
  if (classContextId == null) {
    throw new Error("Expected class context ID on computed name open bracket.");
  }
  while (!tokens.matchesContextIdAndLabel(_types.TokenType.bracketR, classContextId)) {
    tokens.nextToken();
  }
  tokens.nextToken();
}
|
40
node_modules/sucrase/dist/util/getDeclarationInfo.js
generated
vendored
Normal file
40
node_modules/sucrase/dist/util/getDeclarationInfo.js
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true});var _tokenizer = require('../parser/tokenizer');
|
||||
var _types = require('../parser/tokenizer/types');
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
// Reusable "no declarations" result with both sets empty.
// NOTE(review): this is a single shared instance — assumes consumers never
// mutate these sets; verify at call sites.
const EMPTY_DECLARATION_INFO = {
  typeDeclarations: new Set(),
  valueDeclarations: new Set(),
}; exports.EMPTY_DECLARATION_INFO = EMPTY_DECLARATION_INFO;
|
||||
|
||||
/**
 * Get all top-level identifiers that should be preserved when exported in TypeScript.
 *
 * Examples:
 * - If an identifier is declared as `const x`, then `export {x}` should be preserved.
 * - If it's declared as `type x`, then `export {x}` should be removed.
 * - If it's declared as both `const x` and `type x`, then the export should be preserved.
 * - Classes and enums should be preserved (even though they also introduce types).
 * - Imported identifiers should be preserved since we don't have enough information to
 *   rule them out. --isolatedModules disallows re-exports, which catches errors here.
 */
function getDeclarationInfo(tokens) {
  const typeDeclarations = new Set();
  const valueDeclarations = new Set();
  for (let i = 0; i < tokens.tokens.length; i++) {
    const token = tokens.tokens[i];
    if (token.type !== _types.TokenType.name || !_tokenizer.isTopLevelDeclaration.call(void 0, token)) {
      continue;
    }
    // Route the name into the type or value bucket based on the token's flag.
    const name = tokens.identifierNameForToken(token);
    (token.isType ? typeDeclarations : valueDeclarations).add(name);
  }
  return {typeDeclarations, valueDeclarations};
} exports.default = getDeclarationInfo;
|
15
node_modules/sucrase/dist/util/getIdentifierNames.js
generated
vendored
Normal file
15
node_modules/sucrase/dist/util/getIdentifierNames.js
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true});
|
||||
var _types = require('../parser/tokenizer/types');
|
||||
|
||||
/**
 * Get all identifier names in the code, in order, including duplicates.
 */
function getIdentifierNames(code, tokens) {
  const names = [];
  for (const {type, start, end} of tokens) {
    if (type === _types.TokenType.name) {
      names.push(code.slice(start, end));
    }
  }
  return names;
} exports.default = getIdentifierNames;
|
92
node_modules/sucrase/dist/util/getImportExportSpecifierInfo.js
generated
vendored
Normal file
92
node_modules/sucrase/dist/util/getImportExportSpecifierInfo.js
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true});var _types = require('../parser/tokenizer/types');
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
 * Determine information about this named import or named export specifier.
 *
 * This is the `A` part of statements like:
 *   import {A} from "./foo";
 *   export {A};
 *   export {A} from "./foo";
 *
 * The number of tokens before the specifier ends (a `,` or `}`) fully
 * determines which of the four syntactic forms this is:
 *   1 token:  {A}
 *   2 tokens: {type A}
 *   3 tokens: {A as B}
 *   4 tokens: {type A as B}
 *
 * For the type-only forms, the names are never needed in practice, so they
 * are not extracted.
 *
 * TODO: There's some redundancy with the type detection here and the isType
 * flag that's already present on tokens in TS mode. This function could
 * potentially be simplified and/or pushed to the call sites to avoid the
 * object allocation.
 */
function getImportExportSpecifierInfo(
  tokens,
  index = tokens.currentIndex(),
) {
  for (let tokenCount = 1; tokenCount <= 4; tokenCount++) {
    const endIndex = index + tokenCount;
    if (!isSpecifierEnd(tokens, endIndex)) {
      continue;
    }
    switch (tokenCount) {
      case 1: {
        // import {A}
        const name = tokens.identifierNameAtIndex(index);
        return {isType: false, leftName: name, rightName: name, endIndex};
      }
      case 2:
        // import {type A}
        return {isType: true, leftName: null, rightName: null, endIndex};
      case 3:
        // import {A as B}
        return {
          isType: false,
          leftName: tokens.identifierNameAtIndex(index),
          rightName: tokens.identifierNameAtIndex(index + 2),
          endIndex,
        };
      default:
        // import {type A as B}
        return {isType: true, leftName: null, rightName: null, endIndex};
    }
  }
  throw new Error(`Unexpected import/export specifier at ${index}`);
} exports.default = getImportExportSpecifierInfo;
|
||||
|
||||
// A specifier ends at the list separator (`,`) or the closing brace (`}`).
function isSpecifierEnd(tokens, index) {
  const {type} = tokens.tokens[index];
  return type === _types.TokenType.braceR || type === _types.TokenType.comma;
}
|
22
node_modules/sucrase/dist/util/getJSXPragmaInfo.js
generated
vendored
Normal file
22
node_modules/sucrase/dist/util/getJSXPragmaInfo.js
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true});
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
 * Resolve the configured JSX pragmas (element factory and fragment) into
 * their base identifier and trailing member-access suffix.
 */
function getJSXPragmaInfo(options) {
  const pragma = options.jsxPragma || "React.createElement";
  const fragmentPragma = options.jsxFragmentPragma || "React.Fragment";
  const [base, suffix] = splitPragma(pragma);
  const [fragmentBase, fragmentSuffix] = splitPragma(fragmentPragma);
  return {base, suffix, fragmentBase, fragmentSuffix};
} exports.default = getJSXPragmaInfo;
|
||||
|
||||
/**
 * Split a pragma like "React.createElement" into its leading identifier and
 * the remainder including the leading dot, e.g. ["React", ".createElement"].
 * A pragma without a dot yields the whole string and "".
 */
function splitPragma(pragma) {
  const dotIndex = pragma.indexOf(".");
  if (dotIndex === -1) {
    return [pragma, ""];
  }
  return [pragma.slice(0, dotIndex), pragma.slice(dotIndex)];
}
|
43
node_modules/sucrase/dist/util/getNonTypeIdentifiers.js
generated
vendored
Normal file
43
node_modules/sucrase/dist/util/getNonTypeIdentifiers.js
generated
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
|
||||
var _tokenizer = require('../parser/tokenizer');
|
||||
var _types = require('../parser/tokenizer/types');
|
||||
|
||||
var _JSXTransformer = require('../transformers/JSXTransformer');
|
||||
var _getJSXPragmaInfo = require('./getJSXPragmaInfo'); var _getJSXPragmaInfo2 = _interopRequireDefault(_getJSXPragmaInfo);
|
||||
|
||||
/**
 * Collect the set of identifier names that are used as runtime values (not
 * just types) anywhere in the token stream, including the JSX pragma base
 * identifiers implied by any JSX usage.
 */
function getNonTypeIdentifiers(tokens, options) {
  const jsxPragmaInfo = _getJSXPragmaInfo2.default.call(void 0, options);
  const nonTypeIdentifiers = new Set();
  for (let i = 0; i < tokens.tokens.length; i++) {
    const token = tokens.tokens[i];
    // Plain identifier reads (access/shorthand/export-access) that are not
    // type-only and do not shadow a global.
    if (
      token.type === _types.TokenType.name &&
      !token.isType &&
      (token.identifierRole === _tokenizer.IdentifierRole.Access ||
        token.identifierRole === _tokenizer.IdentifierRole.ObjectShorthand ||
        token.identifierRole === _tokenizer.IdentifierRole.ExportAccess) &&
      !token.shadowsGlobal
    ) {
      nonTypeIdentifiers.add(tokens.identifierNameForToken(token));
    }
    // Any JSX tag implies a use of the element pragma base (e.g. `React`).
    if (token.type === _types.TokenType.jsxTagStart) {
      nonTypeIdentifiers.add(jsxPragmaInfo.base);
    }
    // `<>` (fragment) additionally implies the fragment pragma base.
    if (
      token.type === _types.TokenType.jsxTagStart &&
      i + 1 < tokens.tokens.length &&
      tokens.tokens[i + 1].type === _types.TokenType.jsxTagEnd
    ) {
      nonTypeIdentifiers.add(jsxPragmaInfo.base);
      nonTypeIdentifiers.add(jsxPragmaInfo.fragmentBase);
    }
    if (token.type === _types.TokenType.jsxName && token.identifierRole === _tokenizer.IdentifierRole.Access) {
      const identifierName = tokens.identifierNameForToken(token);
      // Lower-case single-component tag names like "div" don't count.
      // NOTE(review): reads tokens.tokens[i + 1] without a bounds check here,
      // unlike the fragment check above — assumes a jsxName access token is
      // never the final token; verify against the tokenizer.
      if (!_JSXTransformer.startsWithLowerCase.call(void 0, identifierName) || tokens.tokens[i + 1].type === _types.TokenType.dot) {
        nonTypeIdentifiers.add(tokens.identifierNameForToken(token));
      }
    }
  }
  return nonTypeIdentifiers;
} exports.getNonTypeIdentifiers = getNonTypeIdentifiers;
|
84
node_modules/sucrase/dist/util/getTSImportedNames.js
generated
vendored
Normal file
84
node_modules/sucrase/dist/util/getTSImportedNames.js
generated
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }var _types = require('../parser/tokenizer/types');
|
||||
|
||||
var _getImportExportSpecifierInfo = require('./getImportExportSpecifierInfo'); var _getImportExportSpecifierInfo2 = _interopRequireDefault(_getImportExportSpecifierInfo);
|
||||
|
||||
/**
 * Special case code to scan for imported names in ESM TypeScript. We need to do this so we can
 * properly get globals so we can compute shadowed globals.
 *
 * This is similar to logic in CJSImportProcessor, but trimmed down to avoid logic with CJS
 * replacement and flow type imports.
 */
function getTSImportedNames(tokens) {
  const importedNames = new Set();
  for (let i = 0; i < tokens.tokens.length; i++) {
    if (!tokens.matches1AtIndex(i, _types.TokenType._import)) {
      continue;
    }
    // `import A = ...` is handled elsewhere and binds no ESM name here.
    if (tokens.matches3AtIndex(i, _types.TokenType._import, _types.TokenType.name, _types.TokenType.eq)) {
      continue;
    }
    collectNamesForImport(tokens, i, importedNames);
  }
  return importedNames;
} exports.default = getTSImportedNames;
|
||||
|
||||
/**
 * Add every name bound by the import statement starting at `index` (the index
 * of the `import` token itself) to `importedNames`.
 */
function collectNamesForImport(
  tokens,
  index,
  importedNames,
) {
  let i = index + 1;

  if (tokens.matches1AtIndex(i, _types.TokenType.parenL)) {
    // Dynamic import `import(...)` binds nothing.
    return;
  }

  // Default import binding: `import A ...`.
  if (tokens.matches1AtIndex(i, _types.TokenType.name)) {
    importedNames.add(tokens.identifierNameAtIndex(i));
    i++;
    if (tokens.matches1AtIndex(i, _types.TokenType.comma)) {
      i++;
    }
  }

  // Namespace import: `* as ns`.
  if (tokens.matches1AtIndex(i, _types.TokenType.star)) {
    i += 2;
    importedNames.add(tokens.identifierNameAtIndex(i));
    i++;
  }

  // Named imports: `{A, type B, C as D}`.
  if (tokens.matches1AtIndex(i, _types.TokenType.braceL)) {
    i++;
    collectNamesForNamedImport(tokens, i, importedNames);
  }
}
|
||||
|
||||
/**
 * Add the local names bound by a named-import clause (starting just inside
 * the `{`) to `importedNames`, skipping type-only specifiers.
 */
function collectNamesForNamedImport(
  tokens,
  index,
  importedNames,
) {
  let i = index;
  while (!tokens.matches1AtIndex(i, _types.TokenType.braceR)) {
    const {isType, rightName, endIndex} = _getImportExportSpecifierInfo2.default.call(void 0, tokens, i);
    if (!isType) {
      importedNames.add(rightName);
    }
    i = endIndex;
    // After a specifier we expect `,}`, `}`, or `,` before the next one.
    if (tokens.matches2AtIndex(i, _types.TokenType.comma, _types.TokenType.braceR)) {
      return;
    }
    if (tokens.matches1AtIndex(i, _types.TokenType.braceR)) {
      return;
    }
    if (tokens.matches1AtIndex(i, _types.TokenType.comma)) {
      i++;
    } else {
      throw new Error(`Unexpected token: ${JSON.stringify(tokens.tokens[i])}`);
    }
  }
}
|
38
node_modules/sucrase/dist/util/isAsyncOperation.js
generated
vendored
Normal file
38
node_modules/sucrase/dist/util/isAsyncOperation.js
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true});var _keywords = require('../parser/tokenizer/keywords');
|
||||
|
||||
|
||||
/**
 * Determine whether this optional chain or nullish coalescing operation has any await statements in
 * it. If so, we'll need to transpile to an async operation.
 *
 * We compute this by walking the length of the operation and returning true if we see an await
 * keyword used as a real await (rather than an object key or property access). Nested optional
 * chain/nullish operations need to be tracked but don't silence await, but a nested async function
 * (or any other nested scope) will make the await not count.
 */
function isAsyncOperation(tokens) {
  let index = tokens.currentIndex();
  // Nesting depth of chain/nullish constructs; we stop once the outermost
  // operation closes (depth back to 0) or we run out of tokens.
  let depth = 0;
  const startToken = tokens.currentToken();
  do {
    const token = tokens.tokens[index];
    if (token.isOptionalChainStart) {
      depth++;
    }
    if (token.isOptionalChainEnd) {
      depth--;
    }
    depth += token.numNullishCoalesceStarts;
    depth -= token.numNullishCoalesceEnds;

    // A real `await`: the contextual keyword with no identifier role (so not
    // a key or property name) at the same scope depth as the operation start
    // (so not inside a nested function).
    if (
      token.contextualKeyword === _keywords.ContextualKeyword._await &&
      token.identifierRole == null &&
      token.scopeDepth === startToken.scopeDepth
    ) {
      return true;
    }
    index += 1;
  } while (depth > 0 && index < tokens.tokens.length);
  return false;
} exports.default = isAsyncOperation;
|
18
node_modules/sucrase/dist/util/isExportFrom.js
generated
vendored
Normal file
18
node_modules/sucrase/dist/util/isExportFrom.js
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true});var _keywords = require('../parser/tokenizer/keywords');
|
||||
var _types = require('../parser/tokenizer/types');
|
||||
|
||||
|
||||
/**
 * Starting at `export {`, look ahead and return `true` if this is an
 * `export {...} from "..."` re-export and `false` if this is a plain
 * multi-export.
 */
function isExportFrom(tokens) {
  // Scan forward to the `}` that closes the export clause.
  let i = tokens.currentIndex();
  while (!tokens.matches1AtIndex(i, _types.TokenType.braceR)) {
    i++;
  }
  // A re-export has `from` plus a module string right after the close brace.
  return (
    tokens.matchesContextualAtIndex(i + 1, _keywords.ContextualKeyword._from) &&
    tokens.matches1AtIndex(i + 2, _types.TokenType.string)
  );
} exports.default = isExportFrom;
|
81
node_modules/sucrase/dist/util/isIdentifier.js
generated
vendored
Normal file
81
node_modules/sucrase/dist/util/isIdentifier.js
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true});var _identifier = require('../parser/util/identifier');
|
||||
|
||||
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar
// Hard-code a list of reserved words rather than trying to use keywords or contextual keywords
// from the parser, since currently there are various exceptions, like `package` being reserved
// but unused and various contextual keywords being reserved. Note that we assume that all code
// compiled by Sucrase is in a module, so strict mode words and await are all considered reserved
// here. Stored in a Set for O(1) membership checks from isIdentifier.
const RESERVED_WORDS = new Set([
  // Reserved keywords as of ECMAScript 2015
  "break",
  "case",
  "catch",
  "class",
  "const",
  "continue",
  "debugger",
  "default",
  "delete",
  "do",
  "else",
  "export",
  "extends",
  "finally",
  "for",
  "function",
  "if",
  "import",
  "in",
  "instanceof",
  "new",
  "return",
  "super",
  "switch",
  "this",
  "throw",
  "try",
  "typeof",
  "var",
  "void",
  "while",
  "with",
  "yield",
  // Future reserved keywords
  "enum",
  "implements",
  "interface",
  "let",
  "package",
  "private",
  "protected",
  "public",
  "static",
  "await",
  // Literals that cannot be used as identifiers
  "false",
  "null",
  "true",
]);
|
||||
|
||||
/**
 * Determine if the given name is a legal variable name.
 *
 * This is needed when transforming TypeScript enums; if an enum key is a valid
 * variable name, it might be referenced later in the enum, so we need to
 * declare a variable.
 */
function isIdentifier(name) {
  // The empty string is trivially not an identifier.
  if (name.length === 0) {
    return false;
  }
  // Check every UTF-16 code unit: the first must be a valid identifier start,
  // the rest must be valid identifier continuation characters. Iterating by
  // charCodeAt (not code points) matches how the lookup tables are indexed.
  for (let i = 0; i < name.length; i++) {
    const table = i === 0 ? _identifier.IS_IDENTIFIER_START : _identifier.IS_IDENTIFIER_CHAR;
    if (!table[name.charCodeAt(i)]) {
      return false;
    }
  }
  // Syntactically valid names may still be reserved words.
  return !RESERVED_WORDS.has(name);
} exports.default = isIdentifier;
|
22
node_modules/sucrase/dist/util/removeMaybeImportAttributes.js
generated
vendored
Normal file
22
node_modules/sucrase/dist/util/removeMaybeImportAttributes.js
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true});var _keywords = require('../parser/tokenizer/keywords');
|
||||
var _types = require('../parser/tokenizer/types');
|
||||
|
||||
|
||||
/**
 * Starting at a potential `with` or (legacy) `assert` token, remove the import
 * attributes if they exist.
 */
function removeMaybeImportAttributes(tokens) {
  // Match either `with {` (import attributes) or the legacy `assert {`
  // (import assertions); `assert` is a plain name token checked contextually.
  const hasAttributes =
    tokens.matches2(_types.TokenType._with, _types.TokenType.braceL) ||
    (tokens.matches2(_types.TokenType.name, _types.TokenType.braceL) &&
      tokens.matchesContextual(_keywords.ContextualKeyword._assert));
  if (!hasAttributes) {
    return;
  }
  // `with` or `assert`
  tokens.removeToken();
  // {
  tokens.removeToken();
  // everything between the braces
  tokens.removeBalancedCode();
  // }
  tokens.removeToken();
} exports.removeMaybeImportAttributes = removeMaybeImportAttributes;
|
38
node_modules/sucrase/dist/util/shouldElideDefaultExport.js
generated
vendored
Normal file
38
node_modules/sucrase/dist/util/shouldElideDefaultExport.js
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
"use strict";Object.defineProperty(exports, "__esModule", {value: true});var _types = require('../parser/tokenizer/types');
|
||||
|
||||
|
||||
|
||||
/**
 * Common method sharing code between CJS and ESM cases, since they're the same here.
 *
 * Returns true when an `export default <name>` statement exports a pure type
 * declaration and therefore should be removed from the emitted output.
 */
function shouldElideDefaultExport(
  isTypeScriptTransformEnabled,
  keepUnusedImports,
  tokens,
  declarationInfo,
) {
  // Elision only ever happens for the TypeScript transform, and never when
  // the user asked to keep unused imports/exports.
  if (!isTypeScriptTransformEnabled || keepUnusedImports) {
    return false;
  }
  const exportToken = tokens.currentToken();
  if (exportToken.rhsEndIndex == null) {
    throw new Error("Expected non-null rhsEndIndex on export token.");
  }
  // The export must be of the form `export default a` or `export default a;`:
  // exactly three tokens, or four when the last one is a semicolon.
  const numTokens = exportToken.rhsEndIndex - tokens.currentIndex();
  const isSimpleNameExport =
    numTokens === 3 ||
    (numTokens === 4 &&
      tokens.matches1AtIndex(exportToken.rhsEndIndex - 1, _types.TokenType.semi));
  if (!isSimpleNameExport) {
    return false;
  }
  const identifierToken = tokens.tokenAtRelativeIndex(2);
  if (identifierToken.type !== _types.TokenType.name) {
    return false;
  }
  // Elide only if the name is known to be a type and is never declared as a
  // runtime value.
  const exportedName = tokens.identifierNameForToken(identifierToken);
  if (!declarationInfo.typeDeclarations.has(exportedName)) {
    return false;
  }
  return !declarationInfo.valueDeclarations.has(exportedName);
} exports.default = shouldElideDefaultExport;
|
Reference in New Issue
Block a user