commit 785a2b6134
Date: 2025-06-04 10:03:22 +02:00
14182 changed files with 1764617 additions and 0 deletions

@@ -0,0 +1,29 @@
import {TokenType as tt} from "../parser/tokenizer/types";
export default function elideImportEquals(tokens) {
// import
tokens.removeInitialToken();
// name
tokens.removeToken();
// =
tokens.removeToken();
// name or require
tokens.removeToken();
// Handle either `import A = require('A')` or `import A = B.C.D`.
if (tokens.matches1(tt.parenL)) {
// (
tokens.removeToken();
// path string
tokens.removeToken();
// )
tokens.removeToken();
} else {
while (tokens.matches1(tt.dot)) {
// .
tokens.removeToken();
// name
tokens.removeToken();
}
}
}
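
A hedged sketch (not part of the vendored file): how a transformer might call this helper, and which tokens it consumes. `tokens` is assumed to be a TokenProcessor positioned on the `import` token, and `matches3` is assumed to exist alongside the `matches1`/`matches2` helpers used above.

// import A = require("./a");  ->  removes `import A = require ( "./a" )`
// import A = B.C.D;           ->  removes `import A = B . C . D`
// Any trailing semicolon is left for the caller to handle.
if (tokens.matches3(tt._import, tt.name, tt.eq)) {
  elideImportEquals(tokens);
}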

node_modules/sucrase/dist/esm/util/formatTokens.js (generated, vendored; 74 lines)

@@ -0,0 +1,74 @@
import LinesAndColumns from "lines-and-columns";
import {formatTokenType} from "../parser/tokenizer/types";
export default function formatTokens(code, tokens) {
if (tokens.length === 0) {
return "";
}
const tokenKeys = Object.keys(tokens[0]).filter(
(k) => k !== "type" && k !== "value" && k !== "start" && k !== "end" && k !== "loc",
);
const typeKeys = Object.keys(tokens[0].type).filter((k) => k !== "label" && k !== "keyword");
const headings = ["Location", "Label", "Raw", ...tokenKeys, ...typeKeys];
const lines = new LinesAndColumns(code);
const rows = [headings, ...tokens.map(getTokenComponents)];
const padding = headings.map(() => 0);
for (const components of rows) {
for (let i = 0; i < components.length; i++) {
padding[i] = Math.max(padding[i], components[i].length);
}
}
return rows
.map((components) => components.map((component, i) => component.padEnd(padding[i])).join(" "))
.join("\n");
function getTokenComponents(token) {
const raw = code.slice(token.start, token.end);
return [
formatRange(token.start, token.end),
formatTokenType(token.type),
truncate(String(raw), 14),
// @ts-ignore: Intentional dynamic access by key.
...tokenKeys.map((key) => formatValue(token[key], key)),
// @ts-ignore: Intentional dynamic access by key.
...typeKeys.map((key) => formatValue(token.type[key], key)),
];
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function formatValue(value, key) {
if (value === true) {
return key;
} else if (value === false || value === null) {
return "";
} else {
return String(value);
}
}
function formatRange(start, end) {
return `${formatPos(start)}-${formatPos(end)}`;
}
function formatPos(pos) {
const location = lines.locationForIndex(pos);
if (!location) {
return "Unknown";
} else {
return `${location.line + 1}:${location.column + 1}`;
}
}
}
function truncate(s, length) {
if (s.length > length) {
return `${s.slice(0, length - 3)}...`;
} else {
return s;
}
}
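
A usage sketch: sucrase's main entry point exposes this formatting through `getFormattedTokens(code, options)` (assumed available in this version), which is the simplest way to see the table built above.

import {getFormattedTokens} from "sucrase";

// Prints a header row plus one row per token: "line:col-line:col", the token label,
// the raw text (truncated to 14 characters), then any extra token/type fields, space-padded.
console.log(getFormattedTokens("const x = f(1);", {transforms: []}));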

node_modules/sucrase/dist/esm/util/getClassInfo.js (generated, vendored; 352 lines)

@@ -0,0 +1,352 @@
import {ContextualKeyword} from "../parser/tokenizer/keywords";
import {TokenType as tt} from "../parser/tokenizer/types";
/**
* Get information about the class fields for this class, given a token processor pointing to the
 * `class` token at the start of the class; processClassHeader below then advances to the open-brace.
*/
export default function getClassInfo(
rootTransformer,
tokens,
nameManager,
disableESTransforms,
) {
const snapshot = tokens.snapshot();
const headerInfo = processClassHeader(tokens);
let constructorInitializerStatements = [];
const instanceInitializerNames = [];
const staticInitializerNames = [];
let constructorInsertPos = null;
const fields = [];
const rangesToRemove = [];
const classContextId = tokens.currentToken().contextId;
if (classContextId == null) {
throw new Error("Expected non-null class context ID on class open-brace.");
}
tokens.nextToken();
while (!tokens.matchesContextIdAndLabel(tt.braceR, classContextId)) {
if (tokens.matchesContextual(ContextualKeyword._constructor) && !tokens.currentToken().isType) {
({constructorInitializerStatements, constructorInsertPos} = processConstructor(tokens));
} else if (tokens.matches1(tt.semi)) {
if (!disableESTransforms) {
rangesToRemove.push({start: tokens.currentIndex(), end: tokens.currentIndex() + 1});
}
tokens.nextToken();
} else if (tokens.currentToken().isType) {
tokens.nextToken();
} else {
// Either a method or a field. Skip to the identifier part.
const statementStartIndex = tokens.currentIndex();
let isStatic = false;
let isESPrivate = false;
let isDeclareOrAbstract = false;
while (isAccessModifier(tokens.currentToken())) {
if (tokens.matches1(tt._static)) {
isStatic = true;
}
if (tokens.matches1(tt.hash)) {
isESPrivate = true;
}
if (tokens.matches1(tt._declare) || tokens.matches1(tt._abstract)) {
isDeclareOrAbstract = true;
}
tokens.nextToken();
}
if (isStatic && tokens.matches1(tt.braceL)) {
// This is a static block, so don't process it in any special way.
skipToNextClassElement(tokens, classContextId);
continue;
}
if (isESPrivate) {
// Sucrase doesn't attempt to transpile private fields; just leave them as-is.
skipToNextClassElement(tokens, classContextId);
continue;
}
if (
tokens.matchesContextual(ContextualKeyword._constructor) &&
!tokens.currentToken().isType
) {
({constructorInitializerStatements, constructorInsertPos} = processConstructor(tokens));
continue;
}
const nameStartIndex = tokens.currentIndex();
skipFieldName(tokens);
if (tokens.matches1(tt.lessThan) || tokens.matches1(tt.parenL)) {
// This is a method, so nothing to process.
skipToNextClassElement(tokens, classContextId);
continue;
}
// There might be a type annotation that we need to skip.
while (tokens.currentToken().isType) {
tokens.nextToken();
}
if (tokens.matches1(tt.eq)) {
const equalsIndex = tokens.currentIndex();
// This is an initializer, so we need to wrap in an initializer method.
const valueEnd = tokens.currentToken().rhsEndIndex;
if (valueEnd == null) {
throw new Error("Expected rhsEndIndex on class field assignment.");
}
tokens.nextToken();
while (tokens.currentIndex() < valueEnd) {
rootTransformer.processToken();
}
let initializerName;
if (isStatic) {
initializerName = nameManager.claimFreeName("__initStatic");
staticInitializerNames.push(initializerName);
} else {
initializerName = nameManager.claimFreeName("__init");
instanceInitializerNames.push(initializerName);
}
// Fields start at the name, so `static x = 1;` has a field range of `x = 1;`.
fields.push({
initializerName,
equalsIndex,
start: nameStartIndex,
end: tokens.currentIndex(),
});
} else if (!disableESTransforms || isDeclareOrAbstract) {
// This is a regular field declaration, like `x;`. With the class transform enabled, we just
// remove the line so that no output is produced. With the class transform disabled, we
// usually want to preserve the declaration (but still strip types), but if the `declare`
// or `abstract` keyword is specified, we should remove the line to avoid initializing the
// value to undefined.
rangesToRemove.push({start: statementStartIndex, end: tokens.currentIndex()});
}
}
}
tokens.restoreToSnapshot(snapshot);
if (disableESTransforms) {
// With ES transforms disabled, we don't want to transform regular class
// field declarations, and we don't need to do any additional tricks to
// reference the constructor for static init, but we still need to transform
// TypeScript field initializers defined as constructor parameters and we
// still need to remove `declare` fields. For now, we run the same code
// path but omit any field information, as if the class had no field
// declarations. In the future, when we fully drop the class fields
// transform, we can simplify this code significantly.
return {
headerInfo,
constructorInitializerStatements,
instanceInitializerNames: [],
staticInitializerNames: [],
constructorInsertPos,
fields: [],
rangesToRemove,
};
} else {
return {
headerInfo,
constructorInitializerStatements,
instanceInitializerNames,
staticInitializerNames,
constructorInsertPos,
fields,
rangesToRemove,
};
}
}
/**
* Move the token processor to the next method/field in the class.
*
 * To do that, we seek forward to the next start of a class element name (either an open
* bracket or an identifier, or the closing curly brace), then seek backward to
* include any access modifiers.
*/
function skipToNextClassElement(tokens, classContextId) {
tokens.nextToken();
while (tokens.currentToken().contextId !== classContextId) {
tokens.nextToken();
}
while (isAccessModifier(tokens.tokenAtRelativeIndex(-1))) {
tokens.previousToken();
}
}
function processClassHeader(tokens) {
const classToken = tokens.currentToken();
const contextId = classToken.contextId;
if (contextId == null) {
throw new Error("Expected context ID on class token.");
}
const isExpression = classToken.isExpression;
if (isExpression == null) {
throw new Error("Expected isExpression on class token.");
}
let className = null;
let hasSuperclass = false;
tokens.nextToken();
if (tokens.matches1(tt.name)) {
className = tokens.identifierName();
}
while (!tokens.matchesContextIdAndLabel(tt.braceL, contextId)) {
// If this has a superclass, there will always be an `extends` token. If it doesn't have a
// superclass, only type parameters and `implements` clauses can show up here, all of which
// consist only of type tokens. A declaration like `class A<B extends C> {` should *not* count
// as having a superclass.
if (tokens.matches1(tt._extends) && !tokens.currentToken().isType) {
hasSuperclass = true;
}
tokens.nextToken();
}
return {isExpression, className, hasSuperclass};
}
/**
* Extract useful information out of a constructor, starting at the "constructor" name.
*/
function processConstructor(tokens) {
const constructorInitializerStatements = [];
tokens.nextToken();
const constructorContextId = tokens.currentToken().contextId;
if (constructorContextId == null) {
throw new Error("Expected context ID on open-paren starting constructor params.");
}
// Advance through parameters looking for access modifiers.
while (!tokens.matchesContextIdAndLabel(tt.parenR, constructorContextId)) {
if (tokens.currentToken().contextId === constructorContextId) {
// Current token is an open paren or comma just before a param, so check
// that param for access modifiers.
tokens.nextToken();
if (isAccessModifier(tokens.currentToken())) {
tokens.nextToken();
while (isAccessModifier(tokens.currentToken())) {
tokens.nextToken();
}
const token = tokens.currentToken();
if (token.type !== tt.name) {
throw new Error("Expected identifier after access modifiers in constructor arg.");
}
const name = tokens.identifierNameForToken(token);
constructorInitializerStatements.push(`this.${name} = ${name}`);
}
} else {
tokens.nextToken();
}
}
// )
tokens.nextToken();
// Constructor type annotations are invalid, but skip them anyway since
// they're easy to skip.
while (tokens.currentToken().isType) {
tokens.nextToken();
}
let constructorInsertPos = tokens.currentIndex();
// Advance through body looking for a super call.
let foundSuperCall = false;
while (!tokens.matchesContextIdAndLabel(tt.braceR, constructorContextId)) {
if (!foundSuperCall && tokens.matches2(tt._super, tt.parenL)) {
tokens.nextToken();
const superCallContextId = tokens.currentToken().contextId;
if (superCallContextId == null) {
throw new Error("Expected a context ID on the super call");
}
while (!tokens.matchesContextIdAndLabel(tt.parenR, superCallContextId)) {
tokens.nextToken();
}
constructorInsertPos = tokens.currentIndex();
foundSuperCall = true;
}
tokens.nextToken();
}
// }
tokens.nextToken();
return {constructorInitializerStatements, constructorInsertPos};
}
/**
* Determine if this is any token that can go before the name in a method/field.
*/
function isAccessModifier(token) {
return [
tt._async,
tt._get,
tt._set,
tt.plus,
tt.minus,
tt._readonly,
tt._static,
tt._public,
tt._private,
tt._protected,
tt._override,
tt._abstract,
tt.star,
tt._declare,
tt.hash,
].includes(token.type);
}
/**
* The next token or set of tokens is either an identifier or an expression in square brackets, for
* a method or field name.
*/
function skipFieldName(tokens) {
if (tokens.matches1(tt.bracketL)) {
const startToken = tokens.currentToken();
const classContextId = startToken.contextId;
if (classContextId == null) {
throw new Error("Expected class context ID on computed name open bracket.");
}
while (!tokens.matchesContextIdAndLabel(tt.bracketR, classContextId)) {
tokens.nextToken();
}
tokens.nextToken();
} else {
tokens.nextToken();
}
}
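
A hedged illustration (not part of the vendored file) of roughly what getClassInfo reports for a small TypeScript class with ES transforms enabled; the exact `__init`/`__initStatic` names come from `nameManager.claimFreeName` and may differ.

// class Foo extends Bar {
//   x = 1;
//   static y = 2;
//   declare z: number;
//   constructor(readonly w: number) { super(); }
// }
//
// headerInfo:                        {isExpression: false, className: "Foo", hasSuperclass: true}
// constructorInitializerStatements:  ["this.w = w"]
// instanceInitializerNames:          ["__init"]        (wraps `x = 1`)
// staticInitializerNames:            ["__initStatic"]  (wraps `y = 2`)
// fields:                            one entry each for `x = 1` and `y = 2`, starting at the name
// rangesToRemove:                    the `declare z: number;` statement
// constructorInsertPos:              the token index just after the `super(...)` call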

@@ -0,0 +1,40 @@
import {isTopLevelDeclaration} from "../parser/tokenizer";
import {TokenType as tt} from "../parser/tokenizer/types";
export const EMPTY_DECLARATION_INFO = {
typeDeclarations: new Set(),
valueDeclarations: new Set(),
};
/**
* Get all top-level identifiers that should be preserved when exported in TypeScript.
*
* Examples:
* - If an identifier is declared as `const x`, then `export {x}` should be preserved.
* - If it's declared as `type x`, then `export {x}` should be removed.
* - If it's declared as both `const x` and `type x`, then the export should be preserved.
* - Classes and enums should be preserved (even though they also introduce types).
* - Imported identifiers should be preserved since we don't have enough information to
* rule them out. --isolatedModules disallows re-exports, which catches errors here.
*/
export default function getDeclarationInfo(tokens) {
const typeDeclarations = new Set();
const valueDeclarations = new Set();
for (let i = 0; i < tokens.tokens.length; i++) {
const token = tokens.tokens[i];
if (token.type === tt.name && isTopLevelDeclaration(token)) {
if (token.isType) {
typeDeclarations.add(tokens.identifierNameForToken(token));
} else {
valueDeclarations.add(tokens.identifierNameForToken(token));
}
}
}
return {typeDeclarations, valueDeclarations};
}
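
A hedged illustration of how the rule above classifies a few top-level declarations:

// import {A} from "./a";   ->  A goes to valueDeclarations (imports are kept conservatively)
// const x = 1;             ->  x goes to valueDeclarations
// class C {}               ->  C goes to valueDeclarations
// type T = number;         ->  T goes to typeDeclarations
// interface I {}           ->  I goes to typeDeclarations
//
// So the specifiers in `export {x, C, A}` are preserved, while `export {T, I}` can be elided.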

@@ -0,0 +1,15 @@
import {TokenType as tt} from "../parser/tokenizer/types";
/**
* Get all identifier names in the code, in order, including duplicates.
*/
export default function getIdentifierNames(code, tokens) {
const names = [];
for (const token of tokens) {
if (token.type === tt.name) {
names.push(code.slice(token.start, token.end));
}
}
return names;
}

@@ -0,0 +1,92 @@
import {TokenType as tt} from "../parser/tokenizer/types";
/**
* Determine information about this named import or named export specifier.
*
* This syntax is the `a` from statements like these:
* import {A} from "./foo";
* export {A};
* export {A} from "./foo";
*
* As it turns out, we can exactly characterize the syntax meaning by simply
* counting the number of tokens, which can be from 1 to 4:
* {A}
* {type A}
* {A as B}
* {type A as B}
*
* In the type case, we never actually need the names in practice, so don't get
* them.
*
* TODO: There's some redundancy with the type detection here and the isType
* flag that's already present on tokens in TS mode. This function could
* potentially be simplified and/or pushed to the call sites to avoid the object
* allocation.
*/
export default function getImportExportSpecifierInfo(
tokens,
index = tokens.currentIndex(),
) {
let endIndex = index + 1;
if (isSpecifierEnd(tokens, endIndex)) {
// import {A}
const name = tokens.identifierNameAtIndex(index);
return {
isType: false,
leftName: name,
rightName: name,
endIndex,
};
}
endIndex++;
if (isSpecifierEnd(tokens, endIndex)) {
// import {type A}
return {
isType: true,
leftName: null,
rightName: null,
endIndex,
};
}
endIndex++;
if (isSpecifierEnd(tokens, endIndex)) {
// import {A as B}
return {
isType: false,
leftName: tokens.identifierNameAtIndex(index),
rightName: tokens.identifierNameAtIndex(index + 2),
endIndex,
};
}
endIndex++;
if (isSpecifierEnd(tokens, endIndex)) {
// import {type A as B}
return {
isType: true,
leftName: null,
rightName: null,
endIndex,
};
}
throw new Error(`Unexpected import/export specifier at ${index}`);
}
function isSpecifierEnd(tokens, index) {
const token = tokens.tokens[index];
return token.type === tt.braceR || token.type === tt.comma;
}
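
A hedged sketch of the four possible return shapes, assuming `index` points at the first token inside the braces; `endIndex` always lands on the `,` or `}` that follows the specifier.

// {A}            ->  {isType: false, leftName: "A",  rightName: "A",  endIndex: index + 1}
// {type A}       ->  {isType: true,  leftName: null, rightName: null, endIndex: index + 2}
// {A as B}       ->  {isType: false, leftName: "A",  rightName: "B",  endIndex: index + 3}
// {type A as B}  ->  {isType: true,  leftName: null, rightName: null, endIndex: index + 4}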

node_modules/sucrase/dist/esm/util/getJSXPragmaInfo.js (generated, vendored; 22 lines)

@@ -0,0 +1,22 @@
export default function getJSXPragmaInfo(options) {
const [base, suffix] = splitPragma(options.jsxPragma || "React.createElement");
const [fragmentBase, fragmentSuffix] = splitPragma(options.jsxFragmentPragma || "React.Fragment");
return {base, suffix, fragmentBase, fragmentSuffix};
}
function splitPragma(pragma) {
let dotIndex = pragma.indexOf(".");
if (dotIndex === -1) {
dotIndex = pragma.length;
}
return [pragma.slice(0, dotIndex), pragma.slice(dotIndex)];
}
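
A usage sketch; the deep import path is an assumption (this module is internal, not part of sucrase's public API). splitPragma cuts at the first ".", so the suffix keeps its leading dot, or is "" when there is no dot.

import getJSXPragmaInfo from "sucrase/dist/esm/util/getJSXPragmaInfo.js";

getJSXPragmaInfo({jsxPragma: "preact.h"});
// -> {base: "preact", suffix: ".h", fragmentBase: "React", fragmentSuffix: ".Fragment"}
getJSXPragmaInfo({jsxPragma: "h", jsxFragmentPragma: "Fragment"});
// -> {base: "h", suffix: "", fragmentBase: "Fragment", fragmentSuffix: ""}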

@@ -0,0 +1,43 @@
import {IdentifierRole} from "../parser/tokenizer";
import {TokenType, TokenType as tt} from "../parser/tokenizer/types";
import {startsWithLowerCase} from "../transformers/JSXTransformer";
import getJSXPragmaInfo from "./getJSXPragmaInfo";
export function getNonTypeIdentifiers(tokens, options) {
const jsxPragmaInfo = getJSXPragmaInfo(options);
const nonTypeIdentifiers = new Set();
for (let i = 0; i < tokens.tokens.length; i++) {
const token = tokens.tokens[i];
if (
token.type === tt.name &&
!token.isType &&
(token.identifierRole === IdentifierRole.Access ||
token.identifierRole === IdentifierRole.ObjectShorthand ||
token.identifierRole === IdentifierRole.ExportAccess) &&
!token.shadowsGlobal
) {
nonTypeIdentifiers.add(tokens.identifierNameForToken(token));
}
if (token.type === tt.jsxTagStart) {
nonTypeIdentifiers.add(jsxPragmaInfo.base);
}
if (
token.type === tt.jsxTagStart &&
i + 1 < tokens.tokens.length &&
tokens.tokens[i + 1].type === tt.jsxTagEnd
) {
nonTypeIdentifiers.add(jsxPragmaInfo.base);
nonTypeIdentifiers.add(jsxPragmaInfo.fragmentBase);
}
if (token.type === tt.jsxName && token.identifierRole === IdentifierRole.Access) {
const identifierName = tokens.identifierNameForToken(token);
// Lower-case single-component tag names like "div" don't count.
if (!startsWithLowerCase(identifierName) || tokens.tokens[i + 1].type === TokenType.dot) {
nonTypeIdentifiers.add(tokens.identifierNameForToken(token));
}
}
}
return nonTypeIdentifiers;
}
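
A hedged illustration of which names land in the set for a small JSX module (default pragma "React.createElement" assumed):

// const el = <Widget title={label} />;  ->  adds "React" (pragma base), "Widget", "label"
// const frag = <>{items}</>;            ->  adds "React" (base and fragment base) and "items"
// <div />                               ->  adds "React" only; lower-case tags like "div" don't count
// let x: SomeType;                      ->  adds nothing: SomeType appears only in a type position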

@@ -0,0 +1,84 @@
import {TokenType as tt} from "../parser/tokenizer/types";
import getImportExportSpecifierInfo from "./getImportExportSpecifierInfo";
/**
* Special case code to scan for imported names in ESM TypeScript. We need to do this so we can
* properly get globals so we can compute shadowed globals.
*
* This is similar to logic in CJSImportProcessor, but trimmed down to avoid logic with CJS
* replacement and flow type imports.
*/
export default function getTSImportedNames(tokens) {
const importedNames = new Set();
for (let i = 0; i < tokens.tokens.length; i++) {
if (
tokens.matches1AtIndex(i, tt._import) &&
!tokens.matches3AtIndex(i, tt._import, tt.name, tt.eq)
) {
collectNamesForImport(tokens, i, importedNames);
}
}
return importedNames;
}
function collectNamesForImport(
tokens,
index,
importedNames,
) {
index++;
if (tokens.matches1AtIndex(index, tt.parenL)) {
// Dynamic import, so nothing to do
return;
}
if (tokens.matches1AtIndex(index, tt.name)) {
importedNames.add(tokens.identifierNameAtIndex(index));
index++;
if (tokens.matches1AtIndex(index, tt.comma)) {
index++;
}
}
if (tokens.matches1AtIndex(index, tt.star)) {
// * as
index += 2;
importedNames.add(tokens.identifierNameAtIndex(index));
index++;
}
if (tokens.matches1AtIndex(index, tt.braceL)) {
index++;
collectNamesForNamedImport(tokens, index, importedNames);
}
}
function collectNamesForNamedImport(
tokens,
index,
importedNames,
) {
while (true) {
if (tokens.matches1AtIndex(index, tt.braceR)) {
return;
}
const specifierInfo = getImportExportSpecifierInfo(tokens, index);
index = specifierInfo.endIndex;
if (!specifierInfo.isType) {
importedNames.add(specifierInfo.rightName);
}
if (tokens.matches2AtIndex(index, tt.comma, tt.braceR)) {
return;
} else if (tokens.matches1AtIndex(index, tt.braceR)) {
return;
} else if (tokens.matches1AtIndex(index, tt.comma)) {
index++;
} else {
throw new Error(`Unexpected token: ${JSON.stringify(tokens.tokens[index])}`);
}
}
}
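
A hedged summary of which names the scan above collects for various import forms:

// import A from "./a";                          ->  A
// import * as NS from "./ns";                   ->  NS
// import B, {C, D as E, type F} from "./rest";  ->  B, C, E  (type-only specifiers are skipped)
// import("./dynamic");                          ->  nothing (dynamic import)
// import A = require("./a");                    ->  nothing (import-equals is excluded above)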

node_modules/sucrase/dist/esm/util/isAsyncOperation.js (generated, vendored; 38 lines)

@@ -0,0 +1,38 @@
import {ContextualKeyword} from "../parser/tokenizer/keywords";
/**
* Determine whether this optional chain or nullish coalescing operation has any await statements in
* it. If so, we'll need to transpile to an async operation.
*
* We compute this by walking the length of the operation and returning true if we see an await
* keyword used as a real await (rather than an object key or property access). Nested optional
* chain/nullish operations need to be tracked but don't silence await, but a nested async function
* (or any other nested scope) will make the await not count.
*/
export default function isAsyncOperation(tokens) {
let index = tokens.currentIndex();
let depth = 0;
const startToken = tokens.currentToken();
do {
const token = tokens.tokens[index];
if (token.isOptionalChainStart) {
depth++;
}
if (token.isOptionalChainEnd) {
depth--;
}
depth += token.numNullishCoalesceStarts;
depth -= token.numNullishCoalesceEnds;
if (
token.contextualKeyword === ContextualKeyword._await &&
token.identifierRole == null &&
token.scopeDepth === startToken.scopeDepth
) {
return true;
}
index += 1;
} while (depth > 0 && index < tokens.tokens.length);
return false;
}
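
A hedged illustration of the rule: an await only counts when it sits at the same scope depth as the start of the operation.

// a?.b(await c)              ->  true   (real await at the same scope depth)
// x ?? (await y())           ->  true
// a?.b(async () => await c)  ->  false  (the await belongs to a nested function scope)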

node_modules/sucrase/dist/esm/util/isExportFrom.js (generated, vendored; 18 lines)

@@ -0,0 +1,18 @@
import {ContextualKeyword} from "../parser/tokenizer/keywords";
import {TokenType as tt} from "../parser/tokenizer/types";
/**
* Starting at `export {`, look ahead and return `true` if this is an
* `export {...} from` statement and `false` if this is a plain multi-export.
*/
export default function isExportFrom(tokens) {
let closeBraceIndex = tokens.currentIndex();
while (!tokens.matches1AtIndex(closeBraceIndex, tt.braceR)) {
closeBraceIndex++;
}
return (
tokens.matchesContextualAtIndex(closeBraceIndex + 1, ContextualKeyword._from) &&
tokens.matches1AtIndex(closeBraceIndex + 2, tt.string)
);
}
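
The two shapes being distinguished, as a hedged illustration:

// export {a, b} from "./other";  ->  true   (`from` plus a module string follows the closing brace)
// export {a, b};                 ->  false  (plain multi-export of local bindings)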

node_modules/sucrase/dist/esm/util/isIdentifier.js (generated, vendored; 81 lines)

@@ -0,0 +1,81 @@
import {IS_IDENTIFIER_CHAR, IS_IDENTIFIER_START} from "../parser/util/identifier";
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar
// Hard-code a list of reserved words rather than trying to use keywords or contextual keywords
// from the parser, since currently there are various exceptions, like `package` being reserved
// but unused and various contextual keywords being reserved. Note that we assume that all code
// compiled by Sucrase is in a module, so strict mode words and await are all considered reserved
// here.
const RESERVED_WORDS = new Set([
// Reserved keywords as of ECMAScript 2015
"break",
"case",
"catch",
"class",
"const",
"continue",
"debugger",
"default",
"delete",
"do",
"else",
"export",
"extends",
"finally",
"for",
"function",
"if",
"import",
"in",
"instanceof",
"new",
"return",
"super",
"switch",
"this",
"throw",
"try",
"typeof",
"var",
"void",
"while",
"with",
"yield",
// Future reserved keywords
"enum",
"implements",
"interface",
"let",
"package",
"private",
"protected",
"public",
"static",
"await",
// Literals that cannot be used as identifiers
"false",
"null",
"true",
]);
/**
* Determine if the given name is a legal variable name.
*
* This is needed when transforming TypeScript enums; if an enum key is a valid
* variable name, it might be referenced later in the enum, so we need to
* declare a variable.
*/
export default function isIdentifier(name) {
if (name.length === 0) {
return false;
}
if (!IS_IDENTIFIER_START[name.charCodeAt(0)]) {
return false;
}
for (let i = 1; i < name.length; i++) {
if (!IS_IDENTIFIER_CHAR[name.charCodeAt(i)]) {
return false;
}
}
return !RESERVED_WORDS.has(name);
}
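
A usage sketch; the deep import path is an assumption (this module is internal, not part of sucrase's public API).

import isIdentifier from "sucrase/dist/esm/util/isIdentifier.js";

isIdentifier("count");        // true
isIdentifier("await");        // false: reserved, since all code is treated as module code
isIdentifier("1up");          // false: identifiers cannot start with a digit
isIdentifier("hello world");  // false: " " is not an identifier character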

@@ -0,0 +1,22 @@
import {ContextualKeyword} from "../parser/tokenizer/keywords";
import {TokenType as tt} from "../parser/tokenizer/types";
/**
* Starting at a potential `with` or (legacy) `assert` token, remove the import
* attributes if they exist.
*/
export function removeMaybeImportAttributes(tokens) {
if (
tokens.matches2(tt._with, tt.braceL) ||
(tokens.matches2(tt.name, tt.braceL) && tokens.matchesContextual(ContextualKeyword._assert))
) {
// assert
tokens.removeToken();
// {
tokens.removeToken();
tokens.removeBalancedCode();
// }
tokens.removeToken();
}
}
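
A hedged end-to-end example via the public API (exact output not verified here): with the CommonJS imports transform, the attribute list is expected to disappear while the import itself is kept.

import {transform} from "sucrase";

const {code} = transform(
  'import config from "./config.json" with {type: "json"};',
  {transforms: ["imports"]},
);
// The `with {type: "json"}` clause (or a legacy `assert {...}`) should not appear in `code`.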

@@ -0,0 +1,38 @@
import {TokenType as tt} from "../parser/tokenizer/types";
/**
* Common method sharing code between CJS and ESM cases, since they're the same here.
*/
export default function shouldElideDefaultExport(
isTypeScriptTransformEnabled,
keepUnusedImports,
tokens,
declarationInfo,
) {
if (!isTypeScriptTransformEnabled || keepUnusedImports) {
return false;
}
const exportToken = tokens.currentToken();
if (exportToken.rhsEndIndex == null) {
throw new Error("Expected non-null rhsEndIndex on export token.");
}
// The export must be of the form `export default a` or `export default a;`.
const numTokens = exportToken.rhsEndIndex - tokens.currentIndex();
if (
numTokens !== 3 &&
!(numTokens === 4 && tokens.matches1AtIndex(exportToken.rhsEndIndex - 1, tt.semi))
) {
return false;
}
const identifierToken = tokens.tokenAtRelativeIndex(2);
if (identifierToken.type !== tt.name) {
return false;
}
const exportedName = tokens.identifierNameForToken(identifierToken);
return (
declarationInfo.typeDeclarations.has(exportedName) &&
!declarationInfo.valueDeclarations.has(exportedName)
);
}
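
A hedged illustration via the public API (behavior assumed for this version): when the exported name is declared only as a type, the whole `export default` statement is expected to be elided.

import {transform} from "sucrase";

const src = 'interface T {}\nexport default T;\n';
console.log(transform(src, {transforms: ["typescript"]}).code);
// `T` appears only in typeDeclarations, so `export default T;` should produce no output.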