Initial commit with Advoware proxy

This commit is contained in:
root
2025-10-19 14:57:07 +00:00
commit 273aa8b549
45771 changed files with 5534555 additions and 0 deletions

View File

@@ -0,0 +1,43 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
/** How to emit recognition errors. */
import { Recognizer } from "./Recognizer";
import { RecognitionException } from "./RecognitionException";
export interface ANTLRErrorListener<TSymbol> {
    /**
     * Upon syntax error, notify any interested parties. This is not how to
     * recover from errors or compute error messages. {@link ANTLRErrorStrategy}
     * specifies how to recover from syntax errors and how to compute error
     * messages. This listener's job is simply to emit a computed message,
     * though it has enough information to create its own message in many cases.
     *
     * The {@link RecognitionException} is non-`undefined` for all syntax errors except
     * when we discover mismatched token errors that we can recover from
     * in-line, without returning from the surrounding rule (via the single
     * token insertion and deletion mechanism).
     *
     * @param recognizer
     *        What parser got the error. From this object, you can access the
     *        context as well as the input stream.
     * @param offendingSymbol
     *        The offending token in the input token stream, unless recognizer
     *        is a lexer (then it's `undefined`). If no viable alternative
     *        error, `e` has token at which we started production for the
     *        decision.
     * @param line
     *        The line number in the input where the error occurred.
     * @param charPositionInLine
     *        The character position within that line where the error occurred.
     * @param msg
     *        The message to emit.
     * @param e
     *        The exception generated by the parser that led to the reporting
     *        of an error. It is `undefined` in the case where the parser was
     *        able to recover in line without exiting the surrounding rule.
     */
    syntaxError?: <T extends TSymbol>(recognizer: Recognizer<T, any>, offendingSymbol: T | undefined, line: number, charPositionInLine: number, msg: string, e: RecognitionException | undefined) => void;
}

View File

@@ -0,0 +1,7 @@
"use strict";
/*!
 * Copyright 2016 The ANTLR Project. All rights reserved.
 * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
 */
// Generated CommonJS output: ANTLRErrorListener is a type-only interface, so
// the compiled module only marks itself as an ES module and exports no values.
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=ANTLRErrorListener.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"ANTLRErrorListener.js","sourceRoot":"","sources":["../../src/ANTLRErrorListener.ts"],"names":[],"mappings":";AAAA;;;GAGG","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:48.7499997-07:00\r\n\r\n/** How to emit recognition errors. */\r\nimport { Recognizer } from \"./Recognizer\";\r\nimport { RecognitionException } from \"./RecognitionException\";\r\n\r\nexport interface ANTLRErrorListener<TSymbol> {\r\n\t/**\r\n\t * Upon syntax error, notify any interested parties. This is not how to\r\n\t * recover from errors or compute error messages. {@link ANTLRErrorStrategy}\r\n\t * specifies how to recover from syntax errors and how to compute error\r\n\t * messages. This listener's job is simply to emit a computed message,\r\n\t * though it has enough information to create its own message in many cases.\r\n\t *\r\n\t * The {@link RecognitionException} is non-`undefined` for all syntax errors except\r\n\t * when we discover mismatched token errors that we can recover from\r\n\t * in-line, without returning from the surrounding rule (via the single\r\n\t * token insertion and deletion mechanism).\r\n\t *\r\n\t * @param recognizer\r\n\t * What parser got the error. From this\r\n\t * \t\t object, you can access the context as well\r\n\t * \t\t as the input stream.\r\n\t * @param offendingSymbol\r\n\t * The offending token in the input token\r\n\t * \t\t stream, unless recognizer is a lexer (then it's `undefined`). 
If\r\n\t * \t\t no viable alternative error, `e` has token at which we\r\n\t * \t\t started production for the decision.\r\n\t * @param line\r\n\t * \t\t The line number in the input where the error occurred.\r\n\t * @param charPositionInLine\r\n\t * \t\t The character position within that line where the error occurred.\r\n\t * @param msg\r\n\t * \t\t The message to emit.\r\n\t * @param e\r\n\t * The exception generated by the parser that led to\r\n\t * the reporting of an error. It is `undefined` in the case where\r\n\t * the parser was able to recover in line without exiting the\r\n\t * surrounding rule.\r\n\t */\r\n\tsyntaxError?: <T extends TSymbol>(\r\n\t\t/*@NotNull*/\r\n\t\trecognizer: Recognizer<T, any>,\r\n\t\toffendingSymbol: T | undefined,\r\n\t\tline: number,\r\n\t\tcharPositionInLine: number,\r\n\t\t/*@NotNull*/\r\n\t\tmsg: string,\r\n\t\te: RecognitionException | undefined) => void;\r\n}\r\n"]}

View File

@@ -0,0 +1,109 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { Parser } from "./Parser";
import { Token } from "./Token";
import { RecognitionException } from "./RecognitionException";
/**
* The interface for defining strategies to deal with syntax errors encountered
* during a parse by ANTLR-generated parsers. We distinguish between three
* different kinds of errors:
*
* * The parser could not figure out which path to take in the ATN (none of
* the available alternatives could possibly match)
* * The current input does not match what we were looking for
* * A predicate evaluated to false
*
* Implementations of this interface report syntax errors by calling
* {@link Parser#notifyErrorListeners}.
*
* TODO: what to do about lexers
*/
export interface ANTLRErrorStrategy {
    /**
     * Reset the error handler state for the specified `recognizer`.
     * @param recognizer the parser instance
     */
    reset(/*@NotNull*/ recognizer: Parser): void;
    /**
     * This method is called when an unexpected symbol is encountered during an
     * inline match operation, such as {@link Parser#match}. If the error
     * strategy successfully recovers from the match failure, this method
     * returns the {@link Token} instance which should be treated as the
     * successful result of the match.
     *
     * This method handles the consumption of any tokens - the caller should
     * *not* call {@link Parser#consume} after a successful recovery.
     *
     * Note that the calling code will not report an error if this method
     * returns successfully. The error strategy implementation is responsible
     * for calling {@link Parser#notifyErrorListeners} as appropriate.
     *
     * @param recognizer the parser instance
     * @throws RecognitionException if the error strategy was not able to
     * recover from the unexpected input symbol
     */
    recoverInline(/*@NotNull*/ recognizer: Parser): Token;
    /**
     * This method is called to recover from exception `e`. This method is
     * called after {@link #reportError} by the default exception handler
     * generated for a rule method.
     *
     * @see #reportError
     *
     * @param recognizer the parser instance
     * @param e the recognition exception to recover from
     * @throws RecognitionException if the error strategy could not recover from
     * the recognition exception
     */
    recover(/*@NotNull*/ recognizer: Parser, /*@NotNull*/ e: RecognitionException): void;
    /**
     * This method provides the error handler with an opportunity to handle
     * syntactic or semantic errors in the input stream before they result in a
     * {@link RecognitionException}.
     *
     * The generated code currently contains calls to {@link #sync} after
     * entering the decision state of a closure block (`(...)*` or
     * `(...)+`).
     *
     * For an implementation based on Jim Idle's "magic sync" mechanism, see
     * {@link DefaultErrorStrategy#sync}.
     *
     * @see DefaultErrorStrategy#sync
     *
     * @param recognizer the parser instance
     * @throws RecognitionException if an error is detected by the error
     * strategy but cannot be automatically recovered at the current state in
     * the parsing process
     */
    sync(/*@NotNull*/ recognizer: Parser): void;
    /**
     * Tests whether or not `recognizer` is in the process of recovering
     * from an error. In error recovery mode, {@link Parser#consume} adds
     * symbols to the parse tree by calling
     * {@link Parser#createErrorNode(ParserRuleContext, Token)} then
     * {@link ParserRuleContext#addErrorNode(ErrorNode)} instead of
     * {@link Parser#createTerminalNode(ParserRuleContext, Token)}.
     *
     * @param recognizer the parser instance
     * @returns `true` if the parser is currently recovering from a parse
     * error, otherwise `false`
     */
    inErrorRecoveryMode(/*@NotNull*/ recognizer: Parser): boolean;
    /**
     * This method is called when the parser successfully matches an input
     * symbol.
     *
     * @param recognizer the parser instance
     */
    reportMatch(/*@NotNull*/ recognizer: Parser): void;
    /**
     * Report any kind of {@link RecognitionException}. This method is called by
     * the default exception handler generated for a rule method.
     *
     * @param recognizer the parser instance
     * @param e the recognition exception to report
     */
    reportError(recognizer: Parser, e: RecognitionException): void;
}

View File

@@ -0,0 +1,7 @@
"use strict";
/*!
 * Copyright 2016 The ANTLR Project. All rights reserved.
 * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
 */
// Generated CommonJS output: ANTLRErrorStrategy is a type-only interface, so
// the compiled module only marks itself as an ES module and exports no values.
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=ANTLRErrorStrategy.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,51 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { CharStream } from "./CharStream";
import { Interval } from "./misc/Interval";
/**
* Vacuum all input from a {@link Reader}/{@link InputStream} and then treat it
* like a `char[]` buffer. Can also pass in a {@link String} or
* `char[]` to use.
*
* If you need encoding, pass in stream/reader with correct encoding.
*
* @deprecated as of 4.7, please use `CharStreams` interface.
*/
export declare class ANTLRInputStream implements CharStream {
    /** The data being scanned */
    protected data: string;
    /** How many characters are actually in the buffer */
    protected n: number;
    /** 0..n-1 index into string of next char */
    protected p: number;
    /** What is name or source of this char stream? */
    name?: string;
    /** Copy data in string to a local char array */
    constructor(input: string);
    /** Reset the stream so that it's in the same state it was
     * when the object was created *except* the data array is not
     * touched.
     */
    reset(): void;
    /** Advance past the current character; throws once EOF is reached. */
    consume(): void;
    /** Look ahead (i > 0) or behind (i < 0); returns a char code or EOF. */
    LA(i: number): number;
    /** Alias for {@link LA}; char streams have no token-level lookahead. */
    LT(i: number): number;
    /** Return the current input symbol index 0..n where n indicates the
     * last symbol has been read. The index is the index of char to
     * be returned from LA(1).
     */
    get index(): number;
    get size(): number;
    /** mark/release do nothing; we have entire buffer */
    mark(): number;
    release(marker: number): void;
    /** consume() ahead until p==index; can't just set p=index as we must
     * update line and charPositionInLine. If we seek backwards, just set p
     */
    seek(index: number): void;
    /** Return the text covered by `interval` (inclusive endpoints). */
    getText(interval: Interval): string;
    get sourceName(): string;
    toString(): string;
}

View File

@@ -0,0 +1,161 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
// ConvertTo-TS run at 2016-10-04T11:26:49.0828748-07:00
// TypeScript emit helper: applies an array of decorators to a class member
// (or class), delegating to Reflect.decorate when available. Generated by
// tsc; do not edit by hand.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.ANTLRInputStream = void 0;
const assert = require("assert");
const Decorators_1 = require("./Decorators");
const IntStream_1 = require("./IntStream");
// NOTE(review): these buffer-size constants appear to be carried over from the
// Java reader-based implementation; nothing in this string-backed port reads
// them — confirm before removing.
const READ_BUFFER_SIZE = 1024;
const INITIAL_BUFFER_SIZE = 1024;
/**
* Vacuum all input from a {@link Reader}/{@link InputStream} and then treat it
* like a `char[]` buffer. Can also pass in a {@link String} or
* `char[]` to use.
*
* If you need encoding, pass in stream/reader with correct encoding.
*
* @deprecated as of 4.7, please use `CharStreams` interface.
*/
class ANTLRInputStream {
    /** Copy data in string to a local char array */
    constructor(input) {
        /** 0..n-1 index into string of next char */
        this.p = 0;
        this.data = input;
        this.n = input.length;
    }
    /** Reset the stream so that it's in the same state it was
     * when the object was created *except* the data array is not
     * touched.
     */
    reset() {
        this.p = 0;
    }
    /** Advance past the current character. Throws when already at EOF
     * (p >= n), matching IntStream's contract that EOF cannot be consumed.
     */
    consume() {
        if (this.p >= this.n) {
            assert(this.LA(1) === IntStream_1.IntStream.EOF);
            throw new Error("cannot consume EOF");
        }
        //System.out.println("prev p="+p+", c="+(char)data[p]);
        if (this.p < this.n) {
            this.p++;
            //System.out.println("p moves to "+p+" (c='"+(char)data[p]+"')");
        }
    }
    /** Look ahead (i > 0) or behind (i < 0) relative to the current position
     * and return the char code at that offset, or {@link IntStream#EOF} when
     * the offset falls outside the buffer. `LA(0)` is undefined and returns 0.
     */
    LA(i) {
        if (i === 0) {
            return 0; // undefined
        }
        if (i < 0) {
            i++; // e.g., translate LA(-1) to use offset i=0; then data[p+0-1]
            if ((this.p + i - 1) < 0) {
                return IntStream_1.IntStream.EOF; // invalid; no char before first char
            }
        }
        if ((this.p + i - 1) >= this.n) {
            //System.out.println("char LA("+i+")=EOF; p="+p);
            return IntStream_1.IntStream.EOF;
        }
        //System.out.println("char LA("+i+")="+(char)data[p+i-1]+"; p="+p);
        //System.out.println("LA("+i+"); p="+p+" n="+n+" data.length="+data.length);
        return this.data.charCodeAt(this.p + i - 1);
    }
    /** Alias for {@link LA}; char streams have no token-level lookahead. */
    LT(i) {
        return this.LA(i);
    }
    /** Return the current input symbol index 0..n where n indicates the
     * last symbol has been read. The index is the index of char to
     * be returned from LA(1).
     */
    get index() {
        return this.p;
    }
    get size() {
        return this.n;
    }
    /** mark/release do nothing; we have entire buffer */
    mark() {
        return -1;
    }
    release(marker) {
        // No default implementation since this stream buffers the entire input
    }
    /** consume() ahead until p==index; can't just set p=index as we must
     * update line and charPositionInLine. If we seek backwards, just set p
     */
    seek(index) {
        if (index <= this.p) {
            this.p = index; // just jump; don't update stream state (line, ...)
            return;
        }
        // seek forward, consume until p hits index or n (whichever comes first)
        index = Math.min(index, this.n);
        while (this.p < index) {
            this.consume();
        }
    }
    /** Return the text covered by `interval` (endpoints inclusive). A stop
     * index past the end is clamped to the last character; a start index at
     * or past the end yields the empty string.
     */
    getText(interval) {
        let start = interval.a;
        let stop = interval.b;
        if (stop >= this.n) {
            stop = this.n - 1;
        }
        if (start >= this.n) {
            return "";
        }
        // String.prototype.substr is deprecated; substring(start, stop + 1)
        // yields the same "start..stop inclusive" slice for in-range indices.
        return this.data.substring(start, stop + 1);
    }
    get sourceName() {
        if (!this.name) {
            return IntStream_1.IntStream.UNKNOWN_SOURCE_NAME;
        }
        return this.name;
    }
    toString() { return this.data; }
}
// Generated decorator applications: tsc emits one __decorate call per method
// that carried the @Override marker decorator in the original TypeScript.
__decorate([
    Decorators_1.Override
], ANTLRInputStream.prototype, "consume", null);
__decorate([
    Decorators_1.Override
], ANTLRInputStream.prototype, "LA", null);
__decorate([
    Decorators_1.Override
], ANTLRInputStream.prototype, "index", null);
__decorate([
    Decorators_1.Override
], ANTLRInputStream.prototype, "size", null);
__decorate([
    Decorators_1.Override
], ANTLRInputStream.prototype, "mark", null);
__decorate([
    Decorators_1.Override
], ANTLRInputStream.prototype, "release", null);
__decorate([
    Decorators_1.Override
], ANTLRInputStream.prototype, "seek", null);
__decorate([
    Decorators_1.Override
], ANTLRInputStream.prototype, "getText", null);
__decorate([
    Decorators_1.Override
], ANTLRInputStream.prototype, "sourceName", null);
__decorate([
    Decorators_1.Override
], ANTLRInputStream.prototype, "toString", null);
exports.ANTLRInputStream = ANTLRInputStream;
//# sourceMappingURL=ANTLRInputStream.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,48 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { DefaultErrorStrategy } from "./DefaultErrorStrategy";
import { Parser } from "./Parser";
import { RecognitionException } from "./RecognitionException";
import { Token } from "./Token";
/**
* This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
* by immediately canceling the parse operation with a
* {@link ParseCancellationException}. The implementation ensures that the
* {@link ParserRuleContext#exception} field is set for all parse tree nodes
* that were not completed prior to encountering the error.
*
* This error strategy is useful in the following scenarios.
*
* * **Two-stage parsing:** This error strategy allows the first
* stage of two-stage parsing to immediately terminate if an error is
* encountered, and immediately fall back to the second stage. In addition to
* avoiding wasted work by attempting to recover from errors here, the empty
* implementation of {@link BailErrorStrategy#sync} improves the performance of
* the first stage.
* * **Silent validation:** When syntax errors are not being
* reported or logged, and the parse result is simply ignored if errors occur,
* the {@link BailErrorStrategy} avoids wasting work on recovering from errors
* when the result will be ignored either way.
*
* ```
* myparser.errorHandler = new BailErrorStrategy();
* ```
*
* @see Parser.errorHandler
*/
export declare class BailErrorStrategy extends DefaultErrorStrategy {
    /** Instead of recovering from exception `e`, re-throw it wrapped
     * in a {@link ParseCancellationException} so it is not caught by the
     * rule function catches. Use {@link Exception#getCause()} to get the
     * original {@link RecognitionException}.
     */
    recover(recognizer: Parser, e: RecognitionException): void;
    /** Make sure we don't attempt to recover inline; if the parser
     * successfully recovers, it won't throw an exception.
     * @throws ParseCancellationException always (wrapping an InputMismatchException)
     */
    recoverInline(recognizer: Parser): Token;
    /** Make sure we don't attempt to recover from problems in subrules. */
    sync(recognizer: Parser): void;
}

View File

@@ -0,0 +1,82 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
// TypeScript emit helper: applies an array of decorators to a class member
// (or class), delegating to Reflect.decorate when available. Generated by
// tsc; do not edit by hand.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.BailErrorStrategy = void 0;
// ConvertTo-TS run at 2016-10-04T11:26:49.2855056-07:00
const DefaultErrorStrategy_1 = require("./DefaultErrorStrategy");
const InputMismatchException_1 = require("./InputMismatchException");
const Decorators_1 = require("./Decorators");
const ParseCancellationException_1 = require("./misc/ParseCancellationException");
/**
* This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
* by immediately canceling the parse operation with a
* {@link ParseCancellationException}. The implementation ensures that the
* {@link ParserRuleContext#exception} field is set for all parse tree nodes
* that were not completed prior to encountering the error.
*
* This error strategy is useful in the following scenarios.
*
* * **Two-stage parsing:** This error strategy allows the first
* stage of two-stage parsing to immediately terminate if an error is
* encountered, and immediately fall back to the second stage. In addition to
* avoiding wasted work by attempting to recover from errors here, the empty
* implementation of {@link BailErrorStrategy#sync} improves the performance of
* the first stage.
* * **Silent validation:** When syntax errors are not being
* reported or logged, and the parse result is simply ignored if errors occur,
* the {@link BailErrorStrategy} avoids wasting work on recovering from errors
* when the result will be ignored either way.
*
* ```
* myparser.errorHandler = new BailErrorStrategy();
* ```
*
* @see Parser.errorHandler
*/
class BailErrorStrategy extends DefaultErrorStrategy_1.DefaultErrorStrategy {
    /** Instead of recovering from exception `e`, tag every rule context on
     * the current parse stack with the exception, then re-throw it wrapped
     * in a {@link ParseCancellationException} so it is not caught by the
     * rule function catches. Use {@link Exception#getCause()} to get the
     * original {@link RecognitionException}.
     */
    recover(recognizer, e) {
        let ctx = recognizer.context;
        while (ctx) {
            ctx.exception = e;
            ctx = ctx.parent;
        }
        throw new ParseCancellationException_1.ParseCancellationException(e);
    }
    /** Make sure we don't attempt to recover inline: raise an
     * {@link InputMismatchException} immediately (wrapped in a
     * ParseCancellationException), tagging the rule contexts on the way out.
     * If the parser successfully recovers, it won't throw an exception.
     */
    recoverInline(recognizer) {
        const mismatch = new InputMismatchException_1.InputMismatchException(recognizer);
        let ctx = recognizer.context;
        while (ctx) {
            ctx.exception = mismatch;
            ctx = ctx.parent;
        }
        throw new ParseCancellationException_1.ParseCancellationException(mismatch);
    }
    /** Make sure we don't attempt to recover from problems in subrules. */
    sync(recognizer) {
        // intentionally empty
    }
}
// Generated decorator applications for the @Override markers on the original
// TypeScript methods.
__decorate([
    Decorators_1.Override
], BailErrorStrategy.prototype, "recover", null);
__decorate([
    Decorators_1.Override
], BailErrorStrategy.prototype, "recoverInline", null);
__decorate([
    Decorators_1.Override
], BailErrorStrategy.prototype, "sync", null);
exports.BailErrorStrategy = BailErrorStrategy;
//# sourceMappingURL=BailErrorStrategy.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"BailErrorStrategy.js","sourceRoot":"","sources":["../../src/BailErrorStrategy.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;;;;;;;AAEH,wDAAwD;AAExD,iEAA8D;AAE9D,qEAAkE;AAClE,6CAAwC;AACxC,kFAA+E;AAK/E;;;;;;;;;;;;;;;;;;;;;;;;;GAyBG;AACH,MAAa,iBAAkB,SAAQ,2CAAoB;IAC1D;;;;OAIG;IAEI,OAAO,CAAC,UAAkB,EAAE,CAAuB;QACzD,KAAK,IAAI,OAAO,GAAkC,UAAU,CAAC,OAAO,EAAE,OAAO,EAAE,OAAO,GAAG,OAAO,CAAC,MAAM,EAAE;YACxG,OAAO,CAAC,SAAS,GAAG,CAAC,CAAC;SACtB;QAED,MAAM,IAAI,uDAA0B,CAAC,CAAC,CAAC,CAAC;IACzC,CAAC;IAED;;OAEG;IAEI,aAAa,CAAC,UAAkB;QACtC,IAAI,CAAC,GAAG,IAAI,+CAAsB,CAAC,UAAU,CAAC,CAAC;QAC/C,KAAK,IAAI,OAAO,GAAkC,UAAU,CAAC,OAAO,EAAE,OAAO,EAAE,OAAO,GAAG,OAAO,CAAC,MAAM,EAAE;YACxG,OAAO,CAAC,SAAS,GAAG,CAAC,CAAC;SACtB;QAED,MAAM,IAAI,uDAA0B,CAAC,CAAC,CAAC,CAAC;IACzC,CAAC;IAED,uEAAuE;IAEhE,IAAI,CAAC,UAAkB;QAC7B,sBAAsB;IACvB,CAAC;CACD;AA1BA;IADC,qBAAQ;gDAOR;AAMD;IADC,qBAAQ;sDAQR;AAID;IADC,qBAAQ;6CAGR;AAhCF,8CAiCC","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:49.2855056-07:00\r\n\r\nimport { DefaultErrorStrategy } from \"./DefaultErrorStrategy\";\r\nimport { Parser } from \"./Parser\";\r\nimport { InputMismatchException } from \"./InputMismatchException\";\r\nimport { Override } from \"./Decorators\";\r\nimport { ParseCancellationException } from \"./misc/ParseCancellationException\";\r\nimport { ParserRuleContext } from \"./ParserRuleContext\";\r\nimport { RecognitionException } from \"./RecognitionException\";\r\nimport { Token } from \"./Token\";\r\n\r\n/**\r\n * This implementation of {@link ANTLRErrorStrategy} responds to syntax errors\r\n * by immediately canceling the parse operation with a\r\n * {@link ParseCancellationException}. 
The implementation ensures that the\r\n * {@link ParserRuleContext#exception} field is set for all parse tree nodes\r\n * that were not completed prior to encountering the error.\r\n *\r\n * This error strategy is useful in the following scenarios.\r\n *\r\n * * **Two-stage parsing:** This error strategy allows the first\r\n * stage of two-stage parsing to immediately terminate if an error is\r\n * encountered, and immediately fall back to the second stage. In addition to\r\n * avoiding wasted work by attempting to recover from errors here, the empty\r\n * implementation of {@link BailErrorStrategy#sync} improves the performance of\r\n * the first stage.\r\n * * **Silent validation:** When syntax errors are not being\r\n * reported or logged, and the parse result is simply ignored if errors occur,\r\n * the {@link BailErrorStrategy} avoids wasting work on recovering from errors\r\n * when the result will be ignored either way.\r\n *\r\n * ```\r\n * myparser.errorHandler = new BailErrorStrategy();\r\n * ```\r\n *\r\n * @see Parser.errorHandler\r\n */\r\nexport class BailErrorStrategy extends DefaultErrorStrategy {\r\n\t/** Instead of recovering from exception `e`, re-throw it wrapped\r\n\t * in a {@link ParseCancellationException} so it is not caught by the\r\n\t * rule function catches. 
Use {@link Exception#getCause()} to get the\r\n\t * original {@link RecognitionException}.\r\n\t */\r\n\t@Override\r\n\tpublic recover(recognizer: Parser, e: RecognitionException): void {\r\n\t\tfor (let context: ParserRuleContext | undefined = recognizer.context; context; context = context.parent) {\r\n\t\t\tcontext.exception = e;\r\n\t\t}\r\n\r\n\t\tthrow new ParseCancellationException(e);\r\n\t}\r\n\r\n\t/** Make sure we don't attempt to recover inline; if the parser\r\n\t * successfully recovers, it won't throw an exception.\r\n\t */\r\n\t@Override\r\n\tpublic recoverInline(recognizer: Parser): Token {\r\n\t\tlet e = new InputMismatchException(recognizer);\r\n\t\tfor (let context: ParserRuleContext | undefined = recognizer.context; context; context = context.parent) {\r\n\t\t\tcontext.exception = e;\r\n\t\t}\r\n\r\n\t\tthrow new ParseCancellationException(e);\r\n\t}\r\n\r\n\t/** Make sure we don't attempt to recover from problems in subrules. */\r\n\t@Override\r\n\tpublic sync(recognizer: Parser): void {\r\n\t\t// intentionally empty\r\n\t}\r\n}\r\n"]}

View File

@@ -0,0 +1,143 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { Interval } from "./misc/Interval";
import { RuleContext } from "./RuleContext";
import { Token } from "./Token";
import { TokenSource } from "./TokenSource";
import { TokenStream } from "./TokenStream";
/**
* This implementation of {@link TokenStream} loads tokens from a
* {@link TokenSource} on-demand, and places the tokens in a buffer to provide
* access to any previous token by index.
*
 * This token stream ignores the value of {@link Token#getChannel}. If your
 * parser requires the token stream to filter tokens to only those on a
 * particular channel, such as {@link Token#DEFAULT_CHANNEL} or
 * {@link Token#HIDDEN_CHANNEL}, use a filtering token stream such as
 * {@link CommonTokenStream}.
*/
export declare class BufferedTokenStream implements TokenStream {
    /**
     * The {@link TokenSource} from which tokens for this stream are fetched.
     */
    private _tokenSource;
    /**
     * A collection of all tokens fetched from the token source. The list is
     * considered a complete view of the input once {@link #fetchedEOF} is set
     * to `true`.
     */
    protected tokens: Token[];
    /**
     * The index into {@link #tokens} of the current token (next token to
     * {@link #consume}). {@link #tokens}`[`{@link #p}`]` should be
     * {@link #LT LT(1)}.
     *
     * This field is set to -1 when the stream is first constructed or when
     * {@link #setTokenSource} is called, indicating that the first token has
     * not yet been fetched from the token source. For additional information,
     * see the documentation of {@link IntStream} for a description of
     * Initializing Methods.
     */
    protected p: number;
    /**
     * Indicates whether the {@link Token#EOF} token has been fetched from
     * {@link #tokenSource} and added to {@link #tokens}. This field improves
     * performance for the following cases:
     *
     * * {@link #consume}: The lookahead check in {@link #consume} to prevent
     * consuming the EOF symbol is optimized by checking the values of
     * {@link #fetchedEOF} and {@link #p} instead of calling {@link #LA}.
     * * {@link #fetch}: The check to prevent adding multiple EOF symbols into
     * {@link #tokens} is trivial with this field.
     */
    protected fetchedEOF: boolean;
    constructor(tokenSource: TokenSource);
    get tokenSource(): TokenSource;
    /** Reset this token stream by setting its token source. */
    set tokenSource(tokenSource: TokenSource);
    get index(): number;
    mark(): number;
    release(marker: number): void;
    seek(index: number): void;
    get size(): number;
    consume(): void;
    /** Make sure index `i` in tokens has a token.
     *
     * @returns `true` if a token is located at index `i`, otherwise
     * `false`.
     * @see #get(int i)
     */
    protected sync(i: number): boolean;
    /** Add `n` elements to buffer.
     *
     * @returns The actual number of elements added to the buffer.
     */
    protected fetch(n: number): number;
    get(i: number): Token;
    /** Get all tokens from start..stop inclusively. */
    getRange(start: number, stop: number): Token[];
    LA(i: number): number;
    protected tryLB(k: number): Token | undefined;
    LT(k: number): Token;
    tryLT(k: number): Token | undefined;
    /**
     * Allows derived classes to modify the behavior of operations which change
     * the current stream position by adjusting the target token index of a seek
     * operation. The default implementation simply returns `i`. If an
     * exception is thrown in this method, the current stream index should not be
     * changed.
     *
     * For example, {@link CommonTokenStream} overrides this method to ensure that
     * the seek target is always an on-channel token.
     *
     * @param i The target token index.
     * @returns The adjusted target token index.
     */
    protected adjustSeekIndex(i: number): number;
    protected lazyInit(): void;
    protected setup(): void;
    getTokens(): Token[];
    getTokens(start: number, stop: number): Token[];
    getTokens(start: number, stop: number, types: Set<number>): Token[];
    getTokens(start: number, stop: number, ttype: number): Token[];
    /**
     * Given a starting index, return the index of the next token on channel.
     * Return `i` if `tokens[i]` is on channel. Return the index of
     * the EOF token if there are no tokens on channel between `i` and
     * EOF.
     */
    protected nextTokenOnChannel(i: number, channel: number): number;
    /**
     * Given a starting index, return the index of the previous token on
     * channel. Return `i` if `tokens[i]` is on channel. Return -1
     * if there are no tokens on channel between `i` and 0.
     *
     * If `i` specifies an index at or after the EOF token, the EOF token
     * index is returned. This is due to the fact that the EOF token is treated
     * as though it were on every channel.
     */
    protected previousTokenOnChannel(i: number, channel: number): number;
    /** Collect all tokens on specified channel to the right of
     * the current token up until we see a token on {@link Lexer#DEFAULT_TOKEN_CHANNEL} or
     * EOF. If `channel` is `-1`, find any non-default channel token.
     */
    getHiddenTokensToRight(tokenIndex: number, channel?: number): Token[];
    /** Collect all tokens on specified channel to the left of
     * the current token up until we see a token on {@link Lexer#DEFAULT_TOKEN_CHANNEL}.
     * If `channel` is `-1`, find any non-default channel token.
     */
    getHiddenTokensToLeft(tokenIndex: number, channel?: number): Token[];
    protected filterForChannel(from: number, to: number, channel: number): Token[];
    get sourceName(): string;
    /** Get the text of all tokens in this buffer. */
    getText(): string;
    getText(interval: Interval): string;
    getText(context: RuleContext): string;
    getTextFromRange(start: any, stop: any): string;
    /** Get all tokens from lexer until EOF. */
    fill(): void;
    private isWritableToken;
    private isToken;
}

View File

@@ -0,0 +1,489 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
// TypeScript emit helper: applies an array of decorators to a class, member,
// or property, delegating to Reflect.decorate when the Metadata API is present.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
// TypeScript emit helper: wraps a parameter decorator so __decorate can apply
// it with the parameter index bound.
var __param = (this && this.__param) || function (paramIndex, decorator) {
    return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.BufferedTokenStream = void 0;
// ConvertTo-TS run at 2016-10-04T11:26:49.6074365-07:00
const assert = require("assert");
const CommonToken_1 = require("./CommonToken");
const Interval_1 = require("./misc/Interval");
const Lexer_1 = require("./Lexer");
const Decorators_1 = require("./Decorators");
const Token_1 = require("./Token");
/**
* This implementation of {@link TokenStream} loads tokens from a
* {@link TokenSource} on-demand, and places the tokens in a buffer to provide
* access to any previous token by index.
*
* This token stream ignores the value of {@link Token#getChannel}. If your
* parser requires the token stream filter tokens to only those on a particular
* channel, such as {@link Token#DEFAULT_CHANNEL} or
* {@link Token#HIDDEN_CHANNEL}, use a filtering token stream such a
* {@link CommonTokenStream}.
*/
let BufferedTokenStream = class BufferedTokenStream {
constructor(tokenSource) {
/**
* A collection of all tokens fetched from the token source. The list is
* considered a complete view of the input once {@link #fetchedEOF} is set
* to `true`.
*/
this.tokens = [];
/**
* The index into {@link #tokens} of the current token (next token to
* {@link #consume}). {@link #tokens}`[`{@link #p}`]` should be
* {@link #LT LT(1)}.
*
* This field is set to -1 when the stream is first constructed or when
* {@link #setTokenSource} is called, indicating that the first token has
* not yet been fetched from the token source. For additional information,
* see the documentation of {@link IntStream} for a description of
* Initializing Methods.
*/
this.p = -1;
/**
* Indicates whether the {@link Token#EOF} token has been fetched from
* {@link #tokenSource} and added to {@link #tokens}. This field improves
* performance for the following cases:
*
* * {@link #consume}: The lookahead check in {@link #consume} to prevent
* consuming the EOF symbol is optimized by checking the values of
* {@link #fetchedEOF} and {@link #p} instead of calling {@link #LA}.
* * {@link #fetch}: The check to prevent adding multiple EOF symbols into
* {@link #tokens} is trivial with this field.
*/
this.fetchedEOF = false;
if (tokenSource == null) {
throw new Error("tokenSource cannot be null");
}
this._tokenSource = tokenSource;
}
get tokenSource() {
return this._tokenSource;
}
/** Reset this token stream by setting its token source. */
set tokenSource(tokenSource) {
this._tokenSource = tokenSource;
this.tokens.length = 0;
this.p = -1;
this.fetchedEOF = false;
}
get index() {
return this.p;
}
mark() {
return 0;
}
release(marker) {
// no resources to release
}
seek(index) {
this.lazyInit();
this.p = this.adjustSeekIndex(index);
}
get size() {
return this.tokens.length;
}
consume() {
let skipEofCheck;
if (this.p >= 0) {
if (this.fetchedEOF) {
// the last token in tokens is EOF. skip check if p indexes any
// fetched token except the last.
skipEofCheck = this.p < this.tokens.length - 1;
}
else {
// no EOF token in tokens. skip check if p indexes a fetched token.
skipEofCheck = this.p < this.tokens.length;
}
}
else {
// not yet initialized
skipEofCheck = false;
}
if (!skipEofCheck && this.LA(1) === Token_1.Token.EOF) {
throw new Error("cannot consume EOF");
}
if (this.sync(this.p + 1)) {
this.p = this.adjustSeekIndex(this.p + 1);
}
}
/** Make sure index `i` in tokens has a token.
*
* @returns `true` if a token is located at index `i`, otherwise
* `false`.
* @see #get(int i)
*/
sync(i) {
assert(i >= 0);
let n = i - this.tokens.length + 1; // how many more elements we need?
//System.out.println("sync("+i+") needs "+n);
if (n > 0) {
let fetched = this.fetch(n);
return fetched >= n;
}
return true;
}
/** Add `n` elements to buffer.
*
* @returns The actual number of elements added to the buffer.
*/
fetch(n) {
if (this.fetchedEOF) {
return 0;
}
for (let i = 0; i < n; i++) {
let t = this.tokenSource.nextToken();
if (this.isWritableToken(t)) {
t.tokenIndex = this.tokens.length;
}
this.tokens.push(t);
if (t.type === Token_1.Token.EOF) {
this.fetchedEOF = true;
return i + 1;
}
}
return n;
}
get(i) {
if (i < 0 || i >= this.tokens.length) {
throw new RangeError("token index " + i + " out of range 0.." + (this.tokens.length - 1));
}
return this.tokens[i];
}
/** Get all tokens from start..stop inclusively. */
getRange(start, stop) {
if (start < 0 || stop < 0) {
return [];
}
this.lazyInit();
let subset = new Array();
if (stop >= this.tokens.length) {
stop = this.tokens.length - 1;
}
for (let i = start; i <= stop; i++) {
let t = this.tokens[i];
if (t.type === Token_1.Token.EOF) {
break;
}
subset.push(t);
}
return subset;
}
LA(i) {
let token = this.LT(i);
if (!token) {
return Token_1.Token.INVALID_TYPE;
}
return token.type;
}
tryLB(k) {
if ((this.p - k) < 0) {
return undefined;
}
return this.tokens[this.p - k];
}
LT(k) {
let result = this.tryLT(k);
if (result === undefined) {
throw new RangeError("requested lookback index out of range");
}
return result;
}
tryLT(k) {
this.lazyInit();
if (k === 0) {
throw new RangeError("0 is not a valid lookahead index");
}
if (k < 0) {
return this.tryLB(-k);
}
let i = this.p + k - 1;
this.sync(i);
if (i >= this.tokens.length) {
// return EOF token
// EOF must be last token
return this.tokens[this.tokens.length - 1];
}
// if ( i>range ) range = i;
return this.tokens[i];
}
/**
* Allowed derived classes to modify the behavior of operations which change
* the current stream position by adjusting the target token index of a seek
* operation. The default implementation simply returns `i`. If an
* exception is thrown in this method, the current stream index should not be
* changed.
*
* For example, {@link CommonTokenStream} overrides this method to ensure that
* the seek target is always an on-channel token.
*
* @param i The target token index.
* @returns The adjusted target token index.
*/
adjustSeekIndex(i) {
return i;
}
lazyInit() {
if (this.p === -1) {
this.setup();
}
}
setup() {
this.sync(0);
this.p = this.adjustSeekIndex(0);
}
/** Given a start and stop index, return a `List` of all tokens in
* the token type `BitSet`. Return an empty array if no tokens were found. This
* method looks at both on and off channel tokens.
*/
getTokens(start, stop, types) {
this.lazyInit();
if (start === undefined) {
assert(stop === undefined && types === undefined);
return this.tokens;
}
else if (stop === undefined) {
stop = this.tokens.length - 1;
}
if (start < 0 || stop >= this.tokens.length || stop < 0 || start >= this.tokens.length) {
throw new RangeError("start " + start + " or stop " + stop + " not in 0.." + (this.tokens.length - 1));
}
if (start > stop) {
return [];
}
if (types === undefined) {
return this.tokens.slice(start, stop + 1);
}
else if (typeof types === "number") {
types = new Set().add(types);
}
let typesSet = types;
// list = tokens[start:stop]:{T t, t.type in types}
let filteredTokens = this.tokens.slice(start, stop + 1);
filteredTokens = filteredTokens.filter((value) => typesSet.has(value.type));
return filteredTokens;
}
/**
* Given a starting index, return the index of the next token on channel.
* Return `i` if `tokens[i]` is on channel. Return the index of
* the EOF token if there are no tokens on channel between `i` and
* EOF.
*/
nextTokenOnChannel(i, channel) {
this.sync(i);
if (i >= this.size) {
return this.size - 1;
}
let token = this.tokens[i];
while (token.channel !== channel) {
if (token.type === Token_1.Token.EOF) {
return i;
}
i++;
this.sync(i);
token = this.tokens[i];
}
return i;
}
/**
* Given a starting index, return the index of the previous token on
* channel. Return `i` if `tokens[i]` is on channel. Return -1
* if there are no tokens on channel between `i` and 0.
*
* If `i` specifies an index at or after the EOF token, the EOF token
* index is returned. This is due to the fact that the EOF token is treated
* as though it were on every channel.
*/
previousTokenOnChannel(i, channel) {
this.sync(i);
if (i >= this.size) {
// the EOF token is on every channel
return this.size - 1;
}
while (i >= 0) {
let token = this.tokens[i];
if (token.type === Token_1.Token.EOF || token.channel === channel) {
return i;
}
i--;
}
return i;
}
/** Collect all tokens on specified channel to the right of
* the current token up until we see a token on {@link Lexer#DEFAULT_TOKEN_CHANNEL} or
* EOF. If `channel` is `-1`, find any non default channel token.
*/
getHiddenTokensToRight(tokenIndex, channel = -1) {
this.lazyInit();
if (tokenIndex < 0 || tokenIndex >= this.tokens.length) {
throw new RangeError(tokenIndex + " not in 0.." + (this.tokens.length - 1));
}
let nextOnChannel = this.nextTokenOnChannel(tokenIndex + 1, Lexer_1.Lexer.DEFAULT_TOKEN_CHANNEL);
let to;
let from = tokenIndex + 1;
// if none onchannel to right, nextOnChannel=-1 so set to = last token
if (nextOnChannel === -1) {
to = this.size - 1;
}
else {
to = nextOnChannel;
}
return this.filterForChannel(from, to, channel);
}
/** Collect all tokens on specified channel to the left of
* the current token up until we see a token on {@link Lexer#DEFAULT_TOKEN_CHANNEL}.
* If `channel` is `-1`, find any non default channel token.
*/
getHiddenTokensToLeft(tokenIndex, channel = -1) {
this.lazyInit();
if (tokenIndex < 0 || tokenIndex >= this.tokens.length) {
throw new RangeError(tokenIndex + " not in 0.." + (this.tokens.length - 1));
}
if (tokenIndex === 0) {
// obviously no tokens can appear before the first token
return [];
}
let prevOnChannel = this.previousTokenOnChannel(tokenIndex - 1, Lexer_1.Lexer.DEFAULT_TOKEN_CHANNEL);
if (prevOnChannel === tokenIndex - 1) {
return [];
}
// if none onchannel to left, prevOnChannel=-1 then from=0
let from = prevOnChannel + 1;
let to = tokenIndex - 1;
return this.filterForChannel(from, to, channel);
}
filterForChannel(from, to, channel) {
let hidden = new Array();
for (let i = from; i <= to; i++) {
let t = this.tokens[i];
if (channel === -1) {
if (t.channel !== Lexer_1.Lexer.DEFAULT_TOKEN_CHANNEL) {
hidden.push(t);
}
}
else {
if (t.channel === channel) {
hidden.push(t);
}
}
}
return hidden;
}
get sourceName() {
return this.tokenSource.sourceName;
}
getText(interval) {
if (interval === undefined) {
interval = Interval_1.Interval.of(0, this.size - 1);
}
else if (!(interval instanceof Interval_1.Interval)) {
// Note: the more obvious check for 'instanceof RuleContext' results in a circular dependency problem
interval = interval.sourceInterval;
}
let start = interval.a;
let stop = interval.b;
if (start < 0 || stop < 0) {
return "";
}
this.fill();
if (stop >= this.tokens.length) {
stop = this.tokens.length - 1;
}
let buf = "";
for (let i = start; i <= stop; i++) {
let t = this.tokens[i];
if (t.type === Token_1.Token.EOF) {
break;
}
buf += t.text;
}
return buf.toString();
}
getTextFromRange(start, stop) {
if (this.isToken(start) && this.isToken(stop)) {
return this.getText(Interval_1.Interval.of(start.tokenIndex, stop.tokenIndex));
}
return "";
}
/** Get all tokens from lexer until EOF. */
fill() {
this.lazyInit();
const blockSize = 1000;
while (true) {
let fetched = this.fetch(blockSize);
if (fetched < blockSize) {
return;
}
}
}
// TODO: Figure out a way to make this more flexible?
isWritableToken(t) {
return t instanceof CommonToken_1.CommonToken;
}
// TODO: Figure out a way to make this more flexible?
isToken(t) {
return t instanceof CommonToken_1.CommonToken;
}
};
// Apply decorator metadata to the members of the class above.
// NOTE(review): NotNull/Override come from ./Decorators and appear to be
// marker/validation decorators carried over from the Java runtime — confirm
// against Decorators.js.
__decorate([
    Decorators_1.NotNull
], BufferedTokenStream.prototype, "_tokenSource", void 0);
__decorate([
    Decorators_1.Override
], BufferedTokenStream.prototype, "tokenSource", null);
__decorate([
    Decorators_1.Override
], BufferedTokenStream.prototype, "index", null);
__decorate([
    Decorators_1.Override
], BufferedTokenStream.prototype, "mark", null);
__decorate([
    Decorators_1.Override
], BufferedTokenStream.prototype, "release", null);
__decorate([
    Decorators_1.Override
], BufferedTokenStream.prototype, "seek", null);
__decorate([
    Decorators_1.Override
], BufferedTokenStream.prototype, "size", null);
__decorate([
    Decorators_1.Override
], BufferedTokenStream.prototype, "consume", null);
__decorate([
    Decorators_1.Override
], BufferedTokenStream.prototype, "get", null);
__decorate([
    Decorators_1.Override
], BufferedTokenStream.prototype, "LA", null);
__decorate([
    Decorators_1.NotNull,
    Decorators_1.Override
], BufferedTokenStream.prototype, "LT", null);
__decorate([
    Decorators_1.Override
], BufferedTokenStream.prototype, "sourceName", null);
__decorate([
    Decorators_1.NotNull,
    Decorators_1.Override
], BufferedTokenStream.prototype, "getText", null);
__decorate([
    Decorators_1.NotNull,
    Decorators_1.Override
], BufferedTokenStream.prototype, "getTextFromRange", null);
BufferedTokenStream = __decorate([
    __param(0, Decorators_1.NotNull)
], BufferedTokenStream);
exports.BufferedTokenStream = BufferedTokenStream;
//# sourceMappingURL=BufferedTokenStream.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,26 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { Interval } from "./misc/Interval";
import { IntStream } from "./IntStream";
/** A source of characters for an ANTLR lexer. */
export interface CharStream extends IntStream {
    /**
     * This method returns the text for a range of characters within this input
     * stream. This method is guaranteed to not throw an exception if the
     * specified `interval` lies entirely within a marked range. For more
     * information about marked ranges, see {@link IntStream#mark}.
     *
     * @param interval an interval within the stream
     * @returns the text of the specified interval
     *
     * @throws NullPointerException if `interval` is `undefined`
     * @throws IllegalArgumentException if `interval.a < 0`, or if
     * `interval.b < interval.a - 1`, or if `interval.b` lies at or
     * past the end of the stream
     * @throws UnsupportedOperationException if the stream does not support
     * getting the text of the specified interval
     */
    //@NotNull
    getText(/*@NotNull*/ interval: Interval): string;
}

View File

@@ -0,0 +1,7 @@
"use strict";
/*!
 * Copyright 2016 The ANTLR Project. All rights reserved.
 * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
 */
// CharStream is a compile-time-only interface, so this compiled module has no
// runtime exports beyond the ES-module marker below.
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=CharStream.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"CharStream.js","sourceRoot":"","sources":["../../src/CharStream.ts"],"names":[],"mappings":";AAAA;;;GAGG","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:50.0659297-07:00\r\n\r\nimport { Interval } from \"./misc/Interval\";\r\nimport { IntStream } from \"./IntStream\";\r\n\r\n/** A source of characters for an ANTLR lexer. */\r\nexport interface CharStream extends IntStream {\r\n\t/**\r\n\t * This method returns the text for a range of characters within this input\r\n\t * stream. This method is guaranteed to not throw an exception if the\r\n\t * specified `interval` lies entirely within a marked range. For more\r\n\t * information about marked ranges, see {@link IntStream#mark}.\r\n\t *\r\n\t * @param interval an interval within the stream\r\n\t * @returns the text of the specified interval\r\n\t *\r\n\t * @throws NullPointerException if `interval` is `undefined`\r\n\t * @throws IllegalArgumentException if `interval.a < 0`, or if\r\n\t * `interval.b < interval.a - 1`, or if `interval.b` lies at or\r\n\t * past the end of the stream\r\n\t * @throws UnsupportedOperationException if the stream does not support\r\n\t * getting the text of the specified interval\r\n\t */\r\n\t//@NotNull\r\n\tgetText(/*@NotNull*/ interval: Interval): string;\r\n}\r\n"]}

View File

@@ -0,0 +1,54 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { CodePointCharStream } from "./CodePointCharStream";
/** This class represents the primary interface for creating {@link CharStream}s
* from a variety of sources as of 4.7. The motivation was to support
* Unicode code points > U+FFFF. {@link ANTLRInputStream} and
* {@link ANTLRFileStream} are now deprecated in favor of the streams created
* by this interface.
*
* DEPRECATED: {@code new ANTLRFileStream("myinputfile")}
* NEW: {@code CharStreams.fromFileName("myinputfile")}
*
* WARNING: If you use both the deprecated and the new streams, you will see
* a nontrivial performance degradation. This speed hit is because the
* {@link Lexer}'s internal code goes from a monomorphic to megamorphic
* dynamic dispatch to get characters from the input stream. Java's
* on-the-fly compiler (JIT) is unable to perform the same optimizations
* so stick with either the old or the new streams, if performance is
* a primary concern. See the extreme debugging and spelunking
* needed to identify this issue in our timing rig:
*
* https://github.com/antlr/antlr4/pull/1781
*
* The ANTLR character streams still buffer all the input when you create
* the stream, as they have done for ~20 years. If you need unbuffered
* access, please note that it becomes challenging to create
* parse trees. The parse tree has to point to tokens which will either
* point into a stale location in an unbuffered stream or you have to copy
* the characters out of the buffer into the token. That defeats the purpose
* of unbuffered input. Per the ANTLR book, unbuffered streams are primarily
* useful for processing infinite streams *during the parse.*
*
* The new streams also use 8-bit buffers when possible so this new
* interface supports character streams that use half as much memory
* as the old {@link ANTLRFileStream}, which assumed 16-bit characters.
*
* A big shout out to Ben Hamilton (github bhamiltoncx) for his superhuman
* efforts across all targets to get true Unicode 3.1 support for U+10FFFF.
*
* @since 4.7
*/
export declare namespace CharStreams {
    /**
     * Creates a {@link CharStream} given a {@link String}.
     */
    function fromString(s: string): CodePointCharStream;
    /**
     * Creates a {@link CharStream} given a {@link String} and the {@code sourceName}
     * from which it came.
     *
     * An empty `sourceName` is treated the same as an omitted one (it falls
     * back to `IntStream.UNKNOWN_SOURCE_NAME` in the implementation).
     */
    function fromString(s: string, sourceName: string): CodePointCharStream;
}

View File

@@ -0,0 +1,133 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.CharStreams = void 0;
const CodePointBuffer_1 = require("./CodePointBuffer");
const CodePointCharStream_1 = require("./CodePointCharStream");
const IntStream_1 = require("./IntStream");
// const DEFAULT_BUFFER_SIZE: number = 4096;
/** This class represents the primary interface for creating {@link CharStream}s
* from a variety of sources as of 4.7. The motivation was to support
* Unicode code points > U+FFFF. {@link ANTLRInputStream} and
* {@link ANTLRFileStream} are now deprecated in favor of the streams created
* by this interface.
*
* DEPRECATED: {@code new ANTLRFileStream("myinputfile")}
* NEW: {@code CharStreams.fromFileName("myinputfile")}
*
* WARNING: If you use both the deprecated and the new streams, you will see
* a nontrivial performance degradation. This speed hit is because the
* {@link Lexer}'s internal code goes from a monomorphic to megamorphic
* dynamic dispatch to get characters from the input stream. Java's
* on-the-fly compiler (JIT) is unable to perform the same optimizations
* so stick with either the old or the new streams, if performance is
* a primary concern. See the extreme debugging and spelunking
* needed to identify this issue in our timing rig:
*
* https://github.com/antlr/antlr4/pull/1781
*
* The ANTLR character streams still buffer all the input when you create
* the stream, as they have done for ~20 years. If you need unbuffered
* access, please note that it becomes challenging to create
* parse trees. The parse tree has to point to tokens which will either
* point into a stale location in an unbuffered stream or you have to copy
* the characters out of the buffer into the token. That defeats the purpose
* of unbuffered input. Per the ANTLR book, unbuffered streams are primarily
* useful for processing infinite streams *during the parse.*
*
* The new streams also use 8-bit buffers when possible so this new
* interface supports character streams that use half as much memory
* as the old {@link ANTLRFileStream}, which assumed 16-bit characters.
*
* A big shout out to Ben Hamilton (github bhamiltoncx) for his superhuman
* efforts across all targets to get true Unicode 3.1 support for U+10FFFF.
*
* @since 4.7
*/
var CharStreams;
(function (CharStreams) {
    // /**
    //  * Creates a {@link CharStream} given a path to a UTF-8
    //  * encoded file on disk.
    //  *
    //  * Reads the entire contents of the file into the result before returning.
    //  */
    // export function fromFile(file: File): CharStream;
    // export function fromFile(file: File, charset: Charset): CharStream;
    // export function fromFile(file: File, charset?: Charset): CharStream {
    // 	if (charset === undefined) {
    // 		charset = Charset.forName("UTF-8");
    // 	}
    /**
     * Creates a {@link CodePointCharStream} from a JS string by copying its
     * UTF-16 code units into a {@link CodePointBuffer}. An omitted or empty
     * `sourceName` becomes {@link IntStream.UNKNOWN_SOURCE_NAME}.
     */
    function fromString(s, sourceName) {
        if (sourceName === undefined || sourceName.length === 0) {
            sourceName = IntStream_1.IntStream.UNKNOWN_SOURCE_NAME;
        }
        // Initial guess assumes no code points > U+FFFF: one code
        // point for each code unit in the string
        let codePointBufferBuilder = CodePointBuffer_1.CodePointBuffer.builder(s.length);
        // TODO: CharBuffer.wrap(String) rightfully returns a read-only buffer
        // which doesn't expose its array, so we make a copy.
        let cb = new Uint16Array(s.length);
        for (let i = 0; i < s.length; i++) {
            cb[i] = s.charCodeAt(i);
        }
        codePointBufferBuilder.append(cb);
        return CodePointCharStream_1.CodePointCharStream.fromBuffer(codePointBufferBuilder.build(), sourceName);
    }
    CharStreams.fromString = fromString;
    // export function bufferFromChannel(
    // 	channel: ReadableByteChannel,
    // 	charset: Charset,
    // 	bufferSize: number,
    // 	decodingErrorAction: CodingErrorAction,
    // 	inputSize: number): CodePointBuffer {
    // 	try {
    // 		let utf8BytesIn: Uint8Array = new Uint8Array(bufferSize);
    // 		let utf16CodeUnitsOut: Uint16Array = new Uint16Array(bufferSize);
    // 		if (inputSize === -1) {
    // 			inputSize = bufferSize;
    // 		} else if (inputSize > Integer.MAX_VALUE) {
    // 			// ByteBuffer et al don't support long sizes
    // 			throw new RangeError(`inputSize ${inputSize} larger than max ${Integer.MAX_VALUE}`);
    // 		}
    // 		let codePointBufferBuilder: CodePointBuffer.Builder = CodePointBuffer.builder(inputSize);
    // 		let decoder: CharsetDecoder = charset
    // 				.newDecoder()
    // 				.onMalformedInput(decodingErrorAction)
    // 				.onUnmappableCharacter(decodingErrorAction);
    // 		let endOfInput: boolean = false;
    // 		while (!endOfInput) {
    // 			let bytesRead: number = channel.read(utf8BytesIn);
    // 			endOfInput = (bytesRead === -1);
    // 			utf8BytesIn.flip();
    // 			let result: CoderResult = decoder.decode(
    // 				utf8BytesIn,
    // 				utf16CodeUnitsOut,
    // 				endOfInput);
    // 			if (result.isError() && decodingErrorAction === CodingErrorAction.REPORT) {
    // 				result.throwException();
    // 			}
    // 			utf16CodeUnitsOut.flip();
    // 			codePointBufferBuilder.append(utf16CodeUnitsOut);
    // 			utf8BytesIn.compact();
    // 			utf16CodeUnitsOut.compact();
    // 		}
    // 		// Handle any bytes at the end of the file which need to
    // 		// be represented as errors or substitution characters.
    // 		let flushResult: CoderResult = decoder.flush(utf16CodeUnitsOut);
    // 		if (flushResult.isError() && decodingErrorAction === CodingErrorAction.REPORT) {
    // 			flushResult.throwException();
    // 		}
    // 		utf16CodeUnitsOut.flip();
    // 		codePointBufferBuilder.append(utf16CodeUnitsOut);
    // 		return codePointBufferBuilder.build();
    // 	}
    // 	finally {
    // 		channel.close();
    // 	}
    // }
})(CharStreams = exports.CharStreams || (exports.CharStreams = {}));
//# sourceMappingURL=CharStreams.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,40 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
/**
* Wrapper for `Uint8Array` / `Uint16Array` / `Int32Array`.
*/
export declare class CodePointBuffer {
    // Backing typed array; element width depends on the widest value stored.
    private readonly buffer;
    // Current read position (see the `position` accessors).
    private _position;
    // Number of valid elements in `buffer`.
    private _size;
    constructor(buffer: Uint8Array | Uint16Array | Int32Array, size: number);
    /** Wraps an existing array, using its full length as the size. */
    static withArray(buffer: Uint8Array | Uint16Array | Int32Array): CodePointBuffer;
    get position(): number;
    /** @throws RangeError if the new position is outside `0..size`. */
    set position(newPosition: number);
    /** Elements remaining between the current position and the end. */
    get remaining(): number;
    /** Reads the element at an absolute offset (independent of `position`). */
    get(offset: number): number;
    /** Returns a copy of the valid portion of the backing storage. */
    array(): Uint8Array | Uint16Array | Int32Array;
    static builder(initialBufferSize: number): CodePointBuffer.Builder;
}
export declare namespace CodePointBuffer {
    /**
     * Accumulates UTF-16 code units into a {@link CodePointBuffer}, widening
     * its backing storage (bytes, then chars, then ints) as wider values and
     * surrogate pairs are encountered.
     */
    class Builder {
        private type;
        private buffer;
        // Pending high surrogate awaiting its low half; -1 when none.
        private prevHighSurrogate;
        private position;
        constructor(initialBufferSize: number);
        build(): CodePointBuffer;
        private static roundUpToNextPowerOfTwo;
        /** Grows the backing array if fewer than `remainingNeeded` slots are free. */
        ensureRemaining(remainingNeeded: number): void;
        /** Appends a block of UTF-16 code units. */
        append(utf16In: Uint16Array): void;
        private appendArray;
        private appendArrayByte;
        private appendArrayChar;
        private appendArrayInt;
        private byteToCharBuffer;
        private byteToIntBuffer;
        private charToIntBuffer;
    }
}

View File

@@ -0,0 +1,234 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.CodePointBuffer = void 0;
const assert = require("assert");
const Character = require("./misc/Character");
/**
* Wrapper for `Uint8Array` / `Uint16Array` / `Int32Array`.
*/
class CodePointBuffer {
    /**
     * @param buffer Backing typed array holding code units / code points.
     * @param size Number of valid elements in `buffer`.
     */
    constructor(buffer, size) {
        this.buffer = buffer;
        this._position = 0;
        this._size = size;
    }
    /** Wraps an existing array; its full length becomes the buffer size. */
    static withArray(buffer) {
        return new CodePointBuffer(buffer, buffer.length);
    }
    /** Current read position within the buffer. */
    get position() {
        return this._position;
    }
    set position(newPosition) {
        // Reject positions outside the valid region 0..size (inclusive).
        const inBounds = newPosition >= 0 && newPosition <= this._size;
        if (!inBounds) {
            throw new RangeError();
        }
        this._position = newPosition;
    }
    /** Number of elements between the current position and the end. */
    get remaining() {
        return this._size - this._position;
    }
    /** Reads the element at an absolute offset (position is ignored). */
    get(offset) {
        return this.buffer[offset];
    }
    /** Returns a copy of the valid portion of the backing storage. */
    array() {
        return this.buffer.slice(0, this._size);
    }
    /** Creates a builder that grows and widens its storage as needed. */
    static builder(initialBufferSize) {
        return new CodePointBuffer.Builder(initialBufferSize);
    }
}
exports.CodePointBuffer = CodePointBuffer;
// Namespace merge: attaches the nested Builder class (and its private storage
// Type enum) onto the CodePointBuffer class object, mirroring the TS namespace.
(function (CodePointBuffer) {
    // Storage width of the builder's backing array:
    // BYTE -> Uint8Array, CHAR -> Uint16Array, INT -> Int32Array.
    let Type;
    (function (Type) {
        Type[Type["BYTE"] = 0] = "BYTE";
        Type[Type["CHAR"] = 1] = "CHAR";
        Type[Type["INT"] = 2] = "INT";
    })(Type || (Type = {}));
    /**
     * Accumulates UTF-16 code units and produces a CodePointBuffer. Starts
     * with the narrowest storage (bytes) and widens to chars, then ints, the
     * first time a wider value or a surrogate is encountered.
     */
    class Builder {
        constructor(initialBufferSize) {
            this.type = 0 /* BYTE */;
            this.buffer = new Uint8Array(initialBufferSize);
            // Pending high surrogate awaiting its low half; -1 when none.
            this.prevHighSurrogate = -1;
            this.position = 0;
        }
        // Hands off the backing array without trimming; the buffer's size
        // field (this.position) bounds the valid region.
        build() {
            return new CodePointBuffer(this.buffer, this.position);
        }
        static roundUpToNextPowerOfTwo(i) {
            let nextPowerOfTwo = 32 - Math.clz32(i - 1);
            return Math.pow(2, nextPowerOfTwo);
        }
        // Grows the current backing array (same element width) if fewer than
        // `remainingNeeded` free slots remain past `position`.
        ensureRemaining(remainingNeeded) {
            switch (this.type) {
                case 0 /* BYTE */:
                    if (this.buffer.length - this.position < remainingNeeded) {
                        let newCapacity = Builder.roundUpToNextPowerOfTwo(this.buffer.length + remainingNeeded);
                        let newBuffer = new Uint8Array(newCapacity);
                        newBuffer.set(this.buffer.subarray(0, this.position), 0);
                        this.buffer = newBuffer;
                    }
                    break;
                case 1 /* CHAR */:
                    if (this.buffer.length - this.position < remainingNeeded) {
                        let newCapacity = Builder.roundUpToNextPowerOfTwo(this.buffer.length + remainingNeeded);
                        let newBuffer = new Uint16Array(newCapacity);
                        newBuffer.set(this.buffer.subarray(0, this.position), 0);
                        this.buffer = newBuffer;
                    }
                    break;
                case 2 /* INT */:
                    if (this.buffer.length - this.position < remainingNeeded) {
                        let newCapacity = Builder.roundUpToNextPowerOfTwo(this.buffer.length + remainingNeeded);
                        let newBuffer = new Int32Array(newCapacity);
                        newBuffer.set(this.buffer.subarray(0, this.position), 0);
                        this.buffer = newBuffer;
                    }
                    break;
            }
        }
        append(utf16In) {
            this.ensureRemaining(utf16In.length);
            this.appendArray(utf16In);
        }
        // Dispatches to the append routine matching the current storage width.
        appendArray(utf16In) {
            switch (this.type) {
                case 0 /* BYTE */:
                    this.appendArrayByte(utf16In);
                    break;
                case 1 /* CHAR */:
                    this.appendArrayChar(utf16In);
                    break;
                case 2 /* INT */:
                    this.appendArrayInt(utf16In);
                    break;
            }
        }
        // Fast path while every unit fits in a byte; on the first wider unit the
        // storage is widened (to chars, or straight to ints if the unit is a
        // high surrogate) and the rest of the input is delegated.
        appendArrayByte(utf16In) {
            assert(this.prevHighSurrogate === -1);
            let input = utf16In;
            let inOffset = 0;
            let inLimit = utf16In.length;
            let outByte = this.buffer;
            let outOffset = this.position;
            while (inOffset < inLimit) {
                let c = input[inOffset];
                if (c <= 0xFF) {
                    outByte[outOffset] = c;
                }
                else {
                    utf16In = utf16In.subarray(inOffset, inLimit);
                    this.position = outOffset;
                    if (!Character.isHighSurrogate(c)) {
                        this.byteToCharBuffer(utf16In.length);
                        this.appendArrayChar(utf16In);
                        return;
                    }
                    else {
                        this.byteToIntBuffer(utf16In.length);
                        this.appendArrayInt(utf16In);
                        return;
                    }
                }
                inOffset++;
                outOffset++;
            }
            this.position = outOffset;
        }
        // Char-width path; widens to ints and delegates when a high surrogate
        // appears (pairs must be combined into single code points).
        appendArrayChar(utf16In) {
            assert(this.prevHighSurrogate === -1);
            let input = utf16In;
            let inOffset = 0;
            let inLimit = utf16In.length;
            let outChar = this.buffer;
            let outOffset = this.position;
            while (inOffset < inLimit) {
                let c = input[inOffset];
                if (!Character.isHighSurrogate(c)) {
                    outChar[outOffset] = c;
                }
                else {
                    utf16In = utf16In.subarray(inOffset, inLimit);
                    this.position = outOffset;
                    this.charToIntBuffer(utf16In.length);
                    this.appendArrayInt(utf16In);
                    return;
                }
                inOffset++;
                outOffset++;
            }
            this.position = outOffset;
        }
        // Int-width path: combines high/low surrogate pairs into single code
        // points; a dangling high surrogate is emitted as-is.
        appendArrayInt(utf16In) {
            let input = utf16In;
            let inOffset = 0;
            let inLimit = utf16In.length;
            let outInt = this.buffer;
            let outOffset = this.position;
            while (inOffset < inLimit) {
                let c = input[inOffset];
                inOffset++;
                if (this.prevHighSurrogate !== -1) {
                    if (Character.isLowSurrogate(c)) {
                        // complete pair: decode to a single code point
                        outInt[outOffset] = String.fromCharCode(this.prevHighSurrogate, c).codePointAt(0);
                        outOffset++;
                        this.prevHighSurrogate = -1;
                    }
                    else {
                        // Dangling high surrogate
                        outInt[outOffset] = this.prevHighSurrogate;
                        outOffset++;
                        if (Character.isHighSurrogate(c)) {
                            this.prevHighSurrogate = c;
                        }
                        else {
                            outInt[outOffset] = c;
                            outOffset++;
                            this.prevHighSurrogate = -1;
                        }
                    }
                }
                else if (Character.isHighSurrogate(c)) {
                    this.prevHighSurrogate = c;
                }
                else {
                    outInt[outOffset] = c;
                    outOffset++;
                }
            }
            if (this.prevHighSurrogate !== -1) {
                // Dangling high surrogate
                outInt[outOffset] = this.prevHighSurrogate;
                outOffset++;
            }
            this.position = outOffset;
        }
        byteToCharBuffer(toAppend) {
            // CharBuffers hold twice as much per unit as ByteBuffers, so start with half the capacity.
            let newBuffer = new Uint16Array(Math.max(this.position + toAppend, this.buffer.length >> 1));
            newBuffer.set(this.buffer.subarray(0, this.position), 0);
            this.type = 1 /* CHAR */;
            this.buffer = newBuffer;
        }
        byteToIntBuffer(toAppend) {
            // IntBuffers hold four times as much per unit as ByteBuffers, so start with one quarter the capacity.
            let newBuffer = new Int32Array(Math.max(this.position + toAppend, this.buffer.length >> 2));
            newBuffer.set(this.buffer.subarray(0, this.position), 0);
            this.type = 2 /* INT */;
            this.buffer = newBuffer;
        }
        charToIntBuffer(toAppend) {
            // IntBuffers hold two times as much per unit as ByteBuffers, so start with one half the capacity.
            let newBuffer = new Int32Array(Math.max(this.position + toAppend, this.buffer.length >> 1));
            newBuffer.set(this.buffer.subarray(0, this.position), 0);
            this.type = 2 /* INT */;
            this.buffer = newBuffer;
        }
    }
    CodePointBuffer.Builder = Builder;
})(CodePointBuffer = exports.CodePointBuffer || (exports.CodePointBuffer = {}));
//# sourceMappingURL=CodePointBuffer.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,45 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { CharStream } from "./CharStream";
import { CodePointBuffer } from "./CodePointBuffer";
import { Interval } from "./misc/Interval";
/**
* Alternative to {@link ANTLRInputStream} which treats the input
* as a series of Unicode code points, instead of a series of UTF-16
* code units.
*
* Use this if you need to parse input which potentially contains
* Unicode values > U+FFFF.
*/
export declare class CodePointCharStream implements CharStream {
private readonly _array;
private readonly _size;
private readonly _name;
private _position;
protected constructor(array: Uint8Array | Uint16Array | Int32Array, position: number, remaining: number, name: string);
get internalStorage(): Uint8Array | Uint16Array | Int32Array;
/**
* Constructs a {@link CodePointCharStream} which provides access
* to the Unicode code points stored in {@code codePointBuffer}.
*/
static fromBuffer(codePointBuffer: CodePointBuffer): CodePointCharStream;
/**
* Constructs a named {@link CodePointCharStream} which provides access
* to the Unicode code points stored in {@code codePointBuffer}.
*/
static fromBuffer(codePointBuffer: CodePointBuffer, name: string): CodePointCharStream;
consume(): void;
get index(): number;
get size(): number;
/** mark/release do nothing; we have entire buffer */
mark(): number;
release(marker: number): void;
seek(index: number): void;
get sourceName(): string;
toString(): string;
LA(i: number): number;
/** Return the UTF-16 encoded string for the given interval */
getText(interval: Interval): string;
}

View File

@@ -0,0 +1,149 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.CodePointCharStream = void 0;
const assert = require("assert");
const IntStream_1 = require("./IntStream");
const Interval_1 = require("./misc/Interval");
const Decorators_1 = require("./Decorators");
/**
* Alternative to {@link ANTLRInputStream} which treats the input
* as a series of Unicode code points, instead of a series of UTF-16
* code units.
*
* Use this if you need to parse input which potentially contains
* Unicode values > U+FFFF.
*/
class CodePointCharStream {
    // Use the factory method {@link #fromBuffer(CodePointBuffer)} to
    // construct instances of this type.
    constructor(array, position, remaining, name) {
        // TODO
        // The buffer is expected to be fully rewound before being handed over.
        assert(position === 0);
        this._array = array;
        this._size = remaining;
        this._name = name;
        this._position = 0;
    }
    /** Direct access to the backing typed array of code points / code units. */
    get internalStorage() {
        return this._array;
    }
    static fromBuffer(codePointBuffer, name) {
        if (name === undefined || name.length === 0) {
            name = IntStream_1.IntStream.UNKNOWN_SOURCE_NAME;
        }
        // Java lacks generics on primitive types.
        //
        // To avoid lots of calls to virtual methods in the
        // very hot codepath of LA() below, we construct one
        // of three concrete subclasses.
        //
        // The concrete subclasses directly access the code
        // points stored in the underlying array (byte[],
        // char[], or int[]), so we can avoid lots of virtual
        // method calls to ByteBuffer.get(offset).
        return new CodePointCharStream(codePointBuffer.array(), codePointBuffer.position, codePointBuffer.remaining, name);
    }
    /** Advance past the current code point; throws once EOF is reached. */
    consume() {
        if (this._size - this._position === 0) {
            assert(this.LA(1) === IntStream_1.IntStream.EOF);
            throw new RangeError("cannot consume EOF");
        }
        this._position++;
    }
    get index() {
        return this._position;
    }
    get size() {
        return this._size;
    }
    /** mark/release do nothing; we have entire buffer */
    mark() {
        return -1;
    }
    release(marker) {
        // No default implementation since this stream buffers the entire input
    }
    seek(index) {
        this._position = index;
    }
    get sourceName() {
        return this._name;
    }
    toString() {
        return this.getText(Interval_1.Interval.of(0, this.size - 1));
    }
    // i > 0 looks ahead (LA(1) is the current code point); i < 0 looks
    // behind. Out-of-range lookups yield EOF rather than throwing.
    LA(i) {
        let offset;
        switch (Math.sign(i)) {
            case -1:
                offset = this.index + i;
                if (offset < 0) {
                    return IntStream_1.IntStream.EOF;
                }
                return this._array[offset];
            case 0:
                // Undefined
                return 0;
            case 1:
                offset = this.index + i - 1;
                if (offset >= this.size) {
                    return IntStream_1.IntStream.EOF;
                }
                return this._array[offset];
        }
        throw new RangeError("Not reached");
    }
    /** Return the UTF-16 encoded string for the given interval */
    getText(interval) {
        // Clamp the requested interval to the buffered input.
        const startIdx = Math.min(interval.a, this.size);
        const len = Math.min(interval.b - interval.a + 1, this.size - startIdx);
        // Int32Array elements are full code points; the narrower arrays hold
        // UTF-16 code units, so each needs the matching String factory.
        if (this._array instanceof Int32Array) {
            return String.fromCodePoint(...Array.from(this._array.subarray(startIdx, startIdx + len)));
        }
        else {
            return String.fromCharCode(...Array.from(this._array.subarray(startIdx, startIdx + len)));
        }
    }
}
__decorate([
Decorators_1.Override
], CodePointCharStream.prototype, "consume", null);
__decorate([
Decorators_1.Override
], CodePointCharStream.prototype, "index", null);
__decorate([
Decorators_1.Override
], CodePointCharStream.prototype, "size", null);
__decorate([
Decorators_1.Override
], CodePointCharStream.prototype, "mark", null);
__decorate([
Decorators_1.Override
], CodePointCharStream.prototype, "release", null);
__decorate([
Decorators_1.Override
], CodePointCharStream.prototype, "seek", null);
__decorate([
Decorators_1.Override
], CodePointCharStream.prototype, "sourceName", null);
__decorate([
Decorators_1.Override
], CodePointCharStream.prototype, "toString", null);
__decorate([
Decorators_1.Override
], CodePointCharStream.prototype, "LA", null);
__decorate([
Decorators_1.Override
], CodePointCharStream.prototype, "getText", null);
exports.CodePointCharStream = CodePointCharStream;
//# sourceMappingURL=CodePointCharStream.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,116 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ATNSimulator } from "./atn/ATNSimulator";
import { CharStream } from "./CharStream";
import { Recognizer } from "./Recognizer";
import { Token } from "./Token";
import { TokenSource } from "./TokenSource";
import { WritableToken } from "./WritableToken";
export declare class CommonToken implements WritableToken {
    /**
     * An empty {@link Tuple2} which is used as the default value of
     * {@link #source} for tokens that do not have a source.
     */
    protected static readonly EMPTY_SOURCE: {
        source?: TokenSource;
        stream?: CharStream;
    };
    /**
     * This is the backing field for `type`.
     */
    private _type;
    /**
     * This is the backing field for {@link #getLine} and {@link #setLine}.
     */
    private _line;
    /**
     * This is the backing field for {@link #getCharPositionInLine} and
     * {@link #setCharPositionInLine}.
     */
    private _charPositionInLine;
    /**
     * This is the backing field for {@link #getChannel} and
     * {@link #setChannel}.
     */
    private _channel;
    /**
     * This is the backing field for {@link #getTokenSource} and
     * {@link #getInputStream}.
     *
     * These properties share a field to reduce the memory footprint of
     * {@link CommonToken}. Tokens created by a {@link CommonTokenFactory} from
     * the same source and input stream share a reference to the same
     * {@link Tuple2} containing these values.
     */
    protected source: {
        source?: TokenSource;
        stream?: CharStream;
    };
    /**
     * This is the backing field for {@link #getText} when the token text is
     * explicitly set in the constructor or via {@link #setText}.
     *
     * @see `text`
     */
    private _text?;
    /**
     * This is the backing field for `tokenIndex`.
     */
    protected index: number;
    /**
     * This is the backing field for `startIndex`.
     */
    protected start: number;
    /**
     * This is the backing field for `stopIndex`.
     */
    private stop;
    constructor(type: number, text?: string, source?: {
        source?: TokenSource;
        stream?: CharStream;
    }, channel?: number, start?: number, stop?: number);
    /**
     * Constructs a new {@link CommonToken} as a copy of another {@link Token}.
     *
     * If `oldToken` is also a {@link CommonToken} instance, the newly
     * constructed token will share a reference to the {@link #text} field and
     * the {@link Tuple2} stored in {@link #source}. Otherwise, {@link #text} will
     * be assigned the result of calling {@link #getText}, and {@link #source}
     * will be constructed from the result of {@link Token#getTokenSource} and
     * {@link Token#getInputStream}.
     *
     * @param oldToken The token to copy.
     */
    static fromToken(oldToken: Token): CommonToken;
    get type(): number;
    set type(type: number);
    get line(): number;
    set line(line: number);
    get text(): string | undefined;
    /**
     * Explicitly set the text for this token. If `text` is not
     * `undefined`, then {@link #getText} will return this value rather than
     * extracting the text from the input.
     *
     * @param text The explicit text of the token, or `undefined` if the text
     * should be obtained from the input along with the start and stop indexes
     * of the token.
     */
    set text(text: string | undefined);
    get charPositionInLine(): number;
    set charPositionInLine(charPositionInLine: number);
    get channel(): number;
    set channel(channel: number);
    get startIndex(): number;
    set startIndex(start: number);
    get stopIndex(): number;
    set stopIndex(stop: number);
    get tokenIndex(): number;
    set tokenIndex(index: number);
    get tokenSource(): TokenSource | undefined;
    get inputStream(): CharStream | undefined;
    toString(): string;
    toString<TSymbol, ATNInterpreter extends ATNSimulator>(recognizer: Recognizer<TSymbol, ATNInterpreter> | undefined): string;
}

View File

@@ -0,0 +1,229 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __param = (this && this.__param) || function (paramIndex, decorator) {
return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.CommonToken = void 0;
const Interval_1 = require("./misc/Interval");
const Decorators_1 = require("./Decorators");
const Token_1 = require("./Token");
let CommonToken = class CommonToken {
    constructor(type, text, source = CommonToken.EMPTY_SOURCE, channel = Token_1.Token.DEFAULT_CHANNEL, start = 0, stop = 0) {
        /**
         * This is the backing field for {@link #getLine} and {@link #setLine}.
         */
        this._line = 0;
        /**
         * This is the backing field for {@link #getCharPositionInLine} and
         * {@link #setCharPositionInLine}.
         */
        this._charPositionInLine = -1; // set to invalid position
        /**
         * This is the backing field for {@link #getChannel} and
         * {@link #setChannel}.
         */
        this._channel = Token_1.Token.DEFAULT_CHANNEL;
        /**
         * This is the backing field for `tokenIndex`.
         */
        this.index = -1;
        this._text = text;
        this._type = type;
        this.source = source;
        this._channel = channel;
        this.start = start;
        this.stop = stop;
        if (source.source != null) {
            // Inherit position information from the token source's current state.
            this._line = source.source.line;
            this._charPositionInLine = source.source.charPositionInLine;
        }
    }
    /**
     * Constructs a new {@link CommonToken} as a copy of another {@link Token}.
     *
     * If `oldToken` is also a {@link CommonToken} instance, the newly
     * constructed token will share a reference to the {@link #text} field and
     * the {@link Tuple2} stored in {@link #source}. Otherwise, {@link #text} will
     * be assigned the result of calling {@link #getText}, and {@link #source}
     * will be constructed from the result of {@link Token#getTokenSource} and
     * {@link Token#getInputStream}.
     *
     * @param oldToken The token to copy.
     */
    static fromToken(oldToken) {
        let result = new CommonToken(oldToken.type, undefined, CommonToken.EMPTY_SOURCE, oldToken.channel, oldToken.startIndex, oldToken.stopIndex);
        result._line = oldToken.line;
        result.index = oldToken.tokenIndex;
        result._charPositionInLine = oldToken.charPositionInLine;
        if (oldToken instanceof CommonToken) {
            // Share the text field and the source tuple with the original.
            result._text = oldToken._text;
            result.source = oldToken.source;
        }
        else {
            result._text = oldToken.text;
            result.source = { source: oldToken.tokenSource, stream: oldToken.inputStream };
        }
        return result;
    }
    get type() {
        return this._type;
    }
    // @Override
    set type(type) {
        this._type = type;
    }
    get line() {
        return this._line;
    }
    // @Override
    set line(line) {
        this._line = line;
    }
    // Returns the explicitly-set text when present; otherwise lazily extracts
    // the token's text from the input stream via the start/stop indexes.
    get text() {
        if (this._text != null) {
            return this._text;
        }
        let input = this.inputStream;
        if (input == null) {
            return undefined;
        }
        let n = input.size;
        if (this.start < n && this.stop < n) {
            return input.getText(Interval_1.Interval.of(this.start, this.stop));
        }
        else {
            // Token lies past the end of the buffered input.
            return "<EOF>";
        }
    }
    /**
     * Explicitly set the text for this token. If `text` is not
     * `undefined`, then {@link #getText} will return this value rather than
     * extracting the text from the input.
     *
     * @param text The explicit text of the token, or `undefined` if the text
     * should be obtained from the input along with the start and stop indexes
     * of the token.
     */
    // @Override
    set text(text) {
        this._text = text;
    }
    get charPositionInLine() {
        return this._charPositionInLine;
    }
    // @Override
    set charPositionInLine(charPositionInLine) {
        this._charPositionInLine = charPositionInLine;
    }
    get channel() {
        return this._channel;
    }
    // @Override
    set channel(channel) {
        this._channel = channel;
    }
    get startIndex() {
        return this.start;
    }
    set startIndex(start) {
        this.start = start;
    }
    get stopIndex() {
        return this.stop;
    }
    set stopIndex(stop) {
        this.stop = stop;
    }
    get tokenIndex() {
        return this.index;
    }
    // @Override
    set tokenIndex(index) {
        this.index = index;
    }
    get tokenSource() {
        return this.source.source;
    }
    get inputStream() {
        return this.source.stream;
    }
    /**
     * Renders the token in ANTLR's standard debugging format, e.g.
     * `[@5,10:12='foo',<7>,2:3]`. When `recognizer` is provided, its
     * vocabulary supplies the token type's display name instead of the number.
     */
    toString(recognizer) {
        let channelStr = "";
        if (this._channel > 0) {
            channelStr = ",channel=" + this._channel;
        }
        let txt = this.text;
        if (txt != null) {
            // Escape control characters so the token prints on a single line.
            txt = txt.replace(/\n/g, "\\n");
            txt = txt.replace(/\r/g, "\\r");
            txt = txt.replace(/\t/g, "\\t");
        }
        else {
            txt = "<no text>";
        }
        let typeString = String(this._type);
        if (recognizer) {
            typeString = recognizer.vocabulary.getDisplayName(this._type);
        }
        return "[@" + this.tokenIndex + "," + this.start + ":" + this.stop + "='" + txt + "',<" + typeString + ">" + channelStr + "," + this._line + ":" + this.charPositionInLine + "]";
    }
};
/**
* An empty {@link Tuple2} which is used as the default value of
* {@link #source} for tokens that do not have a source.
*/
CommonToken.EMPTY_SOURCE = { source: undefined, stream: undefined };
__decorate([
Decorators_1.NotNull
], CommonToken.prototype, "source", void 0);
__decorate([
Decorators_1.Override
], CommonToken.prototype, "type", null);
__decorate([
Decorators_1.Override
], CommonToken.prototype, "line", null);
__decorate([
Decorators_1.Override
], CommonToken.prototype, "text", null);
__decorate([
Decorators_1.Override
], CommonToken.prototype, "charPositionInLine", null);
__decorate([
Decorators_1.Override
], CommonToken.prototype, "channel", null);
__decorate([
Decorators_1.Override
], CommonToken.prototype, "startIndex", null);
__decorate([
Decorators_1.Override
], CommonToken.prototype, "stopIndex", null);
__decorate([
Decorators_1.Override
], CommonToken.prototype, "tokenIndex", null);
__decorate([
Decorators_1.Override
], CommonToken.prototype, "tokenSource", null);
__decorate([
Decorators_1.Override
], CommonToken.prototype, "inputStream", null);
__decorate([
Decorators_1.Override
], CommonToken.prototype, "toString", null);
__decorate([
__param(0, Decorators_1.NotNull)
], CommonToken, "fromToken", null);
CommonToken = __decorate([
__param(2, Decorators_1.NotNull)
], CommonToken);
exports.CommonToken = CommonToken;
//# sourceMappingURL=CommonToken.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,53 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { CharStream } from "./CharStream";
import { CommonToken } from "./CommonToken";
import { TokenFactory } from "./TokenFactory";
import { TokenSource } from "./TokenSource";
/**
* This default implementation of {@link TokenFactory} creates
* {@link CommonToken} objects.
*/
export declare class CommonTokenFactory implements TokenFactory {
/**
* Indicates whether {@link CommonToken#setText} should be called after
* constructing tokens to explicitly set the text. This is useful for cases
* where the input stream might not be able to provide arbitrary substrings
* of text from the input after the lexer creates a token (e.g. the
* implementation of {@link CharStream#getText} in
* {@link UnbufferedCharStream}
* {@link UnsupportedOperationException}). Explicitly setting the token text
* allows {@link Token#getText} to be called at any time regardless of the
* input stream implementation.
*
* The default value is `false` to avoid the performance and memory
* overhead of copying text for every token unless explicitly requested.
*/
protected copyText: boolean;
/**
* Constructs a {@link CommonTokenFactory} with the specified value for
* {@link #copyText}.
*
* When `copyText` is `false`, the {@link #DEFAULT} instance
* should be used instead of constructing a new instance.
*
* @param copyText The value for {@link #copyText}.
*/
constructor(copyText?: boolean);
create(source: {
source?: TokenSource;
stream?: CharStream;
}, type: number, text: string | undefined, channel: number, start: number, stop: number, line: number, charPositionInLine: number): CommonToken;
createSimple(type: number, text: string): CommonToken;
}
export declare namespace CommonTokenFactory {
/**
* The default {@link CommonTokenFactory} instance.
*
* This token factory does not explicitly copy token text when constructing
* tokens.
*/
const DEFAULT: TokenFactory;
}

View File

@@ -0,0 +1,63 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.CommonTokenFactory = void 0;
const CommonToken_1 = require("./CommonToken");
const Interval_1 = require("./misc/Interval");
const Decorators_1 = require("./Decorators");
/**
* This default implementation of {@link TokenFactory} creates
* {@link CommonToken} objects.
*/
class CommonTokenFactory {
    /**
     * Creates a {@link CommonTokenFactory} with the given {@link #copyText}
     * setting.
     *
     * Prefer the shared {@link #DEFAULT} instance whenever `copyText` is
     * `false` rather than allocating a new factory.
     *
     * @param copyText The value for {@link #copyText}.
     */
    constructor(copyText = false) {
        this.copyText = copyText;
    }
    create(source, type, text, channel, start, stop, line, charPositionInLine) {
        const token = new CommonToken_1.CommonToken(type, text, source, channel, start, stop);
        token.line = line;
        token.charPositionInLine = charPositionInLine;
        // Eagerly snapshot the text from the stream when no explicit text was
        // given and this factory was configured to copy it.
        const shouldCopy = text == null && this.copyText && source.stream != null;
        if (shouldCopy) {
            token.text = source.stream.getText(Interval_1.Interval.of(start, stop));
        }
        return token;
    }
    createSimple(type, text) {
        return new CommonToken_1.CommonToken(type, text);
    }
}
__decorate([
Decorators_1.Override
], CommonTokenFactory.prototype, "create", null);
__decorate([
Decorators_1.Override
], CommonTokenFactory.prototype, "createSimple", null);
exports.CommonTokenFactory = CommonTokenFactory;
(function (CommonTokenFactory) {
/**
* The default {@link CommonTokenFactory} instance.
*
* This token factory does not explicitly copy token text when constructing
* tokens.
*/
CommonTokenFactory.DEFAULT = new CommonTokenFactory();
})(CommonTokenFactory = exports.CommonTokenFactory || (exports.CommonTokenFactory = {}));
//# sourceMappingURL=CommonTokenFactory.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"CommonTokenFactory.js","sourceRoot":"","sources":["../../src/CommonTokenFactory.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;;;;;;;AAKH,+CAA4C;AAC5C,8CAA2C;AAC3C,6CAAwC;AAIxC;;;GAGG;AACH,MAAa,kBAAkB;IAiB9B;;;;;;;;OAQG;IACH,YAAY,WAAoB,KAAK;QACpC,IAAI,CAAC,QAAQ,GAAG,QAAQ,CAAC;IAC1B,CAAC;IAGM,MAAM,CACZ,MAAqD,EACrD,IAAY,EACZ,IAAwB,EACxB,OAAe,EACf,KAAa,EACb,IAAY,EACZ,IAAY,EACZ,kBAA0B;QAE1B,IAAI,CAAC,GAAgB,IAAI,yBAAW,CAAC,IAAI,EAAE,IAAI,EAAE,MAAM,EAAE,OAAO,EAAE,KAAK,EAAE,IAAI,CAAC,CAAC;QAC/E,CAAC,CAAC,IAAI,GAAG,IAAI,CAAC;QACd,CAAC,CAAC,kBAAkB,GAAG,kBAAkB,CAAC;QAC1C,IAAI,IAAI,IAAI,IAAI,IAAI,IAAI,CAAC,QAAQ,IAAI,MAAM,CAAC,MAAM,IAAI,IAAI,EAAE;YAC3D,CAAC,CAAC,IAAI,GAAG,MAAM,CAAC,MAAM,CAAC,OAAO,CAAC,mBAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,IAAI,CAAC,CAAC,CAAC;SACzD;QAED,OAAO,CAAC,CAAC;IACV,CAAC;IAGM,YAAY,CAAC,IAAY,EAAE,IAAY;QAC7C,OAAO,IAAI,yBAAW,CAAC,IAAI,EAAE,IAAI,CAAC,CAAC;IACpC,CAAC;CACD;AAxBA;IADC,qBAAQ;gDAmBR;AAGD;IADC,qBAAQ;sDAGR;AAtDF,gDAuDC;AAED,WAAiB,kBAAkB;IAClC;;;;;OAKG;IACU,0BAAO,GAAiB,IAAI,kBAAkB,EAAE,CAAC;AAC/D,CAAC,EARgB,kBAAkB,GAAlB,0BAAkB,KAAlB,0BAAkB,QAQlC","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:50.3010112-07:00\r\n\r\nimport { CharStream } from \"./CharStream\";\r\nimport { CommonToken } from \"./CommonToken\";\r\nimport { Interval } from \"./misc/Interval\";\r\nimport { Override } from \"./Decorators\";\r\nimport { TokenFactory } from \"./TokenFactory\";\r\nimport { TokenSource } from \"./TokenSource\";\r\n\r\n/**\r\n * This default implementation of {@link TokenFactory} creates\r\n * {@link CommonToken} objects.\r\n */\r\nexport class CommonTokenFactory implements TokenFactory {\r\n\t/**\r\n\t * Indicates whether {@link CommonToken#setText} should be called after\r\n\t * constructing tokens to explicitly set the text. 
This is useful for cases\r\n\t * where the input stream might not be able to provide arbitrary substrings\r\n\t * of text from the input after the lexer creates a token (e.g. the\r\n\t * implementation of {@link CharStream#getText} in\r\n\t * {@link UnbufferedCharStream}\r\n\t * {@link UnsupportedOperationException}). Explicitly setting the token text\r\n\t * allows {@link Token#getText} to be called at any time regardless of the\r\n\t * input stream implementation.\r\n\t *\r\n\t * The default value is `false` to avoid the performance and memory\r\n\t * overhead of copying text for every token unless explicitly requested.\r\n\t */\r\n\tprotected copyText: boolean;\r\n\r\n\t/**\r\n\t * Constructs a {@link CommonTokenFactory} with the specified value for\r\n\t * {@link #copyText}.\r\n\t *\r\n\t * When `copyText` is `false`, the {@link #DEFAULT} instance\r\n\t * should be used instead of constructing a new instance.\r\n\t *\r\n\t * @param copyText The value for {@link #copyText}.\r\n\t */\r\n\tconstructor(copyText: boolean = false) {\r\n\t\tthis.copyText = copyText;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic create(\r\n\t\tsource: { source?: TokenSource, stream?: CharStream },\r\n\t\ttype: number,\r\n\t\ttext: string | undefined,\r\n\t\tchannel: number,\r\n\t\tstart: number,\r\n\t\tstop: number,\r\n\t\tline: number,\r\n\t\tcharPositionInLine: number): CommonToken {\r\n\r\n\t\tlet t: CommonToken = new CommonToken(type, text, source, channel, start, stop);\r\n\t\tt.line = line;\r\n\t\tt.charPositionInLine = charPositionInLine;\r\n\t\tif (text == null && this.copyText && source.stream != null) {\r\n\t\t\tt.text = source.stream.getText(Interval.of(start, stop));\r\n\t\t}\r\n\r\n\t\treturn t;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic createSimple(type: number, text: string): CommonToken {\r\n\t\treturn new CommonToken(type, text);\r\n\t}\r\n}\r\n\r\nexport namespace CommonTokenFactory {\r\n\t/**\r\n\t * The default {@link CommonTokenFactory} instance.\r\n\t *\r\n\t * This token 
factory does not explicitly copy token text when constructing\r\n\t * tokens.\r\n\t */\r\n\texport const DEFAULT: TokenFactory = new CommonTokenFactory();\r\n}\r\n"]}

View File

@@ -0,0 +1,52 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { BufferedTokenStream } from "./BufferedTokenStream";
import { Token } from "./Token";
import { TokenSource } from "./TokenSource";
/**
* This class extends {@link BufferedTokenStream} with functionality to filter
* token streams to tokens on a particular channel (tokens where
* {@link Token#getChannel} returns a particular value).
*
* This token stream provides access to all tokens by index or when calling
* methods like {@link #getText}. The channel filtering is only used for code
* accessing tokens via the lookahead methods {@link #LA}, {@link #LT}, and
* {@link #LB}.
*
* By default, tokens are placed on the default channel
* ({@link Token#DEFAULT_CHANNEL}), but may be reassigned by using the
* `->channel(HIDDEN)` lexer command, or by using an embedded action to
* call {@link Lexer#setChannel}.
*
* Note: lexer rules which use the `->skip` lexer command or call
* {@link Lexer#skip} do not produce tokens at all, so input text matched by
* such a rule will not be available as part of the token stream, regardless of
* channel.
*/
export declare class CommonTokenStream extends BufferedTokenStream {
/**
* Specifies the channel to use for filtering tokens.
*
* The default value is {@link Token#DEFAULT_CHANNEL}, which matches the
* default channel assigned to tokens created by the lexer.
*/
protected channel: number;
/**
* Constructs a new {@link CommonTokenStream} using the specified token
* source and filtering tokens to the specified channel. Only tokens whose
* {@link Token#getChannel} matches `channel` or have the
* `Token.type` equal to {@link Token#EOF} will be returned by the
* token stream lookahead methods.
*
* @param tokenSource The token source.
* @param channel The channel to use for filtering tokens.
*/
constructor(tokenSource: TokenSource, channel?: number);
protected adjustSeekIndex(i: number): number;
protected tryLB(k: number): Token | undefined;
tryLT(k: number): Token | undefined;
/** Count EOF just once. */
getNumberOfOnChannelTokens(): number;
}

View File

@@ -0,0 +1,126 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __param = (this && this.__param) || function (paramIndex, decorator) {
return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.CommonTokenStream = void 0;
// ConvertTo-TS run at 2016-10-04T11:26:50.3953157-07:00
const BufferedTokenStream_1 = require("./BufferedTokenStream");
const Decorators_1 = require("./Decorators");
const Token_1 = require("./Token");
/**
* This class extends {@link BufferedTokenStream} with functionality to filter
* token streams to tokens on a particular channel (tokens where
* {@link Token#getChannel} returns a particular value).
*
* This token stream provides access to all tokens by index or when calling
* methods like {@link #getText}. The channel filtering is only used for code
* accessing tokens via the lookahead methods {@link #LA}, {@link #LT}, and
* {@link #LB}.
*
* By default, tokens are placed on the default channel
* ({@link Token#DEFAULT_CHANNEL}), but may be reassigned by using the
* `->channel(HIDDEN)` lexer command, or by using an embedded action to
* call {@link Lexer#setChannel}.
*
* Note: lexer rules which use the `->skip` lexer command or call
* {@link Lexer#skip} do not produce tokens at all, so input text matched by
* such a rule will not be available as part of the token stream, regardless of
* channel.
*/
let CommonTokenStream = class CommonTokenStream extends BufferedTokenStream_1.BufferedTokenStream {
    /**
     * Constructs a new {@link CommonTokenStream} using the specified token
     * source and filtering tokens to the specified channel. Only tokens whose
     * {@link Token#getChannel} matches `channel` or have the
     * `Token.type` equal to {@link Token#EOF} will be returned by the
     * token stream lookahead methods.
     *
     * @param tokenSource The token source.
     * @param channel The channel to use for filtering tokens.
     */
    constructor(tokenSource, channel = Token_1.Token.DEFAULT_CHANNEL) {
        super(tokenSource);
        this.channel = channel;
    }
    // Seeks land on the next on-channel token at or after index i.
    adjustSeekIndex(i) {
        return this.nextTokenOnChannel(i, this.channel);
    }
    // Look back k on-channel tokens from the current position p;
    // returns undefined when fewer than k such tokens precede it.
    tryLB(k) {
        if ((this.p - k) < 0) {
            return undefined;
        }
        let i = this.p;
        let n = 1;
        // find k good tokens looking backwards
        while (n <= k && i > 0) {
            // skip off-channel tokens
            i = this.previousTokenOnChannel(i - 1, this.channel);
            n++;
        }
        if (i < 0) {
            return undefined;
        }
        return this.tokens[i];
    }
    tryLT(k) {
        //System.out.println("enter LT("+k+")");
        this.lazyInit();
        if (k === 0) {
            throw new RangeError("0 is not a valid lookahead index");
        }
        if (k < 0) {
            return this.tryLB(-k);
        }
        let i = this.p;
        let n = 1; // we know tokens[p] is a good one
        // find k good tokens
        while (n < k) {
            // skip off-channel tokens, but make sure to not look past EOF
            if (this.sync(i + 1)) {
                i = this.nextTokenOnChannel(i + 1, this.channel);
            }
            n++;
        }
        // if ( i>range ) range = i;
        return this.tokens[i];
    }
    /** Count EOF just once. */
    getNumberOfOnChannelTokens() {
        let n = 0;
        this.fill();
        for (let t of this.tokens) {
            if (t.channel === this.channel) {
                n++;
            }
            if (t.type === Token_1.Token.EOF) {
                break;
            }
        }
        return n;
    }
};
__decorate([
Decorators_1.Override
], CommonTokenStream.prototype, "adjustSeekIndex", null);
__decorate([
Decorators_1.Override
], CommonTokenStream.prototype, "tryLB", null);
__decorate([
Decorators_1.Override
], CommonTokenStream.prototype, "tryLT", null);
CommonTokenStream = __decorate([
__param(0, Decorators_1.NotNull)
], CommonTokenStream);
exports.CommonTokenStream = CommonTokenStream;
//# sourceMappingURL=CommonTokenStream.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,29 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ANTLRErrorListener } from "./ANTLRErrorListener";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from "./Recognizer";
/**
 * An {@link ANTLRErrorListener} that prints each reported syntax error to
 * the console's error stream (the runtime implementation in
 * ConsoleErrorListener.js delegates to `console.error`).
 *
 * @author Sam Harwell
 */
export declare class ConsoleErrorListener implements ANTLRErrorListener<any> {
    /**
     * Provides a default instance of {@link ConsoleErrorListener}.
     */
    static readonly INSTANCE: ConsoleErrorListener;
    /**
     * {@inheritDoc}
     *
     * This implementation prints messages to the console's error stream
     * containing the values of `line`, `charPositionInLine`, and `msg` using
     * the following format.
     *
     * <pre>
     * line *line*:*charPositionInLine* *msg*
     * </pre>
     *
     * @param recognizer the recognizer that reported the error (unused here)
     * @param offendingSymbol the offending token, if any (unused here)
     * @param line the 1-based line number of the error
     * @param charPositionInLine the 0-based character position within the line
     * @param msg the error message to print
     * @param e the underlying recognition exception, if any (unused here)
     */
    syntaxError<T>(recognizer: Recognizer<T, any>, offendingSymbol: T, line: number, charPositionInLine: number, msg: string, e: RecognitionException | undefined): void;
}

View File

@@ -0,0 +1,33 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.ConsoleErrorListener = void 0;
/**
 * Error listener that reports every syntax error on the console's error
 * stream.
 *
 * @author Sam Harwell
 */
class ConsoleErrorListener {
    /**
     * {@inheritDoc}
     *
     * Writes a single line to `console.error` in the form:
     *
     * <pre>
     * line *line*:*charPositionInLine* *msg*
     * </pre>
     */
    syntaxError(recognizer, offendingSymbol, line, charPositionInLine, msg, e) {
        // Assemble the report first so the format lives in one expression.
        const report = `line ${line}:${charPositionInLine} ${msg}`;
        console.error(report);
    }
}
exports.ConsoleErrorListener = ConsoleErrorListener;
/**
 * Provides a default instance of {@link ConsoleErrorListener}.
 */
// NOTE: the static property is attached after the class body — this is the
// standard TypeScript down-level emit for static initializers.
ConsoleErrorListener.INSTANCE = new ConsoleErrorListener();
//# sourceMappingURL=ConsoleErrorListener.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"ConsoleErrorListener.js","sourceRoot":"","sources":["../../src/ConsoleErrorListener.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;AAQH;;;GAGG;AACH,MAAa,oBAAoB;IAMhC;;;;;;;;;;OAUG;IACI,WAAW,CACjB,UAA8B,EAC9B,eAAkB,EAClB,IAAY,EACZ,kBAA0B,EAC1B,GAAW,EACX,CAAmC;QACnC,OAAO,CAAC,KAAK,CAAC,QAAQ,IAAI,IAAI,kBAAkB,IAAI,GAAG,EAAE,CAAC,CAAC;IAC5D,CAAC;;AAzBF,oDA0BC;AAzBA;;GAEG;AACoB,6BAAQ,GAAyB,IAAI,oBAAoB,EAAE,CAAC","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:50.5479602-07:00\r\n\r\nimport { ANTLRErrorListener } from \"./ANTLRErrorListener\";\r\nimport { RecognitionException } from \"./RecognitionException\";\r\nimport { Recognizer } from \"./Recognizer\";\r\n\r\n/**\r\n *\r\n * @author Sam Harwell\r\n */\r\nexport class ConsoleErrorListener implements ANTLRErrorListener<any> {\r\n\t/**\r\n\t * Provides a default instance of {@link ConsoleErrorListener}.\r\n\t */\r\n\tpublic static readonly INSTANCE: ConsoleErrorListener = new ConsoleErrorListener();\r\n\r\n\t/**\r\n\t * {@inheritDoc}\r\n\t *\r\n\t * This implementation prints messages to {@link System#err} containing the\r\n\t * values of `line`, `charPositionInLine`, and `msg` using\r\n\t * the following format.\r\n\t *\r\n\t * <pre>\r\n\t * line *line*:*charPositionInLine* *msg*\r\n\t * </pre>\r\n\t */\r\n\tpublic syntaxError<T>(\r\n\t\trecognizer: Recognizer<T, any>,\r\n\t\toffendingSymbol: T,\r\n\t\tline: number,\r\n\t\tcharPositionInLine: number,\r\n\t\tmsg: string,\r\n\t\te: RecognitionException | undefined): void {\r\n\t\tconsole.error(`line ${line}:${charPositionInLine} ${msg}`);\r\n\t}\r\n}\r\n"]}

View File

@@ -0,0 +1,8 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
/**
 * Parameter/property decorator marking a value that must never be
 * `null`/`undefined`. Deliberately a runtime no-op (see Decorators.js);
 * the annotation exists for readers and tooling.
 */
export declare function NotNull(target: any, propertyKey: PropertyKey, propertyDescriptor?: PropertyDescriptor | number): void;
/**
 * Parameter/property decorator marking a value that is allowed to be
 * `null`/`undefined`. Runtime no-op.
 */
export declare function Nullable(target: any, propertyKey: PropertyKey, propertyDescriptor?: PropertyDescriptor | number): void;
/**
 * Method decorator documenting that the method overrides a base-class
 * member. Runtime no-op.
 */
export declare function Override(target: any, propertyKey: PropertyKey, propertyDescriptor?: PropertyDescriptor): void;
/**
 * Returns a decorator parameterized by `options`; the returned decorator
 * intentionally does nothing at runtime.
 */
export declare function SuppressWarnings(options: string): (target: any, propertyKey: PropertyKey, descriptor?: PropertyDescriptor | undefined) => void;

View File

@@ -0,0 +1,26 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.SuppressWarnings = exports.Override = exports.Nullable = exports.NotNull = void 0;
/**
 * Marker decorator: the decorated parameter/property must never be
 * null/undefined. Intentionally a runtime no-op — it exists purely for
 * documentation and tooling.
 */
function NotNull(target, propertyKey, propertyDescriptor) {
    // intentionally empty
}
exports.NotNull = NotNull;
/**
 * Marker decorator: the decorated parameter/property is allowed to be
 * null/undefined. Intentionally a runtime no-op.
 */
function Nullable(target, propertyKey, propertyDescriptor) {
    // intentionally empty
}
exports.Nullable = Nullable;
/**
 * Marker decorator documenting that a method overrides a base-class member.
 */
function Override(target, propertyKey, propertyDescriptor) {
    // placeholder body — currently a runtime no-op like the other markers
}
exports.Override = Override;
/**
 * Decorator factory parameterized by `options`. Like the other markers in
 * this module, the decorator it produces is a deliberate runtime no-op.
 */
function SuppressWarnings(options) {
    const decorator = (target, propertyKey, descriptor) => {
        // intentionally empty
    };
    return decorator;
}
exports.SuppressWarnings = SuppressWarnings;
//# sourceMappingURL=Decorators.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"Decorators.js","sourceRoot":"","sources":["../../src/Decorators.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;AAEH,SAAgB,OAAO,CACtB,MAAW,EACX,WAAwB,EACxB,kBAAgD;IAChD,sBAAsB;AACvB,CAAC;AALD,0BAKC;AAED,SAAgB,QAAQ,CACvB,MAAW,EACX,WAAwB,EACxB,kBAAgD;IAChD,sBAAsB;AACvB,CAAC;AALD,4BAKC;AAED,SAAgB,QAAQ,CACvB,MAAW,EACX,WAAwB,EACxB,kBAAuC;IACvC,iCAAiC;AAClC,CAAC;AALD,4BAKC;AAED,SAAgB,gBAAgB,CAAC,OAAe;IAC/C,OAAO,CAAC,MAAW,EAAE,WAAwB,EAAE,UAA+B,EAAE,EAAE;QACjF,sBAAsB;IACvB,CAAC,CAAC;AACH,CAAC;AAJD,4CAIC","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\nexport function NotNull(\r\n\ttarget: any,\r\n\tpropertyKey: PropertyKey,\r\n\tpropertyDescriptor?: PropertyDescriptor | number) {\r\n\t// intentionally empty\r\n}\r\n\r\nexport function Nullable(\r\n\ttarget: any,\r\n\tpropertyKey: PropertyKey,\r\n\tpropertyDescriptor?: PropertyDescriptor | number) {\r\n\t// intentionally empty\r\n}\r\n\r\nexport function Override(\r\n\ttarget: any,\r\n\tpropertyKey: PropertyKey,\r\n\tpropertyDescriptor?: PropertyDescriptor) {\r\n\t// do something with 'target' ...\r\n}\r\n\r\nexport function SuppressWarnings(options: string) {\r\n\treturn (target: any, propertyKey: PropertyKey, descriptor?: PropertyDescriptor) => {\r\n\t\t// intentionally empty\r\n\t};\r\n}\r\n"]}

View File

@@ -0,0 +1,347 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ANTLRErrorStrategy } from "./ANTLRErrorStrategy";
import { FailedPredicateException } from "./FailedPredicateException";
import { InputMismatchException } from "./InputMismatchException";
import { IntervalSet } from "./misc/IntervalSet";
import { NoViableAltException } from "./NoViableAltException";
import { Parser } from "./Parser";
import { ParserRuleContext } from "./ParserRuleContext";
import { RecognitionException } from "./RecognitionException";
import { Token } from "./Token";
import { TokenSource } from "./TokenSource";
/**
 * This is the default implementation of {@link ANTLRErrorStrategy} used for
 * error reporting and recovery in ANTLR parsers.
 */
export declare class DefaultErrorStrategy implements ANTLRErrorStrategy {
    /**
     * Indicates whether the error strategy is currently "recovering from an
     * error". This is used to suppress reporting multiple error messages while
     * attempting to recover from a detected syntax error.
     *
     * @see #inErrorRecoveryMode
     */
    protected errorRecoveryMode: boolean;
    /** The index into the input stream where the last error occurred.
     * This is used to prevent infinite loops where an error is found
     * but no token is consumed during recovery...another error is found,
     * ad nauseam. This is a failsafe mechanism to guarantee that at least
     * one token/tree node is consumed for two errors.
     */
    protected lastErrorIndex: number;
    /**
     * The set of ATN states already visited at {@link #lastErrorIndex};
     * part of the infinite-loop failsafe used by {@link #recover}.
     */
    protected lastErrorStates?: IntervalSet;
    /**
     * This field is used to propagate information about the lookahead following
     * the previous match. Since prediction prefers completing the current rule
     * to error recovery efforts, error reporting may occur later than the
     * original point where it was discoverable. The original context is used to
     * compute the true expected sets as though the reporting occurred as early
     * as possible.
     */
    protected nextTokensContext?: ParserRuleContext;
    /**
     * @see #nextTokensContext
     */
    protected nextTokensState: number;
    /**
     * {@inheritDoc}
     *
     * The default implementation simply calls {@link #endErrorCondition} to
     * ensure that the handler is not in error recovery mode.
     */
    reset(recognizer: Parser): void;
    /**
     * This method is called to enter error recovery mode when a recognition
     * exception is reported.
     *
     * @param recognizer the parser instance
     */
    protected beginErrorCondition(recognizer: Parser): void;
    /**
     * {@inheritDoc}
     */
    inErrorRecoveryMode(recognizer: Parser): boolean;
    /**
     * This method is called to leave error recovery mode after recovering from
     * a recognition exception.
     *
     * @param recognizer
     */
    protected endErrorCondition(recognizer: Parser): void;
    /**
     * {@inheritDoc}
     *
     * The default implementation simply calls {@link #endErrorCondition}.
     */
    reportMatch(recognizer: Parser): void;
    /**
     * {@inheritDoc}
     *
     * The default implementation returns immediately if the handler is already
     * in error recovery mode. Otherwise, it calls {@link #beginErrorCondition}
     * and dispatches the reporting task based on the runtime type of `e`
     * according to the following table.
     *
     * * {@link NoViableAltException}: Dispatches the call to
     * {@link #reportNoViableAlternative}
     * * {@link InputMismatchException}: Dispatches the call to
     * {@link #reportInputMismatch}
     * * {@link FailedPredicateException}: Dispatches the call to
     * {@link #reportFailedPredicate}
     * * All other types: calls {@link Parser#notifyErrorListeners} to report
     * the exception
     */
    reportError(recognizer: Parser, e: RecognitionException): void;
    /**
     * Forwards `message` and `e` to {@link Parser#notifyErrorListeners},
     * substituting the exception's offending token when one is available
     * (see the runtime implementation in DefaultErrorStrategy.js).
     */
    protected notifyErrorListeners(recognizer: Parser, message: string, e: RecognitionException): void;
    /**
     * {@inheritDoc}
     *
     * The default implementation resynchronizes the parser by consuming tokens
     * until we find one in the resynchronization set--loosely the set of tokens
     * that can follow the current rule.
     */
    recover(recognizer: Parser, e: RecognitionException): void;
    /**
     * The default implementation of {@link ANTLRErrorStrategy#sync} makes sure
     * that the current lookahead symbol is consistent with what we're expecting
     * at this point in the ATN. You can call this anytime but ANTLR only
     * generates code to check before subrules/loops and each iteration.
     *
     * Implements Jim Idle's magic sync mechanism in closures and optional
     * subrules. E.g.,
     *
     * ```antlr
     * a : sync ( stuff sync )* ;
     * sync : {consume to what can follow sync} ;
     * ```
     *
     * At the start of a sub rule upon error, {@link #sync} performs single
     * token deletion, if possible. If it can't do that, it bails on the current
     * rule and uses the default error recovery, which consumes until the
     * resynchronization set of the current rule.
     *
     * If the sub rule is optional (`(...)?`, `(...)*`, or block
     * with an empty alternative), then the expected set includes what follows
     * the subrule.
     *
     * During loop iteration, it consumes until it sees a token that can start a
     * sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
     * stay in the loop as long as possible.
     *
     * **ORIGINS**
     *
     * Previous versions of ANTLR did a poor job of their recovery within loops.
     * A single mismatch token or missing token would force the parser to bail
     * out of the entire rules surrounding the loop. So, for rule
     *
     * ```antlr
     * classDef : 'class' ID '{' member* '}'
     * ```
     *
     * input with an extra token between members would force the parser to
     * consume until it found the next class definition rather than the next
     * member definition of the current class.
     *
     * This functionality cost a little bit of effort because the parser has to
     * compare token set at the start of the loop and at each iteration. If for
     * some reason speed is suffering for you, you can turn off this
     * functionality by simply overriding this method as a blank { }.
     */
    sync(recognizer: Parser): void;
    /**
     * This is called by {@link #reportError} when the exception is a
     * {@link NoViableAltException}.
     *
     * @see #reportError
     *
     * @param recognizer the parser instance
     * @param e the recognition exception
     */
    protected reportNoViableAlternative(recognizer: Parser, e: NoViableAltException): void;
    /**
     * This is called by {@link #reportError} when the exception is an
     * {@link InputMismatchException}.
     *
     * @see #reportError
     *
     * @param recognizer the parser instance
     * @param e the recognition exception
     */
    protected reportInputMismatch(recognizer: Parser, e: InputMismatchException): void;
    /**
     * This is called by {@link #reportError} when the exception is a
     * {@link FailedPredicateException}.
     *
     * @see #reportError
     *
     * @param recognizer the parser instance
     * @param e the recognition exception
     */
    protected reportFailedPredicate(recognizer: Parser, e: FailedPredicateException): void;
    /**
     * This method is called to report a syntax error which requires the removal
     * of a token from the input stream. At the time this method is called, the
     * erroneous symbol is current `LT(1)` symbol and has not yet been
     * removed from the input stream. When this method returns,
     * `recognizer` is in error recovery mode.
     *
     * This method is called when {@link #singleTokenDeletion} identifies
     * single-token deletion as a viable recovery strategy for a mismatched
     * input error.
     *
     * The default implementation simply returns if the handler is already in
     * error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
     * enter error recovery mode, followed by calling
     * {@link Parser#notifyErrorListeners}.
     *
     * @param recognizer the parser instance
     */
    protected reportUnwantedToken(recognizer: Parser): void;
    /**
     * This method is called to report a syntax error which requires the
     * insertion of a missing token into the input stream. At the time this
     * method is called, the missing token has not yet been inserted. When this
     * method returns, `recognizer` is in error recovery mode.
     *
     * This method is called when {@link #singleTokenInsertion} identifies
     * single-token insertion as a viable recovery strategy for a mismatched
     * input error.
     *
     * The default implementation simply returns if the handler is already in
     * error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
     * enter error recovery mode, followed by calling
     * {@link Parser#notifyErrorListeners}.
     *
     * @param recognizer the parser instance
     */
    protected reportMissingToken(recognizer: Parser): void;
    /**
     * {@inheritDoc}
     *
     * The default implementation attempts to recover from the mismatched input
     * by using single token insertion and deletion as described below. If the
     * recovery attempt fails, this method throws an
     * {@link InputMismatchException}.
     *
     * **EXTRA TOKEN** (single token deletion)
     *
     * `LA(1)` is not what we are looking for. If `LA(2)` has the
     * right token, however, then assume `LA(1)` is some extra spurious
     * token and delete it. Then consume and return the next token (which was
     * the `LA(2)` token) as the successful result of the match operation.
     *
     * This recovery strategy is implemented by {@link #singleTokenDeletion}.
     *
     * **MISSING TOKEN** (single token insertion)
     *
     * If current token (at `LA(1)`) is consistent with what could come
     * after the expected `LA(1)` token, then assume the token is missing
     * and use the parser's {@link TokenFactory} to create it on the fly. The
     * "insertion" is performed by returning the created token as the successful
     * result of the match operation.
     *
     * This recovery strategy is implemented by {@link #singleTokenInsertion}.
     *
     * **EXAMPLE**
     *
     * For example, Input `i=(3;` is clearly missing the `')'`. When
     * the parser returns from the nested call to `expr`, it will have
     * call chain:
     *
     * ```
     * stat → expr → atom
     * ```
     *
     * and it will be trying to match the `')'` at this point in the
     * derivation:
     *
     * ```
     * => ID '=' '(' INT ')' ('+' atom)* ';'
     * ^
     * ```
     *
     * The attempt to match `')'` will fail when it sees `';'` and
     * call {@link #recoverInline}. To recover, it sees that `LA(1)==';'`
     * is in the set of tokens that can follow the `')'` token reference
     * in rule `atom`. It can assume that you forgot the `')'`.
     */
    recoverInline(recognizer: Parser): Token;
    /**
     * This method implements the single-token insertion inline error recovery
     * strategy. It is called by {@link #recoverInline} if the single-token
     * deletion strategy fails to recover from the mismatched input. If this
     * method returns `true`, `recognizer` will be in error recovery
     * mode.
     *
     * This method determines whether or not single-token insertion is viable by
     * checking if the `LA(1)` input symbol could be successfully matched
     * if it were instead the `LA(2)` symbol. If this method returns
     * `true`, the caller is responsible for creating and inserting a
     * token with the correct type to produce this behavior.
     *
     * @param recognizer the parser instance
     * @returns `true` if single-token insertion is a viable recovery
     * strategy for the current mismatched input, otherwise `false`
     */
    protected singleTokenInsertion(recognizer: Parser): boolean;
    /**
     * This method implements the single-token deletion inline error recovery
     * strategy. It is called by {@link #recoverInline} to attempt to recover
     * from mismatched input. If this method returns `undefined`, the parser and error
     * handler state will not have changed. If this method returns non-`undefined`,
     * `recognizer` will *not* be in error recovery mode since the
     * returned token was a successful match.
     *
     * If the single-token deletion is successful, this method calls
     * {@link #reportUnwantedToken} to report the error, followed by
     * {@link Parser#consume} to actually "delete" the extraneous token. Then,
     * before returning {@link #reportMatch} is called to signal a successful
     * match.
     *
     * @param recognizer the parser instance
     * @returns the successfully matched {@link Token} instance if single-token
     * deletion successfully recovers from the mismatched input, otherwise
     * `undefined`
     */
    protected singleTokenDeletion(recognizer: Parser): Token | undefined;
    /** Conjure up a missing token during error recovery.
     *
     * The recognizer attempts to recover from single missing
     * symbols. But, actions might refer to that missing symbol.
     * For example, x=ID {f($x);}. The action clearly assumes
     * that there has been an identifier matched previously and that
     * $x points at that token. If that token is missing, but
     * the next token in the stream is what we want we assume that
     * this token is missing and we keep going. Because we
     * have to return some token to replace the missing token,
     * we have to conjure one up. This method gives the user control
     * over the tokens returned for missing tokens. Mostly,
     * you will want to create something special for identifier
     * tokens. For literals such as '{' and ',', the default
     * action in the parser or tree parser works. It simply creates
     * a CommonToken of the appropriate type. The text will be the token.
     * If you change what tokens must be created by the lexer,
     * override this method to create the appropriate tokens.
     */
    protected getMissingSymbol(recognizer: Parser): Token;
    /**
     * Builds the placeholder token used by {@link #getMissingSymbol}.
     * NOTE(review): implementation not visible in this chunk — presumably
     * delegates to the token factory of `tokenSource`; confirm against
     * DefaultErrorStrategy.js.
     */
    protected constructToken(tokenSource: TokenSource, expectedTokenType: number, tokenText: string, current: Token): Token;
    /**
     * The set of token types expected at the current parser state; used when
     * composing error messages (e.g. by {@link #reportUnwantedToken}).
     */
    protected getExpectedTokens(recognizer: Parser): IntervalSet;
    /** How should a token be displayed in an error message? The default
     * is to display just the text, but during development you might
     * want to have a lot of information spit out. Override in that case
     * to use t.toString() (which, for CommonToken, dumps everything about
     * the token). This is better than forcing you to override a method in
     * your token objects because you don't have to go modify your lexer
     * so that it creates a new Java type.
     */
    protected getTokenErrorDisplay(t: Token | undefined): string;
    /** Returns the display text for `symbol`, if it has any. */
    protected getSymbolText(symbol: Token): string | undefined;
    /** Returns the token type of `symbol`. */
    protected getSymbolType(symbol: Token): number;
    /**
     * Escapes whitespace and quotes `s` for inclusion in an error message
     * (used by {@link #reportNoViableAlternative}).
     */
    protected escapeWSAndQuote(s: string): string;
    /**
     * Computes the resynchronization set used by {@link #recover} and
     * {@link #sync}: loosely, the set of tokens that can follow the
     * current rule.
     */
    protected getErrorRecoverySet(recognizer: Parser): IntervalSet;
    /** Consume tokens until one matches the given token set. */
    protected consumeUntil(recognizer: Parser, set: IntervalSet): void;
}

View File

@@ -0,0 +1,813 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
// Standard TypeScript down-level emit helpers for (experimental) decorators;
// generated by tsc, not hand-written.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __param = (this && this.__param) || function (paramIndex, decorator) {
    return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.DefaultErrorStrategy = void 0;
const ATNState_1 = require("./atn/ATNState");
const ATNStateType_1 = require("./atn/ATNStateType");
const FailedPredicateException_1 = require("./FailedPredicateException");
const InputMismatchException_1 = require("./InputMismatchException");
const IntervalSet_1 = require("./misc/IntervalSet");
const NoViableAltException_1 = require("./NoViableAltException");
const PredictionContext_1 = require("./atn/PredictionContext");
const Token_1 = require("./Token");
const Decorators_1 = require("./Decorators");
/**
* This is the default implementation of {@link ANTLRErrorStrategy} used for
* error reporting and recovery in ANTLR parsers.
*/
class DefaultErrorStrategy {
constructor() {
/**
* Indicates whether the error strategy is currently "recovering from an
* error". This is used to suppress reporting multiple error messages while
* attempting to recover from a detected syntax error.
*
* @see #inErrorRecoveryMode
*/
this.errorRecoveryMode = false;
/** The index into the input stream where the last error occurred.
* This is used to prevent infinite loops where an error is found
* but no token is consumed during recovery...another error is found,
* ad nauseum. This is a failsafe mechanism to guarantee that at least
* one token/tree node is consumed for two errors.
*/
this.lastErrorIndex = -1;
/**
* @see #nextTokensContext
*/
this.nextTokensState = ATNState_1.ATNState.INVALID_STATE_NUMBER;
}
/**
* {@inheritDoc}
*
* The default implementation simply calls {@link #endErrorCondition} to
* ensure that the handler is not in error recovery mode.
*/
reset(recognizer) {
this.endErrorCondition(recognizer);
}
/**
* This method is called to enter error recovery mode when a recognition
* exception is reported.
*
* @param recognizer the parser instance
*/
beginErrorCondition(recognizer) {
this.errorRecoveryMode = true;
}
/**
* {@inheritDoc}
*/
inErrorRecoveryMode(recognizer) {
return this.errorRecoveryMode;
}
/**
* This method is called to leave error recovery mode after recovering from
* a recognition exception.
*
* @param recognizer
*/
endErrorCondition(recognizer) {
this.errorRecoveryMode = false;
this.lastErrorStates = undefined;
this.lastErrorIndex = -1;
}
/**
* {@inheritDoc}
*
* The default implementation simply calls {@link #endErrorCondition}.
*/
reportMatch(recognizer) {
this.endErrorCondition(recognizer);
}
/**
* {@inheritDoc}
*
* The default implementation returns immediately if the handler is already
* in error recovery mode. Otherwise, it calls {@link #beginErrorCondition}
* and dispatches the reporting task based on the runtime type of `e`
* according to the following table.
*
* * {@link NoViableAltException}: Dispatches the call to
* {@link #reportNoViableAlternative}
* * {@link InputMismatchException}: Dispatches the call to
* {@link #reportInputMismatch}
* * {@link FailedPredicateException}: Dispatches the call to
* {@link #reportFailedPredicate}
* * All other types: calls {@link Parser#notifyErrorListeners} to report
* the exception
*/
reportError(recognizer, e) {
// if we've already reported an error and have not matched a token
// yet successfully, don't report any errors.
if (this.inErrorRecoveryMode(recognizer)) {
// System.err.print("[SPURIOUS] ");
return; // don't report spurious errors
}
this.beginErrorCondition(recognizer);
if (e instanceof NoViableAltException_1.NoViableAltException) {
this.reportNoViableAlternative(recognizer, e);
}
else if (e instanceof InputMismatchException_1.InputMismatchException) {
this.reportInputMismatch(recognizer, e);
}
else if (e instanceof FailedPredicateException_1.FailedPredicateException) {
this.reportFailedPredicate(recognizer, e);
}
else {
console.error(`unknown recognition error type: ${e}`);
this.notifyErrorListeners(recognizer, e.toString(), e);
}
}
notifyErrorListeners(recognizer, message, e) {
let offendingToken = e.getOffendingToken(recognizer);
if (offendingToken === undefined) {
// Pass null to notifyErrorListeners so it in turn calls the error listeners with undefined as the offending
// token. If we passed undefined, it would instead call the listeners with currentToken from the parser.
offendingToken = null;
}
recognizer.notifyErrorListeners(message, offendingToken, e);
}
/**
* {@inheritDoc}
*
* The default implementation resynchronizes the parser by consuming tokens
* until we find one in the resynchronization set--loosely the set of tokens
* that can follow the current rule.
*/
recover(recognizer, e) {
// System.out.println("recover in "+recognizer.getRuleInvocationStack()+
// " index="+recognizer.inputStream.index+
// ", lastErrorIndex="+
// lastErrorIndex+
// ", states="+lastErrorStates);
if (this.lastErrorIndex === recognizer.inputStream.index &&
this.lastErrorStates &&
this.lastErrorStates.contains(recognizer.state)) {
// uh oh, another error at same token index and previously-visited
// state in ATN; must be a case where LT(1) is in the recovery
// token set so nothing got consumed. Consume a single token
// at least to prevent an infinite loop; this is a failsafe.
// System.err.println("seen error condition before index="+
// lastErrorIndex+", states="+lastErrorStates);
// System.err.println("FAILSAFE consumes "+recognizer.getTokenNames()[recognizer.inputStream.LA(1)]);
recognizer.consume();
}
this.lastErrorIndex = recognizer.inputStream.index;
if (!this.lastErrorStates) {
this.lastErrorStates = new IntervalSet_1.IntervalSet();
}
this.lastErrorStates.add(recognizer.state);
let followSet = this.getErrorRecoverySet(recognizer);
this.consumeUntil(recognizer, followSet);
}
/**
* The default implementation of {@link ANTLRErrorStrategy#sync} makes sure
* that the current lookahead symbol is consistent with what were expecting
* at this point in the ATN. You can call this anytime but ANTLR only
* generates code to check before subrules/loops and each iteration.
*
* Implements Jim Idle's magic sync mechanism in closures and optional
* subrules. E.g.,
*
* ```antlr
* a : sync ( stuff sync )* ;
* sync : {consume to what can follow sync} ;
* ```
*
* At the start of a sub rule upon error, {@link #sync} performs single
* token deletion, if possible. If it can't do that, it bails on the current
* rule and uses the default error recovery, which consumes until the
* resynchronization set of the current rule.
*
* If the sub rule is optional (`(...)?`, `(...)*`, or block
* with an empty alternative), then the expected set includes what follows
* the subrule.
*
* During loop iteration, it consumes until it sees a token that can start a
* sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
* stay in the loop as long as possible.
*
* **ORIGINS**
*
* Previous versions of ANTLR did a poor job of their recovery within loops.
* A single mismatch token or missing token would force the parser to bail
* out of the entire rules surrounding the loop. So, for rule
*
* ```antlr
* classDef : 'class' ID '{' member* '}'
* ```
*
* input with an extra token between members would force the parser to
* consume until it found the next class definition rather than the next
* member definition of the current class.
*
* This functionality cost a little bit of effort because the parser has to
* compare token set at the start of the loop and at each iteration. If for
* some reason speed is suffering for you, you can turn off this
* functionality by simply overriding this method as a blank { }.
*/
sync(recognizer) {
let s = recognizer.interpreter.atn.states[recognizer.state];
// System.err.println("sync @ "+s.stateNumber+"="+s.getClass().getSimpleName());
// If already recovering, don't try to sync
if (this.inErrorRecoveryMode(recognizer)) {
return;
}
let tokens = recognizer.inputStream;
let la = tokens.LA(1);
// try cheaper subset first; might get lucky. seems to shave a wee bit off
let nextTokens = recognizer.atn.nextTokens(s);
if (nextTokens.contains(la)) {
// We are sure the token matches
this.nextTokensContext = undefined;
this.nextTokensState = ATNState_1.ATNState.INVALID_STATE_NUMBER;
return;
}
if (nextTokens.contains(Token_1.Token.EPSILON)) {
if (this.nextTokensContext === undefined) {
// It's possible the next token won't match; information tracked
// by sync is restricted for performance.
this.nextTokensContext = recognizer.context;
this.nextTokensState = recognizer.state;
}
return;
}
switch (s.stateType) {
case ATNStateType_1.ATNStateType.BLOCK_START:
case ATNStateType_1.ATNStateType.STAR_BLOCK_START:
case ATNStateType_1.ATNStateType.PLUS_BLOCK_START:
case ATNStateType_1.ATNStateType.STAR_LOOP_ENTRY:
// report error and recover if possible
if (this.singleTokenDeletion(recognizer)) {
return;
}
throw new InputMismatchException_1.InputMismatchException(recognizer);
case ATNStateType_1.ATNStateType.PLUS_LOOP_BACK:
case ATNStateType_1.ATNStateType.STAR_LOOP_BACK:
// System.err.println("at loop back: "+s.getClass().getSimpleName());
this.reportUnwantedToken(recognizer);
let expecting = recognizer.getExpectedTokens();
let whatFollowsLoopIterationOrRule = expecting.or(this.getErrorRecoverySet(recognizer));
this.consumeUntil(recognizer, whatFollowsLoopIterationOrRule);
break;
default:
// do nothing if we can't identify the exact kind of ATN state
break;
}
}
/**
* This is called by {@link #reportError} when the exception is a
* {@link NoViableAltException}.
*
* @see #reportError
*
* @param recognizer the parser instance
* @param e the recognition exception
*/
reportNoViableAlternative(recognizer, e) {
let tokens = recognizer.inputStream;
let input;
if (tokens) {
if (e.startToken.type === Token_1.Token.EOF) {
input = "<EOF>";
}
else {
input = tokens.getTextFromRange(e.startToken, e.getOffendingToken());
}
}
else {
input = "<unknown input>";
}
let msg = "no viable alternative at input " + this.escapeWSAndQuote(input);
this.notifyErrorListeners(recognizer, msg, e);
}
/**
* This is called by {@link #reportError} when the exception is an
* {@link InputMismatchException}.
*
* @see #reportError
*
* @param recognizer the parser instance
* @param e the recognition exception
*/
reportInputMismatch(recognizer, e) {
let expected = e.expectedTokens;
let expectedString = expected ? expected.toStringVocabulary(recognizer.vocabulary) : "";
let msg = "mismatched input " + this.getTokenErrorDisplay(e.getOffendingToken(recognizer)) +
" expecting " + expectedString;
this.notifyErrorListeners(recognizer, msg, e);
}
/**
* This is called by {@link #reportError} when the exception is a
* {@link FailedPredicateException}.
*
* @see #reportError
*
* @param recognizer the parser instance
* @param e the recognition exception
*/
reportFailedPredicate(recognizer, e) {
let ruleName = recognizer.ruleNames[recognizer.context.ruleIndex];
let msg = "rule " + ruleName + " " + e.message;
this.notifyErrorListeners(recognizer, msg, e);
}
/**
* This method is called to report a syntax error which requires the removal
* of a token from the input stream. At the time this method is called, the
* erroneous symbol is current `LT(1)` symbol and has not yet been
* removed from the input stream. When this method returns,
* `recognizer` is in error recovery mode.
*
* This method is called when {@link #singleTokenDeletion} identifies
* single-token deletion as a viable recovery strategy for a mismatched
* input error.
*
* The default implementation simply returns if the handler is already in
* error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
* enter error recovery mode, followed by calling
* {@link Parser#notifyErrorListeners}.
*
* @param recognizer the parser instance
*/
reportUnwantedToken(recognizer) {
if (this.inErrorRecoveryMode(recognizer)) {
return;
}
this.beginErrorCondition(recognizer);
let t = recognizer.currentToken;
let tokenName = this.getTokenErrorDisplay(t);
let expecting = this.getExpectedTokens(recognizer);
let msg = "extraneous input " + tokenName + " expecting " +
expecting.toStringVocabulary(recognizer.vocabulary);
recognizer.notifyErrorListeners(msg, t, undefined);
}
/**
* This method is called to report a syntax error which requires the
* insertion of a missing token into the input stream. At the time this
* method is called, the missing token has not yet been inserted. When this
* method returns, `recognizer` is in error recovery mode.
*
* This method is called when {@link #singleTokenInsertion} identifies
* single-token insertion as a viable recovery strategy for a mismatched
* input error.
*
* The default implementation simply returns if the handler is already in
* error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
* enter error recovery mode, followed by calling
* {@link Parser#notifyErrorListeners}.
*
* @param recognizer the parser instance
*/
reportMissingToken(recognizer) {
if (this.inErrorRecoveryMode(recognizer)) {
return;
}
this.beginErrorCondition(recognizer);
let t = recognizer.currentToken;
let expecting = this.getExpectedTokens(recognizer);
let msg = "missing " + expecting.toStringVocabulary(recognizer.vocabulary) +
" at " + this.getTokenErrorDisplay(t);
recognizer.notifyErrorListeners(msg, t, undefined);
}
/**
* {@inheritDoc}
*
* The default implementation attempts to recover from the mismatched input
* by using single token insertion and deletion as described below. If the
* recovery attempt fails, this method
* {@link InputMismatchException}.
*
* **EXTRA TOKEN** (single token deletion)
*
* `LA(1)` is not what we are looking for. If `LA(2)` has the
* right token, however, then assume `LA(1)` is some extra spurious
* token and delete it. Then consume and return the next token (which was
* the `LA(2)` token) as the successful result of the match operation.
*
* This recovery strategy is implemented by {@link #singleTokenDeletion}.
*
* **MISSING TOKEN** (single token insertion)
*
* If current token (at `LA(1)`) is consistent with what could come
* after the expected `LA(1)` token, then assume the token is missing
* and use the parser's {@link TokenFactory} to create it on the fly. The
* "insertion" is performed by returning the created token as the successful
* result of the match operation.
*
* This recovery strategy is implemented by {@link #singleTokenInsertion}.
*
* **EXAMPLE**
*
* For example, Input `i=(3;` is clearly missing the `')'`. When
* the parser returns from the nested call to `expr`, it will have
* call chain:
*
* ```
* stat → expr → atom
* ```
*
* and it will be trying to match the `')'` at this point in the
* derivation:
*
* ```
* => ID '=' '(' INT ')' ('+' atom)* ';'
* ^
* ```
*
* The attempt to match `')'` will fail when it sees `';'` and
* call {@link #recoverInline}. To recover, it sees that `LA(1)==';'`
* is in the set of tokens that can follow the `')'` token reference
* in rule `atom`. It can assume that you forgot the `')'`.
*/
recoverInline(recognizer) {
// SINGLE TOKEN DELETION
let matchedSymbol = this.singleTokenDeletion(recognizer);
if (matchedSymbol) {
// we have deleted the extra token.
// now, move past ttype token as if all were ok
recognizer.consume();
return matchedSymbol;
}
// SINGLE TOKEN INSERTION
if (this.singleTokenInsertion(recognizer)) {
return this.getMissingSymbol(recognizer);
}
// even that didn't work; must throw the exception
if (this.nextTokensContext === undefined) {
throw new InputMismatchException_1.InputMismatchException(recognizer);
}
else {
throw new InputMismatchException_1.InputMismatchException(recognizer, this.nextTokensState, this.nextTokensContext);
}
}
/**
* This method implements the single-token insertion inline error recovery
* strategy. It is called by {@link #recoverInline} if the single-token
* deletion strategy fails to recover from the mismatched input. If this
* method returns `true`, `recognizer` will be in error recovery
* mode.
*
* This method determines whether or not single-token insertion is viable by
* checking if the `LA(1)` input symbol could be successfully matched
* if it were instead the `LA(2)` symbol. If this method returns
* `true`, the caller is responsible for creating and inserting a
* token with the correct type to produce this behavior.
*
* @param recognizer the parser instance
* @returns `true` if single-token insertion is a viable recovery
* strategy for the current mismatched input, otherwise `false`
*/
singleTokenInsertion(recognizer) {
let currentSymbolType = recognizer.inputStream.LA(1);
// if current token is consistent with what could come after current
// ATN state, then we know we're missing a token; error recovery
// is free to conjure up and insert the missing token
let currentState = recognizer.interpreter.atn.states[recognizer.state];
let next = currentState.transition(0).target;
let atn = recognizer.interpreter.atn;
let expectingAtLL2 = atn.nextTokens(next, PredictionContext_1.PredictionContext.fromRuleContext(atn, recognizer.context));
// console.warn("LT(2) set="+expectingAtLL2.toString(recognizer.getTokenNames()));
if (expectingAtLL2.contains(currentSymbolType)) {
this.reportMissingToken(recognizer);
return true;
}
return false;
}
/**
* This method implements the single-token deletion inline error recovery
* strategy. It is called by {@link #recoverInline} to attempt to recover
* from mismatched input. If this method returns `undefined`, the parser and error
* handler state will not have changed. If this method returns non-`undefined`,
* `recognizer` will *not* be in error recovery mode since the
* returned token was a successful match.
*
* If the single-token deletion is successful, this method calls
* {@link #reportUnwantedToken} to report the error, followed by
* {@link Parser#consume} to actually "delete" the extraneous token. Then,
* before returning {@link #reportMatch} is called to signal a successful
* match.
*
* @param recognizer the parser instance
* @returns the successfully matched {@link Token} instance if single-token
* deletion successfully recovers from the mismatched input, otherwise
* `undefined`
*/
singleTokenDeletion(recognizer) {
let nextTokenType = recognizer.inputStream.LA(2);
let expecting = this.getExpectedTokens(recognizer);
if (expecting.contains(nextTokenType)) {
this.reportUnwantedToken(recognizer);
/*
System.err.println("recoverFromMismatchedToken deleting "+
((TokenStream)recognizer.inputStream).LT(1)+
" since "+((TokenStream)recognizer.inputStream).LT(2)+
" is what we want");
*/
recognizer.consume(); // simply delete extra token
// we want to return the token we're actually matching
let matchedSymbol = recognizer.currentToken;
this.reportMatch(recognizer); // we know current token is correct
return matchedSymbol;
}
return undefined;
}
/** Conjure up a missing token during error recovery.
*
* The recognizer attempts to recover from single missing
* symbols. But, actions might refer to that missing symbol.
* For example, x=ID {f($x);}. The action clearly assumes
* that there has been an identifier matched previously and that
* $x points at that token. If that token is missing, but
* the next token in the stream is what we want we assume that
* this token is missing and we keep going. Because we
* have to return some token to replace the missing token,
* we have to conjure one up. This method gives the user control
* over the tokens returned for missing tokens. Mostly,
* you will want to create something special for identifier
* tokens. For literals such as '{' and ',', the default
* action in the parser or tree parser works. It simply creates
* a CommonToken of the appropriate type. The text will be the token.
* If you change what tokens must be created by the lexer,
* override this method to create the appropriate tokens.
*/
getMissingSymbol(recognizer) {
let currentSymbol = recognizer.currentToken;
let expecting = this.getExpectedTokens(recognizer);
let expectedTokenType = Token_1.Token.INVALID_TYPE;
if (!expecting.isNil) {
// get any element
expectedTokenType = expecting.minElement;
}
let tokenText;
if (expectedTokenType === Token_1.Token.EOF) {
tokenText = "<missing EOF>";
}
else {
tokenText = "<missing " + recognizer.vocabulary.getDisplayName(expectedTokenType) + ">";
}
let current = currentSymbol;
let lookback = recognizer.inputStream.tryLT(-1);
if (current.type === Token_1.Token.EOF && lookback != null) {
current = lookback;
}
return this.constructToken(recognizer.inputStream.tokenSource, expectedTokenType, tokenText, current);
}
constructToken(tokenSource, expectedTokenType, tokenText, current) {
let factory = tokenSource.tokenFactory;
let x = current.tokenSource;
let stream = x ? x.inputStream : undefined;
return factory.create({ source: tokenSource, stream }, expectedTokenType, tokenText, Token_1.Token.DEFAULT_CHANNEL, -1, -1, current.line, current.charPositionInLine);
}
    /** Delegates to {@link Parser#getExpectedTokens} for the recognizer's current state. */
    getExpectedTokens(recognizer) {
        return recognizer.getExpectedTokens();
    }
/** How should a token be displayed in an error message? The default
* is to display just the text, but during development you might
* want to have a lot of information spit out. Override in that case
* to use t.toString() (which, for CommonToken, dumps everything about
* the token). This is better than forcing you to override a method in
* your token objects because you don't have to go modify your lexer
* so that it creates a new Java type.
*/
getTokenErrorDisplay(t) {
if (!t) {
return "<no token>";
}
let s = this.getSymbolText(t);
if (!s) {
if (this.getSymbolType(t) === Token_1.Token.EOF) {
s = "<EOF>";
}
else {
s = `<${this.getSymbolType(t)}>`;
}
}
return this.escapeWSAndQuote(s);
}
    /** Extracts the display text from a symbol; override for custom token types. */
    getSymbolText(symbol) {
        return symbol.text;
    }
    /** Extracts the token type from a symbol; override for custom token types. */
    getSymbolType(symbol) {
        return symbol.type;
    }
escapeWSAndQuote(s) {
// if ( s==null ) return s;
s = s.replace("\n", "\\n");
s = s.replace("\r", "\\r");
s = s.replace("\t", "\\t");
return "'" + s + "'";
}
/* Compute the error recovery set for the current rule. During
* rule invocation, the parser pushes the set of tokens that can
* follow that rule reference on the stack; this amounts to
* computing FIRST of what follows the rule reference in the
* enclosing rule. See LinearApproximator.FIRST().
* This local follow set only includes tokens
* from within the rule; i.e., the FIRST computation done by
* ANTLR stops at the end of a rule.
*
* EXAMPLE
*
* When you find a "no viable alt exception", the input is not
* consistent with any of the alternatives for rule r. The best
* thing to do is to consume tokens until you see something that
* can legally follow a call to r *or* any rule that called r.
* You don't want the exact set of viable next tokens because the
* input might just be missing a token--you might consume the
* rest of the input looking for one of the missing tokens.
*
* Consider grammar:
*
* a : '[' b ']'
* | '(' b ')'
* ;
* b : c '^' INT ;
* c : ID
* | INT
* ;
*
* At each rule invocation, the set of tokens that could follow
* that rule is pushed on a stack. Here are the various
* context-sensitive follow sets:
*
* FOLLOW(b1_in_a) = FIRST(']') = ']'
* FOLLOW(b2_in_a) = FIRST(')') = ')'
* FOLLOW(c_in_b) = FIRST('^') = '^'
*
* Upon erroneous input "[]", the call chain is
*
* a -> b -> c
*
* and, hence, the follow context stack is:
*
* depth follow set start of rule execution
* 0 <EOF> a (from main())
* 1 ']' b
* 2 '^' c
*
* Notice that ')' is not included, because b would have to have
* been called from a different context in rule a for ')' to be
* included.
*
* For error recovery, we cannot consider FOLLOW(c)
* (context-sensitive or otherwise). We need the combined set of
* all context-sensitive FOLLOW sets--the set of all tokens that
* could follow any reference in the call chain. We need to
* resync to one of those tokens. Note that FOLLOW(c)='^' and if
* we resync'd to that token, we'd consume until EOF. We need to
* sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
* In this case, for input "[]", LA(1) is ']' and in the set, so we would
* not consume anything. After printing an error, rule c would
* return normally. Rule b would not find the required '^' though.
* At this point, it gets a mismatched token error and
* exception (since LA(1) is not in the viable following token
* set). The rule exception handler tries to recover, but finds
* the same recovery set and doesn't consume anything. Rule b
* exits normally returning to rule a. Now it finds the ']' (and
* with the successful match exits errorRecovery mode).
*
* So, you can see that the parser walks up the call chain looking
* for the token that was a member of the recovery set.
*
* Errors are not generated in errorRecovery mode.
*
* ANTLR's error recovery mechanism is based upon original ideas:
*
* "Algorithms + Data Structures = Programs" by Niklaus Wirth
*
* and
*
* "A note on error recovery in recursive descent parsers":
* http://portal.acm.org/citation.cfm?id=947902.947905
*
* Later, Josef Grosch had some good ideas:
*
* "Efficient and Comfortable Error Recovery in Recursive Descent
* Parsers":
* ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
*
* Like Grosch I implement context-sensitive FOLLOW sets that are combined
* at run-time upon error to avoid overhead during parsing.
*/
getErrorRecoverySet(recognizer) {
let atn = recognizer.interpreter.atn;
let ctx = recognizer.context;
let recoverSet = new IntervalSet_1.IntervalSet();
while (ctx && ctx.invokingState >= 0) {
// compute what follows who invoked us
let invokingState = atn.states[ctx.invokingState];
let rt = invokingState.transition(0);
let follow = atn.nextTokens(rt.followState);
recoverSet.addAll(follow);
ctx = ctx._parent;
}
recoverSet.remove(Token_1.Token.EPSILON);
// System.out.println("recover set "+recoverSet.toString(recognizer.getTokenNames()));
return recoverSet;
}
/** Consume tokens until one matches the given token set. */
consumeUntil(recognizer, set) {
// System.err.println("consumeUntil("+set.toString(recognizer.getTokenNames())+")");
let ttype = recognizer.inputStream.LA(1);
while (ttype !== Token_1.Token.EOF && !set.contains(ttype)) {
//System.out.println("consume during recover LA(1)="+getTokenNames()[input.LA(1)]);
// recognizer.inputStream.consume();
recognizer.consume();
ttype = recognizer.inputStream.LA(1);
}
}
}
// Decorator metadata emitted by the TypeScript compiler: applies the
// @Override / @NotNull annotations from the original source to the
// corresponding DefaultErrorStrategy prototype methods and parameters.
__decorate([
    Decorators_1.Override
], DefaultErrorStrategy.prototype, "reset", null);
__decorate([
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "beginErrorCondition", null);
__decorate([
    Decorators_1.Override
], DefaultErrorStrategy.prototype, "inErrorRecoveryMode", null);
__decorate([
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "endErrorCondition", null);
__decorate([
    Decorators_1.Override
], DefaultErrorStrategy.prototype, "reportMatch", null);
__decorate([
    Decorators_1.Override
], DefaultErrorStrategy.prototype, "reportError", null);
__decorate([
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "notifyErrorListeners", null);
__decorate([
    Decorators_1.Override
], DefaultErrorStrategy.prototype, "recover", null);
__decorate([
    Decorators_1.Override
], DefaultErrorStrategy.prototype, "sync", null);
__decorate([
    __param(0, Decorators_1.NotNull),
    __param(1, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "reportNoViableAlternative", null);
__decorate([
    __param(0, Decorators_1.NotNull),
    __param(1, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "reportInputMismatch", null);
__decorate([
    __param(0, Decorators_1.NotNull),
    __param(1, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "reportFailedPredicate", null);
__decorate([
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "reportUnwantedToken", null);
__decorate([
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "reportMissingToken", null);
__decorate([
    Decorators_1.Override
], DefaultErrorStrategy.prototype, "recoverInline", null);
__decorate([
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "singleTokenInsertion", null);
__decorate([
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "singleTokenDeletion", null);
__decorate([
    Decorators_1.NotNull,
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "getMissingSymbol", null);
__decorate([
    Decorators_1.NotNull,
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "getExpectedTokens", null);
__decorate([
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "getSymbolText", null);
__decorate([
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "getSymbolType", null);
__decorate([
    Decorators_1.NotNull,
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "escapeWSAndQuote", null);
__decorate([
    Decorators_1.NotNull,
    __param(0, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "getErrorRecoverySet", null);
__decorate([
    __param(0, Decorators_1.NotNull), __param(1, Decorators_1.NotNull)
], DefaultErrorStrategy.prototype, "consumeUntil", null);
exports.DefaultErrorStrategy = DefaultErrorStrategy;
//# sourceMappingURL=DefaultErrorStrategy.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,69 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
/**
*
* @author Sam Harwell
*/
export declare enum Dependents {
    /**
     * The element is dependent upon the specified rule.
     */
    SELF = 0,
    /**
     * The element is dependent upon the set of the specified rule's parents
     * (rules which directly reference it).
     */
    PARENTS = 1,
    /**
     * The element is dependent upon the set of the specified rule's children
     * (rules which it directly references).
     */
    CHILDREN = 2,
    /**
     * The element is dependent upon the set of the specified rule's ancestors
     * (the transitive closure of `PARENTS` rules).
     */
    ANCESTORS = 3,
    /**
     * The element is dependent upon the set of the specified rule's descendants
     * (the transitive closure of `CHILDREN` rules).
     */
    DESCENDANTS = 4,
    /**
     * The element is dependent upon the set of the specified rule's siblings
     * (the union of `CHILDREN` of its `PARENTS`).
     */
    SIBLINGS = 5,
    /**
     * The element is dependent upon the set of the specified rule's preceding
     * siblings (the union of `CHILDREN` of its `PARENTS` which
     * appear before a reference to the rule).
     *
     * NOTE: the member name retains the historical misspelling "PRECEEDING";
     * it cannot be corrected without breaking the public API.
     */
    PRECEEDING_SIBLINGS = 6,
    /**
     * The element is dependent upon the set of the specified rule's following
     * siblings (the union of `CHILDREN` of its `PARENTS` which
     * appear after a reference to the rule).
     */
    FOLLOWING_SIBLINGS = 7,
    /**
     * The element is dependent upon the set of the specified rule's preceding
     * elements (rules which might end before the start of the specified rule
     * while parsing). This is calculated by taking the
     * `PRECEEDING_SIBLINGS` of the rule and each of its
     * `ANCESTORS`, along with the `DESCENDANTS` of those
     * elements.
     *
     * NOTE: the member name retains the historical misspelling "PRECEEDING";
     * it cannot be corrected without breaking the public API.
     */
    PRECEEDING = 8,
    /**
     * The element is dependent upon the set of the specified rule's following
     * elements (rules which might start after the end of the specified rule
     * while parsing). This is calculated by taking the
     * `FOLLOWING_SIBLINGS` of the rule and each of its
     * `ANCESTORS`, along with the `DESCENDANTS` of those
     * elements.
     */
    FOLLOWING = 9
}

View File

@@ -0,0 +1,75 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.Dependents = void 0;
// ConvertTo-TS run at 2016-10-04T11:26:51.1349829-07:00
/**
*
* @author Sam Harwell
*/
var Dependents;
(function (Dependents) {
    // Register the members in declaration order. The double assignment
    // reproduces a TypeScript numeric enum exactly: it creates both the
    // name -> value and the reverse value -> name mapping.
    // (The names PRECEEDING* keep the library's historical misspelling;
    // they are part of the public API.)
    const memberNames = [
        "SELF", // dependent upon the specified rule itself
        "PARENTS", // rules which directly reference the rule
        "CHILDREN", // rules which the rule directly references
        "ANCESTORS", // transitive closure of PARENTS
        "DESCENDANTS", // transitive closure of CHILDREN
        "SIBLINGS", // CHILDREN of its PARENTS
        "PRECEEDING_SIBLINGS", // CHILDREN of PARENTS appearing before the rule reference
        "FOLLOWING_SIBLINGS", // CHILDREN of PARENTS appearing after the rule reference
        "PRECEEDING", // rules which might end before the start of this rule
        "FOLLOWING", // rules which might start after the end of this rule
    ];
    memberNames.forEach((name, value) => {
        Dependents[Dependents[name] = value] = name;
    });
})(Dependents = exports.Dependents || (exports.Dependents = {}));
//# sourceMappingURL=Dependents.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"Dependents.js","sourceRoot":"","sources":["../../src/Dependents.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;AAEH,wDAAwD;AAExD;;;GAGG;AACH,IAAY,UA6DX;AA7DD,WAAY,UAAU;IAErB;;OAEG;IACH,2CAAI,CAAA;IACJ;;;OAGG;IACH,iDAAO,CAAA;IACP;;;OAGG;IACH,mDAAQ,CAAA;IACR;;;OAGG;IACH,qDAAS,CAAA;IACT;;;OAGG;IACH,yDAAW,CAAA;IACX;;;OAGG;IACH,mDAAQ,CAAA;IACR;;;;OAIG;IACH,yEAAmB,CAAA;IACnB;;;;OAIG;IACH,uEAAkB,CAAA;IAClB;;;;;;;OAOG;IACH,uDAAU,CAAA;IACV;;;;;;;OAOG;IACH,qDAAS,CAAA;AACV,CAAC,EA7DW,UAAU,GAAV,kBAAU,KAAV,kBAAU,QA6DrB","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:51.1349829-07:00\r\n\r\n/**\r\n *\r\n * @author Sam Harwell\r\n */\r\nexport enum Dependents {\r\n\r\n\t/**\r\n\t * The element is dependent upon the specified rule.\r\n\t */\r\n\tSELF,\r\n\t/**\r\n\t * The element is dependent upon the set of the specified rule's parents\r\n\t * (rules which directly reference it).\r\n\t */\r\n\tPARENTS,\r\n\t/**\r\n\t * The element is dependent upon the set of the specified rule's children\r\n\t * (rules which it directly references).\r\n\t */\r\n\tCHILDREN,\r\n\t/**\r\n\t * The element is dependent upon the set of the specified rule's ancestors\r\n\t * (the transitive closure of `PARENTS` rules).\r\n\t */\r\n\tANCESTORS,\r\n\t/**\r\n\t * The element is dependent upon the set of the specified rule's descendants\r\n\t * (the transitive closure of `CHILDREN` rules).\r\n\t */\r\n\tDESCENDANTS,\r\n\t/**\r\n\t * The element is dependent upon the set of the specified rule's siblings\r\n\t * (the union of `CHILDREN` of its `PARENTS`).\r\n\t */\r\n\tSIBLINGS,\r\n\t/**\r\n\t * The element is dependent upon the set of the specified rule's preceeding\r\n\t * siblings (the union of `CHILDREN` of its `PARENTS` which\r\n\t * appear before a reference to the rule).\r\n\t 
*/\r\n\tPRECEEDING_SIBLINGS,\r\n\t/**\r\n\t * The element is dependent upon the set of the specified rule's following\r\n\t * siblings (the union of `CHILDREN` of its `PARENTS` which\r\n\t * appear after a reference to the rule).\r\n\t */\r\n\tFOLLOWING_SIBLINGS,\r\n\t/**\r\n\t * The element is dependent upon the set of the specified rule's preceeding\r\n\t * elements (rules which might end before the start of the specified rule\r\n\t * while parsing). This is calculated by taking the\r\n\t * `PRECEEDING_SIBLINGS` of the rule and each of its\r\n\t * `ANCESTORS`, along with the `DESCENDANTS` of those\r\n\t * elements.\r\n\t */\r\n\tPRECEEDING,\r\n\t/**\r\n\t * The element is dependent upon the set of the specified rule's following\r\n\t * elements (rules which might start after the end of the specified rule\r\n\t * while parsing). This is calculated by taking the\r\n\t * `FOLLOWING_SIBLINGS` of the rule and each of its\r\n\t * `ANCESTORS`, along with the `DESCENDANTS` of those\r\n\t * elements.\r\n\t */\r\n\tFOLLOWING,\r\n}\r\n"]}

View File

@@ -0,0 +1,60 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ATNConfigSet } from "./atn/ATNConfigSet";
import { BitSet } from "./misc/BitSet";
import { DFA } from "./dfa/DFA";
import { Parser } from "./Parser";
import { ParserErrorListener } from "./ParserErrorListener";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from "./Recognizer";
import { SimulatorState } from "./atn/SimulatorState";
import { Token } from "./Token";
/**
* This implementation of {@link ANTLRErrorListener} can be used to identify
* certain potential correctness and performance problems in grammars. "Reports"
* are made by calling {@link Parser#notifyErrorListeners} with the appropriate
* message.
*
* * **Ambiguities**: These are cases where more than one path through the
* grammar can match the input.
* * **Weak context sensitivity**: These are cases where full-context
* prediction resolved an SLL conflict to a unique alternative which equaled the
* minimum alternative of the SLL conflict.
* * **Strong (forced) context sensitivity**: These are cases where the
* full-context prediction resolved an SLL conflict to a unique alternative,
* *and* the minimum alternative of the SLL conflict was found to not be
* a truly viable alternative. Two-stage parsing cannot be used for inputs where
* this situation occurs.
*
* @author Sam Harwell
*/
export declare class DiagnosticErrorListener implements ParserErrorListener {
    /** When `true`, only exact ambiguities are reported (see constructor). */
    protected exactOnly: boolean;
    /**
     * Initializes a new instance of {@link DiagnosticErrorListener}, specifying
     * whether all ambiguities or only exact ambiguities are reported.
     *
     * @param exactOnly `true` to report only exact ambiguities, otherwise
     * `false` to report all ambiguities. Defaults to true.
     */
    constructor(exactOnly?: boolean);
    /** Required by the listener interface; this implementation ignores syntax errors. */
    syntaxError<T extends Token>(recognizer: Recognizer<T, any>, offendingSymbol: T | undefined, line: number, charPositionInLine: number, msg: string, e: RecognitionException | undefined): void;
    /** Reports an ambiguity (subject to the `exactOnly` filter) via {@link Parser#notifyErrorListeners}. */
    reportAmbiguity(recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, exact: boolean, ambigAlts: BitSet | undefined, configs: ATNConfigSet): void;
    /** Reports that full-context prediction was attempted for a decision. */
    reportAttemptingFullContext(recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, conflictingAlts: BitSet | undefined, conflictState: SimulatorState): void;
    /** Reports a context sensitivity (full-context prediction resolved an SLL conflict). */
    reportContextSensitivity(recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, prediction: number, acceptState: SimulatorState): void;
    /** Formats a human-readable description of the decision being reported. */
    protected getDecisionDescription(recognizer: Parser, dfa: DFA): string;
    /**
     * Computes the set of conflicting or ambiguous alternatives from a
     * configuration set, if that information was not already provided by the
     * parser.
     *
     * @param reportedAlts The set of conflicting or ambiguous alternatives, as
     * reported by the parser.
     * @param configs The conflicting or ambiguous configuration set.
     * @returns Returns `reportedAlts` if it is not `undefined`, otherwise
     * returns the set of alternatives represented in `configs`.
     */
    protected getConflictingAlts(reportedAlts: BitSet | undefined, configs: ATNConfigSet): BitSet;
}

View File

@@ -0,0 +1,147 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
// Standard TypeScript compiler helpers for applying decorators.
// __decorate applies an array of decorators to a class member or class,
// preferring Reflect.decorate when available; __param wraps a parameter
// decorator so it can be applied through __decorate.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __param = (this && this.__param) || function (paramIndex, decorator) {
    return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.DiagnosticErrorListener = void 0;
const BitSet_1 = require("./misc/BitSet");
const Decorators_1 = require("./Decorators");
const Interval_1 = require("./misc/Interval");
/**
* This implementation of {@link ANTLRErrorListener} can be used to identify
* certain potential correctness and performance problems in grammars. "Reports"
* are made by calling {@link Parser#notifyErrorListeners} with the appropriate
* message.
*
* * **Ambiguities**: These are cases where more than one path through the
* grammar can match the input.
* * **Weak context sensitivity**: These are cases where full-context
* prediction resolved an SLL conflict to a unique alternative which equaled the
* minimum alternative of the SLL conflict.
* * **Strong (forced) context sensitivity**: These are cases where the
* full-context prediction resolved an SLL conflict to a unique alternative,
* *and* the minimum alternative of the SLL conflict was found to not be
* a truly viable alternative. Two-stage parsing cannot be used for inputs where
* this situation occurs.
*
* @author Sam Harwell
*/
class DiagnosticErrorListener {
    /**
     * Initializes a new instance of {@link DiagnosticErrorListener}, specifying
     * whether all ambiguities or only exact ambiguities are reported.
     *
     * @param exactOnly `true` to report only exact ambiguities, otherwise
     * `false` to report all ambiguities. Defaults to true.
     */
    constructor(exactOnly = true) {
        // Fixed: the original emit assigned `this.exactOnly` twice; one
        // assignment is sufficient and behavior is unchanged.
        this.exactOnly = exactOnly;
    }
    /** Plain syntax errors are deliberately ignored by this listener; it only
     * reports the ambiguity/context-sensitivity diagnostics below.
     */
    syntaxError(
    /*@NotNull*/
    recognizer, offendingSymbol, line, charPositionInLine,
    /*@NotNull*/
    msg, e) {
        // intentionally empty
    }
    /** Reports an ambiguity via the parser's notifyErrorListeners, unless
     * `exactOnly` is set and this ambiguity is not exact.
     */
    reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) {
        if (this.exactOnly && !exact) {
            return;
        }
        let decision = this.getDecisionDescription(recognizer, dfa);
        let conflictingAlts = this.getConflictingAlts(ambigAlts, configs);
        let text = recognizer.inputStream.getText(Interval_1.Interval.of(startIndex, stopIndex));
        let message = `reportAmbiguity d=${decision}: ambigAlts=${conflictingAlts}, input='${text}'`;
        recognizer.notifyErrorListeners(message);
    }
    /** Reports that full-context prediction is being attempted for a decision. */
    reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, conflictState) {
        // Removed an unused `format` local left over from the Java port.
        let decision = this.getDecisionDescription(recognizer, dfa);
        let text = recognizer.inputStream.getText(Interval_1.Interval.of(startIndex, stopIndex));
        let message = `reportAttemptingFullContext d=${decision}, input='${text}'`;
        recognizer.notifyErrorListeners(message);
    }
    /** Reports that a decision required full context to resolve. */
    reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, acceptState) {
        // Removed an unused `format` local left over from the Java port.
        let decision = this.getDecisionDescription(recognizer, dfa);
        let text = recognizer.inputStream.getText(Interval_1.Interval.of(startIndex, stopIndex));
        let message = `reportContextSensitivity d=${decision}, input='${text}'`;
        recognizer.notifyErrorListeners(message);
    }
    /** Describes a decision as "<decision> (<ruleName>)", falling back to the
     * bare decision number when the rule name is unavailable.
     */
    getDecisionDescription(recognizer, dfa) {
        let decision = dfa.decision;
        let ruleIndex = dfa.atnStartState.ruleIndex;
        let ruleNames = recognizer.ruleNames;
        if (ruleIndex < 0 || ruleIndex >= ruleNames.length) {
            return decision.toString();
        }
        let ruleName = ruleNames[ruleIndex];
        if (!ruleName) {
            return decision.toString();
        }
        return `${decision} (${ruleName})`;
    }
    /**
     * Computes the set of conflicting or ambiguous alternatives from a
     * configuration set, if that information was not already provided by the
     * parser.
     *
     * @param reportedAlts The set of conflicting or ambiguous alternatives, as
     * reported by the parser.
     * @param configs The conflicting or ambiguous configuration set.
     * @returns Returns `reportedAlts` if it is not `undefined`, otherwise
     * returns the set of alternatives represented in `configs`.
     */
    getConflictingAlts(reportedAlts, configs) {
        if (reportedAlts != null) {
            return reportedAlts;
        }
        let result = new BitSet_1.BitSet();
        for (let config of configs) {
            result.set(config.alt);
        }
        return result;
    }
}
// Runtime application of the @Override/@NotNull decorator metadata that tsc
// emitted for DiagnosticErrorListener's members, followed by the export.
__decorate([
    Decorators_1.Override
], DiagnosticErrorListener.prototype, "syntaxError", null);
__decorate([
    Decorators_1.Override,
    __param(0, Decorators_1.NotNull),
    __param(1, Decorators_1.NotNull),
    __param(6, Decorators_1.NotNull)
], DiagnosticErrorListener.prototype, "reportAmbiguity", null);
__decorate([
    Decorators_1.Override,
    __param(0, Decorators_1.NotNull),
    __param(1, Decorators_1.NotNull),
    __param(5, Decorators_1.NotNull)
], DiagnosticErrorListener.prototype, "reportAttemptingFullContext", null);
__decorate([
    Decorators_1.Override,
    __param(0, Decorators_1.NotNull),
    __param(1, Decorators_1.NotNull),
    __param(5, Decorators_1.NotNull)
], DiagnosticErrorListener.prototype, "reportContextSensitivity", null);
__decorate([
    __param(0, Decorators_1.NotNull),
    __param(1, Decorators_1.NotNull)
], DiagnosticErrorListener.prototype, "getDecisionDescription", null);
__decorate([
    Decorators_1.NotNull,
    __param(1, Decorators_1.NotNull)
], DiagnosticErrorListener.prototype, "getConflictingAlts", null);
exports.DiagnosticErrorListener = DiagnosticErrorListener;
//# sourceMappingURL=DiagnosticErrorListener.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,21 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { Parser } from "./Parser";
import { RecognitionException } from "./RecognitionException";
/** A semantic predicate failed during validation. Validation of predicates
* occurs when normally parsing the alternative just like matching a token.
* Disambiguating predicate evaluation occurs when we test a predicate during
* prediction.
*/
export declare class FailedPredicateException extends RecognitionException {
    // Backing fields for the read-only accessors below.
    private _ruleIndex;
    private _predicateIndex;
    private _predicate?;
    constructor(recognizer: Parser, predicate?: string, message?: string);
    /** Rule index taken from the failing predicate transition (0 when the
     * transition was not a predicate transition — see the implementation). */
    get ruleIndex(): number;
    /** Predicate index taken from the failing predicate transition (0 when
     * unavailable). */
    get predicateIndex(): number;
    /** The predicate text supplied to the constructor, if any. */
    get predicate(): string | undefined;
    private static formatMessage;
}

View File

@@ -0,0 +1,64 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    // tsc-emitted decorator helper: applies `decorators` to a class (c < 3)
    // or to a member/descriptor (c >= 3).
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    // Prefer the Reflect.decorate metadata API when a polyfill provides it.
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    // Otherwise apply decorators right-to-left; each may replace the result
    // by returning a non-falsy value.
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    // For member decorations, (re)install the possibly-replaced descriptor.
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __param = (this && this.__param) || function (paramIndex, decorator) {
    // tsc-emitted helper for parameter decorators: the returned wrapper
    // forwards the captured parameter index to the decorator.
    return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.FailedPredicateException = void 0;
const RecognitionException_1 = require("./RecognitionException");
const Decorators_1 = require("./Decorators");
const PredicateTransition_1 = require("./atn/PredicateTransition");
/** A semantic predicate failed during validation. Validation of predicates
* occurs when normally parsing the alternative just like matching a token.
* Disambiguating predicate evaluation occurs when we test a predicate during
* prediction.
*/
let FailedPredicateException = class FailedPredicateException extends RecognitionException_1.RecognitionException {
    /**
     * @param recognizer The parser in which the predicate failed.
     * @param predicate The predicate text, if available.
     * @param message Explicit message; when omitted, one is derived from
     * `predicate` by `formatMessage`.
     */
    constructor(recognizer, predicate, message) {
        super(recognizer, recognizer.inputStream, recognizer.context, FailedPredicateException.formatMessage(predicate, message));
        // Look up the ATN state the parser was in; its first transition is
        // presumably the failing predicate transition.
        let s = recognizer.interpreter.atn.states[recognizer.state];
        let trans = s.transition(0);
        if (trans instanceof PredicateTransition_1.PredicateTransition) {
            this._ruleIndex = trans.ruleIndex;
            this._predicateIndex = trans.predIndex;
        }
        else {
            // Not a predicate transition: fall back to zeroed indices rather
            // than failing while constructing the exception.
            this._ruleIndex = 0;
            this._predicateIndex = 0;
        }
        this._predicate = predicate;
        super.setOffendingToken(recognizer, recognizer.currentToken);
    }
    /** Rule index recorded from the failing predicate transition. */
    get ruleIndex() {
        return this._ruleIndex;
    }
    /** Predicate index recorded from the failing predicate transition. */
    get predicateIndex() {
        return this._predicateIndex;
    }
    /** The predicate text, if one was supplied. */
    get predicate() {
        return this._predicate;
    }
    // Prefer an explicit message; otherwise derive one from the predicate.
    static formatMessage(predicate, message) {
        if (message) {
            return message;
        }
        return `failed predicate: {${predicate}}?`;
    }
};
// Apply the decorator metadata emitted by tsc, then export the class.
__decorate([
    Decorators_1.NotNull
], FailedPredicateException, "formatMessage", null);
FailedPredicateException = __decorate([
    __param(0, Decorators_1.NotNull)
], FailedPredicateException);
exports.FailedPredicateException = FailedPredicateException;
//# sourceMappingURL=FailedPredicateException.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"FailedPredicateException.js","sourceRoot":"","sources":["../../src/FailedPredicateException.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;;;;;;;;;;AAQH,iEAA8D;AAE9D,6CAAuC;AACvC,mEAAgE;AAEhE;;;;GAIG;AACH,IAAa,wBAAwB,GAArC,MAAa,wBAAyB,SAAQ,2CAAoB;IAOjE,YAAqB,UAAkB,EAAE,SAAkB,EAAE,OAAgB;QAC5E,KAAK,CACJ,UAAU,EACV,UAAU,CAAC,WAAW,EACtB,UAAU,CAAC,OAAO,EAClB,wBAAwB,CAAC,aAAa,CAAC,SAAS,EAAE,OAAO,CAAC,CAAC,CAAC;QAC7D,IAAI,CAAC,GAAa,UAAU,CAAC,WAAW,CAAC,GAAG,CAAC,MAAM,CAAC,UAAU,CAAC,KAAK,CAAC,CAAC;QAEtE,IAAI,KAAK,GAAG,CAAC,CAAC,UAAU,CAAC,CAAC,CAAgC,CAAC;QAC3D,IAAI,KAAK,YAAY,yCAAmB,EAAE;YACzC,IAAI,CAAC,UAAU,GAAG,KAAK,CAAC,SAAS,CAAC;YAClC,IAAI,CAAC,eAAe,GAAG,KAAK,CAAC,SAAS,CAAC;SACvC;aACI;YACJ,IAAI,CAAC,UAAU,GAAG,CAAC,CAAC;YACpB,IAAI,CAAC,eAAe,GAAG,CAAC,CAAC;SACzB;QAED,IAAI,CAAC,UAAU,GAAG,SAAS,CAAC;QAC5B,KAAK,CAAC,iBAAiB,CAAC,UAAU,EAAE,UAAU,CAAC,YAAY,CAAC,CAAC;IAC9D,CAAC;IAED,IAAI,SAAS;QACZ,OAAO,IAAI,CAAC,UAAU,CAAC;IACxB,CAAC;IAED,IAAI,cAAc;QACjB,OAAO,IAAI,CAAC,eAAe,CAAC;IAC7B,CAAC;IAED,IAAI,SAAS;QACZ,OAAO,IAAI,CAAC,UAAU,CAAC;IACxB,CAAC;IAGO,MAAM,CAAC,aAAa,CAAC,SAA6B,EAAE,OAA2B;QACtF,IAAI,OAAO,EAAE;YACZ,OAAO,OAAO,CAAC;SACf;QAED,OAAO,sBAAsB,SAAS,IAAI,CAAC;IAC5C,CAAC;CACD,CAAA;AAPA;IADC,oBAAO;mDAOP;AAhDW,wBAAwB;IAOvB,WAAA,oBAAO,CAAA;GAPR,wBAAwB,CAiDpC;AAjDY,4DAAwB","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. 
See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:51.4099946-07:00\r\n\r\nimport { AbstractPredicateTransition } from \"./atn/AbstractPredicateTransition\";\r\nimport { ATN } from \"./atn/ATN\";\r\nimport { ATNState } from \"./atn/ATNState\";\r\nimport { Parser } from \"./Parser\";\r\nimport { RecognitionException } from \"./RecognitionException\";\r\nimport { Recognizer } from \"./Recognizer\";\r\nimport { NotNull } from \"./Decorators\";\r\nimport { PredicateTransition } from \"./atn/PredicateTransition\";\r\n\r\n/** A semantic predicate failed during validation. Validation of predicates\r\n * occurs when normally parsing the alternative just like matching a token.\r\n * Disambiguating predicate evaluation occurs when we test a predicate during\r\n * prediction.\r\n */\r\nexport class FailedPredicateException extends RecognitionException {\r\n\t//private static serialVersionUID: number = 5379330841495778709L;\r\n\r\n\tprivate _ruleIndex: number;\r\n\tprivate _predicateIndex: number;\r\n\tprivate _predicate?: string;\r\n\r\n\tconstructor(@NotNull recognizer: Parser, predicate?: string, message?: string) {\r\n\t\tsuper(\r\n\t\t\trecognizer,\r\n\t\t\trecognizer.inputStream,\r\n\t\t\trecognizer.context,\r\n\t\t\tFailedPredicateException.formatMessage(predicate, message));\r\n\t\tlet s: ATNState = recognizer.interpreter.atn.states[recognizer.state];\r\n\r\n\t\tlet trans = s.transition(0) as AbstractPredicateTransition;\r\n\t\tif (trans instanceof PredicateTransition) {\r\n\t\t\tthis._ruleIndex = trans.ruleIndex;\r\n\t\t\tthis._predicateIndex = trans.predIndex;\r\n\t\t}\r\n\t\telse {\r\n\t\t\tthis._ruleIndex = 0;\r\n\t\t\tthis._predicateIndex = 0;\r\n\t\t}\r\n\r\n\t\tthis._predicate = predicate;\r\n\t\tsuper.setOffendingToken(recognizer, recognizer.currentToken);\r\n\t}\r\n\r\n\tget ruleIndex(): number {\r\n\t\treturn this._ruleIndex;\r\n\t}\r\n\r\n\tget predicateIndex(): number {\r\n\t\treturn 
this._predicateIndex;\r\n\t}\r\n\r\n\tget predicate(): string | undefined {\r\n\t\treturn this._predicate;\r\n\t}\r\n\r\n\t@NotNull\r\n\tprivate static formatMessage(predicate: string | undefined, message: string | undefined): string {\r\n\t\tif (message) {\r\n\t\t\treturn message;\r\n\t\t}\r\n\r\n\t\treturn `failed predicate: {${predicate}}?`;\r\n\t}\r\n}\r\n"]}

View File

@@ -0,0 +1,14 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { RecognitionException } from "./RecognitionException";
import { Parser } from "./Parser";
import { ParserRuleContext } from "./ParserRuleContext";
/** This signifies any kind of mismatched input exceptions such as
* when the current input does not match the expected token.
*/
export declare class InputMismatchException extends RecognitionException {
    /** Reports a mismatch using the recognizer's current context. */
    constructor(/*@NotNull*/ recognizer: Parser);
    /** Reports a mismatch at an explicit offending state and context. */
    constructor(/*@NotNull*/ recognizer: Parser, state: number, context: ParserRuleContext);
}

View File

@@ -0,0 +1,39 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    // tsc-emitted decorator helper: applies `decorators` to a class (c < 3)
    // or to a member/descriptor (c >= 3).
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    // Prefer the Reflect.decorate metadata API when a polyfill provides it.
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    // Otherwise apply decorators right-to-left; each may replace the result
    // by returning a non-falsy value.
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    // For member decorations, (re)install the possibly-replaced descriptor.
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __param = (this && this.__param) || function (paramIndex, decorator) {
    // tsc-emitted helper for parameter decorators: the returned wrapper
    // forwards the captured parameter index to the decorator.
    return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.InputMismatchException = void 0;
// ConvertTo-TS run at 2016-10-04T11:26:51.5187682-07:00
const RecognitionException_1 = require("./RecognitionException");
const Decorators_1 = require("./Decorators");
/** This signifies any kind of mismatched input exceptions such as
* when the current input does not match the expected token.
*/
let InputMismatchException = class InputMismatchException extends RecognitionException_1.RecognitionException {
    /**
     * @param recognizer The parser that detected the mismatched input.
     * @param state Optional explicit offending ATN state.
     * @param context Optional rule context; the recognizer's current context
     * is used when omitted.
     */
    constructor(recognizer, state, context) {
        super(recognizer, recognizer.inputStream, context === undefined ? recognizer.context : context);
        if (state !== undefined) {
            this.setOffendingState(state);
        }
        this.setOffendingToken(recognizer, recognizer.currentToken);
    }
};
// Mark the first constructor parameter @NotNull, then export the class.
InputMismatchException = __decorate([
    __param(0, Decorators_1.NotNull)
], InputMismatchException);
exports.InputMismatchException = InputMismatchException;
//# sourceMappingURL=InputMismatchException.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"InputMismatchException.js","sourceRoot":"","sources":["../../src/InputMismatchException.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;;;;;;;;;;AAEH,wDAAwD;AAExD,iEAA8D;AAC9D,6CAAuC;AAIvC;;GAEG;AACH,IAAa,sBAAsB,GAAnC,MAAa,sBAAuB,SAAQ,2CAAoB;IAK/D,YAAqB,UAAkB,EAAE,KAAc,EAAE,OAA2B;QACnF,IAAI,OAAO,KAAK,SAAS,EAAE;YAC1B,OAAO,GAAG,UAAU,CAAC,OAAO,CAAC;SAC7B;QAED,KAAK,CAAC,UAAU,EAAE,UAAU,CAAC,WAAW,EAAE,OAAO,CAAC,CAAC;QAEnD,IAAI,KAAK,KAAK,SAAS,EAAE;YACxB,IAAI,CAAC,iBAAiB,CAAC,KAAK,CAAC,CAAC;SAC9B;QAED,IAAI,CAAC,iBAAiB,CAAC,UAAU,EAAE,UAAU,CAAC,YAAY,CAAC,CAAC;IAC7D,CAAC;CACD,CAAA;AAlBY,sBAAsB;IAKrB,WAAA,oBAAO,CAAA;GALR,sBAAsB,CAkBlC;AAlBY,wDAAsB","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:51.5187682-07:00\r\n\r\nimport { RecognitionException } from \"./RecognitionException\";\r\nimport { NotNull } from \"./Decorators\";\r\nimport { Parser } from \"./Parser\";\r\nimport { ParserRuleContext } from \"./ParserRuleContext\";\r\n\r\n/** This signifies any kind of mismatched input exceptions such as\r\n * when the current input does not match the expected token.\r\n */\r\nexport class InputMismatchException extends RecognitionException {\r\n\t//private static serialVersionUID: number = 1532568338707443067L;\r\n\r\n\tconstructor(/*@NotNull*/ recognizer: Parser);\r\n\tconstructor(/*@NotNull*/ recognizer: Parser, state: number, context: ParserRuleContext);\r\n\tconstructor(@NotNull recognizer: Parser, state?: number, context?: ParserRuleContext) {\r\n\t\tif (context === undefined) {\r\n\t\t\tcontext = recognizer.context;\r\n\t\t}\r\n\r\n\t\tsuper(recognizer, recognizer.inputStream, context);\r\n\r\n\t\tif (state !== undefined) {\r\n\t\t\tthis.setOffendingState(state);\r\n\t\t}\r\n\r\n\t\tthis.setOffendingToken(recognizer, recognizer.currentToken);\r\n\t}\r\n}\r\n"]}

View File

@@ -0,0 +1,197 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
export declare namespace IntStream {
    /**
     * The value returned by {@link #LA LA()} when the end of the stream is
     * reached.
     */
    const EOF: number; // -1 in the runtime counterpart (IntStream.js)
    /**
     * The value returned by {@link #getSourceName} when the actual name of the
     * underlying source is not known.
     */
    const UNKNOWN_SOURCE_NAME: string; // "<unknown>" in the runtime counterpart
}
/**
* A simple stream of symbols whose values are represented as integers. This
* interface provides *marked ranges* with support for a minimum level
* of buffering necessary to implement arbitrary lookahead during prediction.
* For more information on marked ranges, see {@link #mark}.
*
* **Initializing Methods:** Some methods in this interface have
* unspecified behavior if no call to an initializing method has occurred after
* the stream was constructed. The following is a list of initializing methods:
*
* * {@link #LA}
* * {@link #consume}
* * {@link #size}
*/
export interface IntStream {
    // NOTE(review): per the interface-level documentation above, behavior of
    // these members is unspecified until one of the "initializing methods"
    // (LA, consume, size) has been invoked on a freshly constructed stream.
    /**
     * Consumes the current symbol in the stream. This method has the following
     * effects:
     *
     * * **Forward movement:** The value of `index`
     * before calling this method is less than the value of `index`
     * after calling this method.
     * * **Ordered lookahead:** The value of `LA(1)` before
     * calling this method becomes the value of `LA(-1)` after calling
     * this method.
     *
     * Note that calling this method does not guarantee that `index` is
     * incremented by exactly 1, as that would preclude the ability to implement
     * filtering streams (e.g. {@link CommonTokenStream} which distinguishes
     * between "on-channel" and "off-channel" tokens).
     *
     * @throws IllegalStateException if an attempt is made to consume the
     * end of the stream (i.e. if `LA(1)==`{@link #EOF EOF} before calling
     * `consume`).
     */
    consume(): void;
    /**
     * Gets the value of the symbol at offset `i` from the current
     * position. When `i==1`, this method returns the value of the current
     * symbol in the stream (which is the next symbol to be consumed). When
     * `i==-1`, this method returns the value of the previously read
     * symbol in the stream. It is not valid to call this method with
     * `i==0`, but the specific behavior is unspecified because this
     * method is frequently called from performance-critical code.
     *
     * This method is guaranteed to succeed if any of the following are true:
     *
     * * `i>0`
     * * `i==-1` and `index` returns a value greater
     * than the value of `index` after the stream was constructed
     * and `LA(1)` was called in that order. Specifying the current
     * `index` relative to the index after the stream was created
     * allows for filtering implementations that do not return every symbol
     * from the underlying source. Specifying the call to `LA(1)`
     * allows for lazily initialized streams.
     * * `LA(i)` refers to a symbol consumed within a marked region
     * that has not yet been released.
     *
     * If `i` represents a position at or beyond the end of the stream,
     * this method returns {@link #EOF}.
     *
     * The return value is unspecified if `i<0` and fewer than `-i`
     * calls to {@link #consume consume()} have occurred from the beginning of
     * the stream before calling this method.
     *
     * @throws UnsupportedOperationException if the stream does not support
     * retrieving the value of the specified symbol
     */
    LA(i: number): number;
    /**
     * A mark provides a guarantee that {@link #seek seek()} operations will be
     * valid over a "marked range" extending from the index where `mark()`
     * was called to the current `index`. This allows the use of
     * streaming input sources by specifying the minimum buffering requirements
     * to support arbitrary lookahead during prediction.
     *
     * The returned mark is an opaque handle (type `int`) which is passed
     * to {@link #release release()} when the guarantees provided by the marked
     * range are no longer necessary. When calls to
     * `mark()`/`release()` are nested, the marks must be released
     * in reverse order of which they were obtained. Since marked regions are
     * used during performance-critical sections of prediction, the specific
     * behavior of invalid usage is unspecified (i.e. a mark is not released, or
     * a mark is released twice, or marks are not released in reverse order from
     * which they were created).
     *
     * The behavior of this method is unspecified if no call to an
     * {@link IntStream initializing method} has occurred after this stream was
     * constructed.
     *
     * This method does not change the current position in the input stream.
     *
     * The following example shows the use of {@link #mark mark()},
     * {@link #release release(mark)}, `index`, and
     * {@link #seek seek(index)} as part of an operation to safely work within a
     * marked region, then restore the stream position to its original value and
     * release the mark.
     *
     * ```
     * IntStream stream = ...;
     * int index = -1;
     * int mark = stream.mark();
     * try {
     *   index = stream.index;
     *   // perform work here...
     * } finally {
     *   if (index != -1) {
     *     stream.seek(index);
     *   }
     *   stream.release(mark);
     * }
     * ```
     *
     * @returns An opaque marker which should be passed to
     * {@link #release release()} when the marked range is no longer required.
     */
    mark(): number;
    /**
     * This method releases a marked range created by a call to
     * {@link #mark mark()}. Calls to `release()` must appear in the
     * reverse order of the corresponding calls to `mark()`. If a mark is
     * released twice, or if marks are not released in reverse order of the
     * corresponding calls to `mark()`, the behavior is unspecified.
     *
     * For more information and an example, see {@link #mark}.
     *
     * @param marker A marker returned by a call to `mark()`.
     * @see #mark
     */
    release(marker: number): void;
    /**
     * Return the index into the stream of the input symbol referred to by
     * `LA(1)`.
     *
     * The behavior of this method is unspecified if no call to an
     * {@link IntStream initializing method} has occurred after this stream was
     * constructed.
     */
    readonly index: number;
    /**
     * Set the input cursor to the position indicated by `index`. If the
     * specified index lies past the end of the stream, the operation behaves as
     * though `index` was the index of the EOF symbol. After this method
     * returns without throwing an exception, then at least one of the following
     * will be true.
     *
     * * `index` will return the index of the first symbol
     * appearing at or after the specified `index`. Specifically,
     * implementations which filter their sources should automatically
     * adjust `index` forward the minimum amount required for the
     * operation to target a non-ignored symbol.
     * * `LA(1)` returns {@link #EOF}
     *
     * This operation is guaranteed to not throw an exception if `index`
     * lies within a marked region. For more information on marked regions, see
     * {@link #mark}. The behavior of this method is unspecified if no call to
     * an {@link IntStream initializing method} has occurred after this stream
     * was constructed.
     *
     * @param index The absolute index to seek to.
     *
     * @throws IllegalArgumentException if `index` is less than 0
     * @throws UnsupportedOperationException if the stream does not support
     * seeking to the specified index
     */
    seek(index: number): void;
    /**
     * Returns the total number of symbols in the stream, including a single EOF
     * symbol.
     *
     * @throws UnsupportedOperationException if the size of the stream is
     * unknown.
     */
    readonly size: number;
    /**
     * Gets the name of the underlying symbol source. This method returns a
     * non-undefined, non-empty string. If such a name is not known, this method
     * returns {@link #UNKNOWN_SOURCE_NAME}.
     */
    readonly sourceName: string;
}

View File

@@ -0,0 +1,22 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.IntStream = void 0;
// ConvertTo-TS run at 2016-10-04T11:26:51.6934376-07:00
var IntStream;
(function (ns) {
    // Sentinel returned by LA() once the end of the stream is reached.
    ns.EOF = -1;
    // Placeholder returned by getSourceName when no real name is known.
    ns.UNKNOWN_SOURCE_NAME = "<unknown>";
})(IntStream = exports.IntStream || (exports.IntStream = {}));
//# sourceMappingURL=IntStream.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,33 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ParserRuleContext } from "./ParserRuleContext";
/**
* This class extends {@link ParserRuleContext} by allowing the value of
* {@link #getRuleIndex} to be explicitly set for the context.
*
* {@link ParserRuleContext} does not include field storage for the rule index
* since the context classes created by the code generator override the
* {@link #getRuleIndex} method to return the correct value for that context.
* Since the parser interpreter does not use the context classes generated for a
* parser, this class (with slightly more memory overhead per node) is used to
* provide equivalent functionality.
*/
export declare class InterpreterRuleContext extends ParserRuleContext {
    /**
     * This is the backing field for {@link #getRuleIndex}.
     */
    private _ruleIndex;
    /** Constructs a context with the given rule index and no parent. */
    constructor(ruleIndex: number);
    /**
     * Constructs a new {@link InterpreterRuleContext} with the specified
     * parent, invoking state, and rule index.
     *
     * @param ruleIndex The rule index for the current context.
     * @param parent The parent context.
     * @param invokingStateNumber The invoking state number.
     */
    constructor(ruleIndex: number, parent: ParserRuleContext | undefined, invokingStateNumber: number);
    /** The rule index supplied at construction time. */
    get ruleIndex(): number;
}

View File

@@ -0,0 +1,46 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    // tsc-emitted decorator helper: applies `decorators` to a class (c < 3)
    // or to a member/descriptor (c >= 3).
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    // Prefer the Reflect.decorate metadata API when a polyfill provides it.
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    // Otherwise apply decorators right-to-left; each may replace the result
    // by returning a non-falsy value.
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    // For member decorations, (re)install the possibly-replaced descriptor.
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.InterpreterRuleContext = void 0;
// ConvertTo-TS run at 2016-10-04T11:26:51.5898546-07:00
const Decorators_1 = require("./Decorators");
const ParserRuleContext_1 = require("./ParserRuleContext");
/**
* This class extends {@link ParserRuleContext} by allowing the value of
* {@link #getRuleIndex} to be explicitly set for the context.
*
* {@link ParserRuleContext} does not include field storage for the rule index
* since the context classes created by the code generator override the
* {@link #getRuleIndex} method to return the correct value for that context.
* Since the parser interpreter does not use the context classes generated for a
* parser, this class (with slightly more memory overhead per node) is used to
* provide equivalent functionality.
*/
class InterpreterRuleContext extends ParserRuleContext_1.ParserRuleContext {
    /**
     * @param ruleIndex The rule index for this context.
     * @param parent Optional parent context.
     * @param invokingStateNumber Optional invoking state number; when present
     * it is forwarded to the base constructor together with `parent`.
     */
    constructor(ruleIndex, parent, invokingStateNumber) {
        if (invokingStateNumber === undefined) {
            super();
        }
        else {
            super(parent, invokingStateNumber);
        }
        this._ruleIndex = ruleIndex;
    }
    /** The rule index this interpreter context was created for. */
    get ruleIndex() {
        return this._ruleIndex;
    }
}
// Apply @Override metadata to the ruleIndex accessor, then export the class.
__decorate([
    Decorators_1.Override
], InterpreterRuleContext.prototype, "ruleIndex", null);
exports.InterpreterRuleContext = InterpreterRuleContext;
//# sourceMappingURL=InterpreterRuleContext.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"InterpreterRuleContext.js","sourceRoot":"","sources":["../../src/InterpreterRuleContext.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;;;;;;;AAEH,wDAAwD;AAExD,6CAAwC;AACxC,2DAAwD;AAExD;;;;;;;;;;GAUG;AACH,MAAa,sBAAuB,SAAQ,qCAAiB;IAkB5D,YAAY,SAAiB,EAAE,MAA0B,EAAE,mBAA4B;QACtF,IAAI,mBAAmB,KAAK,SAAS,EAAE;YACtC,KAAK,CAAC,MAAM,EAAE,mBAAmB,CAAC,CAAC;SACnC;aAAM;YACN,KAAK,EAAE,CAAC;SACR;QAED,IAAI,CAAC,UAAU,GAAG,SAAS,CAAC;IAC7B,CAAC;IAGD,IAAI,SAAS;QACZ,OAAO,IAAI,CAAC,UAAU,CAAC;IACxB,CAAC;CACD;AAHA;IADC,qBAAQ;uDAGR;AA/BF,wDAgCC","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:51.5898546-07:00\r\n\r\nimport { Override } from \"./Decorators\";\r\nimport { ParserRuleContext } from \"./ParserRuleContext\";\r\n\r\n/**\r\n * This class extends {@link ParserRuleContext} by allowing the value of\r\n * {@link #getRuleIndex} to be explicitly set for the context.\r\n *\r\n * {@link ParserRuleContext} does not include field storage for the rule index\r\n * since the context classes created by the code generator override the\r\n * {@link #getRuleIndex} method to return the correct value for that context.\r\n * Since the parser interpreter does not use the context classes generated for a\r\n * parser, this class (with slightly more memory overhead per node) is used to\r\n * provide equivalent functionality.\r\n */\r\nexport class InterpreterRuleContext extends ParserRuleContext {\r\n\t/**\r\n\t * This is the backing field for {@link #getRuleIndex}.\r\n\t */\r\n\tprivate _ruleIndex: number;\r\n\r\n\tconstructor(ruleIndex: number);\r\n\r\n\t/**\r\n\t * Constructs a new {@link InterpreterRuleContext} with the specified\r\n\t * parent, invoking state, and rule index.\r\n\t *\r\n\t * @param ruleIndex The rule index for the current context.\r\n\t * @param parent The parent 
context.\r\n\t * @param invokingStateNumber The invoking state number.\r\n\t */\r\n\tconstructor(ruleIndex: number, parent: ParserRuleContext | undefined, invokingStateNumber: number);\r\n\r\n\tconstructor(ruleIndex: number, parent?: ParserRuleContext, invokingStateNumber?: number) {\r\n\t\tif (invokingStateNumber !== undefined) {\r\n\t\t\tsuper(parent, invokingStateNumber);\r\n\t\t} else {\r\n\t\t\tsuper();\r\n\t\t}\r\n\r\n\t\tthis._ruleIndex = ruleIndex;\r\n\t}\r\n\r\n\t@Override\r\n\tget ruleIndex(): number {\r\n\t\treturn this._ruleIndex;\r\n\t}\r\n}\r\n"]}

View File

@@ -0,0 +1,27 @@
[The "BSD license"]
Copyright (c) 2016 The ANTLR Project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -0,0 +1,141 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { CharStream } from "./CharStream";
import { IntegerStack } from "./misc/IntegerStack";
import { LexerATNSimulator } from "./atn/LexerATNSimulator";
import { LexerNoViableAltException } from "./LexerNoViableAltException";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from "./Recognizer";
import { Token } from "./Token";
import { TokenFactory } from "./TokenFactory";
import { TokenSource } from "./TokenSource";
/** A lexer is recognizer that draws input symbols from a character stream.
* lexer grammars result in a subclass of this object. A Lexer object
* uses simplified match() and error recovery mechanisms in the interest
* of speed.
*/
export declare abstract class Lexer extends Recognizer<number, LexerATNSimulator> implements TokenSource {
    /** The mode every lexer starts out in (mode 0). */
    static readonly DEFAULT_MODE: number;
    /** Sentinel token type set by `more()`: keep matching, extending the current token. */
    static readonly MORE: number;
    /** Sentinel token type set by `skip()`: discard the current token and keep looking. */
    static readonly SKIP: number;
    /** The channel tokens are placed on by default (`Token.DEFAULT_CHANNEL`). */
    static get DEFAULT_TOKEN_CHANNEL(): number;
    /** The off-default channel for hidden tokens (`Token.HIDDEN_CHANNEL`). */
    static get HIDDEN(): number;
    /** Smallest code point a lexer can match. */
    static readonly MIN_CHAR_VALUE: number;
    /** Largest code point a lexer can match (U+10FFFF). */
    static readonly MAX_CHAR_VALUE: number;
    /** The character stream being tokenized. */
    _input: CharStream;
    /** Identifies this lexer and its input stream to the token factory. */
    protected _tokenFactorySourcePair: {
        source: TokenSource;
        stream: CharStream;
    };
    /** How to create token objects */
    protected _factory: TokenFactory;
    /** The goal of all lexer rules/methods is to create a token object.
     * This is an instance variable as multiple rules may collaborate to
     * create a single token. nextToken will return this object after
     * matching lexer rule(s). If you subclass to allow multiple token
     * emissions, then set this to the last token to be matched or
     * something non-undefined so that the auto token emit mechanism will not
     * emit another token.
     */
    _token: Token | undefined;
    /** What character index in the stream did the current token start at?
     * Needed, for example, to get the text for current token. Set at
     * the start of nextToken.
     */
    _tokenStartCharIndex: number;
    /** The line on which the first character of the token resides */
    _tokenStartLine: number;
    /** The character position of first character within the line */
    _tokenStartCharPositionInLine: number;
    /** Once we see EOF on char stream, next token will be EOF.
     * If you have DONE : EOF ; then you see DONE EOF.
     */
    _hitEOF: boolean;
    /** The channel number for the current token */
    _channel: number;
    /** The token type for the current token */
    _type: number;
    /** Stack of modes saved by pushMode and restored by popMode. */
    readonly _modeStack: IntegerStack;
    /** The lexer mode currently in effect. */
    _mode: number;
    /** You can set the text for the current token to override what is in
     * the input char buffer. Set `text` or can set this instance var.
     */
    _text: string | undefined;
    constructor(input: CharStream);
    /** Reset all lexer state and rewind the input stream to index 0. */
    reset(): void;
    /** Reset all lexer state; rewind the input only when `resetInput` is true. */
    reset(resetInput: boolean): void;
    /** Return a token from this source; i.e., match a token on the char
     * stream.
     */
    nextToken(): Token;
    /** Instruct the lexer to skip creating a token for current lexer rule
     * and look for another token. nextToken() knows to keep looking when
     * a lexer rule finishes with token set to SKIP_TOKEN. Recall that
     * if token==undefined at end of any token rule, it creates one for you
     * and emits it.
     */
    skip(): void;
    /** Instruct the lexer to keep matching, extending the current token. */
    more(): void;
    /** Switch the lexer to mode `m`. */
    mode(m: number): void;
    /** Save the current mode on the mode stack and switch to mode `m`. */
    pushMode(m: number): void;
    /** Restore the most recently pushed mode and return it; throws when the
     * mode stack is empty.
     */
    popMode(): number;
    get tokenFactory(): TokenFactory;
    set tokenFactory(factory: TokenFactory);
    get inputStream(): CharStream;
    /** Set the char stream and reset the lexer */
    set inputStream(input: CharStream);
    /** The source name of the underlying char stream. */
    get sourceName(): string;
    /** The standard method called to automatically emit a token at the
     * outermost lexical rule. The token object should point into the
     * char buffer start..stop. If there is a text override in 'text',
     * use that to set the token's text. Override this method to emit
     * custom Token objects or provide a new factory.
     */
    emit(token: Token): Token;
    /** By default does not support multiple emits per nextToken invocation
     * for efficiency reasons. Subclass and override this method, nextToken,
     * and getToken (to push tokens into a list and pull from that list
     * rather than a single variable as this implementation does).
     */
    emit(): Token;
    /** Create, emit, and return an EOF token at the current position. */
    emitEOF(): Token;
    get line(): number;
    set line(line: number);
    get charPositionInLine(): number;
    set charPositionInLine(charPositionInLine: number);
    /** What is the index of the current character of lookahead? */
    get charIndex(): number;
    /** Return the text matched so far for the current token or any
     * text override.
     */
    get text(): string;
    /** Set the complete text of this token; it wipes any previous
     * changes to the text.
     */
    set text(text: string);
    /** Override if emitting multiple tokens. */
    get token(): Token | undefined;
    set token(_token: Token | undefined);
    set type(ttype: number);
    get type(): number;
    set channel(channel: number);
    get channel(): number;
    /** Channel names, indexed by channel number; supplied by the concrete lexer. */
    abstract readonly channelNames: string[];
    /** Mode names, indexed by mode number; supplied by the concrete lexer. */
    abstract readonly modeNames: string[];
    /** Return a list of all Token objects in input char stream.
     * Forces load of all tokens. Does not include EOF token.
     */
    getAllTokens(): Token[];
    /** Report a no-viable-alternative error to the registered error listeners. */
    notifyListeners(e: LexerNoViableAltException): void;
    /** Printable form of a char or string: escapes \n, \t, \r and maps EOF to "<EOF>". */
    getErrorDisplay(s: string | number): string;
    /** getErrorDisplay of a single char, wrapped in single quotes. */
    getCharErrorDisplay(c: number): string;
    /** Lexers can normally match any char in it's vocabulary after matching
     * a token, so do the easy thing and just kill a character and hope
     * it all works out. You can instead use the rule invocation stack
     * to do sophisticated error recovery if you are in a fragment rule.
     */
    recover(re: RecognitionException): void;
    recover(re: LexerNoViableAltException): void;
}

View File

@@ -0,0 +1,335 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
// tsc emit helper: applies `decorators` (right-to-left) to `target[key]`
// (or to the class itself when fewer than three args are given), preferring
// Reflect.decorate when available; returns the resulting descriptor/class.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.Lexer = void 0;
const CommonTokenFactory_1 = require("./CommonTokenFactory");
const IntegerStack_1 = require("./misc/IntegerStack");
const Interval_1 = require("./misc/Interval");
const IntStream_1 = require("./IntStream");
const LexerATNSimulator_1 = require("./atn/LexerATNSimulator");
const LexerNoViableAltException_1 = require("./LexerNoViableAltException");
const Decorators_1 = require("./Decorators");
const Recognizer_1 = require("./Recognizer");
const Token_1 = require("./Token");
/** A lexer is recognizer that draws input symbols from a character stream.
* lexer grammars result in a subclass of this object. A Lexer object
* uses simplified match() and error recovery mechanisms in the interest
* of speed.
*/
class Lexer extends Recognizer_1.Recognizer {
    constructor(input) {
        super();
        /** How to create token objects */
        this._factory = CommonTokenFactory_1.CommonTokenFactory.DEFAULT;
        /** What character index in the stream did the current token start at?
         * Needed, for example, to get the text for current token. Set at
         * the start of nextToken.
         */
        this._tokenStartCharIndex = -1;
        /** The line on which the first character of the token resides */
        this._tokenStartLine = 0;
        /** The character position of first character within the line */
        this._tokenStartCharPositionInLine = 0;
        /** Once we see EOF on char stream, next token will be EOF.
         * If you have DONE : EOF ; then you see DONE EOF.
         */
        this._hitEOF = false;
        /** The channel number for the current token */
        this._channel = 0;
        /** The token type for the current token */
        this._type = 0;
        this._modeStack = new IntegerStack_1.IntegerStack();
        this._mode = Lexer.DEFAULT_MODE;
        this._input = input;
        // Pair handed to the token factory so created tokens know their source.
        this._tokenFactorySourcePair = { source: this, stream: input };
    }
    /** The channel tokens are placed on by default. */
    static get DEFAULT_TOKEN_CHANNEL() {
        return Token_1.Token.DEFAULT_CHANNEL;
    }
    /** The off-default channel for hidden tokens. */
    static get HIDDEN() {
        return Token_1.Token.HIDDEN_CHANNEL;
    }
    /** Reset all lexer state variables; rewinds the input stream unless
     * `resetInput` is explicitly false.
     */
    reset(resetInput) {
        // wack Lexer state variables
        if (resetInput === undefined || resetInput) {
            this._input.seek(0); // rewind the input
        }
        this._token = undefined;
        this._type = Token_1.Token.INVALID_TYPE;
        this._channel = Token_1.Token.DEFAULT_CHANNEL;
        this._tokenStartCharIndex = -1;
        this._tokenStartCharPositionInLine = -1;
        this._tokenStartLine = -1;
        this._text = undefined;
        this._hitEOF = false;
        this._mode = Lexer.DEFAULT_MODE;
        this._modeStack.clear();
        this.interpreter.reset();
    }
    /** Return a token from this source; i.e., match a token on the char
     * stream.
     */
    nextToken() {
        if (this._input == null) {
            throw new Error("nextToken requires a non-null input stream.");
        }
        // Mark start location in char stream so unbuffered streams are
        // guaranteed at least have text of current token
        let tokenStartMarker = this._input.mark();
        try {
            outer: while (true) {
                if (this._hitEOF) {
                    return this.emitEOF();
                }
                this._token = undefined;
                this._channel = Token_1.Token.DEFAULT_CHANNEL;
                this._tokenStartCharIndex = this._input.index;
                this._tokenStartCharPositionInLine = this.interpreter.charPositionInLine;
                this._tokenStartLine = this.interpreter.line;
                this._text = undefined;
                do {
                    this._type = Token_1.Token.INVALID_TYPE;
                    // System.out.println("nextToken line "+tokenStartLine+" at "+((char)input.LA(1))+
                    //                    " in mode "+mode+
                    //                    " at index "+input.index);
                    let ttype;
                    try {
                        ttype = this.interpreter.match(this._input, this._mode);
                    }
                    catch (e) {
                        if (e instanceof LexerNoViableAltException_1.LexerNoViableAltException) {
                            this.notifyListeners(e); // report error
                            this.recover(e);
                            ttype = Lexer.SKIP;
                        }
                        else {
                            throw e;
                        }
                    }
                    if (this._input.LA(1) === IntStream_1.IntStream.EOF) {
                        this._hitEOF = true;
                    }
                    if (this._type === Token_1.Token.INVALID_TYPE) {
                        this._type = ttype;
                    }
                    if (this._type === Lexer.SKIP) {
                        // Discard this match entirely and start a fresh token.
                        continue outer;
                    }
                } while (this._type === Lexer.MORE);
                if (this._token == null) {
                    return this.emit();
                }
                return this._token;
            }
        }
        finally {
            // make sure we release marker after match or
            // unbuffered char stream will keep buffering
            this._input.release(tokenStartMarker);
        }
    }
    /** Instruct the lexer to skip creating a token for current lexer rule
     * and look for another token. nextToken() knows to keep looking when
     * a lexer rule finishes with token set to SKIP_TOKEN. Recall that
     * if token==undefined at end of any token rule, it creates one for you
     * and emits it.
     */
    skip() {
        this._type = Lexer.SKIP;
    }
    /** Instruct the lexer to keep matching, extending the current token. */
    more() {
        this._type = Lexer.MORE;
    }
    /** Switch the lexer to mode `m`. */
    mode(m) {
        this._mode = m;
    }
    /** Save the current mode on the mode stack and switch to mode `m`. */
    pushMode(m) {
        if (LexerATNSimulator_1.LexerATNSimulator.debug) {
            console.log("pushMode " + m);
        }
        this._modeStack.push(this._mode);
        this.mode(m);
    }
    /** Restore the most recently pushed mode and return it; throws when the
     * mode stack is empty.
     */
    popMode() {
        if (this._modeStack.isEmpty) {
            throw new Error("EmptyStackException");
        }
        if (LexerATNSimulator_1.LexerATNSimulator.debug) {
            console.log("popMode back to " + this._modeStack.peek());
        }
        this.mode(this._modeStack.pop());
        return this._mode;
    }
    get tokenFactory() {
        return this._factory;
    }
    // @Override
    set tokenFactory(factory) {
        this._factory = factory;
    }
    get inputStream() {
        return this._input;
    }
    /** Set the char stream and reset the lexer */
    set inputStream(input) {
        // Keep the old stream untouched: reset(false) clears state without
        // seeking on the stream we are about to replace.
        this.reset(false);
        this._input = input;
        this._tokenFactorySourcePair = { source: this, stream: this._input };
    }
    get sourceName() {
        return this._input.sourceName;
    }
    /** The standard method called to automatically emit a token at the
     * outermost lexical rule. If no token is supplied, one is created from
     * the factory using the current start/stop bounds, type, channel, and
     * any text override.
     */
    emit(token) {
        if (!token) {
            token = this._factory.create(this._tokenFactorySourcePair, this._type, this._text, this._channel, this._tokenStartCharIndex, this.charIndex - 1, this._tokenStartLine, this._tokenStartCharPositionInLine);
        }
        this._token = token;
        return token;
    }
    /** Create, emit, and return an EOF token at the current position. */
    emitEOF() {
        let cpos = this.charPositionInLine;
        let line = this.line;
        let eof = this._factory.create(this._tokenFactorySourcePair, Token_1.Token.EOF, undefined, Token_1.Token.DEFAULT_CHANNEL, this._input.index, this._input.index - 1, line, cpos);
        this.emit(eof);
        return eof;
    }
    get line() {
        return this.interpreter.line;
    }
    set line(line) {
        this.interpreter.line = line;
    }
    get charPositionInLine() {
        return this.interpreter.charPositionInLine;
    }
    set charPositionInLine(charPositionInLine) {
        this.interpreter.charPositionInLine = charPositionInLine;
    }
    /** What is the index of the current character of lookahead? */
    get charIndex() {
        return this._input.index;
    }
    /** Return the text matched so far for the current token or any
     * text override.
     */
    get text() {
        if (this._text != null) {
            return this._text;
        }
        return this.interpreter.getText(this._input);
    }
    /** Set the complete text of this token; it wipes any previous
     * changes to the text.
     */
    set text(text) {
        this._text = text;
    }
    /** Override if emitting multiple tokens. */
    get token() { return this._token; }
    set token(_token) {
        this._token = _token;
    }
    set type(ttype) {
        this._type = ttype;
    }
    get type() {
        return this._type;
    }
    set channel(channel) {
        this._channel = channel;
    }
    get channel() {
        return this._channel;
    }
    /** Return a list of all Token objects in input char stream.
     * Forces load of all tokens. Does not include EOF token.
     */
    getAllTokens() {
        let tokens = [];
        let t = this.nextToken();
        while (t.type !== Token_1.Token.EOF) {
            tokens.push(t);
            t = this.nextToken();
        }
        return tokens;
    }
    /** Report a no-viable-alternative error to the registered error listeners. */
    notifyListeners(e) {
        let text = this._input.getText(Interval_1.Interval.of(this._tokenStartCharIndex, this._input.index));
        let msg = "token recognition error at: '" +
            this.getErrorDisplay(text) + "'";
        let listener = this.getErrorListenerDispatch();
        if (listener.syntaxError) {
            listener.syntaxError(this, undefined, this._tokenStartLine, this._tokenStartCharPositionInLine, msg, e);
        }
    }
    /** Printable form of a char or string: escapes \n, \t, \r and maps EOF
     * to "<EOF>".
     */
    getErrorDisplay(s) {
        if (typeof s === "number") {
            switch (s) {
                case Token_1.Token.EOF:
                    return "<EOF>";
                case 0x0a:
                    return "\\n";
                case 0x09:
                    return "\\t";
                case 0x0d:
                    return "\\r";
            }
            // Use fromCodePoint, not fromCharCode: this lexer supports code
            // points up to MAX_CHAR_VALUE (0x10FFFF), and fromCharCode
            // truncates supplementary-plane values to 16 bits.
            return String.fromCodePoint(s);
        }
        return s.replace(/\n/g, "\\n")
            .replace(/\t/g, "\\t")
            .replace(/\r/g, "\\r");
    }
    /** getErrorDisplay of a single char, wrapped in single quotes. */
    getCharErrorDisplay(c) {
        let s = this.getErrorDisplay(c);
        return "'" + s + "'";
    }
    /** Lexers can normally match any char in it's vocabulary after matching
     * a token, so do the easy thing and just kill a character and hope
     * it all works out. You can instead use the rule invocation stack
     * to do sophisticated error recovery if you are in a fragment rule.
     */
    recover(re) {
        if (re instanceof LexerNoViableAltException_1.LexerNoViableAltException) {
            if (this._input.LA(1) !== IntStream_1.IntStream.EOF) {
                // skip a char and try again
                this.interpreter.consume(this._input);
            }
        }
        else {
            //System.out.println("consuming char "+(char)input.LA(1)+" during recovery");
            //re.printStackTrace();
            // TODO: Do we lose character or line position information?
            this._input.consume();
        }
    }
}
// Class-side constants; tsc emits static fields as assignments after the
// class body.
Lexer.DEFAULT_MODE = 0;
Lexer.MORE = -2;
Lexer.SKIP = -3;
Lexer.MIN_CHAR_VALUE = 0x0000;
Lexer.MAX_CHAR_VALUE = 0x10FFFF;
// Apply the @Override marker decorators recorded in the TypeScript source.
__decorate([
    Decorators_1.Override
], Lexer.prototype, "nextToken", null);
__decorate([
    Decorators_1.Override
], Lexer.prototype, "tokenFactory", null);
__decorate([
    Decorators_1.Override
], Lexer.prototype, "inputStream", null);
__decorate([
    Decorators_1.Override
], Lexer.prototype, "sourceName", null);
__decorate([
    Decorators_1.Override
], Lexer.prototype, "line", null);
__decorate([
    Decorators_1.Override
], Lexer.prototype, "charPositionInLine", null);
exports.Lexer = Lexer;
//# sourceMappingURL=Lexer.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,23 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ATN } from "./atn/ATN";
import { CharStream } from "./CharStream";
import { Lexer } from "./Lexer";
import { Vocabulary } from "./Vocabulary";
/**
 * A {@link Lexer} whose behavior comes from data supplied at construction
 * time — grammar metadata plus a lexer {@link ATN} — rather than from a
 * generated subclass.
 */
export declare class LexerInterpreter extends Lexer {
    /** Grammar file name reported by {@link grammarFileName}. */
    protected _grammarFileName: string;
    /** The lexer ATN this interpreter executes. */
    protected _atn: ATN;
    /** Rule names, indexed by rule number (defensively copied). */
    protected _ruleNames: string[];
    /** Channel names, indexed by channel number (defensively copied). */
    protected _channelNames: string[];
    /** Mode names, indexed by mode number (defensively copied). */
    protected _modeNames: string[];
    private _vocabulary;
    /**
     * @param atn Must be a lexer ATN; the constructor throws otherwise.
     */
    constructor(grammarFileName: string, vocabulary: Vocabulary, ruleNames: string[], channelNames: string[], modeNames: string[], atn: ATN, input: CharStream);
    get atn(): ATN;
    get grammarFileName(): string;
    get ruleNames(): string[];
    get channelNames(): string[];
    get modeNames(): string[];
    get vocabulary(): Vocabulary;
}

View File

@@ -0,0 +1,79 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
// tsc emit helper: applies `decorators` (right-to-left) to `target[key]`
// (or to the class itself when fewer than three args are given), preferring
// Reflect.decorate when available; returns the resulting descriptor/class.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
// tsc emit helper: records a parameter decorator together with its index so
// __decorate can apply it to the constructor.
var __param = (this && this.__param) || function (paramIndex, decorator) {
    return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.LexerInterpreter = void 0;
const Lexer_1 = require("./Lexer");
const LexerATNSimulator_1 = require("./atn/LexerATNSimulator");
const Decorators_1 = require("./Decorators");
const Decorators_2 = require("./Decorators");
// A Lexer driven entirely by data supplied at construction time: grammar
// metadata (rule/channel/mode names, vocabulary) and a lexer ATN that the
// simulator executes, instead of generated subclass code.
let LexerInterpreter = class LexerInterpreter extends Lexer_1.Lexer {
    constructor(grammarFileName, vocabulary, ruleNames, channelNames, modeNames, atn, input) {
        super(input);
        // Only lexer ATNs (grammarType 0) can drive a lexer interpreter.
        if (atn.grammarType !== 0 /* LEXER */) {
            throw new Error("IllegalArgumentException: The ATN must be a lexer ATN.");
        }
        this._grammarFileName = grammarFileName;
        this._atn = atn;
        // Defensive copies: callers keep ownership of the arrays they passed in.
        this._ruleNames = [...ruleNames];
        this._channelNames = [...channelNames];
        this._modeNames = [...modeNames];
        this._vocabulary = vocabulary;
        this._interp = new LexerATNSimulator_1.LexerATNSimulator(atn, this);
    }
    // Read-only accessors over the construction-time state.
    get atn() { return this._atn; }
    get grammarFileName() { return this._grammarFileName; }
    get ruleNames() { return this._ruleNames; }
    get channelNames() { return this._channelNames; }
    get modeNames() { return this._modeNames; }
    get vocabulary() { return this._vocabulary; }
};
// Decorator application: @NotNull on the _vocabulary field, @Override on the
// getters, and the @NotNull parameter decorator on the constructor's
// `vocabulary` argument (index 1); then the module export.
__decorate([
    Decorators_1.NotNull
], LexerInterpreter.prototype, "_vocabulary", void 0);
__decorate([
    Decorators_2.Override
], LexerInterpreter.prototype, "atn", null);
__decorate([
    Decorators_2.Override
], LexerInterpreter.prototype, "grammarFileName", null);
__decorate([
    Decorators_2.Override
], LexerInterpreter.prototype, "ruleNames", null);
__decorate([
    Decorators_2.Override
], LexerInterpreter.prototype, "channelNames", null);
__decorate([
    Decorators_2.Override
], LexerInterpreter.prototype, "modeNames", null);
__decorate([
    Decorators_2.Override
], LexerInterpreter.prototype, "vocabulary", null);
LexerInterpreter = __decorate([
    __param(1, Decorators_1.NotNull)
], LexerInterpreter);
exports.LexerInterpreter = LexerInterpreter;
//# sourceMappingURL=LexerInterpreter.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"LexerInterpreter.js","sourceRoot":"","sources":["../../src/LexerInterpreter.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;;;;;;;;;;AAOH,mCAAgC;AAChC,+DAA4D;AAC5D,6CAAuC;AACvC,6CAAwC;AAGxC,IAAa,gBAAgB,GAA7B,MAAa,gBAAiB,SAAQ,aAAK;IAU1C,YAAY,eAAuB,EAAW,UAAsB,EAAE,SAAmB,EAAE,YAAsB,EAAE,SAAmB,EAAE,GAAQ,EAAE,KAAiB;QAClK,KAAK,CAAC,KAAK,CAAC,CAAC;QAEb,IAAI,GAAG,CAAC,WAAW,kBAAkB,EAAE;YACtC,MAAM,IAAI,KAAK,CAAC,wDAAwD,CAAC,CAAC;SAC1E;QAED,IAAI,CAAC,gBAAgB,GAAG,eAAe,CAAC;QACxC,IAAI,CAAC,IAAI,GAAG,GAAG,CAAC;QAEhB,IAAI,CAAC,UAAU,GAAG,SAAS,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QACrC,IAAI,CAAC,aAAa,GAAG,YAAY,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QAC3C,IAAI,CAAC,UAAU,GAAG,SAAS,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;QACrC,IAAI,CAAC,WAAW,GAAG,UAAU,CAAC;QAC9B,IAAI,CAAC,OAAO,GAAG,IAAI,qCAAiB,CAAC,GAAG,EAAE,IAAI,CAAC,CAAC;IACjD,CAAC;IAGD,IAAI,GAAG;QACN,OAAO,IAAI,CAAC,IAAI,CAAC;IAClB,CAAC;IAGD,IAAI,eAAe;QAClB,OAAO,IAAI,CAAC,gBAAgB,CAAC;IAC9B,CAAC;IAGD,IAAI,SAAS;QACZ,OAAO,IAAI,CAAC,UAAU,CAAC;IACxB,CAAC;IAGD,IAAI,YAAY;QACf,OAAO,IAAI,CAAC,aAAa,CAAC;IAC3B,CAAC;IAGD,IAAI,SAAS;QACZ,OAAO,IAAI,CAAC,UAAU,CAAC;IACxB,CAAC;IAGD,IAAI,UAAU;QACb,OAAO,IAAI,CAAC,WAAW,CAAC;IACzB,CAAC;CACD,CAAA;AAhDA;IADC,oBAAO;qDACwB;AAoBhC;IADC,qBAAQ;2CAGR;AAGD;IADC,qBAAQ;uDAGR;AAGD;IADC,qBAAQ;iDAGR;AAGD;IADC,qBAAQ;oDAGR;AAGD;IADC,qBAAQ;iDAGR;AAGD;IADC,qBAAQ;kDAGR;AAvDW,gBAAgB;IAUU,WAAA,oBAAO,CAAA;GAVjC,gBAAgB,CAwD5B;AAxDY,4CAAgB","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. 
See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:51.9954566-07:00\r\n\r\nimport { ATN } from \"./atn/ATN\";\r\nimport { ATNType } from \"./atn/ATNType\";\r\nimport { CharStream } from \"./CharStream\";\r\nimport { Lexer } from \"./Lexer\";\r\nimport { LexerATNSimulator } from \"./atn/LexerATNSimulator\";\r\nimport { NotNull } from \"./Decorators\";\r\nimport { Override } from \"./Decorators\";\r\nimport { Vocabulary } from \"./Vocabulary\";\r\n\r\nexport class LexerInterpreter extends Lexer {\r\n\tprotected _grammarFileName: string;\r\n\tprotected _atn: ATN;\r\n\r\n\tprotected _ruleNames: string[];\r\n\tprotected _channelNames: string[];\r\n\tprotected _modeNames: string[];\r\n\t@NotNull\r\n\tprivate _vocabulary: Vocabulary;\r\n\r\n\tconstructor(grammarFileName: string, @NotNull vocabulary: Vocabulary, ruleNames: string[], channelNames: string[], modeNames: string[], atn: ATN, input: CharStream) {\r\n\t\tsuper(input);\r\n\r\n\t\tif (atn.grammarType !== ATNType.LEXER) {\r\n\t\t\tthrow new Error(\"IllegalArgumentException: The ATN must be a lexer ATN.\");\r\n\t\t}\r\n\r\n\t\tthis._grammarFileName = grammarFileName;\r\n\t\tthis._atn = atn;\r\n\r\n\t\tthis._ruleNames = ruleNames.slice(0);\r\n\t\tthis._channelNames = channelNames.slice(0);\r\n\t\tthis._modeNames = modeNames.slice(0);\r\n\t\tthis._vocabulary = vocabulary;\r\n\t\tthis._interp = new LexerATNSimulator(atn, this);\r\n\t}\r\n\r\n\t@Override\r\n\tget atn(): ATN {\r\n\t\treturn this._atn;\r\n\t}\r\n\r\n\t@Override\r\n\tget grammarFileName(): string {\r\n\t\treturn this._grammarFileName;\r\n\t}\r\n\r\n\t@Override\r\n\tget ruleNames(): string[] {\r\n\t\treturn this._ruleNames;\r\n\t}\r\n\r\n\t@Override\r\n\tget channelNames(): string[] {\r\n\t\treturn this._channelNames;\r\n\t}\r\n\r\n\t@Override\r\n\tget modeNames(): string[] {\r\n\t\treturn this._modeNames;\r\n\t}\r\n\r\n\t@Override\r\n\tget vocabulary(): Vocabulary {\r\n\t\treturn 
this._vocabulary;\r\n\t}\r\n}\r\n"]}

View File

@@ -0,0 +1,19 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ATNConfigSet } from "./atn/ATNConfigSet";
import { RecognitionException } from "./RecognitionException";
import { Lexer } from "./Lexer";
import { CharStream } from "./CharStream";
export declare class LexerNoViableAltException extends RecognitionException {
    /** Matching attempted at what input index? */
    private _startIndex;
    /** Which configurations did we try at input.index that couldn't match input.LA(1)? */
    private _deadEndConfigs?;
    constructor(lexer: Lexer | undefined, input: CharStream, startIndex: number, deadEndConfigs: ATNConfigSet | undefined);
    /** The input index at which matching was attempted. */
    get startIndex(): number;
    /** The ATN configurations tried at the failure point, when available. */
    get deadEndConfigs(): ATNConfigSet | undefined;
    /** The char stream the error occurred in (narrowed from the base class). */
    get inputStream(): CharStream;
    /** Formats as `LexerNoViableAltException('<offending char>')` with whitespace escaped. */
    toString(): string;
}

View File

@@ -0,0 +1,56 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
// tsc emit helper: applies `decorators` (right-to-left) to `target[key]`
// (or to the class itself when fewer than three args are given), preferring
// Reflect.decorate when available; returns the resulting descriptor/class.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
// tsc emit helper: records a parameter decorator together with its index so
// __decorate can apply it to the constructor.
var __param = (this && this.__param) || function (paramIndex, decorator) {
    return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.LexerNoViableAltException = void 0;
const RecognitionException_1 = require("./RecognitionException");
const Decorators_1 = require("./Decorators");
const Interval_1 = require("./misc/Interval");
const Utils = require("./misc/Utils");
// Thrown when the lexer ATN simulator finds no viable alternative for the
// character at `startIndex`; carries the dead-end ATN configurations for
// diagnostics.
let LexerNoViableAltException = class LexerNoViableAltException extends RecognitionException_1.RecognitionException {
    constructor(lexer, input, startIndex, deadEndConfigs) {
        super(lexer, input);
        this._startIndex = startIndex;
        this._deadEndConfigs = deadEndConfigs;
    }
    /** The input index at which matching was attempted. */
    get startIndex() { return this._startIndex; }
    /** The ATN configurations tried at the failure point, when available. */
    get deadEndConfigs() { return this._deadEndConfigs; }
    /** Narrows the base-class stream to a CharStream. */
    get inputStream() { return super.inputStream; }
    toString() {
        const index = this._startIndex;
        let display = "";
        // Only render the offending character when the index is inside the stream.
        if (index >= 0 && index < this.inputStream.size) {
            const raw = this.inputStream.getText(Interval_1.Interval.of(index, index));
            display = Utils.escapeWhitespace(raw, false);
        }
        return `LexerNoViableAltException('${display}')`;
    }
};
// Decorator application (@Override markers and the @NotNull `input`
// parameter decorator at index 1), then the module export.
__decorate([
    Decorators_1.Override
], LexerNoViableAltException.prototype, "inputStream", null);
__decorate([
    Decorators_1.Override
], LexerNoViableAltException.prototype, "toString", null);
LexerNoViableAltException = __decorate([
    __param(1, Decorators_1.NotNull)
], LexerNoViableAltException);
exports.LexerNoViableAltException = LexerNoViableAltException;
//# sourceMappingURL=LexerNoViableAltException.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"LexerNoViableAltException.js","sourceRoot":"","sources":["../../src/LexerNoViableAltException.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;;;;;;;;;;AAKH,iEAA8D;AAC9D,6CAAiD;AAGjD,8CAA2C;AAC3C,sCAAsC;AAEtC,IAAa,yBAAyB,GAAtC,MAAa,yBAA0B,SAAQ,2CAAoB;IASlE,YACC,KAAwB,EACf,KAAiB,EAC1B,UAAkB,EAClB,cAAwC;QACxC,KAAK,CAAC,KAAK,EAAE,KAAK,CAAC,CAAC;QACpB,IAAI,CAAC,WAAW,GAAG,UAAU,CAAC;QAC9B,IAAI,CAAC,eAAe,GAAG,cAAc,CAAC;IACvC,CAAC;IAED,IAAI,UAAU;QACb,OAAO,IAAI,CAAC,WAAW,CAAC;IACzB,CAAC;IAED,IAAI,cAAc;QACjB,OAAO,IAAI,CAAC,eAAe,CAAC;IAC7B,CAAC;IAGD,IAAI,WAAW;QACd,OAAO,KAAK,CAAC,WAAyB,CAAC;IACxC,CAAC;IAGM,QAAQ;QACd,IAAI,MAAM,GAAG,EAAE,CAAC;QAChB,IAAI,IAAI,CAAC,WAAW,IAAI,CAAC,IAAI,IAAI,CAAC,WAAW,GAAG,IAAI,CAAC,WAAW,CAAC,IAAI,EAAE;YACtE,MAAM,GAAG,IAAI,CAAC,WAAW,CAAC,OAAO,CAAC,mBAAQ,CAAC,EAAE,CAAC,IAAI,CAAC,WAAW,EAAE,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC;YACnF,MAAM,GAAG,KAAK,CAAC,gBAAgB,CAAC,MAAM,EAAE,KAAK,CAAC,CAAC;SAC/C;QAED,kHAAkH;QAClH,OAAO,8BAA8B,MAAM,IAAI,CAAC;IACjD,CAAC;CACD,CAAA;AAfA;IADC,qBAAQ;4DAGR;AAGD;IADC,qBAAQ;yDAUR;AA1CW,yBAAyB;IAWnC,WAAA,oBAAO,CAAA;GAXG,yBAAyB,CA2CrC;AA3CY,8DAAyB","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:52.0961136-07:00\r\n\r\nimport { ATNConfigSet } from \"./atn/ATNConfigSet\";\r\nimport { RecognitionException } from \"./RecognitionException\";\r\nimport { NotNull, Override } from \"./Decorators\";\r\nimport { Lexer } from \"./Lexer\";\r\nimport { CharStream } from \"./CharStream\";\r\nimport { Interval } from \"./misc/Interval\";\r\nimport * as Utils from \"./misc/Utils\";\r\n\r\nexport class LexerNoViableAltException extends RecognitionException {\r\n\t//private static serialVersionUID: number = -730999203913001726L;\r\n\r\n\t/** Matching attempted at what input index? 
*/\r\n\tprivate _startIndex: number;\r\n\r\n\t/** Which configurations did we try at input.index that couldn't match input.LA(1)? */\r\n\tprivate _deadEndConfigs?: ATNConfigSet;\r\n\r\n\tconstructor(\r\n\t\tlexer: Lexer | undefined,\r\n\t\t@NotNull input: CharStream,\r\n\t\tstartIndex: number,\r\n\t\tdeadEndConfigs: ATNConfigSet | undefined) {\r\n\t\tsuper(lexer, input);\r\n\t\tthis._startIndex = startIndex;\r\n\t\tthis._deadEndConfigs = deadEndConfigs;\r\n\t}\r\n\r\n\tget startIndex(): number {\r\n\t\treturn this._startIndex;\r\n\t}\r\n\r\n\tget deadEndConfigs(): ATNConfigSet | undefined {\r\n\t\treturn this._deadEndConfigs;\r\n\t}\r\n\r\n\t@Override\r\n\tget inputStream(): CharStream {\r\n\t\treturn super.inputStream as CharStream;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic toString(): string {\r\n\t\tlet symbol = \"\";\r\n\t\tif (this._startIndex >= 0 && this._startIndex < this.inputStream.size) {\r\n\t\t\tsymbol = this.inputStream.getText(Interval.of(this._startIndex, this._startIndex));\r\n\t\t\tsymbol = Utils.escapeWhitespace(symbol, false);\r\n\t\t}\r\n\r\n\t\t// return String.format(Locale.getDefault(), \"%s('%s')\", LexerNoViableAltException.class.getSimpleName(), symbol);\r\n\t\treturn `LexerNoViableAltException('${symbol}')`;\r\n\t}\r\n}\r\n"]}

View File

@@ -0,0 +1,86 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { CharStream } from "./CharStream";
import { Token } from "./Token";
import { TokenFactory } from "./TokenFactory";
import { TokenSource } from "./TokenSource";
/**
* Provides an implementation of {@link TokenSource} as a wrapper around a list
* of {@link Token} objects.
*
* If the final token in the list is an {@link Token#EOF} token, it will be used
* as the EOF token for every call to {@link #nextToken} after the end of the
* list is reached. Otherwise, an EOF token will be created.
*/
export declare class ListTokenSource implements TokenSource {
    /**
     * The wrapped collection of {@link Token} objects to return.
     */
    protected tokens: Token[];
    /**
     * The name of the input source. If this value is `undefined`, a call to
     * {@link #getSourceName} should return the source name used to create
     * the next token in {@link #tokens} (or the previous token if the end of
     * the input has been reached).
     */
    private _sourceName?;
    /**
     * The index into {@link #tokens} of token to return by the next call to
     * {@link #nextToken}. The end of the input is indicated by this value
     * being greater than or equal to the number of items in {@link #tokens}.
     */
    protected i: number;
    /**
     * This field caches the EOF token for the token source.
     */
    protected eofToken?: Token;
    /**
     * This is the backing field for {@link #tokenFactory} (getter and
     * setter).
     */
    private _factory;
    /**
     * Constructs a new {@link ListTokenSource} instance from the specified
     * collection of {@link Token} objects and source name.
     *
     * @param tokens The collection of {@link Token} objects to provide as a
     * {@link TokenSource}.
     * @param sourceName The name of the {@link TokenSource}. If this value is
     * `undefined`, {@link #getSourceName} will attempt to infer the name from
     * the next {@link Token} (or the previous token if the end of the input has
     * been reached).
     *
     * @exception NullPointerException if `tokens` is `undefined`
     */
    constructor(tokens: Token[], sourceName?: string);
    /**
     * {@inheritDoc}
     */
    get charPositionInLine(): number;
    /**
     * {@inheritDoc}
     */
    nextToken(): Token;
    /**
     * {@inheritDoc}
     */
    get line(): number;
    /**
     * {@inheritDoc}
     */
    get inputStream(): CharStream | undefined;
    /**
     * {@inheritDoc}
     */
    get sourceName(): string;
    /**
     * {@inheritDoc}
     */
    set tokenFactory(factory: TokenFactory);
    /**
     * {@inheritDoc}
     */
    get tokenFactory(): TokenFactory;
}

View File

@@ -0,0 +1,209 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
// tsc emit helper: applies `decorators` (right-to-left) to `target[key]`
// (or to the class itself when fewer than three args are given), preferring
// Reflect.decorate when available; returns the resulting descriptor/class.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
// tsc emit helper: records a parameter decorator together with its index so
// __decorate can apply it to the constructor.
var __param = (this && this.__param) || function (paramIndex, decorator) {
    return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.ListTokenSource = void 0;
const CommonTokenFactory_1 = require("./CommonTokenFactory");
const Decorators_1 = require("./Decorators");
const Token_1 = require("./Token");
/**
* Provides an implementation of {@link TokenSource} as a wrapper around a list
* of {@link Token} objects.
*
* If the final token in the list is an {@link Token#EOF} token, it will be used
* as the EOF token for every call to {@link #nextToken} after the end of the
* list is reached. Otherwise, an EOF token will be created.
*/
let ListTokenSource = class ListTokenSource {
    /**
     * Constructs a new {@link ListTokenSource} instance from the specified
     * collection of {@link Token} objects and source name.
     *
     * @param tokens The collection of {@link Token} objects to provide as a
     * {@link TokenSource}.
     * @param sourceName The name of the {@link TokenSource}. If this value is
     * `undefined`, {@link #getSourceName} will attempt to infer the name from
     * the next {@link Token} (or the previous token if the end of the input has
     * been reached).
     *
     * @exception NullPointerException if `tokens` is `undefined`
     */
    constructor(tokens, sourceName) {
        // Index of the next token to return; >= tokens.length means end of input.
        this.i = 0;
        // Backing field for the tokenFactory accessor pair.
        this._factory = CommonTokenFactory_1.CommonTokenFactory.DEFAULT;
        if (tokens == null) {
            throw new Error("tokens cannot be null");
        }
        this._sourceName = sourceName;
        this.tokens = tokens;
    }
    /**
     * {@inheritDoc}
     */
    get charPositionInLine() {
        if (this.i < this.tokens.length) {
            return this.tokens[this.i].charPositionInLine;
        }
        if (this.eofToken !== undefined) {
            return this.eofToken.charPositionInLine;
        }
        if (this.tokens.length === 0) {
            // Empty input: EOF occurs at the first position.
            return 0;
        }
        // Derive the column from the final token's position and text.
        let last = this.tokens[this.tokens.length - 1];
        let text = last.text;
        if (text != null) {
            let newlineIndex = text.lastIndexOf("\n");
            if (newlineIndex >= 0) {
                return text.length - newlineIndex - 1;
            }
        }
        return last.charPositionInLine + last.stopIndex - last.startIndex + 1;
    }
    /**
     * {@inheritDoc}
     */
    nextToken() {
        if (this.i >= this.tokens.length) {
            if (this.eofToken === undefined) {
                // Synthesize (and cache) an EOF token positioned just past the
                // last real token, when one exists.
                let start = -1;
                if (this.tokens.length > 0) {
                    let previousStop = this.tokens[this.tokens.length - 1].stopIndex;
                    if (previousStop !== -1) {
                        start = previousStop + 1;
                    }
                }
                let stop = Math.max(-1, start - 1);
                this.eofToken = this._factory.create({ source: this, stream: this.inputStream }, Token_1.Token.EOF, "EOF", Token_1.Token.DEFAULT_CHANNEL, start, stop, this.line, this.charPositionInLine);
            }
            return this.eofToken;
        }
        let token = this.tokens[this.i++];
        if (this.i === this.tokens.length && token.type === Token_1.Token.EOF) {
            // The list's own trailing EOF token becomes the cached EOF token.
            this.eofToken = token;
        }
        return token;
    }
    /**
     * {@inheritDoc}
     */
    get line() {
        if (this.i < this.tokens.length) {
            return this.tokens[this.i].line;
        }
        if (this.eofToken !== undefined) {
            return this.eofToken.line;
        }
        if (this.tokens.length === 0) {
            // Empty input: EOF occurs on the first line.
            return 1;
        }
        // Count the newlines in the last token's text to find the final line.
        let last = this.tokens[this.tokens.length - 1];
        let result = last.line;
        let text = last.text;
        if (text != null) {
            for (let ch of text) {
                if (ch === "\n") {
                    result++;
                }
            }
        }
        // If no text is available, assume the token contained no newlines.
        return result;
    }
    /**
     * {@inheritDoc}
     */
    get inputStream() {
        if (this.i < this.tokens.length) {
            return this.tokens[this.i].inputStream;
        }
        if (this.eofToken !== undefined) {
            return this.eofToken.inputStream;
        }
        if (this.tokens.length > 0) {
            return this.tokens[this.tokens.length - 1].inputStream;
        }
        // No input stream information is available.
        return undefined;
    }
    /**
     * {@inheritDoc}
     */
    get sourceName() {
        if (this._sourceName) {
            return this._sourceName;
        }
        let stream = this.inputStream;
        return stream != null ? stream.sourceName : "List";
    }
    /**
     * {@inheritDoc}
     */
    // @Override
    set tokenFactory(factory) {
        this._factory = factory;
    }
    /**
     * {@inheritDoc}
     */
    get tokenFactory() {
        return this._factory;
    }
};
// Apply the @Override/@NotNull marker decorators that tsc extracted from the
// original TypeScript source; the application order is emitted by the compiler.
__decorate([
    Decorators_1.Override
], ListTokenSource.prototype, "charPositionInLine", null);
__decorate([
    Decorators_1.Override
], ListTokenSource.prototype, "nextToken", null);
__decorate([
    Decorators_1.Override
], ListTokenSource.prototype, "line", null);
__decorate([
    Decorators_1.Override
], ListTokenSource.prototype, "inputStream", null);
__decorate([
    Decorators_1.Override
], ListTokenSource.prototype, "sourceName", null);
__decorate([
    Decorators_1.Override,
    Decorators_1.NotNull,
    __param(0, Decorators_1.NotNull)
], ListTokenSource.prototype, "tokenFactory", null);
ListTokenSource = __decorate([
    __param(0, Decorators_1.NotNull)
], ListTokenSource);
exports.ListTokenSource = ListTokenSource;
//# sourceMappingURL=ListTokenSource.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,30 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ATNConfigSet } from "./atn/ATNConfigSet";
import { Parser } from "./Parser";
import { ParserRuleContext } from "./ParserRuleContext";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from "./Recognizer";
import { Token } from "./Token";
import { TokenStream } from "./TokenStream";
/** Indicates that the parser could not decide which of two or more paths
 * to take based upon the remaining input. It tracks the starting token
 * of the offending input and also knows where the parser was
 * in the various paths when the error occurred.
 * Reported by reportNoViableAlternative()
 */
export declare class NoViableAltException extends RecognitionException {
    /** Which configurations did we try at input.index that couldn't match input.LT(1)? */
    private _deadEndConfigs?;
    /** The token object at the start index; the input stream might
     * not be buffering tokens so get a reference to it. (At the
     * time the error occurred, of course the stream needs to keep a
     * buffer all of the tokens but later we might not have access to those.)
     */
    private _startToken;
    /** Convenience overload: the input stream, start/offending tokens, and
     * context are all derived from the parser's current state.
     */
    constructor(/*@NotNull*/ recognizer: Parser);
    constructor(recognizer: Recognizer<Token, any>, input: TokenStream, startToken: Token, offendingToken: Token, deadEndConfigs: ATNConfigSet | undefined, ctx: ParserRuleContext);
    /** The token at the start index (see {@link #_startToken}). */
    get startToken(): Token;
    /** The ATN configurations tried at the failure point, if they were tracked. */
    get deadEndConfigs(): ATNConfigSet | undefined;
}

View File

@@ -0,0 +1,54 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.NoViableAltException = void 0;
const Parser_1 = require("./Parser");
const RecognitionException_1 = require("./RecognitionException");
const Decorators_1 = require("./Decorators");
/** Indicates that the parser could not decide which of two or more paths
 * to take based upon the remaining input. It tracks the starting token
 * of the offending input and also knows where the parser was
 * in the various paths when the error occurred.
 * Reported by reportNoViableAlternative()
 */
class NoViableAltException extends RecognitionException_1.RecognitionException {
    /**
     * When constructed from a bare {@link Parser}, any omitted arguments are
     * derived from that parser's current stream, token, and rule context.
     */
    constructor(recognizer, input, startToken, offendingToken, deadEndConfigs, ctx) {
        if (recognizer instanceof Parser_1.Parser) {
            input = input === undefined ? recognizer.inputStream : input;
            startToken = startToken === undefined ? recognizer.currentToken : startToken;
            offendingToken = offendingToken === undefined ? recognizer.currentToken : offendingToken;
            ctx = ctx === undefined ? recognizer.context : ctx;
        }
        super(recognizer, input, ctx);
        this._deadEndConfigs = deadEndConfigs;
        this._startToken = startToken;
        this.setOffendingToken(recognizer, offendingToken);
    }
    /** The token object at the start index of the failed decision. */
    get startToken() {
        return this._startToken;
    }
    /** The ATN configurations tried at the failure point, if tracked. */
    get deadEndConfigs() {
        return this._deadEndConfigs;
    }
}
// Apply the @NotNull marker decorator tsc extracted from the _startToken field.
__decorate([
    Decorators_1.NotNull
], NoViableAltException.prototype, "_startToken", void 0);
exports.NoViableAltException = NoViableAltException;
//# sourceMappingURL=NoViableAltException.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"NoViableAltException.js","sourceRoot":"","sources":["../../src/NoViableAltException.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;;;;;;;AAKH,qCAAkC;AAElC,iEAA8D;AAK9D,6CAAuC;AAEvC;;;;GAIG;AACH,MAAa,oBAAqB,SAAQ,2CAAoB;IA4B7D,YACC,UAAkC,EAClC,KAAmB,EACnB,UAAkB,EAClB,cAAsB,EACtB,cAA6B,EAC7B,GAAuB;QACvB,IAAI,UAAU,YAAY,eAAM,EAAE;YACjC,IAAI,KAAK,KAAK,SAAS,EAAE;gBACxB,KAAK,GAAG,UAAU,CAAC,WAAW,CAAC;aAC/B;YAED,IAAI,UAAU,KAAK,SAAS,EAAE;gBAC7B,UAAU,GAAG,UAAU,CAAC,YAAY,CAAC;aACrC;YAED,IAAI,cAAc,KAAK,SAAS,EAAE;gBACjC,cAAc,GAAG,UAAU,CAAC,YAAY,CAAC;aACzC;YAED,IAAI,GAAG,KAAK,SAAS,EAAE;gBACtB,GAAG,GAAG,UAAU,CAAC,OAAO,CAAC;aACzB;SACD;QAED,KAAK,CAAC,UAAU,EAAE,KAAK,EAAE,GAAG,CAAC,CAAC;QAC9B,IAAI,CAAC,eAAe,GAAG,cAAc,CAAC;QACtC,IAAI,CAAC,WAAW,GAAG,UAAmB,CAAC;QACvC,IAAI,CAAC,iBAAiB,CAAC,UAAU,EAAE,cAAc,CAAC,CAAC;IACpD,CAAC;IAED,IAAI,UAAU;QACb,OAAO,IAAI,CAAC,WAAW,CAAC;IACzB,CAAC;IAED,IAAI,cAAc;QACjB,OAAO,IAAI,CAAC,eAAe,CAAC;IAC7B,CAAC;CAED;AAvDA;IADC,oBAAO;yDACmB;AAZ5B,oDAmEC","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:52.3255548-07:00\r\n\r\nimport { ATNConfigSet } from \"./atn/ATNConfigSet\";\r\nimport { Parser } from \"./Parser\";\r\nimport { ParserRuleContext } from \"./ParserRuleContext\";\r\nimport { RecognitionException } from \"./RecognitionException\";\r\nimport { Recognizer } from \"./Recognizer\";\r\nimport { Token } from \"./Token\";\r\nimport { TokenStream } from \"./TokenStream\";\r\nimport { IntStream } from \"./IntStream\";\r\nimport { NotNull } from \"./Decorators\";\r\n\r\n/** Indicates that the parser could not decide which of two or more paths\r\n * to take based upon the remaining input. It tracks the starting token\r\n * of the offending input and also knows where the parser was\r\n * in the various paths when the error. 
Reported by reportNoViableAlternative()\r\n */\r\nexport class NoViableAltException extends RecognitionException {\r\n\t//private static serialVersionUID: number = 5096000008992867052L;\r\n\r\n\t/** Which configurations did we try at input.index that couldn't match input.LT(1)? */\r\n\tprivate _deadEndConfigs?: ATNConfigSet;\r\n\r\n\t/** The token object at the start index; the input stream might\r\n\t * \tnot be buffering tokens so get a reference to it. (At the\r\n\t * time the error occurred, of course the stream needs to keep a\r\n\t * buffer all of the tokens but later we might not have access to those.)\r\n\t */\r\n\t@NotNull\r\n\tprivate _startToken: Token;\r\n\r\n\tconstructor(/*@NotNull*/ recognizer: Parser);\r\n\tconstructor(\r\n\t\t/*@NotNull*/\r\n\t\trecognizer: Recognizer<Token, any>,\r\n\t\t/*@NotNull*/\r\n\t\tinput: TokenStream,\r\n\t\t/*@NotNull*/\r\n\t\tstartToken: Token,\r\n\t\t/*@NotNull*/\r\n\t\toffendingToken: Token,\r\n\t\tdeadEndConfigs: ATNConfigSet | undefined,\r\n\t\t/*@NotNull*/\r\n\t\tctx: ParserRuleContext);\r\n\r\n\tconstructor(\r\n\t\trecognizer: Recognizer<Token, any>,\r\n\t\tinput?: TokenStream,\r\n\t\tstartToken?: Token,\r\n\t\toffendingToken?: Token,\r\n\t\tdeadEndConfigs?: ATNConfigSet,\r\n\t\tctx?: ParserRuleContext) {\r\n\t\tif (recognizer instanceof Parser) {\r\n\t\t\tif (input === undefined) {\r\n\t\t\t\tinput = recognizer.inputStream;\r\n\t\t\t}\r\n\r\n\t\t\tif (startToken === undefined) {\r\n\t\t\t\tstartToken = recognizer.currentToken;\r\n\t\t\t}\r\n\r\n\t\t\tif (offendingToken === undefined) {\r\n\t\t\t\toffendingToken = recognizer.currentToken;\r\n\t\t\t}\r\n\r\n\t\t\tif (ctx === undefined) {\r\n\t\t\t\tctx = recognizer.context;\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tsuper(recognizer, input, ctx);\r\n\t\tthis._deadEndConfigs = deadEndConfigs;\r\n\t\tthis._startToken = startToken as Token;\r\n\t\tthis.setOffendingToken(recognizer, offendingToken);\r\n\t}\r\n\r\n\tget startToken(): Token {\r\n\t\treturn 
this._startToken;\r\n\t}\r\n\r\n\tget deadEndConfigs(): ATNConfigSet | undefined {\r\n\t\treturn this._deadEndConfigs;\r\n\t}\r\n\r\n}\r\n"]}

View File

@@ -0,0 +1,372 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ANTLRErrorStrategy } from "./ANTLRErrorStrategy";
import { ATN } from "./atn/ATN";
import { ErrorNode } from "./tree/ErrorNode";
import { IntegerStack } from "./misc/IntegerStack";
import { IntervalSet } from "./misc/IntervalSet";
import { Lexer } from "./Lexer";
import { ParseInfo } from "./atn/ParseInfo";
import { ParserATNSimulator } from "./atn/ParserATNSimulator";
import { ParserErrorListener } from "./ParserErrorListener";
import { ParserRuleContext } from "./ParserRuleContext";
import { ParseTreeListener } from "./tree/ParseTreeListener";
import { ParseTreePattern } from "./tree/pattern/ParseTreePattern";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from "./Recognizer";
import { RuleContext } from "./RuleContext";
import { TerminalNode } from "./tree/TerminalNode";
import { Token } from "./Token";
import { TokenFactory } from "./TokenFactory";
import { TokenStream } from "./TokenStream";
/** This is all the parsing support code essentially; most of it is error recovery stuff. */
export declare abstract class Parser extends Recognizer<Token, ParserATNSimulator> {
    /**
     * This field maps from the serialized ATN string to the deserialized {@link ATN} with
     * bypass alternatives.
     *
     * @see ATNDeserializationOptions.isGenerateRuleBypassTransitions
     */
    private static readonly bypassAltsAtnCache;
    /**
     * The error handling strategy for the parser. The default value is a new
     * instance of {@link DefaultErrorStrategy}.
     *
     * @see #getErrorHandler
     * @see #setErrorHandler
     */
    protected _errHandler: ANTLRErrorStrategy;
    /**
     * The input stream.
     *
     * @see #getInputStream
     * @see #setInputStream
     */
    protected _input: TokenStream;
    protected readonly _precedenceStack: IntegerStack;
    /**
     * The {@link ParserRuleContext} object for the currently executing rule.
     *
     * This is always non-undefined during the parsing process.
     */
    protected _ctx: ParserRuleContext;
    /**
     * Specifies whether or not the parser should construct a parse tree during
     * the parsing process. The default value is `true`.
     *
     * @see `buildParseTree`
     */
    private _buildParseTrees;
    /**
     * When {@link #setTrace}`(true)` is called, a reference to the
     * {@link TraceListener} is stored here so it can be easily removed in a
     * later call to {@link #setTrace}`(false)`. The listener itself is
     * implemented as a parser listener so this field is not directly used by
     * other parser methods.
     */
    private _tracer;
    /**
     * The list of {@link ParseTreeListener} listeners registered to receive
     * events during the parse.
     *
     * @see #addParseListener
     */
    protected _parseListeners: ParseTreeListener[];
    /**
     * The number of syntax errors reported during parsing. This value is
     * incremented each time {@link #notifyErrorListeners} is called.
     */
    protected _syntaxErrors: number;
    /** Indicates parser has match()ed EOF token. See {@link #exitRule()}. */
    protected matchedEOF: boolean;
    constructor(input: TokenStream);
    /** reset the parser's state */
    reset(): void;
    /** Reset the parser's state; when `resetInput` is `false` the input
     * stream's position is left unchanged.
     */
    reset(resetInput: boolean): void;
    /**
     * Match current input symbol against `ttype`. If the symbol type
     * matches, {@link ANTLRErrorStrategy#reportMatch} and {@link #consume} are
     * called to complete the match process.
     *
     * If the symbol type does not match,
     * {@link ANTLRErrorStrategy#recoverInline} is called on the current error
     * strategy to attempt recovery. If {@link #getBuildParseTree} is
     * `true` and the token index of the symbol returned by
     * {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
     * the parse tree by calling {@link #createErrorNode(ParserRuleContext, Token)} then
     * {@link ParserRuleContext#addErrorNode(ErrorNode)}.
     *
     * @param ttype the token type to match
     * @returns the matched symbol
     * @throws RecognitionException if the current input symbol did not match
     * `ttype` and the error strategy could not recover from the
     * mismatched symbol
     */
    match(ttype: number): Token;
    /**
     * Match current input symbol as a wildcard. If the symbol type matches
     * (i.e. has a value greater than 0), {@link ANTLRErrorStrategy#reportMatch}
     * and {@link #consume} are called to complete the match process.
     *
     * If the symbol type does not match,
     * {@link ANTLRErrorStrategy#recoverInline} is called on the current error
     * strategy to attempt recovery. If {@link #getBuildParseTree} is
     * `true` and the token index of the symbol returned by
     * {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
     * the parse tree by calling {@link Parser#createErrorNode(ParserRuleContext, Token)} then
     * {@link ParserRuleContext#addErrorNode(ErrorNode)}.
     *
     * @returns the matched symbol
     * @throws RecognitionException if the current input symbol did not match
     * a wildcard and the error strategy could not recover from the mismatched
     * symbol
     */
    matchWildcard(): Token;
    /**
     * Track the {@link ParserRuleContext} objects during the parse and hook
     * them up using the {@link ParserRuleContext#children} list so that it
     * forms a parse tree. The {@link ParserRuleContext} returned from the start
     * rule represents the root of the parse tree.
     *
     * Note that if we are not building parse trees, rule contexts only point
     * upwards. When a rule exits, it returns the context but that gets garbage
     * collected if nobody holds a reference. It points upwards but nobody
     * points at it.
     *
     * When we build parse trees, we are adding all of these contexts to
     * {@link ParserRuleContext#children} list. Contexts are then not candidates
     * for garbage collection.
     */
    set buildParseTree(buildParseTrees: boolean);
    /**
     * Gets whether or not a complete parse tree will be constructed while
     * parsing. This property is `true` for a newly constructed parser.
     *
     * @returns `true` if a complete parse tree will be constructed while
     * parsing, otherwise `false`
     */
    get buildParseTree(): boolean;
    getParseListeners(): ParseTreeListener[];
    /**
     * Registers `listener` to receive events during the parsing process.
     *
     * To support output-preserving grammar transformations (including but not
     * limited to left-recursion removal, automated left-factoring, and
     * optimized code generation), calls to listener methods during the parse
     * may differ substantially from calls made by
     * {@link ParseTreeWalker#DEFAULT} used after the parse is complete. In
     * particular, rule entry and exit events may occur in a different order
     * during the parse than after the parser. In addition, calls to certain
     * rule entry methods may be omitted.
     *
     * With the following specific exceptions, calls to listener events are
     * *deterministic*, i.e. for identical input the calls to listener
     * methods will be the same.
     *
     * * Alterations to the grammar used to generate code may change the
     *   behavior of the listener calls.
     * * Alterations to the command line options passed to ANTLR 4 when
     *   generating the parser may change the behavior of the listener calls.
     * * Changing the version of the ANTLR Tool used to generate the parser
     *   may change the behavior of the listener calls.
     *
     * @param listener the listener to add
     *
     * @throws {@link TypeError} if `listener` is `undefined`
     */
    addParseListener(listener: ParseTreeListener): void;
    /**
     * Remove `listener` from the list of parse listeners.
     *
     * If `listener` is `undefined` or has not been added as a parse
     * listener, this method does nothing.
     *
     * @see #addParseListener
     *
     * @param listener the listener to remove
     */
    removeParseListener(listener: ParseTreeListener): void;
    /**
     * Remove all parse listeners.
     *
     * @see #addParseListener
     */
    removeParseListeners(): void;
    /**
     * Notify any parse listeners of an enter rule event.
     *
     * @see #addParseListener
     */
    protected triggerEnterRuleEvent(): void;
    /**
     * Notify any parse listeners of an exit rule event.
     *
     * @see #addParseListener
     */
    protected triggerExitRuleEvent(): void;
    /**
     * Gets the number of syntax errors reported during parsing. This value is
     * incremented each time {@link #notifyErrorListeners} is called.
     *
     * @see #notifyErrorListeners
     */
    get numberOfSyntaxErrors(): number;
    get tokenFactory(): TokenFactory;
    /**
     * The ATN with bypass alternatives is expensive to create so we create it
     * lazily.
     *
     * @throws if the current parser does not
     * implement the `serializedATN` property.
     */
    getATNWithBypassAlts(): ATN;
    /**
     * The preferred method of getting a tree pattern. For example, here's a
     * sample use:
     *
     * ```
     * let t: ParseTree = parser.expr();
     * let p: ParseTreePattern = await parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
     * let m: ParseTreeMatch = p.match(t);
     * let id: string = m.get("ID");
     * ```
     */
    compileParseTreePattern(pattern: string, patternRuleIndex: number): Promise<ParseTreePattern>;
    /**
     * The same as {@link #compileParseTreePattern(String, int)} but specify a
     * {@link Lexer} rather than trying to deduce it from this parser.
     */
    compileParseTreePattern(pattern: string, patternRuleIndex: number, lexer?: Lexer): Promise<ParseTreePattern>;
    get errorHandler(): ANTLRErrorStrategy;
    set errorHandler(handler: ANTLRErrorStrategy);
    get inputStream(): TokenStream;
    /** Set the token stream and reset the parser. */
    set inputStream(input: TokenStream);
    /** Match needs to return the current input symbol, which gets put
     * into the label for the associated token ref; e.g., x=ID.
     */
    get currentToken(): Token;
    notifyErrorListeners(/*@NotNull*/ msg: string): void;
    notifyErrorListeners(/*@NotNull*/ msg: string, /*@NotNull*/ offendingToken: Token | null, e: RecognitionException | undefined): void;
    /**
     * Consume and return the [current symbol](`currentToken`).
     *
     * E.g., given the following input with `A` being the current
     * lookahead symbol, this function moves the cursor to `B` and returns
     * `A`.
     *
     * ```
     * A B
     * ^
     * ```
     *
     * If the parser is not in error recovery mode, the consumed symbol is added
     * to the parse tree using {@link ParserRuleContext#addChild(TerminalNode)}, and
     * {@link ParseTreeListener#visitTerminal} is called on any parse listeners.
     * If the parser *is* in error recovery mode, the consumed symbol is
     * added to the parse tree using {@link #createErrorNode(ParserRuleContext, Token)} then
     * {@link ParserRuleContext#addErrorNode(ErrorNode)} and
     * {@link ParseTreeListener#visitErrorNode} is called on any parse
     * listeners.
     */
    consume(): Token;
    /**
     * How to create a token leaf node associated with a parent.
     * Typically, the terminal node to create is not a function of the parent.
     *
     * @since 4.7
     */
    createTerminalNode(parent: ParserRuleContext, t: Token): TerminalNode;
    /**
     * How to create an error node, given a token, associated with a parent.
     * Typically, the error node to create is not a function of the parent.
     *
     * @since 4.7
     */
    createErrorNode(parent: ParserRuleContext, t: Token): ErrorNode;
    protected addContextToParseTree(): void;
    /**
     * Always called by generated parsers upon entry to a rule. Access field
     * {@link #_ctx} get the current context.
     */
    enterRule(localctx: ParserRuleContext, state: number, ruleIndex: number): void;
    enterLeftFactoredRule(localctx: ParserRuleContext, state: number, ruleIndex: number): void;
    exitRule(): void;
    enterOuterAlt(localctx: ParserRuleContext, altNum: number): void;
    /**
     * Get the precedence level for the top-most precedence rule.
     *
     * @returns The precedence level for the top-most precedence rule, or -1 if
     * the parser context is not nested within a precedence rule.
     */
    get precedence(): number;
    enterRecursionRule(localctx: ParserRuleContext, state: number, ruleIndex: number, precedence: number): void;
    /** Like {@link #enterRule} but for recursive rules.
     * Make the current context the child of the incoming localctx.
     */
    pushNewRecursionContext(localctx: ParserRuleContext, state: number, ruleIndex: number): void;
    unrollRecursionContexts(_parentctx: ParserRuleContext): void;
    getInvokingContext(ruleIndex: number): ParserRuleContext | undefined;
    get context(): ParserRuleContext;
    set context(ctx: ParserRuleContext);
    precpred(localctx: RuleContext, precedence: number): boolean;
    getErrorListenerDispatch(): ParserErrorListener;
    inContext(context: string): boolean;
    /**
     * Checks whether or not `symbol` can follow the current state in the
     * ATN. The behavior of this method is equivalent to the following, but is
     * implemented such that the complete context-sensitive follow set does not
     * need to be explicitly constructed.
     *
     * ```
     * return getExpectedTokens().contains(symbol);
     * ```
     *
     * @param symbol the symbol type to check
     * @returns `true` if `symbol` can follow the current state in
     * the ATN, otherwise `false`.
     */
    isExpectedToken(symbol: number): boolean;
    get isMatchedEOF(): boolean;
    /**
     * Computes the set of input symbols which could follow the current parser
     * state and context, as given by {@link #getState} and {@link #getContext},
     * respectively.
     *
     * @see ATN#getExpectedTokens(int, RuleContext)
     */
    getExpectedTokens(): IntervalSet;
    getExpectedTokensWithinCurrentRule(): IntervalSet;
    /** Get a rule's index (i.e., `RULE_ruleName` field) or -1 if not found. */
    getRuleIndex(ruleName: string): number;
    get ruleContext(): ParserRuleContext;
    /** Return List&lt;String&gt; of the rule names in your parser instance
     * leading up to a call to the current rule. You could override if
     * you want more details such as the file/line info of where
     * in the ATN a rule is invoked.
     *
     * This is very useful for error messages.
     */
    getRuleInvocationStack(ctx?: RuleContext): string[];
    /** For debugging and other purposes. */
    getDFAStrings(): string[];
    /** For debugging and other purposes. */
    dumpDFA(): void;
    get sourceName(): string;
    get parseInfo(): Promise<ParseInfo | undefined>;
    /**
     * @since 4.3
     */
    setProfile(profile: boolean): Promise<void>;
    /** During a parse is sometimes useful to listen in on the rule entry and exit
     * events as well as token matches. This is for quick and dirty debugging.
     */
    set isTrace(trace: boolean);
    /**
     * Gets whether a {@link TraceListener} is registered as a parse listener
     * for the parser.
     */
    get isTrace(): boolean;
}

View File

@@ -0,0 +1,843 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
// tsc-emitted decorator helper. Applies `decorators` to either a class
// (fewer than 3 arguments) or a member: for members it resolves the property
// descriptor when none was passed, prefers a native Reflect.decorate when one
// exists, otherwise folds the decorators right-to-left, and finally
// re-defines the property when a decorator returned a new descriptor.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
    var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
    if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
    else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
    return c > 3 && r && Object.defineProperty(target, key, r), r;
};
// tsc-emitted helper: binds a parameter index into a parameter decorator so
// it can be invoked later through the member-decorator pipeline.
var __param = (this && this.__param) || function (paramIndex, decorator) {
    return function (target, key) { decorator(target, key, paramIndex); }
};
// tsc-emitted async helper: drives the generator produced from an `async`
// function, wrapping each awaited value in P (defaulting to Promise) and
// stepping the generator until it completes or throws.
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
    function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
    return new (P || (P = Promise))(function (resolve, reject) {
        function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
        function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
        function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
        step((generator = generator.apply(thisArg, _arguments || [])).next());
    });
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.Parser = void 0;
const Utils = require("./misc/Utils");
const ATNDeserializationOptions_1 = require("./atn/ATNDeserializationOptions");
const ATNDeserializer_1 = require("./atn/ATNDeserializer");
const DefaultErrorStrategy_1 = require("./DefaultErrorStrategy");
const ErrorNode_1 = require("./tree/ErrorNode");
const IntegerStack_1 = require("./misc/IntegerStack");
const Lexer_1 = require("./Lexer");
const Decorators_1 = require("./Decorators");
const ParseInfo_1 = require("./atn/ParseInfo");
const ParserATNSimulator_1 = require("./atn/ParserATNSimulator");
const ProxyParserErrorListener_1 = require("./ProxyParserErrorListener");
const Recognizer_1 = require("./Recognizer");
const TerminalNode_1 = require("./tree/TerminalNode");
const Token_1 = require("./Token");
class TraceListener {
constructor(ruleNames, tokenStream) {
this.ruleNames = ruleNames;
this.tokenStream = tokenStream;
}
enterEveryRule(ctx) {
console.log("enter " + this.ruleNames[ctx.ruleIndex] +
", LT(1)=" + this.tokenStream.LT(1).text);
}
exitEveryRule(ctx) {
console.log("exit " + this.ruleNames[ctx.ruleIndex] +
", LT(1)=" + this.tokenStream.LT(1).text);
}
visitErrorNode(node) {
// intentionally empty
}
visitTerminal(node) {
let parent = node.parent.ruleContext;
let token = node.symbol;
console.log("consume " + token + " rule " + this.ruleNames[parent.ruleIndex]);
}
}
// Apply the @Override marker decorators tsc extracted from TraceListener's methods.
__decorate([
    Decorators_1.Override
], TraceListener.prototype, "enterEveryRule", null);
__decorate([
    Decorators_1.Override
], TraceListener.prototype, "exitEveryRule", null);
__decorate([
    Decorators_1.Override
], TraceListener.prototype, "visitErrorNode", null);
__decorate([
    Decorators_1.Override
], TraceListener.prototype, "visitTerminal", null);
/** This is all the parsing support code essentially; most of it is error recovery stuff. */
class Parser extends Recognizer_1.Recognizer {
constructor(input) {
super();
/**
* The error handling strategy for the parser. The default value is a new
* instance of {@link DefaultErrorStrategy}.
*
* @see #getErrorHandler
* @see #setErrorHandler
*/
this._errHandler = new DefaultErrorStrategy_1.DefaultErrorStrategy();
this._precedenceStack = new IntegerStack_1.IntegerStack();
/**
* Specifies whether or not the parser should construct a parse tree during
* the parsing process. The default value is `true`.
*
* @see `buildParseTree`
*/
this._buildParseTrees = true;
/**
* The list of {@link ParseTreeListener} listeners registered to receive
* events during the parse.
*
* @see #addParseListener
*/
this._parseListeners = [];
/**
* The number of syntax errors reported during parsing. This value is
* incremented each time {@link #notifyErrorListeners} is called.
*/
this._syntaxErrors = 0;
/** Indicates parser has match()ed EOF token. See {@link #exitRule()}. */
this.matchedEOF = false;
this._precedenceStack.push(0);
this.inputStream = input;
}
reset(resetInput) {
// Note: this method executes when not parsing, so _ctx can be undefined
if (resetInput === undefined || resetInput) {
this.inputStream.seek(0);
}
this._errHandler.reset(this);
this._ctx = undefined;
this._syntaxErrors = 0;
this.matchedEOF = false;
this.isTrace = false;
this._precedenceStack.clear();
this._precedenceStack.push(0);
let interpreter = this.interpreter;
if (interpreter != null) {
interpreter.reset();
}
}
/**
* Match current input symbol against `ttype`. If the symbol type
* matches, {@link ANTLRErrorStrategy#reportMatch} and {@link #consume} are
* called to complete the match process.
*
* If the symbol type does not match,
* {@link ANTLRErrorStrategy#recoverInline} is called on the current error
* strategy to attempt recovery. If {@link #getBuildParseTree} is
* `true` and the token index of the symbol returned by
* {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
* the parse tree by calling {@link #createErrorNode(ParserRuleContext, Token)} then
* {@link ParserRuleContext#addErrorNode(ErrorNode)}.
*
* @param ttype the token type to match
* @returns the matched symbol
* @ if the current input symbol did not match
* `ttype` and the error strategy could not recover from the
* mismatched symbol
*/
match(ttype) {
let t = this.currentToken;
if (t.type === ttype) {
if (ttype === Token_1.Token.EOF) {
this.matchedEOF = true;
}
this._errHandler.reportMatch(this);
this.consume();
}
else {
t = this._errHandler.recoverInline(this);
if (this._buildParseTrees && t.tokenIndex === -1) {
// we must have conjured up a new token during single token insertion
// if it's not the current symbol
this._ctx.addErrorNode(this.createErrorNode(this._ctx, t));
}
}
return t;
}
/**
* Match current input symbol as a wildcard. If the symbol type matches
* (i.e. has a value greater than 0), {@link ANTLRErrorStrategy#reportMatch}
* and {@link #consume} are called to complete the match process.
*
* If the symbol type does not match,
* {@link ANTLRErrorStrategy#recoverInline} is called on the current error
* strategy to attempt recovery. If {@link #getBuildParseTree} is
* `true` and the token index of the symbol returned by
* {@link ANTLRErrorStrategy#recoverInline} is -1, the symbol is added to
* the parse tree by calling {@link Parser#createErrorNode(ParserRuleContext, Token)} then
* {@link ParserRuleContext#addErrorNode(ErrorNode)}.
*
* @returns the matched symbol
* @ if the current input symbol did not match
* a wildcard and the error strategy could not recover from the mismatched
* symbol
*/
matchWildcard() {
let t = this.currentToken;
if (t.type > 0) {
this._errHandler.reportMatch(this);
this.consume();
}
else {
t = this._errHandler.recoverInline(this);
if (this._buildParseTrees && t.tokenIndex === -1) {
// we must have conjured up a new token during single token insertion
// if it's not the current symbol
this._ctx.addErrorNode(this.createErrorNode(this._ctx, t));
}
}
return t;
}
/**
* Track the {@link ParserRuleContext} objects during the parse and hook
* them up using the {@link ParserRuleContext#children} list so that it
* forms a parse tree. The {@link ParserRuleContext} returned from the start
* rule represents the root of the parse tree.
*
* Note that if we are not building parse trees, rule contexts only point
* upwards. When a rule exits, it returns the context but that gets garbage
* collected if nobody holds a reference. It points upwards but nobody
* points at it.
*
* When we build parse trees, we are adding all of these contexts to
* {@link ParserRuleContext#children} list. Contexts are then not candidates
* for garbage collection.
*/
set buildParseTree(buildParseTrees) {
this._buildParseTrees = buildParseTrees;
}
/**
* Gets whether or not a complete parse tree will be constructed while
* parsing. This property is `true` for a newly constructed parser.
*
* @returns `true` if a complete parse tree will be constructed while
* parsing, otherwise `false`
*/
get buildParseTree() {
return this._buildParseTrees;
}
getParseListeners() {
return this._parseListeners;
}
/**
* Registers `listener` to receive events during the parsing process.
*
* To support output-preserving grammar transformations (including but not
* limited to left-recursion removal, automated left-factoring, and
* optimized code generation), calls to listener methods during the parse
* may differ substantially from calls made by
* {@link ParseTreeWalker#DEFAULT} used after the parse is complete. In
* particular, rule entry and exit events may occur in a different order
* during the parse than after the parser. In addition, calls to certain
* rule entry methods may be omitted.
*
* With the following specific exceptions, calls to listener events are
* *deterministic*, i.e. for identical input the calls to listener
* methods will be the same.
*
* * Alterations to the grammar used to generate code may change the
* behavior of the listener calls.
* * Alterations to the command line options passed to ANTLR 4 when
* generating the parser may change the behavior of the listener calls.
* * Changing the version of the ANTLR Tool used to generate the parser
* may change the behavior of the listener calls.
*
* @param listener the listener to add
*
* @throws {@link TypeError} if `listener` is `undefined`
*/
addParseListener(listener) {
if (listener == null) {
throw new TypeError("listener cannot be null");
}
this._parseListeners.push(listener);
}
/**
* Remove `listener` from the list of parse listeners.
*
* If `listener` is `undefined` or has not been added as a parse
* listener, this method does nothing.
*
* @see #addParseListener
*
* @param listener the listener to remove
*/
removeParseListener(listener) {
let index = this._parseListeners.findIndex((l) => l === listener);
if (index !== -1) {
this._parseListeners.splice(index, 1);
}
}
/**
* Remove all parse listeners.
*
* @see #addParseListener
*/
removeParseListeners() {
this._parseListeners.length = 0;
}
/**
* Notify any parse listeners of an enter rule event.
*
* @see #addParseListener
*/
triggerEnterRuleEvent() {
for (let listener of this._parseListeners) {
if (listener.enterEveryRule) {
listener.enterEveryRule(this._ctx);
}
this._ctx.enterRule(listener);
}
}
/**
* Notify any parse listeners of an exit rule event.
*
* @see #addParseListener
*/
triggerExitRuleEvent() {
// reverse order walk of listeners
for (let i = this._parseListeners.length - 1; i >= 0; i--) {
let listener = this._parseListeners[i];
this._ctx.exitRule(listener);
if (listener.exitEveryRule) {
listener.exitEveryRule(this._ctx);
}
}
}
/**
* Gets the number of syntax errors reported during parsing. This value is
* incremented each time {@link #notifyErrorListeners} is called.
*
* @see #notifyErrorListeners
*/
get numberOfSyntaxErrors() {
return this._syntaxErrors;
}
get tokenFactory() {
return this._input.tokenSource.tokenFactory;
}
/**
* The ATN with bypass alternatives is expensive to create so we create it
* lazily.
*
* @ if the current parser does not
* implement the `serializedATN` property.
*/
getATNWithBypassAlts() {
let serializedAtn = this.serializedATN;
if (serializedAtn == null) {
throw new Error("The current parser does not support an ATN with bypass alternatives.");
}
let result = Parser.bypassAltsAtnCache.get(serializedAtn);
if (result == null) {
let deserializationOptions = new ATNDeserializationOptions_1.ATNDeserializationOptions();
deserializationOptions.isGenerateRuleBypassTransitions = true;
result = new ATNDeserializer_1.ATNDeserializer(deserializationOptions).deserialize(Utils.toCharArray(serializedAtn));
Parser.bypassAltsAtnCache.set(serializedAtn, result);
}
return result;
}
compileParseTreePattern(pattern, patternRuleIndex, lexer) {
return __awaiter(this, void 0, void 0, function* () {
if (!lexer) {
if (this.inputStream) {
let tokenSource = this.inputStream.tokenSource;
if (tokenSource instanceof Lexer_1.Lexer) {
lexer = tokenSource;
}
}
if (!lexer) {
throw new Error("Parser can't discover a lexer to use");
}
}
let currentLexer = lexer;
let m = yield Promise.resolve().then(() => require("./tree/pattern/ParseTreePatternMatcher"));
let matcher = new m.ParseTreePatternMatcher(currentLexer, this);
return matcher.compile(pattern, patternRuleIndex);
});
}
get errorHandler() {
return this._errHandler;
}
set errorHandler(handler) {
this._errHandler = handler;
}
get inputStream() {
return this._input;
}
/** Set the token stream and reset the parser. */
set inputStream(input) {
this.reset(false);
this._input = input;
}
/** Match needs to return the current input symbol, which gets put
* into the label for the associated token ref; e.g., x=ID.
*/
get currentToken() {
return this._input.LT(1);
}
notifyErrorListeners(msg, offendingToken, e) {
if (offendingToken === undefined) {
offendingToken = this.currentToken;
}
else if (offendingToken === null) {
offendingToken = undefined;
}
this._syntaxErrors++;
let line = -1;
let charPositionInLine = -1;
if (offendingToken != null) {
line = offendingToken.line;
charPositionInLine = offendingToken.charPositionInLine;
}
let listener = this.getErrorListenerDispatch();
if (listener.syntaxError) {
listener.syntaxError(this, offendingToken, line, charPositionInLine, msg, e);
}
}
/**
* Consume and return the [current symbol](`currentToken`).
*
* E.g., given the following input with `A` being the current
* lookahead symbol, this function moves the cursor to `B` and returns
* `A`.
*
* ```
* A B
* ^
* ```
*
* If the parser is not in error recovery mode, the consumed symbol is added
* to the parse tree using {@link ParserRuleContext#addChild(TerminalNode)}, and
* {@link ParseTreeListener#visitTerminal} is called on any parse listeners.
* If the parser *is* in error recovery mode, the consumed symbol is
* added to the parse tree using {@link #createErrorNode(ParserRuleContext, Token)} then
* {@link ParserRuleContext#addErrorNode(ErrorNode)} and
* {@link ParseTreeListener#visitErrorNode} is called on any parse
* listeners.
*/
consume() {
let o = this.currentToken;
if (o.type !== Parser.EOF) {
this.inputStream.consume();
}
let hasListener = this._parseListeners.length !== 0;
if (this._buildParseTrees || hasListener) {
if (this._errHandler.inErrorRecoveryMode(this)) {
let node = this._ctx.addErrorNode(this.createErrorNode(this._ctx, o));
if (hasListener) {
for (let listener of this._parseListeners) {
if (listener.visitErrorNode) {
listener.visitErrorNode(node);
}
}
}
}
else {
let node = this.createTerminalNode(this._ctx, o);
this._ctx.addChild(node);
if (hasListener) {
for (let listener of this._parseListeners) {
if (listener.visitTerminal) {
listener.visitTerminal(node);
}
}
}
}
}
return o;
}
/**
* How to create a token leaf node associated with a parent.
* Typically, the terminal node to create is not a function of the parent.
*
* @since 4.7
*/
createTerminalNode(parent, t) {
return new TerminalNode_1.TerminalNode(t);
}
/**
* How to create an error node, given a token, associated with a parent.
* Typically, the error node to create is not a function of the parent.
*
* @since 4.7
*/
createErrorNode(parent, t) {
return new ErrorNode_1.ErrorNode(t);
}
addContextToParseTree() {
let parent = this._ctx._parent;
// add current context to parent if we have a parent
if (parent != null) {
parent.addChild(this._ctx);
}
}
/**
* Always called by generated parsers upon entry to a rule. Access field
* {@link #_ctx} get the current context.
*/
enterRule(localctx, state, ruleIndex) {
this.state = state;
this._ctx = localctx;
this._ctx._start = this._input.LT(1);
if (this._buildParseTrees) {
this.addContextToParseTree();
}
this.triggerEnterRuleEvent();
}
enterLeftFactoredRule(localctx, state, ruleIndex) {
this.state = state;
if (this._buildParseTrees) {
let factoredContext = this._ctx.getChild(this._ctx.childCount - 1);
this._ctx.removeLastChild();
factoredContext._parent = localctx;
localctx.addChild(factoredContext);
}
this._ctx = localctx;
this._ctx._start = this._input.LT(1);
if (this._buildParseTrees) {
this.addContextToParseTree();
}
this.triggerEnterRuleEvent();
}
exitRule() {
if (this.matchedEOF) {
// if we have matched EOF, it cannot consume past EOF so we use LT(1) here
this._ctx._stop = this._input.LT(1); // LT(1) will be end of file
}
else {
this._ctx._stop = this._input.tryLT(-1); // stop node is what we just matched
}
// trigger event on _ctx, before it reverts to parent
this.triggerExitRuleEvent();
this.state = this._ctx.invokingState;
this._ctx = this._ctx._parent;
}
enterOuterAlt(localctx, altNum) {
localctx.altNumber = altNum;
// if we have new localctx, make sure we replace existing ctx
// that is previous child of parse tree
if (this._buildParseTrees && this._ctx !== localctx) {
let parent = this._ctx._parent;
if (parent != null) {
parent.removeLastChild();
parent.addChild(localctx);
}
}
this._ctx = localctx;
}
/**
* Get the precedence level for the top-most precedence rule.
*
* @returns The precedence level for the top-most precedence rule, or -1 if
* the parser context is not nested within a precedence rule.
*/
get precedence() {
if (this._precedenceStack.isEmpty) {
return -1;
}
return this._precedenceStack.peek();
}
enterRecursionRule(localctx, state, ruleIndex, precedence) {
this.state = state;
this._precedenceStack.push(precedence);
this._ctx = localctx;
this._ctx._start = this._input.LT(1);
this.triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules
}
/** Like {@link #enterRule} but for recursive rules.
* Make the current context the child of the incoming localctx.
*/
pushNewRecursionContext(localctx, state, ruleIndex) {
let previous = this._ctx;
previous._parent = localctx;
previous.invokingState = state;
previous._stop = this._input.tryLT(-1);
this._ctx = localctx;
this._ctx._start = previous._start;
if (this._buildParseTrees) {
this._ctx.addChild(previous);
}
this.triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules
}
unrollRecursionContexts(_parentctx) {
this._precedenceStack.pop();
this._ctx._stop = this._input.tryLT(-1);
let retctx = this._ctx; // save current ctx (return value)
// unroll so _ctx is as it was before call to recursive method
if (this._parseListeners.length > 0) {
while (this._ctx !== _parentctx) {
this.triggerExitRuleEvent();
this._ctx = this._ctx._parent;
}
}
else {
this._ctx = _parentctx;
}
// hook into tree
retctx._parent = _parentctx;
if (this._buildParseTrees && _parentctx != null) {
// add return ctx into invoking rule's tree
_parentctx.addChild(retctx);
}
}
getInvokingContext(ruleIndex) {
let p = this._ctx;
while (p && p.ruleIndex !== ruleIndex) {
p = p._parent;
}
return p;
}
get context() {
return this._ctx;
}
set context(ctx) {
this._ctx = ctx;
}
precpred(localctx, precedence) {
return precedence >= this._precedenceStack.peek();
}
getErrorListenerDispatch() {
return new ProxyParserErrorListener_1.ProxyParserErrorListener(this.getErrorListeners());
}
inContext(context) {
// TODO: useful in parser?
return false;
}
/**
* Checks whether or not `symbol` can follow the current state in the
* ATN. The behavior of this method is equivalent to the following, but is
* implemented such that the complete context-sensitive follow set does not
* need to be explicitly constructed.
*
* ```
* return getExpectedTokens().contains(symbol);
* ```
*
* @param symbol the symbol type to check
* @returns `true` if `symbol` can follow the current state in
* the ATN, otherwise `false`.
*/
isExpectedToken(symbol) {
// return interpreter.atn.nextTokens(_ctx);
let atn = this.interpreter.atn;
let ctx = this._ctx;
let s = atn.states[this.state];
let following = atn.nextTokens(s);
if (following.contains(symbol)) {
return true;
}
// System.out.println("following "+s+"="+following);
if (!following.contains(Token_1.Token.EPSILON)) {
return false;
}
while (ctx != null && ctx.invokingState >= 0 && following.contains(Token_1.Token.EPSILON)) {
let invokingState = atn.states[ctx.invokingState];
let rt = invokingState.transition(0);
following = atn.nextTokens(rt.followState);
if (following.contains(symbol)) {
return true;
}
ctx = ctx._parent;
}
if (following.contains(Token_1.Token.EPSILON) && symbol === Token_1.Token.EOF) {
return true;
}
return false;
}
get isMatchedEOF() {
return this.matchedEOF;
}
/**
* Computes the set of input symbols which could follow the current parser
* state and context, as given by {@link #getState} and {@link #getContext},
* respectively.
*
* @see ATN#getExpectedTokens(int, RuleContext)
*/
getExpectedTokens() {
return this.atn.getExpectedTokens(this.state, this.context);
}
getExpectedTokensWithinCurrentRule() {
let atn = this.interpreter.atn;
let s = atn.states[this.state];
return atn.nextTokens(s);
}
/** Get a rule's index (i.e., `RULE_ruleName` field) or -1 if not found. */
getRuleIndex(ruleName) {
let ruleIndex = this.getRuleIndexMap().get(ruleName);
if (ruleIndex != null) {
return ruleIndex;
}
return -1;
}
get ruleContext() { return this._ctx; }
/** Return List&lt;String&gt; of the rule names in your parser instance
* leading up to a call to the current rule. You could override if
* you want more details such as the file/line info of where
* in the ATN a rule is invoked.
*
* This is very useful for error messages.
*/
getRuleInvocationStack(ctx = this._ctx) {
let p = ctx; // Workaround for Microsoft/TypeScript#14487
let ruleNames = this.ruleNames;
let stack = [];
while (p != null) {
// compute what follows who invoked us
let ruleIndex = p.ruleIndex;
if (ruleIndex < 0) {
stack.push("n/a");
}
else {
stack.push(ruleNames[ruleIndex]);
}
p = p._parent;
}
return stack;
}
/** For debugging and other purposes. */
getDFAStrings() {
let s = [];
for (let dfa of this._interp.atn.decisionToDFA) {
s.push(dfa.toString(this.vocabulary, this.ruleNames));
}
return s;
}
/** For debugging and other purposes. */
dumpDFA() {
let seenOne = false;
for (let dfa of this._interp.atn.decisionToDFA) {
if (!dfa.isEmpty) {
if (seenOne) {
console.log();
}
console.log("Decision " + dfa.decision + ":");
process.stdout.write(dfa.toString(this.vocabulary, this.ruleNames));
seenOne = true;
}
}
}
get sourceName() {
return this._input.sourceName;
}
get parseInfo() {
return Promise.resolve().then(() => require("./atn/ProfilingATNSimulator")).then((m) => {
let interp = this.interpreter;
if (interp instanceof m.ProfilingATNSimulator) {
return new ParseInfo_1.ParseInfo(interp);
}
return undefined;
});
}
/**
* @since 4.3
*/
setProfile(profile) {
return __awaiter(this, void 0, void 0, function* () {
let m = yield Promise.resolve().then(() => require("./atn/ProfilingATNSimulator"));
let interp = this.interpreter;
if (profile) {
if (!(interp instanceof m.ProfilingATNSimulator)) {
this.interpreter = new m.ProfilingATNSimulator(this);
}
}
else if (interp instanceof m.ProfilingATNSimulator) {
this.interpreter = new ParserATNSimulator_1.ParserATNSimulator(this.atn, this);
}
this.interpreter.setPredictionMode(interp.getPredictionMode());
});
}
/** During a parse is sometimes useful to listen in on the rule entry and exit
* events as well as token matches. This is for quick and dirty debugging.
*/
set isTrace(trace) {
if (!trace) {
if (this._tracer) {
this.removeParseListener(this._tracer);
this._tracer = undefined;
}
}
else {
if (this._tracer) {
this.removeParseListener(this._tracer);
}
else {
this._tracer = new TraceListener(this.ruleNames, this._input);
}
this.addParseListener(this._tracer);
}
}
/**
* Gets whether a {@link TraceListener} is registered as a parse listener
* for the parser.
*/
get isTrace() {
return this._tracer != null;
}
}
/**
 * This field maps from the serialized ATN string to the deserialized {@link ATN} with
 * bypass alternatives.
 *
 * @see ATNDeserializationOptions.isGenerateRuleBypassTransitions
 */
Parser.bypassAltsAtnCache = new Map();
// Compiler-generated decorator applications (emitted by tsc from the
// @NotNull/@Override/@Nullable annotations in the original TypeScript source).
__decorate([
    Decorators_1.NotNull
], Parser.prototype, "_errHandler", void 0);
__decorate([
    Decorators_1.NotNull
], Parser.prototype, "match", null);
__decorate([
    Decorators_1.NotNull
], Parser.prototype, "matchWildcard", null);
__decorate([
    Decorators_1.NotNull
], Parser.prototype, "getParseListeners", null);
__decorate([
    __param(0, Decorators_1.NotNull)
], Parser.prototype, "addParseListener", null);
__decorate([
    Decorators_1.NotNull
], Parser.prototype, "getATNWithBypassAlts", null);
__decorate([
    Decorators_1.NotNull,
    __param(0, Decorators_1.NotNull)
], Parser.prototype, "errorHandler", null);
__decorate([
    Decorators_1.Override
], Parser.prototype, "inputStream", null);
__decorate([
    Decorators_1.NotNull
], Parser.prototype, "currentToken", null);
__decorate([
    __param(0, Decorators_1.NotNull)
], Parser.prototype, "enterRule", null);
__decorate([
    Decorators_1.Override,
    __param(0, Decorators_1.Nullable)
], Parser.prototype, "precpred", null);
__decorate([
    Decorators_1.Override
], Parser.prototype, "getErrorListenerDispatch", null);
__decorate([
    Decorators_1.NotNull
], Parser.prototype, "getExpectedTokens", null);
__decorate([
    Decorators_1.NotNull
], Parser.prototype, "getExpectedTokensWithinCurrentRule", null);
__decorate([
    Decorators_1.Override
], Parser.prototype, "parseInfo", null);
exports.Parser = Parser;
//# sourceMappingURL=Parser.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,112 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ANTLRErrorListener } from "./ANTLRErrorListener";
import { ATNConfigSet } from "./atn/ATNConfigSet";
import { BitSet } from "./misc/BitSet";
import { DFA } from "./dfa/DFA";
import { Parser } from "./Parser";
import { SimulatorState } from "./atn/SimulatorState";
import { Token } from "./Token";
/** How to emit recognition errors for parsers. Extends the token-based
 *  {@link ANTLRErrorListener} with optional hooks for DFA-related prediction
 *  reports (ambiguities, full-context attempts, and context sensitivities).
 */
export interface ParserErrorListener extends ANTLRErrorListener<Token> {
    /**
     * This method is called by the parser when a full-context prediction
     * results in an ambiguity.
     *
     * Each full-context prediction which does not result in a syntax error
     * will call either {@link #reportContextSensitivity} or
     * {@link #reportAmbiguity}.
     *
     * When `ambigAlts` is not `undefined`, it contains the set of potentially
     * viable alternatives identified by the prediction algorithm. When
     * `ambigAlts` is `undefined`, use
     * {@link ATNConfigSet#getRepresentedAlternatives} to obtain the represented
     * alternatives from the `configs` argument.
     *
     * When `exact` is `true`, *all* of the potentially
     * viable alternatives are truly viable, i.e. this is reporting an exact
     * ambiguity. When `exact` is `false`, *at least two* of
     * the potentially viable alternatives are viable for the current input, but
     * the prediction algorithm terminated as soon as it determined that at
     * least the *minimum* potentially viable alternative is truly
     * viable.
     *
     * When the {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction
     * mode is used, the parser is required to identify exact ambiguities so
     * `exact` will always be `true`.
     *
     * @param recognizer the parser instance
     * @param dfa the DFA for the current decision
     * @param startIndex the input index where the decision started
     * @param stopIndex the input input where the ambiguity was identified
     * @param exact `true` if the ambiguity is exactly known, otherwise
     * `false`. This is always `true` when
     * {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} is used.
     * @param ambigAlts the potentially ambiguous alternatives, or `undefined`
     * to indicate that the potentially ambiguous alternatives are the complete
     * set of represented alternatives in `configs`
     * @param configs the ATN configuration set where the ambiguity was
     * identified
     */
    reportAmbiguity?: (recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, exact: boolean, ambigAlts: BitSet | undefined, configs: ATNConfigSet) => void;
    /**
     * This method is called when an SLL conflict occurs and the parser is about
     * to use the full context information to make an LL decision.
     *
     * If one or more configurations in `configs` contains a semantic
     * predicate, the predicates are evaluated before this method is called. The
     * subset of alternatives which are still viable after predicates are
     * evaluated is reported in `conflictingAlts`.
     *
     * @param recognizer the parser instance
     * @param dfa the DFA for the current decision
     * @param startIndex the input index where the decision started
     * @param stopIndex the input index where the SLL conflict occurred
     * @param conflictingAlts The specific conflicting alternatives. If this is
     * `undefined`, the conflicting alternatives are all alternatives
     * represented in `configs`.
     * @param conflictState the simulator state when the SLL conflict was
     * detected
     */
    reportAttemptingFullContext?: (recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, conflictingAlts: BitSet | undefined, conflictState: SimulatorState) => void;
    /**
     * This method is called by the parser when a full-context prediction has a
     * unique result.
     *
     * Each full-context prediction which does not result in a syntax error
     * will call either {@link #reportContextSensitivity} or
     * {@link #reportAmbiguity}.
     *
     * For prediction implementations that only evaluate full-context
     * predictions when an SLL conflict is found (including the default
     * {@link ParserATNSimulator} implementation), this method reports cases
     * where SLL conflicts were resolved to unique full-context predictions,
     * i.e. the decision was context-sensitive. This report does not necessarily
     * indicate a problem, and it may appear even in completely unambiguous
     * grammars.
     *
     * `configs` may have more than one represented alternative if the
     * full-context prediction algorithm does not evaluate predicates before
     * beginning the full-context prediction. In all cases, the final prediction
     * is passed as the `prediction` argument.
     *
     * Note that the definition of "context sensitivity" in this method
     * differs from the concept in {@link DecisionInfo#contextSensitivities}.
     * This method reports all instances where an SLL conflict occurred but LL
     * parsing produced a unique result, whether or not that unique result
     * matches the minimum alternative in the SLL conflicting set.
     *
     * @param recognizer the parser instance
     * @param dfa the DFA for the current decision
     * @param startIndex the input index where the decision started
     * @param stopIndex the input index where the context sensitivity was
     * finally determined
     * @param prediction the unambiguous result of the full-context prediction
     * @param acceptState the simulator state when the unambiguous prediction
     * was determined
     */
    reportContextSensitivity?: (recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, prediction: number, acceptState: SimulatorState) => void;
}

View File

@@ -0,0 +1,7 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
Object.defineProperty(exports, "__esModule", { value: true });
//# sourceMappingURL=ParserErrorListener.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,151 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ATN } from "./atn/ATN";
import { ATNState } from "./atn/ATNState";
import { BitSet } from "./misc/BitSet";
import { DecisionState } from "./atn/DecisionState";
import { InterpreterRuleContext } from "./InterpreterRuleContext";
import { Parser } from "./Parser";
import { ParserRuleContext } from "./ParserRuleContext";
import { RecognitionException } from "./RecognitionException";
import { Token } from "./Token";
import { TokenStream } from "./TokenStream";
import { Vocabulary } from "./Vocabulary";
/** A parser simulator that mimics what ANTLR's generated
* parser code does. A ParserATNSimulator is used to make
* predictions via adaptivePredict but this class moves a pointer through the
* ATN to simulate parsing. ParserATNSimulator just
* makes us efficient rather than having to backtrack, for example.
*
* This properly creates parse trees even for left recursive rules.
*
* We rely on the left recursive rule invocation and special predicate
* transitions to make left recursive rules work.
*
* See TestParserInterpreter for examples.
*/
export declare class ParserInterpreter extends Parser {
protected _grammarFileName: string;
protected _atn: ATN;
/** This identifies StarLoopEntryState's that begin the (...)*
* precedence loops of left recursive rules.
*/
protected pushRecursionContextStates: BitSet;
protected _ruleNames: string[];
private _vocabulary;
/** This stack corresponds to the _parentctx, _parentState pair of locals
* that would exist on call stack frames with a recursive descent parser;
* in the generated function for a left-recursive rule you'd see:
*
* private EContext e(int _p) {
* ParserRuleContext _parentctx = _ctx; // Pair.a
* int _parentState = state; // Pair.b
* ...
* }
*
* Those values are used to create new recursive rule invocation contexts
* associated with left operand of an alt like "expr '*' expr".
*/
protected readonly _parentContextStack: Array<[ParserRuleContext, number]>;
/** We need a map from (decision,inputIndex)->forced alt for computing ambiguous
* parse trees. For now, we allow exactly one override.
*/
protected overrideDecision: number;
protected overrideDecisionInputIndex: number;
protected overrideDecisionAlt: number;
protected overrideDecisionReached: boolean;
/** What is the current context when we override a decisions? This tells
* us what the root of the parse tree is when using override
* for an ambiguity/lookahead check.
*/
protected _overrideDecisionRoot?: InterpreterRuleContext;
protected _rootContext: InterpreterRuleContext;
/** A copy constructor that creates a new parser interpreter by reusing
* the fields of a previous interpreter.
*
* @param old The interpreter to copy
*
* @since 4.5
*/
constructor(/*@NotNull*/ old: ParserInterpreter);
constructor(grammarFileName: string, /*@NotNull*/ vocabulary: Vocabulary, ruleNames: string[], atn: ATN, input: TokenStream);
reset(resetInput?: boolean): void;
get atn(): ATN;
get vocabulary(): Vocabulary;
get ruleNames(): string[];
get grammarFileName(): string;
/** Begin parsing at startRuleIndex */
parse(startRuleIndex: number): ParserRuleContext;
enterRecursionRule(localctx: ParserRuleContext, state: number, ruleIndex: number, precedence: number): void;
protected get atnState(): ATNState;
protected visitState(p: ATNState): void;
/** Method visitDecisionState() is called when the interpreter reaches
* a decision state (instance of DecisionState). It gives an opportunity
* for subclasses to track interesting things.
*/
protected visitDecisionState(p: DecisionState): number;
/** Provide simple "factory" for InterpreterRuleContext's.
* @since 4.5.1
*/
protected createInterpreterRuleContext(parent: ParserRuleContext | undefined, invokingStateNumber: number, ruleIndex: number): InterpreterRuleContext;
protected visitRuleStopState(p: ATNState): void;
/** Override this parser interpreters normal decision-making process
* at a particular decision and input token index. Instead of
* allowing the adaptive prediction mechanism to choose the
* first alternative within a block that leads to a successful parse,
* force it to take the alternative, 1..n for n alternatives.
*
* As an implementation limitation right now, you can only specify one
* override. This is sufficient to allow construction of different
* parse trees for ambiguous input. It means re-parsing the entire input
* in general because you're never sure where an ambiguous sequence would
* live in the various parse trees. For example, in one interpretation,
* an ambiguous input sequence would be matched completely in expression
* but in another it could match all the way back to the root.
*
* s : e '!'? ;
* e : ID
* | ID '!'
* ;
*
* Here, x! can be matched as (s (e ID) !) or (s (e ID !)). In the first
* case, the ambiguous sequence is fully contained only by the root.
* In the second case, the ambiguous sequences fully contained within just
* e, as in: (e ID !).
*
* Rather than trying to optimize this and make
* some intelligent decisions for optimization purposes, I settled on
* just re-parsing the whole input and then using
 * {@link Trees#getRootOfSubtreeEnclosingRegion} to find the minimal
 * subtree that contains the ambiguous sequence. I originally tried to
 * record the call stack at the point the parser detected an ambiguity but
 * left recursive rules create a parse tree stack that does not reflect
 * the actual call stack. That impedance mismatch was enough to make
 * it challenging to restart the parser at a deeply nested rule
* invocation.
*
* Only parser interpreters can override decisions so as to avoid inserting
* override checking code in the critical ALL(*) prediction execution path.
*
* @since 4.5
*/
addDecisionOverride(decision: number, tokenIndex: number, forcedAlt: number): void;
get overrideDecisionRoot(): InterpreterRuleContext | undefined;
/** Rely on the error handler for this parser but, if no tokens are consumed
* to recover, add an error node. Otherwise, nothing is seen in the parse
* tree.
*/
protected recover(e: RecognitionException): void;
protected recoverInline(): Token;
/** Return the root of the parse, which can be useful if the parser
* bails out. You still can access the top node. Note that,
* because of the way left recursive rules add children, it's possible
* that the root will not have any children if the start rule immediately
* called and left recursive rule that fails.
*
* @since 4.5.1
*/
get rootContext(): InterpreterRuleContext;
}

View File

@@ -0,0 +1,407 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
var __param = (this && this.__param) || function (paramIndex, decorator) {
return function (target, key) { decorator(target, key, paramIndex); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.ParserInterpreter = void 0;
const ATNState_1 = require("./atn/ATNState");
const ATNStateType_1 = require("./atn/ATNStateType");
const BitSet_1 = require("./misc/BitSet");
const FailedPredicateException_1 = require("./FailedPredicateException");
const InputMismatchException_1 = require("./InputMismatchException");
const InterpreterRuleContext_1 = require("./InterpreterRuleContext");
const LoopEndState_1 = require("./atn/LoopEndState");
const Decorators_1 = require("./Decorators");
const Decorators_2 = require("./Decorators");
const Parser_1 = require("./Parser");
const ParserATNSimulator_1 = require("./atn/ParserATNSimulator");
const RecognitionException_1 = require("./RecognitionException");
const StarLoopEntryState_1 = require("./atn/StarLoopEntryState");
const Token_1 = require("./Token");
/** A parser simulator that mimics what ANTLR's generated
* parser code does. A ParserATNSimulator is used to make
* predictions via adaptivePredict but this class moves a pointer through the
* ATN to simulate parsing. ParserATNSimulator just
* makes us efficient rather than having to backtrack, for example.
*
* This properly creates parse trees even for left recursive rules.
*
* We rely on the left recursive rule invocation and special predicate
* transitions to make left recursive rules work.
*
* See TestParserInterpreter for examples.
*/
let ParserInterpreter = class ParserInterpreter extends Parser_1.Parser {
constructor(grammarFileName, vocabulary, ruleNames, atn, input) {
super(grammarFileName instanceof ParserInterpreter ? grammarFileName.inputStream : input);
/** This stack corresponds to the _parentctx, _parentState pair of locals
* that would exist on call stack frames with a recursive descent parser;
* in the generated function for a left-recursive rule you'd see:
*
* private EContext e(int _p) {
* ParserRuleContext _parentctx = _ctx; // Pair.a
* int _parentState = state; // Pair.b
* ...
* }
*
* Those values are used to create new recursive rule invocation contexts
* associated with left operand of an alt like "expr '*' expr".
*/
this._parentContextStack = [];
/** We need a map from (decision,inputIndex)->forced alt for computing ambiguous
* parse trees. For now, we allow exactly one override.
*/
this.overrideDecision = -1;
this.overrideDecisionInputIndex = -1;
this.overrideDecisionAlt = -1;
this.overrideDecisionReached = false; // latch and only override once; error might trigger infinite loop
/** What is the current context when we override a decisions? This tells
* us what the root of the parse tree is when using override
* for an ambiguity/lookahead check.
*/
this._overrideDecisionRoot = undefined;
if (grammarFileName instanceof ParserInterpreter) {
let old = grammarFileName;
this._grammarFileName = old._grammarFileName;
this._atn = old._atn;
this.pushRecursionContextStates = old.pushRecursionContextStates;
this._ruleNames = old._ruleNames;
this._vocabulary = old._vocabulary;
this.interpreter = new ParserATNSimulator_1.ParserATNSimulator(this._atn, this);
}
else {
// The second constructor requires non-null arguments
vocabulary = vocabulary;
ruleNames = ruleNames;
atn = atn;
this._grammarFileName = grammarFileName;
this._atn = atn;
this._ruleNames = ruleNames.slice(0);
this._vocabulary = vocabulary;
// identify the ATN states where pushNewRecursionContext() must be called
this.pushRecursionContextStates = new BitSet_1.BitSet(atn.states.length);
for (let state of atn.states) {
if (!(state instanceof StarLoopEntryState_1.StarLoopEntryState)) {
continue;
}
if (state.precedenceRuleDecision) {
this.pushRecursionContextStates.set(state.stateNumber);
}
}
// get atn simulator that knows how to do predictions
this.interpreter = new ParserATNSimulator_1.ParserATNSimulator(atn, this);
}
}
reset(resetInput) {
if (resetInput === undefined) {
super.reset();
}
else {
super.reset(resetInput);
}
this.overrideDecisionReached = false;
this._overrideDecisionRoot = undefined;
}
get atn() {
return this._atn;
}
get vocabulary() {
return this._vocabulary;
}
get ruleNames() {
return this._ruleNames;
}
get grammarFileName() {
return this._grammarFileName;
}
/** Begin parsing at startRuleIndex */
parse(startRuleIndex) {
let startRuleStartState = this._atn.ruleToStartState[startRuleIndex];
this._rootContext = this.createInterpreterRuleContext(undefined, ATNState_1.ATNState.INVALID_STATE_NUMBER, startRuleIndex);
if (startRuleStartState.isPrecedenceRule) {
this.enterRecursionRule(this._rootContext, startRuleStartState.stateNumber, startRuleIndex, 0);
}
else {
this.enterRule(this._rootContext, startRuleStartState.stateNumber, startRuleIndex);
}
while (true) {
let p = this.atnState;
switch (p.stateType) {
case ATNStateType_1.ATNStateType.RULE_STOP:
// pop; return from rule
if (this._ctx.isEmpty) {
if (startRuleStartState.isPrecedenceRule) {
let result = this._ctx;
let parentContext = this._parentContextStack.pop();
this.unrollRecursionContexts(parentContext[0]);
return result;
}
else {
this.exitRule();
return this._rootContext;
}
}
this.visitRuleStopState(p);
break;
default:
try {
this.visitState(p);
}
catch (e) {
if (e instanceof RecognitionException_1.RecognitionException) {
this.state = this._atn.ruleToStopState[p.ruleIndex].stateNumber;
this.context.exception = e;
this.errorHandler.reportError(this, e);
this.recover(e);
}
else {
throw e;
}
}
break;
}
}
}
enterRecursionRule(localctx, state, ruleIndex, precedence) {
this._parentContextStack.push([this._ctx, localctx.invokingState]);
super.enterRecursionRule(localctx, state, ruleIndex, precedence);
}
get atnState() {
return this._atn.states[this.state];
}
visitState(p) {
let predictedAlt = 1;
if (p.numberOfTransitions > 1) {
predictedAlt = this.visitDecisionState(p);
}
let transition = p.transition(predictedAlt - 1);
switch (transition.serializationType) {
case 1 /* EPSILON */:
if (this.pushRecursionContextStates.get(p.stateNumber) &&
!(transition.target instanceof LoopEndState_1.LoopEndState)) {
// We are at the start of a left recursive rule's (...)* loop
// and we're not taking the exit branch of loop.
let parentContext = this._parentContextStack[this._parentContextStack.length - 1];
let localctx = this.createInterpreterRuleContext(parentContext[0], parentContext[1], this._ctx.ruleIndex);
this.pushNewRecursionContext(localctx, this._atn.ruleToStartState[p.ruleIndex].stateNumber, this._ctx.ruleIndex);
}
break;
case 5 /* ATOM */:
this.match(transition._label);
break;
case 2 /* RANGE */:
case 7 /* SET */:
case 8 /* NOT_SET */:
if (!transition.matches(this._input.LA(1), Token_1.Token.MIN_USER_TOKEN_TYPE, 65535)) {
this.recoverInline();
}
this.matchWildcard();
break;
case 9 /* WILDCARD */:
this.matchWildcard();
break;
case 3 /* RULE */:
let ruleStartState = transition.target;
let ruleIndex = ruleStartState.ruleIndex;
let newctx = this.createInterpreterRuleContext(this._ctx, p.stateNumber, ruleIndex);
if (ruleStartState.isPrecedenceRule) {
this.enterRecursionRule(newctx, ruleStartState.stateNumber, ruleIndex, transition.precedence);
}
else {
this.enterRule(newctx, transition.target.stateNumber, ruleIndex);
}
break;
case 4 /* PREDICATE */:
let predicateTransition = transition;
if (!this.sempred(this._ctx, predicateTransition.ruleIndex, predicateTransition.predIndex)) {
throw new FailedPredicateException_1.FailedPredicateException(this);
}
break;
case 6 /* ACTION */:
let actionTransition = transition;
this.action(this._ctx, actionTransition.ruleIndex, actionTransition.actionIndex);
break;
case 10 /* PRECEDENCE */:
if (!this.precpred(this._ctx, transition.precedence)) {
let precedence = transition.precedence;
throw new FailedPredicateException_1.FailedPredicateException(this, `precpred(_ctx, ${precedence})`);
}
break;
default:
throw new Error("UnsupportedOperationException: Unrecognized ATN transition type.");
}
this.state = transition.target.stateNumber;
}
/** Method visitDecisionState() is called when the interpreter reaches
* a decision state (instance of DecisionState). It gives an opportunity
* for subclasses to track interesting things.
*/
visitDecisionState(p) {
let predictedAlt;
this.errorHandler.sync(this);
let decision = p.decision;
if (decision === this.overrideDecision && this._input.index === this.overrideDecisionInputIndex && !this.overrideDecisionReached) {
predictedAlt = this.overrideDecisionAlt;
this.overrideDecisionReached = true;
}
else {
predictedAlt = this.interpreter.adaptivePredict(this._input, decision, this._ctx);
}
return predictedAlt;
}
/** Provide simple "factory" for InterpreterRuleContext's.
* @since 4.5.1
*/
createInterpreterRuleContext(parent, invokingStateNumber, ruleIndex) {
return new InterpreterRuleContext_1.InterpreterRuleContext(ruleIndex, parent, invokingStateNumber);
}
visitRuleStopState(p) {
let ruleStartState = this._atn.ruleToStartState[p.ruleIndex];
if (ruleStartState.isPrecedenceRule) {
let parentContext = this._parentContextStack.pop();
this.unrollRecursionContexts(parentContext[0]);
this.state = parentContext[1];
}
else {
this.exitRule();
}
let ruleTransition = this._atn.states[this.state].transition(0);
this.state = ruleTransition.followState.stateNumber;
}
/** Override this parser interpreters normal decision-making process
* at a particular decision and input token index. Instead of
* allowing the adaptive prediction mechanism to choose the
* first alternative within a block that leads to a successful parse,
* force it to take the alternative, 1..n for n alternatives.
*
* As an implementation limitation right now, you can only specify one
* override. This is sufficient to allow construction of different
* parse trees for ambiguous input. It means re-parsing the entire input
* in general because you're never sure where an ambiguous sequence would
* live in the various parse trees. For example, in one interpretation,
* an ambiguous input sequence would be matched completely in expression
* but in another it could match all the way back to the root.
*
* s : e '!'? ;
* e : ID
* | ID '!'
* ;
*
* Here, x! can be matched as (s (e ID) !) or (s (e ID !)). In the first
* case, the ambiguous sequence is fully contained only by the root.
* In the second case, the ambiguous sequences fully contained within just
* e, as in: (e ID !).
*
* Rather than trying to optimize this and make
* some intelligent decisions for optimization purposes, I settled on
* just re-parsing the whole input and then using
* {link Trees#getRootOfSubtreeEnclosingRegion} to find the minimal
* subtree that contains the ambiguous sequence. I originally tried to
* record the call stack at the point the parser detected and ambiguity but
* left recursive rules create a parse tree stack that does not reflect
* the actual call stack. That impedance mismatch was enough to make
* it it challenging to restart the parser at a deeply nested rule
* invocation.
*
* Only parser interpreters can override decisions so as to avoid inserting
* override checking code in the critical ALL(*) prediction execution path.
*
* @since 4.5
*/
addDecisionOverride(decision, tokenIndex, forcedAlt) {
this.overrideDecision = decision;
this.overrideDecisionInputIndex = tokenIndex;
this.overrideDecisionAlt = forcedAlt;
}
get overrideDecisionRoot() {
return this._overrideDecisionRoot;
}
/** Rely on the error handler for this parser but, if no tokens are consumed
* to recover, add an error node. Otherwise, nothing is seen in the parse
* tree.
*/
recover(e) {
let i = this._input.index;
this.errorHandler.recover(this, e);
if (this._input.index === i) {
// no input consumed, better add an error node
let tok = e.getOffendingToken();
if (!tok) {
throw new Error("Expected exception to have an offending token");
}
let source = tok.tokenSource;
let stream = source !== undefined ? source.inputStream : undefined;
let sourcePair = { source, stream };
if (e instanceof InputMismatchException_1.InputMismatchException) {
let expectedTokens = e.expectedTokens;
if (expectedTokens === undefined) {
throw new Error("Expected the exception to provide expected tokens");
}
let expectedTokenType = Token_1.Token.INVALID_TYPE;
if (!expectedTokens.isNil) {
// get any element
expectedTokenType = expectedTokens.minElement;
}
let errToken = this.tokenFactory.create(sourcePair, expectedTokenType, tok.text, Token_1.Token.DEFAULT_CHANNEL, -1, -1, // invalid start/stop
tok.line, tok.charPositionInLine);
this._ctx.addErrorNode(this.createErrorNode(this._ctx, errToken));
}
else { // NoViableAlt
let source = tok.tokenSource;
let errToken = this.tokenFactory.create(sourcePair, Token_1.Token.INVALID_TYPE, tok.text, Token_1.Token.DEFAULT_CHANNEL, -1, -1, // invalid start/stop
tok.line, tok.charPositionInLine);
this._ctx.addErrorNode(this.createErrorNode(this._ctx, errToken));
}
}
}
recoverInline() {
return this._errHandler.recoverInline(this);
}
/** Return the root of the parse, which can be useful if the parser
* bails out. You still can access the top node. Note that,
* because of the way left recursive rules add children, it's possible
* that the root will not have any children if the start rule immediately
* called and left recursive rule that fails.
*
* @since 4.5.1
*/
get rootContext() {
return this._rootContext;
}
};
__decorate([
Decorators_1.NotNull
], ParserInterpreter.prototype, "_vocabulary", void 0);
__decorate([
Decorators_2.Override
], ParserInterpreter.prototype, "reset", null);
__decorate([
Decorators_2.Override
], ParserInterpreter.prototype, "atn", null);
__decorate([
Decorators_2.Override
], ParserInterpreter.prototype, "vocabulary", null);
__decorate([
Decorators_2.Override
], ParserInterpreter.prototype, "ruleNames", null);
__decorate([
Decorators_2.Override
], ParserInterpreter.prototype, "grammarFileName", null);
__decorate([
Decorators_2.Override
], ParserInterpreter.prototype, "enterRecursionRule", null);
ParserInterpreter = __decorate([
__param(1, Decorators_1.NotNull)
], ParserInterpreter);
exports.ParserInterpreter = ParserInterpreter;
//# sourceMappingURL=ParserInterpreter.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,169 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ErrorNode } from "./tree/ErrorNode";
import { Interval } from "./misc/Interval";
import { Parser } from "./Parser";
import { ParseTree } from "./tree/ParseTree";
import { ParseTreeListener } from "./tree/ParseTreeListener";
import { RecognitionException } from "./RecognitionException";
import { RuleContext } from "./RuleContext";
import { TerminalNode } from "./tree/TerminalNode";
import { Token } from "./Token";
/** A rule invocation record for parsing.
*
* Contains all of the information about the current rule not stored in the
* RuleContext. It handles parse tree children list, Any ATN state
* tracing, and the default values available for rule invocations:
* start, stop, rule index, current alt number.
*
* Subclasses made for each rule and grammar track the parameters,
* return values, locals, and labels specific to that rule. These
* are the objects that are returned from rules.
*
* Note text is not an actual field of a rule return value; it is computed
* from start and stop using the input stream's toString() method. I
* could add a ctor to this so that we can pass in and store the input
* stream, but I'm not sure we want to do that. It would seem to be undefined
* to get the .text property anyway if the rule matches tokens from multiple
* input streams.
*
* I do not use getters for fields of objects that are used simply to
* group values such as this aggregate. The getters/setters are there to
* satisfy the superclass interface.
*/
export declare class ParserRuleContext extends RuleContext {
private static readonly EMPTY;
/** If we are debugging or building a parse tree for a visitor,
* we need to track all of the tokens and rule invocations associated
* with this rule's context. This is empty for parsing w/o tree constr.
* operation because we don't the need to track the details about
* how we parse this rule.
*/
children?: ParseTree[];
/** For debugging/tracing purposes, we want to track all of the nodes in
* the ATN traversed by the parser for a particular rule.
* This list indicates the sequence of ATN nodes used to match
* the elements of the children list. This list does not include
* ATN nodes and other rules used to match rule invocations. It
* traces the rule invocation node itself but nothing inside that
* other rule's ATN submachine.
*
* There is NOT a one-to-one correspondence between the children and
* states list. There are typically many nodes in the ATN traversed
* for each element in the children list. For example, for a rule
* invocation there is the invoking state and the following state.
*
* The parser state property updates field s and adds it to this list
* if we are debugging/tracing.
*
* This does not trace states visited during prediction.
*/
_start: Token;
_stop: Token | undefined;
/**
* The exception that forced this rule to return. If the rule successfully
* completed, this is `undefined`.
*/
exception?: RecognitionException;
constructor();
constructor(parent: ParserRuleContext | undefined, invokingStateNumber: number);
static emptyContext(): ParserRuleContext;
/**
* COPY a ctx (I'm deliberately not using copy constructor) to avoid
* confusion with creating node with parent. Does not copy children
* (except error leaves).
*
* This is used in the generated parser code to flip a generic XContext
* node for rule X to a YContext for alt label Y. In that sense, it is not
* really a generic copy function.
*
* If we do an error sync() at start of a rule, we might add error nodes
* to the generic XContext so this function must copy those nodes to the
* YContext as well else they are lost!
*/
copyFrom(ctx: ParserRuleContext): void;
enterRule(listener: ParseTreeListener): void;
exitRule(listener: ParseTreeListener): void;
/** Add a parse tree node to this as a child. Works for
* internal and leaf nodes. Does not set parent link;
* other add methods must do that. Other addChild methods
* call this.
*
* We cannot set the parent pointer of the incoming node
* because the existing interfaces do not have a setParent()
* method and I don't want to break backward compatibility for this.
*
* @since 4.7
*/
addAnyChild<T extends ParseTree>(t: T): T;
/** Add a token leaf node child and force its parent to be this node. */
addChild(t: TerminalNode): void;
addChild(ruleInvocation: RuleContext): void;
/**
* Add a child to this node based upon matchedToken. It
* creates a TerminalNodeImpl rather than using
* {@link Parser#createTerminalNode(ParserRuleContext, Token)}. I'm leaving this
* in for compatibility but the parser doesn't use this anymore.
*
* @deprecated Use another overload instead.
*/
addChild(matchedToken: Token): TerminalNode;
/** Add an error node child and force its parent to be this node.
*
* @since 4.7
*/
addErrorNode(errorNode: ErrorNode): ErrorNode;
/**
* Add a child to this node based upon badToken. It
* creates a ErrorNode rather than using
* {@link Parser#createErrorNode(ParserRuleContext, Token)}. I'm leaving this
* in for compatibility but the parser doesn't use this anymore.
*
* @deprecated Use another overload instead.
*/
addErrorNode(badToken: Token): ErrorNode;
/** Used by enterOuterAlt to toss out a RuleContext previously added as
* we entered a rule. If we have # label, we will need to remove
* generic ruleContext object.
*/
removeLastChild(): void;
get parent(): ParserRuleContext | undefined;
getChild(i: number): ParseTree;
getChild<T extends ParseTree>(i: number, ctxType: {
new (...args: any[]): T;
}): T;
tryGetChild<T extends ParseTree>(i: number, ctxType: {
new (...args: any[]): T;
}): T | undefined;
getToken(ttype: number, i: number): TerminalNode;
tryGetToken(ttype: number, i: number): TerminalNode | undefined;
getTokens(ttype: number): TerminalNode[];
get ruleContext(): this;
getRuleContext<T extends ParserRuleContext>(i: number, ctxType: {
new (...args: any[]): T;
}): T;
tryGetRuleContext<T extends ParserRuleContext>(i: number, ctxType: {
new (...args: any[]): T;
}): T | undefined;
getRuleContexts<T extends ParserRuleContext>(ctxType: {
new (...args: any[]): T;
}): T[];
get childCount(): number;
get sourceInterval(): Interval;
/**
* Get the initial token in this context.
* Note that the range from start to stop is inclusive, so for rules that do not consume anything
* (for example, zero length or error productions) this token may exceed stop.
*/
get start(): Token;
/**
* Get the final token in this context.
* Note that the range from start to stop is inclusive, so for rules that do not consume anything
* (for example, zero length or error productions) this token may precede start.
*/
get stop(): Token | undefined;
/** Used for rule context info debugging during parse-time, not so much for ATN debugging */
toInfoString(recognizer: Parser): string;
}

View File

@@ -0,0 +1,300 @@
"use strict";
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.ParserRuleContext = void 0;
// ConvertTo-TS run at 2016-10-04T11:26:56.6285494-07:00
const ErrorNode_1 = require("./tree/ErrorNode");
const Interval_1 = require("./misc/Interval");
const Decorators_1 = require("./Decorators");
const RuleContext_1 = require("./RuleContext");
const TerminalNode_1 = require("./tree/TerminalNode");
/** A rule invocation record for parsing.
*
* Contains all of the information about the current rule not stored in the
* RuleContext. It handles parse tree children list, Any ATN state
* tracing, and the default values available for rule invocations:
* start, stop, rule index, current alt number.
*
* Subclasses made for each rule and grammar track the parameters,
* return values, locals, and labels specific to that rule. These
* are the objects that are returned from rules.
*
* Note text is not an actual field of a rule return value; it is computed
* from start and stop using the input stream's toString() method. I
* could add a ctor to this so that we can pass in and store the input
* stream, but I'm not sure we want to do that. It would seem to be undefined
* to get the .text property anyway if the rule matches tokens from multiple
* input streams.
*
* I do not use getters for fields of objects that are used simply to
* group values such as this aggregate. The getters/setters are there to
* satisfy the superclass interface.
*/
class ParserRuleContext extends RuleContext_1.RuleContext {
constructor(parent, invokingStateNumber) {
if (invokingStateNumber == null) {
super();
}
else {
super(parent, invokingStateNumber);
}
}
static emptyContext() {
return ParserRuleContext.EMPTY;
}
/**
* COPY a ctx (I'm deliberately not using copy constructor) to avoid
* confusion with creating node with parent. Does not copy children
* (except error leaves).
*
* This is used in the generated parser code to flip a generic XContext
* node for rule X to a YContext for alt label Y. In that sense, it is not
* really a generic copy function.
*
* If we do an error sync() at start of a rule, we might add error nodes
* to the generic XContext so this function must copy those nodes to the
* YContext as well else they are lost!
*/
copyFrom(ctx) {
this._parent = ctx._parent;
this.invokingState = ctx.invokingState;
this._start = ctx._start;
this._stop = ctx._stop;
// copy any error nodes to alt label node
if (ctx.children) {
this.children = [];
// reset parent pointer for any error nodes
for (let child of ctx.children) {
if (child instanceof ErrorNode_1.ErrorNode) {
this.addChild(child);
}
}
}
}
// Double dispatch methods for listeners
enterRule(listener) {
// intentionally empty
}
exitRule(listener) {
// intentionally empty
}
/** Add a parse tree node to this as a child. Works for
* internal and leaf nodes. Does not set parent link;
* other add methods must do that. Other addChild methods
* call this.
*
* We cannot set the parent pointer of the incoming node
* because the existing interfaces do not have a setParent()
* method and I don't want to break backward compatibility for this.
*
* @since 4.7
*/
addAnyChild(t) {
if (!this.children) {
this.children = [t];
}
else {
this.children.push(t);
}
return t;
}
addChild(t) {
let result;
if (t instanceof TerminalNode_1.TerminalNode) {
t.setParent(this);
this.addAnyChild(t);
return;
}
else if (t instanceof RuleContext_1.RuleContext) {
// Does not set parent link
this.addAnyChild(t);
return;
}
else {
// Deprecated code path
t = new TerminalNode_1.TerminalNode(t);
this.addAnyChild(t);
t.setParent(this);
return t;
}
}
addErrorNode(node) {
if (node instanceof ErrorNode_1.ErrorNode) {
const errorNode = node;
errorNode.setParent(this);
return this.addAnyChild(errorNode);
}
else {
// deprecated path
const badToken = node;
let t = new ErrorNode_1.ErrorNode(badToken);
this.addAnyChild(t);
t.setParent(this);
return t;
}
}
// public void trace(int s) {
// if ( states==null ) states = new ArrayList<Integer>();
// states.add(s);
// }
/** Used by enterOuterAlt to toss out a RuleContext previously added as
* we entered a rule. If we have # label, we will need to remove
* generic ruleContext object.
*/
removeLastChild() {
if (this.children) {
this.children.pop();
}
}
get parent() {
let parent = super.parent;
if (parent === undefined || parent instanceof ParserRuleContext) {
return parent;
}
throw new TypeError("Invalid parent type for ParserRuleContext");
}
// Note: in TypeScript, order or arguments reversed
getChild(i, ctxType) {
if (!this.children || i < 0 || i >= this.children.length) {
throw new RangeError("index parameter must be between >= 0 and <= number of children.");
}
if (ctxType == null) {
return this.children[i];
}
let result = this.tryGetChild(i, ctxType);
if (result === undefined) {
throw new Error("The specified node does not exist");
}
return result;
}
tryGetChild(i, ctxType) {
if (!this.children || i < 0 || i >= this.children.length) {
return undefined;
}
let j = -1; // what node with ctxType have we found?
for (let o of this.children) {
if (o instanceof ctxType) {
j++;
if (j === i) {
return o;
}
}
}
return undefined;
}
getToken(ttype, i) {
let result = this.tryGetToken(ttype, i);
if (result === undefined) {
throw new Error("The specified token does not exist");
}
return result;
}
tryGetToken(ttype, i) {
if (!this.children || i < 0 || i >= this.children.length) {
return undefined;
}
let j = -1; // what token with ttype have we found?
for (let o of this.children) {
if (o instanceof TerminalNode_1.TerminalNode) {
let symbol = o.symbol;
if (symbol.type === ttype) {
j++;
if (j === i) {
return o;
}
}
}
}
return undefined;
}
getTokens(ttype) {
let tokens = [];
if (!this.children) {
return tokens;
}
for (let o of this.children) {
if (o instanceof TerminalNode_1.TerminalNode) {
let symbol = o.symbol;
if (symbol.type === ttype) {
tokens.push(o);
}
}
}
return tokens;
}
get ruleContext() {
return this;
}
// NOTE: argument order change from Java version
getRuleContext(i, ctxType) {
return this.getChild(i, ctxType);
}
tryGetRuleContext(i, ctxType) {
return this.tryGetChild(i, ctxType);
}
getRuleContexts(ctxType) {
let contexts = [];
if (!this.children) {
return contexts;
}
for (let o of this.children) {
if (o instanceof ctxType) {
contexts.push(o);
}
}
return contexts;
}
get childCount() {
return this.children ? this.children.length : 0;
}
get sourceInterval() {
if (!this._start) {
return Interval_1.Interval.INVALID;
}
if (!this._stop || this._stop.tokenIndex < this._start.tokenIndex) {
return Interval_1.Interval.of(this._start.tokenIndex, this._start.tokenIndex - 1); // empty
}
return Interval_1.Interval.of(this._start.tokenIndex, this._stop.tokenIndex);
}
/**
* Get the initial token in this context.
* Note that the range from start to stop is inclusive, so for rules that do not consume anything
* (for example, zero length or error productions) this token may exceed stop.
*/
get start() { return this._start; }
/**
* Get the final token in this context.
* Note that the range from start to stop is inclusive, so for rules that do not consume anything
* (for example, zero length or error productions) this token may precede start.
*/
get stop() { return this._stop; }
/** Used for rule context info debugging during parse-time, not so much for ATN debugging */
toInfoString(recognizer) {
// NOTE(review): `rules` is an array, so string concatenation joins it with
// commas and no surrounding brackets ("a,b,c"), unlike Java's List.toString()
// ("[a, b, c]"). Presumably acceptable for a debug string — confirm against
// the Java runtime if exact parity matters.
let rules = recognizer.getRuleInvocationStack(this).reverse();
return "ParserRuleContext" + rules + "{" +
"start=" + this._start +
", stop=" + this._stop +
"}";
}
}
// Shared singleton used wherever an "empty" rule context is required.
ParserRuleContext.EMPTY = new ParserRuleContext();
// Compiler-emitted decorator applications: attach the @Override marker to the
// accessors that override base-class members.
__decorate([
Decorators_1.Override
], ParserRuleContext.prototype, "parent", null);
__decorate([
Decorators_1.Override
], ParserRuleContext.prototype, "childCount", null);
__decorate([
Decorators_1.Override
], ParserRuleContext.prototype, "sourceInterval", null);
exports.ParserRuleContext = ParserRuleContext;
//# sourceMappingURL=ParserRuleContext.js.map

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,20 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ANTLRErrorListener } from "./ANTLRErrorListener";
import { RecognitionException } from "./RecognitionException";
import { Recognizer } from "./Recognizer";
/**
 * This implementation of {@link ANTLRErrorListener} dispatches all calls to a
 * collection of delegate listeners. This reduces the effort required to support multiple
 * listeners.
 *
 * @author Sam Harwell
 */
export declare class ProxyErrorListener<TSymbol, TListener extends ANTLRErrorListener<TSymbol>> implements ANTLRErrorListener<TSymbol> {
    /** The listeners that every notification is forwarded to. */
    private delegates;
    /**
     * @param delegates The listeners to forward calls to. The implementation
     * rejects a missing (null/undefined) value at runtime with an `Error`.
     */
    constructor(delegates: TListener[]);
    /** Read-only view of the delegate listeners, for use by subclasses. */
    protected getDelegates(): ReadonlyArray<TListener>;
    /**
     * Forwards the syntax error to each delegate that implements
     * `syntaxError`; delegates without that method are skipped.
     */
    syntaxError<T extends TSymbol>(recognizer: Recognizer<T, any>, offendingSymbol: T | undefined, line: number, charPositionInLine: number, msg: string, e: RecognitionException | undefined): void;
}

View File

@@ -0,0 +1,49 @@
"use strict";
/*!
 * Copyright 2016 The ANTLR Project. All rights reserved.
 * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
 */
// Compiler-emitted helper: applies `decorators` to a class (fewer than 3 args)
// or to a member descriptor, preferring Reflect.decorate when available.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
// Compiler-emitted helper: wraps a parameter decorator together with the
// parameter index so it can be applied through __decorate.
var __param = (this && this.__param) || function (paramIndex, decorator) {
return function (target, key) { decorator(target, key, paramIndex); }
};
// CommonJS module setup emitted by the TypeScript compiler.
Object.defineProperty(exports, "__esModule", { value: true });
exports.ProxyErrorListener = void 0;
const Decorators_1 = require("./Decorators");
/**
* This implementation of {@link ANTLRErrorListener} dispatches all calls to a
* collection of delegate listeners. This reduces the effort required to support multiple
* listeners.
*
* @author Sam Harwell
*/
class ProxyErrorListener {
constructor(delegates) {
this.delegates = delegates;
if (!delegates) {
throw new Error("Invalid delegates");
}
}
getDelegates() {
return this.delegates;
}
syntaxError(recognizer, offendingSymbol, line, charPositionInLine, msg, e) {
this.delegates.forEach((listener) => {
if (listener.syntaxError) {
listener.syntaxError(recognizer, offendingSymbol, line, charPositionInLine, msg, e);
}
});
}
}
// Apply the decorator metadata emitted for `syntaxError`: @Override on the
// method plus @NotNull markers on parameter 0 (recognizer) and parameter 4 (msg).
__decorate([
Decorators_1.Override,
__param(0, Decorators_1.NotNull),
__param(4, Decorators_1.NotNull)
], ProxyErrorListener.prototype, "syntaxError", null);
exports.ProxyErrorListener = ProxyErrorListener;
//# sourceMappingURL=ProxyErrorListener.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"ProxyErrorListener.js","sourceRoot":"","sources":["../../src/ProxyErrorListener.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;;;;;;;;;;AAMH,6CAAiD;AAEjD;;;;;;GAMG;AACH,MAAa,kBAAkB;IAE9B,YAAoB,SAAsB;QAAtB,cAAS,GAAT,SAAS,CAAa;QACzC,IAAI,CAAC,SAAS,EAAE;YACf,MAAM,IAAI,KAAK,CAAC,mBAAmB,CAAC,CAAC;SACrC;IACF,CAAC;IAES,YAAY;QACrB,OAAO,IAAI,CAAC,SAAS,CAAC;IACvB,CAAC;IAGM,WAAW,CACR,UAA8B,EACvC,eAA8B,EAC9B,IAAY,EACZ,kBAA0B,EACjB,GAAW,EACpB,CAAmC;QACnC,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,CAAC,QAAQ,EAAE,EAAE;YACnC,IAAI,QAAQ,CAAC,WAAW,EAAE;gBACzB,QAAQ,CAAC,WAAW,CAAC,UAAU,EAAE,eAAe,EAAE,IAAI,EAAE,kBAAkB,EAAE,GAAG,EAAE,CAAC,CAAC,CAAC;aACpF;QACF,CAAC,CAAC,CAAC;IACJ,CAAC;CACD;AAbA;IADC,qBAAQ;IAEP,WAAA,oBAAO,CAAA;IAIP,WAAA,oBAAO,CAAA;qDAOR;AAzBF,gDA0BC","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:56.8126690-07:00\r\nimport { ANTLRErrorListener } from \"./ANTLRErrorListener\";\r\nimport { RecognitionException } from \"./RecognitionException\";\r\nimport { Recognizer } from \"./Recognizer\";\r\nimport { Override, NotNull } from \"./Decorators\";\r\n\r\n/**\r\n * This implementation of {@link ANTLRErrorListener} dispatches all calls to a\r\n * collection of delegate listeners. 
This reduces the effort required to support multiple\r\n * listeners.\r\n *\r\n * @author Sam Harwell\r\n */\r\nexport class ProxyErrorListener<TSymbol, TListener extends ANTLRErrorListener<TSymbol>> implements ANTLRErrorListener<TSymbol> {\r\n\r\n\tconstructor(private delegates: TListener[]) {\r\n\t\tif (!delegates) {\r\n\t\t\tthrow new Error(\"Invalid delegates\");\r\n\t\t}\r\n\t}\r\n\r\n\tprotected getDelegates(): ReadonlyArray<TListener> {\r\n\t\treturn this.delegates;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic syntaxError<T extends TSymbol>(\r\n\t\t@NotNull recognizer: Recognizer<T, any>,\r\n\t\toffendingSymbol: T | undefined,\r\n\t\tline: number,\r\n\t\tcharPositionInLine: number,\r\n\t\t@NotNull msg: string,\r\n\t\te: RecognitionException | undefined): void {\r\n\t\tthis.delegates.forEach((listener) => {\r\n\t\t\tif (listener.syntaxError) {\r\n\t\t\t\tlistener.syntaxError(recognizer, offendingSymbol, line, charPositionInLine, msg, e);\r\n\t\t\t}\r\n\t\t});\r\n\t}\r\n}\r\n"]}

View File

@@ -0,0 +1,21 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { ATNConfigSet } from "./atn/ATNConfigSet";
import { BitSet } from "./misc/BitSet";
import { DFA } from "./dfa/DFA";
import { Parser } from "./Parser";
import { ProxyErrorListener } from "./ProxyErrorListener";
import { ParserErrorListener } from "./ParserErrorListener";
import { SimulatorState } from "./atn/SimulatorState";
import { Token } from "./Token";
/**
 * Fans parser-specific error notifications out to a collection of
 * {@link ParserErrorListener} delegates; syntax errors are handled by the
 * {@link ProxyErrorListener} base class.
 *
 * @author Sam Harwell
 */
export declare class ProxyParserErrorListener extends ProxyErrorListener<Token, ParserErrorListener> implements ParserErrorListener {
    /** @param delegates The parser error listeners to forward every call to. */
    constructor(delegates: ParserErrorListener[]);
    /** Forwards the ambiguity report to each delegate that implements it. */
    reportAmbiguity(recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, exact: boolean, ambigAlts: BitSet | undefined, configs: ATNConfigSet): void;
    /** Forwards the full-context-attempt report to each delegate that implements it. */
    reportAttemptingFullContext(recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, conflictingAlts: BitSet | undefined, conflictState: SimulatorState): void;
    /** Forwards the context-sensitivity report to each delegate that implements it. */
    reportContextSensitivity(recognizer: Parser, dfa: DFA, startIndex: number, stopIndex: number, prediction: number, acceptState: SimulatorState): void;
}

View File

@@ -0,0 +1,58 @@
"use strict";
/*!
 * Copyright 2016 The ANTLR Project. All rights reserved.
 * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
 */
// Compiler-emitted helper: applies `decorators` to a class (fewer than 3 args)
// or to a member descriptor, preferring Reflect.decorate when available.
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
// CommonJS module setup emitted by the TypeScript compiler.
Object.defineProperty(exports, "__esModule", { value: true });
exports.ProxyParserErrorListener = void 0;
const ProxyErrorListener_1 = require("./ProxyErrorListener");
const Decorators_1 = require("./Decorators");
/**
 * Fans parser-specific error notifications out to a collection of delegate
 * listeners; plain syntax errors are forwarded by the ProxyErrorListener base.
 *
 * @author Sam Harwell
 */
class ProxyParserErrorListener extends ProxyErrorListener_1.ProxyErrorListener {
    /** @param delegates The parser error listeners to forward every call to. */
    constructor(delegates) {
        super(delegates);
    }
    /** Forward an ambiguity report to each delegate that implements it. */
    reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) {
        for (const delegate of this.getDelegates()) {
            if (delegate.reportAmbiguity) {
                delegate.reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs);
            }
        }
    }
    /** Forward a full-context-attempt report to each delegate that implements it. */
    reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, conflictState) {
        for (const delegate of this.getDelegates()) {
            if (delegate.reportAttemptingFullContext) {
                delegate.reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, conflictState);
            }
        }
    }
    /** Forward a context-sensitivity report to each delegate that implements it. */
    reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, acceptState) {
        for (const delegate of this.getDelegates()) {
            if (delegate.reportContextSensitivity) {
                delegate.reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, acceptState);
            }
        }
    }
}
// Apply the @Override decorator metadata emitted for the three report methods.
__decorate([
Decorators_1.Override
], ProxyParserErrorListener.prototype, "reportAmbiguity", null);
__decorate([
Decorators_1.Override
], ProxyParserErrorListener.prototype, "reportAttemptingFullContext", null);
__decorate([
Decorators_1.Override
], ProxyParserErrorListener.prototype, "reportContextSensitivity", null);
exports.ProxyParserErrorListener = ProxyParserErrorListener;
//# sourceMappingURL=ProxyParserErrorListener.js.map

View File

@@ -0,0 +1 @@
{"version":3,"file":"ProxyParserErrorListener.js","sourceRoot":"","sources":["../../src/ProxyParserErrorListener.ts"],"names":[],"mappings":";AAAA;;;GAGG;;;;;;;;;AAUH,6DAA0D;AAI1D,6CAAwC;AAExC;;GAEG;AACH,MAAa,wBAAyB,SAAQ,uCAA8C;IAG3F,YAAY,SAAgC;QAC3C,KAAK,CAAC,SAAS,CAAC,CAAC;IAClB,CAAC;IAGM,eAAe,CACrB,UAAkB,EAClB,GAAQ,EACR,UAAkB,EAClB,SAAiB,EACjB,KAAc,EACd,SAA6B,EAC7B,OAAqB;QACrB,IAAI,CAAC,YAAY,EAAE;aACjB,OAAO,CAAC,CAAC,QAAQ,EAAE,EAAE;YACrB,IAAI,QAAQ,CAAC,eAAe,EAAE;gBAC7B,QAAQ,CAAC,eAAe,CACvB,UAAU,EACV,GAAG,EACH,UAAU,EACV,SAAS,EACT,KAAK,EACL,SAAS,EACT,OAAO,CAAC,CAAC;aACV;QAEF,CAAC,CAAC,CAAC;IACL,CAAC;IAGM,2BAA2B,CACjC,UAAkB,EAClB,GAAQ,EACR,UAAkB,EAClB,SAAiB,EACjB,eAAmC,EACnC,aAA6B;QAC7B,IAAI,CAAC,YAAY,EAAE;aACjB,OAAO,CAAC,CAAC,QAAQ,EAAE,EAAE;YACrB,IAAI,QAAQ,CAAC,2BAA2B,EAAE;gBACzC,QAAQ,CAAC,2BAA2B,CACnC,UAAU,EACV,GAAG,EACH,UAAU,EACV,SAAS,EACT,eAAe,EACf,aAAa,CAAC,CAAC;aAChB;QACF,CAAC,CAAC,CAAC;IACL,CAAC;IAGM,wBAAwB,CAC9B,UAAkB,EAClB,GAAQ,EACR,UAAkB,EAClB,SAAiB,EACjB,UAAkB,EAClB,WAA2B;QAC3B,IAAI,CAAC,YAAY,EAAE;aACjB,OAAO,CAAC,CAAC,QAAQ,EAAE,EAAE;YACrB,IAAI,QAAQ,CAAC,wBAAwB,EAAE;gBACtC,QAAQ,CAAC,wBAAwB,CAChC,UAAU,EACV,GAAG,EACH,UAAU,EACV,SAAS,EACT,UAAU,EACV,WAAW,CAAC,CAAC;aACd;QACF,CAAC,CAAC,CAAC;IACL,CAAC;CACD;AAnEA;IADC,qBAAQ;+DAuBR;AAGD;IADC,qBAAQ;2EAoBR;AAGD;IADC,qBAAQ;wEAoBR;AA1EF,4DA2EC","sourcesContent":["/*!\r\n * Copyright 2016 The ANTLR Project. All rights reserved.\r\n * Licensed under the BSD-3-Clause license. 
See LICENSE file in the project root for license information.\r\n */\r\n\r\n// ConvertTo-TS run at 2016-10-04T11:26:56.9812284-07:00\r\nimport { ANTLRErrorListener } from \"./ANTLRErrorListener\";\r\nimport { ATNConfigSet } from \"./atn/ATNConfigSet\";\r\nimport { BitSet } from \"./misc/BitSet\";\r\nimport { DFA } from \"./dfa/DFA\";\r\nimport { Parser } from \"./Parser\";\r\nimport { RecognitionException } from \"./RecognitionException\";\r\nimport { Recognizer } from \"./Recognizer\";\r\nimport { ProxyErrorListener } from \"./ProxyErrorListener\";\r\nimport { ParserErrorListener } from \"./ParserErrorListener\";\r\nimport { SimulatorState } from \"./atn/SimulatorState\";\r\nimport { Token } from \"./Token\";\r\nimport { Override } from \"./Decorators\";\r\n\r\n/**\r\n * @author Sam Harwell\r\n */\r\nexport class ProxyParserErrorListener extends ProxyErrorListener<Token, ParserErrorListener>\r\n\timplements ParserErrorListener {\r\n\r\n\tconstructor(delegates: ParserErrorListener[]) {\r\n\t\tsuper(delegates);\r\n\t}\r\n\r\n\t@Override\r\n\tpublic reportAmbiguity(\r\n\t\trecognizer: Parser,\r\n\t\tdfa: DFA,\r\n\t\tstartIndex: number,\r\n\t\tstopIndex: number,\r\n\t\texact: boolean,\r\n\t\tambigAlts: BitSet | undefined,\r\n\t\tconfigs: ATNConfigSet): void {\r\n\t\tthis.getDelegates()\r\n\t\t\t.forEach((listener) => {\r\n\t\t\t\tif (listener.reportAmbiguity) {\r\n\t\t\t\t\tlistener.reportAmbiguity(\r\n\t\t\t\t\t\trecognizer,\r\n\t\t\t\t\t\tdfa,\r\n\t\t\t\t\t\tstartIndex,\r\n\t\t\t\t\t\tstopIndex,\r\n\t\t\t\t\t\texact,\r\n\t\t\t\t\t\tambigAlts,\r\n\t\t\t\t\t\tconfigs);\r\n\t\t\t\t}\r\n\r\n\t\t\t});\r\n\t}\r\n\r\n\t@Override\r\n\tpublic reportAttemptingFullContext(\r\n\t\trecognizer: Parser,\r\n\t\tdfa: DFA,\r\n\t\tstartIndex: number,\r\n\t\tstopIndex: number,\r\n\t\tconflictingAlts: BitSet | undefined,\r\n\t\tconflictState: SimulatorState): void {\r\n\t\tthis.getDelegates()\r\n\t\t\t.forEach((listener) => {\r\n\t\t\t\tif (listener.reportAttemptingFullContext) 
{\r\n\t\t\t\t\tlistener.reportAttemptingFullContext(\r\n\t\t\t\t\t\trecognizer,\r\n\t\t\t\t\t\tdfa,\r\n\t\t\t\t\t\tstartIndex,\r\n\t\t\t\t\t\tstopIndex,\r\n\t\t\t\t\t\tconflictingAlts,\r\n\t\t\t\t\t\tconflictState);\r\n\t\t\t\t}\r\n\t\t\t});\r\n\t}\r\n\r\n\t@Override\r\n\tpublic reportContextSensitivity(\r\n\t\trecognizer: Parser,\r\n\t\tdfa: DFA,\r\n\t\tstartIndex: number,\r\n\t\tstopIndex: number,\r\n\t\tprediction: number,\r\n\t\tacceptState: SimulatorState): void {\r\n\t\tthis.getDelegates()\r\n\t\t\t.forEach((listener) => {\r\n\t\t\t\tif (listener.reportContextSensitivity) {\r\n\t\t\t\t\tlistener.reportContextSensitivity(\r\n\t\t\t\t\t\trecognizer,\r\n\t\t\t\t\t\tdfa,\r\n\t\t\t\t\t\tstartIndex,\r\n\t\t\t\t\t\tstopIndex,\r\n\t\t\t\t\t\tprediction,\r\n\t\t\t\t\t\tacceptState);\r\n\t\t\t\t}\r\n\t\t\t});\r\n\t}\r\n}\r\n"]}

View File

@@ -0,0 +1,143 @@
# antlr4ts - TypeScript/JavaScript target for ANTLR 4
[![Join the chat at https://gitter.im/tunnelvisionlabs/antlr4ts](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/tunnelvisionlabs/antlr4ts?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build status](https://ci.appveyor.com/api/projects/status/d4gpmnrkfo3tb2t1/branch/master?svg=true)](https://ci.appveyor.com/project/sharwell/antlr4ts/branch/master)
[![License](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](./LICENSE)
## Overview
* **Releases:** See the [GitHub Releases](https://github.com/tunnelvisionlabs/antlr4ts/releases) page for release notes and
links to the distribution
* **Feedback:** Use [GitHub Issues](https://github.com/tunnelvisionlabs/antlr4ts/issues) for issues (bugs, enhancements,
features, and questions)
## Requirements
This project has separate requirements for developers and end users.
> :bulb: The requirements listed on this page only cover user scenarios - that is, scenarios where developers wish to
> use ANTLR 4 for parsing tasks inside of a TypeScript application. If you are interested in contributing to ANTLR 4
> itself, see [CONTRIBUTING.md](CONTRIBUTING.md) for contributor documentation.
### End user requirements
Parsers generated by the ANTLR 4 TypeScript target have a runtime dependency on the
[antlr4ts](https://www.npmjs.com/package/antlr4ts) package. The package is tested and known to work with Node.js 6.7.
### Development requirements
The tool used to generate TypeScript code from an ANTLR 4 grammar is written in Java. To fully utilize the ANTLR 4
TypeScript target (including the ability to regenerate code from a grammar file after changes are made), a Java Runtime
Environment (JRE) needs to be installed on the developer machine. The generated code itself uses several features new to
TypeScript 2.0.
* Java Runtime Environment 1.6+ (1.8+ recommended)
* TypeScript 2.0+
## Getting started
1. Install `antlr4ts` as a runtime dependency using your preferred package manager.
```bash
npm install antlr4ts --save
```
```bash
yarn add antlr4ts
```
2. Install `antlr4ts-cli` as a development dependency using your preferred package manager.
```bash
npm install antlr4ts-cli --save-dev
```
```bash
yarn add -D antlr4ts-cli
```
3. Add a grammar to your project, e.g. **path/to/MyGrammar.g4**
4. Add a script to **package.json** for compiling your grammar to TypeScript
```
"scripts": {
// ...
"antlr4ts": "antlr4ts -visitor path/to/MyGrammar.g4"
}
```
5. Use your grammar in TypeScript
```typescript
import { ANTLRInputStream, CommonTokenStream } from 'antlr4ts';
// Create the lexer and parser
let inputStream = new ANTLRInputStream("text");
let lexer = new MyGrammarLexer(inputStream);
let tokenStream = new CommonTokenStream(lexer);
let parser = new MyGrammarParser(tokenStream);
// Parse the input, where `compilationUnit` is whatever entry point you defined
let tree = parser.compilationUnit();
```
The two main ways to inspect the tree are by using a listener or a visitor; you can read about the differences between the two [here](https://github.com/antlr/antlr4/blob/master/doc/listeners.md).
###### Listener Approach
```typescript
// ...
import { MyGrammarParserListener } from './MyGrammarParserListener'
import { FunctionDeclarationContext } from './MyGrammarParser'
import { ParseTreeWalker } from 'antlr4ts/tree/ParseTreeWalker'
class EnterFunctionListener implements MyGrammarParserListener {
// Assuming a parser rule with name: `functionDeclaration`
enterFunctionDeclaration(context: FunctionDeclarationContext) {
console.log(`Function start line number ${context._start.line}`)
// ...
}
// other enterX functions...
}
// Create the listener
const listener: MyGrammarParserListener = new EnterFunctionListener();
// Use the entry point for listeners
ParseTreeWalker.DEFAULT.walk(listener, tree)
```
###### Visitor Approach
Note that you must pass the `-visitor` flag to antlr4ts to get the generated visitor file.
```typescript
// ...
import { MyGrammarParserVisitor } from './MyGrammarParserVisitor'
import { AbstractParseTreeVisitor } from 'antlr4ts/tree/AbstractParseTreeVisitor'
// Extend the AbstractParseTreeVisitor to get default visitor behaviour
class CountFunctionsVisitor extends AbstractParseTreeVisitor<number> implements MyGrammarParserVisitor<number> {
defaultResult() {
return 0
}
aggregateResult(aggregate: number, nextResult: number) {
return aggregate + nextResult
}
visitFunctionDeclaration(context: FunctionDeclarationContext): number {
return 1 + super.visitChildren(context)
}
}
// Create the visitor
const countFunctionsVisitor = new CountFunctionsVisitor()
// Use the visitor entry point
countFunctionsVisitor.visit(tree)
```

View File

@@ -0,0 +1,87 @@
/*!
* Copyright 2016 The ANTLR Project. All rights reserved.
* Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
*/
import { CharStream } from "./CharStream";
import { IntervalSet } from "./misc/IntervalSet";
import { IntStream } from "./IntStream";
import { Lexer } from "./Lexer";
import { ParserRuleContext } from "./ParserRuleContext";
import { Recognizer } from "./Recognizer";
import { RuleContext } from "./RuleContext";
import { Token } from "./Token";
/** The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
 * 3 kinds of errors: prediction errors, failed predicate errors, and
 * mismatched input errors. In each case, the parser knows where it is
 * in the input, where it is in the ATN, the rule invocation stack,
 * and what kind of problem occurred.
 */
export declare class RecognitionException extends Error {
    /** The {@link Recognizer} where this exception originated. */
    private _recognizer?;
    /** The {@link RuleContext} at the time this exception was thrown, if available. */
    private ctx?;
    /** The input stream that was the recognizer's symbol source, if available. */
    private input?;
    /**
     * The current {@link Token} when an error occurred. Since not all streams
     * support accessing symbols by index, we have to track the {@link Token}
     * instance itself.
     */
    private offendingToken?;
    /** ATN state number at the time of the error; -1 when the state is unknown. */
    private _offendingState;
    /**
     * Overloaded forms: a lexer variant taking a {@link CharStream}, and
     * parser variants taking an optional input stream, rule context, and message.
     */
    constructor(lexer: Lexer | undefined, input: CharStream);
    constructor(recognizer: Recognizer<Token, any> | undefined, input: IntStream | undefined, ctx: ParserRuleContext | undefined);
    constructor(recognizer: Recognizer<Token, any> | undefined, input: IntStream | undefined, ctx: ParserRuleContext | undefined, message: string);
    /**
     * Get the ATN state number the parser was in at the time the error
     * occurred. For {@link NoViableAltException} and
     * {@link LexerNoViableAltException} exceptions, this is the
     * {@link DecisionState} number. For others, it is the state whose outgoing
     * edge we couldn't match.
     *
     * If the state number is not known, this method returns -1.
     */
    get offendingState(): number;
    protected setOffendingState(offendingState: number): void;
    /**
     * Gets the set of input symbols which could potentially follow the
     * previously matched symbol at the time this exception was thrown.
     *
     * If the set of expected tokens is not known and could not be computed,
     * this method returns `undefined`.
     *
     * @returns The set of token types that could potentially follow the current
     * state in the ATN, or `undefined` if the information is not available.
     */
    get expectedTokens(): IntervalSet | undefined;
    /**
     * Gets the {@link RuleContext} at the time this exception was thrown.
     *
     * If the context is not available, this method returns `undefined`.
     *
     * @returns The {@link RuleContext} at the time this exception was thrown.
     * If the context is not available, this method returns `undefined`.
     */
    get context(): RuleContext | undefined;
    /**
     * Gets the input stream which is the symbol source for the recognizer where
     * this exception was thrown.
     *
     * If the input stream is not available, this method returns `undefined`.
     *
     * @returns The input stream which is the symbol source for the recognizer
     * where this exception was thrown, or `undefined` if the stream is not
     * available.
     */
    get inputStream(): IntStream | undefined;
    /** Returns the offending token only when `recognizer` is absent or is the originating recognizer. */
    getOffendingToken(recognizer?: Recognizer<Token, any>): Token | undefined;
    /** Records the offending token only when `recognizer` is the originating recognizer. */
    protected setOffendingToken<TSymbol extends Token>(recognizer: Recognizer<TSymbol, any>, offendingToken?: TSymbol): void;
    /**
     * Gets the {@link Recognizer} where this exception occurred.
     *
     * If the recognizer is not available, this method returns `undefined`.
     *
     * @returns The recognizer where this exception occurred, or `undefined` if
     * the recognizer is not available.
     */
    get recognizer(): Recognizer<any, any> | undefined;
}

View File

@@ -0,0 +1,104 @@
"use strict";
/*!
 * Copyright 2016 The ANTLR Project. All rights reserved.
 * Licensed under the BSD-3-Clause license. See LICENSE file in the project root for license information.
 */
// CommonJS module setup emitted by the TypeScript compiler.
Object.defineProperty(exports, "__esModule", { value: true });
exports.RecognitionException = void 0;
/** The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
* 3 kinds of errors: prediction errors, failed predicate errors, and
* mismatched input errors. In each case, the parser knows where it is
* in the input, where it is in the ATN, the rule invocation stack,
* and what kind of problem occurred.
*/
class RecognitionException extends Error {
constructor(recognizer, input, ctx, message) {
super(message);
this._offendingState = -1;
this._recognizer = recognizer;
this.input = input;
this.ctx = ctx;
if (recognizer) {
this._offendingState = recognizer.state;
}
}
/**
* Get the ATN state number the parser was in at the time the error
* occurred. For {@link NoViableAltException} and
* {@link LexerNoViableAltException} exceptions, this is the
* {@link DecisionState} number. For others, it is the state whose outgoing
* edge we couldn't match.
*
* If the state number is not known, this method returns -1.
*/
get offendingState() {
return this._offendingState;
}
setOffendingState(offendingState) {
this._offendingState = offendingState;
}
/**
* Gets the set of input symbols which could potentially follow the
* previously matched symbol at the time this exception was thrown.
*
* If the set of expected tokens is not known and could not be computed,
* this method returns `undefined`.
*
* @returns The set of token types that could potentially follow the current
* state in the ATN, or `undefined` if the information is not available.
*/
get expectedTokens() {
if (this._recognizer) {
return this._recognizer.atn.getExpectedTokens(this._offendingState, this.ctx);
}
return undefined;
}
/**
* Gets the {@link RuleContext} at the time this exception was thrown.
*
* If the context is not available, this method returns `undefined`.
*
* @returns The {@link RuleContext} at the time this exception was thrown.
* If the context is not available, this method returns `undefined`.
*/
get context() {
return this.ctx;
}
/**
* Gets the input stream which is the symbol source for the recognizer where
* this exception was thrown.
*
* If the input stream is not available, this method returns `undefined`.
*
* @returns The input stream which is the symbol source for the recognizer
* where this exception was thrown, or `undefined` if the stream is not
* available.
*/
get inputStream() {
return this.input;
}
getOffendingToken(recognizer) {
if (recognizer && recognizer !== this._recognizer) {
return undefined;
}
return this.offendingToken;
}
setOffendingToken(recognizer, offendingToken) {
if (recognizer === this._recognizer) {
this.offendingToken = offendingToken;
}
}
/**
* Gets the {@link Recognizer} where this exception occurred.
*
* If the recognizer is not available, this method returns `undefined`.
*
* @returns The recognizer where this exception occurred, or `undefined` if
* the recognizer is not available.
*/
get recognizer() {
return this._recognizer;
}
}
// Public CommonJS export for this module.
exports.RecognitionException = RecognitionException;
//# sourceMappingURL=RecognitionException.js.map

Some files were not shown because too many files have changed in this diff Show More