From 6b1004eaa827f1eba13443c58e28bf19a19770df Mon Sep 17 00:00:00 2001
From: Yaroslav Kishchenko
Date: Thu, 5 Nov 2020 16:57:55 +0300
Subject: [PATCH 01/52] [86] Extract class AST getter.

---
 test/baselines/semi/utils.py | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/test/baselines/semi/utils.py b/test/baselines/semi/utils.py
index 2fff4e05..68f1246c 100644
--- a/test/baselines/semi/utils.py
+++ b/test/baselines/semi/utils.py
@@ -30,7 +30,7 @@ def fill_extraction_opportunity(node: Union[Block, Statement]):
     return tuple(extraction_opportunity_list), block_statement_graph
 
 
-def get_method_ast(filename: str, class_name: str, method_name: str) -> AST:
+def get_class_ast(filename: str, class_name: str) -> Tuple[AST, Path]:
     current_directory = Path(__file__).absolute().parent
     filepath = current_directory / filename
     ast = AST.build_from_javalang(build_ast(str(filepath)))
@@ -41,9 +41,20 @@
         for node in ast.get_root().types
         if node.node_type == ASTNodeType.CLASS_DECLARATION and node.name == class_name
     )
+    except StopIteration:
+        raise RuntimeError(f"Failed to find class {class_name} in file {filepath}")
+
+    return ast.get_subtree(class_declaration), filepath
 
-    method_declaration = next(node for node in class_declaration.methods if node.name == method_name)
+
+def get_method_ast(filename: str, class_name: str, method_name: str) -> AST:
+    class_ast, filepath = get_class_ast(filename, class_name)
+
+    try:
+        method_declaration = next(node for node in class_ast.get_root().methods if node.name == method_name)
     except StopIteration:
         raise RuntimeError(f"Failed to find method {method_name} in class {class_name} in file {filepath}")
 
-    return ast.get_subtree(method_declaration)
+    return class_ast.get_subtree(method_declaration)
+
+

From bdf4d7f6e9cf006a58eac74d31323919b78761db Mon Sep 17 00:00:00 2001
From: Yaroslav Kishchenko
Date: Thu, 5 Nov 2020 17:18:09 +0300
Subject: [PATCH 02/52] [86] Split test helper to allow injecting constructor tests.
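
The split separates the AST lookup from the semantic assertion: _test_method
resolves a method AST by name, while _test_ast runs
extract_method_statements_semantic on any AST and compares the result with the
expected per-statement semantics. A test that already holds an AST subtree can
then call _test_ast directly; a hypothetical sketch (the constructor case
lands in a follow-up patch, and the expected semantics here are made up for
illustration):

    def test_first_constructor(self):
        ctor_ast = ...  # any AST subtree obtained elsewhere
        self._test_ast(ctor_ast, [StatementSemantic(used_methods={"init"})])
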
--- test/baselines/semi/test_extract_semantic.py | 56 +++++++++++--------- 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/test/baselines/semi/test_extract_semantic.py b/test/baselines/semi/test_extract_semantic.py index aadd7e2d..7848077d 100644 --- a/test/baselines/semi/test_extract_semantic.py +++ b/test/baselines/semi/test_extract_semantic.py @@ -4,27 +4,28 @@ from veniq.baselines.semi.extract_semantic import extract_method_statements_semantic from veniq.baselines.semi._common_types import StatementSemantic +from veniq.ast_framework import AST from .utils import objects_semantic, get_method_ast class ExtractStatementSemanticTestCase(TestCase): def test_block_method(self): - self._test_helper("block", [StatementSemantic(), objects_semantic("x"), StatementSemantic()]) + self._test_method("block", [StatementSemantic(), objects_semantic("x"), StatementSemantic()]) def test_for_cycle_method(self): - self._test_helper( + self._test_method( "forCycle", [objects_semantic("x", "i"), objects_semantic("x", "i", "result"), StatementSemantic()], ) def test_while_cycle_method(self): - self._test_helper("whileCycle", [objects_semantic("x"), objects_semantic("x"), StatementSemantic()]) + self._test_method("whileCycle", [objects_semantic("x"), objects_semantic("x"), StatementSemantic()]) def test_do_while_cycle_method(self): - self._test_helper("doWhileCycle", [objects_semantic("x"), objects_semantic("x"), StatementSemantic()]) + self._test_method("doWhileCycle", [objects_semantic("x"), objects_semantic("x"), StatementSemantic()]) def test_if_branching_method(self): - self._test_helper( + self._test_method( "ifBranching", [ objects_semantic("x"), @@ -38,12 +39,12 @@ def test_if_branching_method(self): ) def test_synchronized_block_method(self): - self._test_helper( + self._test_method( "synchronizedBlock", [objects_semantic("x"), objects_semantic("x"), StatementSemantic()] ) def test_switch_branching_method(self): - self._test_helper( + self._test_method( "switchBranching", [ objects_semantic("x"), @@ -55,7 +56,7 @@ def test_switch_branching_method(self): ) def test_try_block_method(self): - self._test_helper( + self._test_method( "tryBlock", [ StatementSemantic(), @@ -72,47 +73,47 @@ def test_try_block_method(self): ) def test_assert_statement_method(self): - self._test_helper("assertStatement", [objects_semantic("x")]) + self._test_method("assertStatement", [objects_semantic("x")]) def test_return_statement_method(self): - self._test_helper("returnStatement", [objects_semantic("x")]) + self._test_method("returnStatement", [objects_semantic("x")]) def test_expression_method(self): - self._test_helper("expression", [objects_semantic("x")]) + self._test_method("expression", [objects_semantic("x")]) def test_throw_statement_method(self): - self._test_helper("throwStatement", [objects_semantic("x")]) + self._test_method("throwStatement", [objects_semantic("x")]) def test_local_variable_declaration_method(self): - self._test_helper("localVariableDeclaration", [objects_semantic("x")]) + self._test_method("localVariableDeclaration", [objects_semantic("x")]) def test_break_statement_method(self): - self._test_helper("breakStatement", [StatementSemantic(), StatementSemantic(), StatementSemantic()]) + self._test_method("breakStatement", [StatementSemantic(), StatementSemantic(), StatementSemantic()]) def test_continue_statement_method(self): - self._test_helper( + self._test_method( "continueStatement", [StatementSemantic(), StatementSemantic(), StatementSemantic()] ) def 
test_local_method_call_method(self): - self._test_helper("localMethodCall", [StatementSemantic(used_methods={"localMethod"})]) + self._test_method("localMethodCall", [StatementSemantic(used_methods={"localMethod"})]) def test_object_method_call_method(self): - self._test_helper( + self._test_method( "objectMethodCall", [StatementSemantic(used_objects={"o"}, used_methods={"method"})] ) def test_nested_object_method(self): - self._test_helper("nestedObject", [StatementSemantic(used_objects={"o.x"})]) + self._test_method("nestedObject", [StatementSemantic(used_objects={"o.x"})]) def test_nested_object_method_call_method(self): - self._test_helper( + self._test_method( "nestedObjectMethodCall", [StatementSemantic(used_objects={"o.nestedObject"}, used_methods={"method"})], ) def test_several_statement_method(self): - self._test_helper( + self._test_method( "severalStatements", [ objects_semantic("x"), @@ -124,7 +125,7 @@ def test_several_statement_method(self): ) def test_deep_nesting_method(self): - self._test_helper( + self._test_method( "deepNesting", [ objects_semantic("i"), @@ -140,7 +141,7 @@ def test_deep_nesting_method(self): ) def test_complex_expressions_method(self): - self._test_helper( + self._test_method( "complexExpressions", [ objects_semantic("x", "y"), @@ -156,10 +157,10 @@ def test_complex_expressions_method(self): ) def test_multiline_statement_method(self): - self._test_helper("multilineStatement", [objects_semantic("x", "y", "o")]) + self._test_method("multilineStatement", [objects_semantic("x", "y", "o")]) def test_multiple_statements_per_line_method(self): - self._test_helper( + self._test_method( "multipleStatementsPerLine", [ StatementSemantic(used_methods={"localMethod"}, used_objects={"x"}), @@ -167,9 +168,12 @@ def test_multiple_statements_per_line_method(self): ], ) - def _test_helper(self, method_name: str, expected_statements_semantics: List[StatementSemantic]): + def _test_method(self, method_name: str, expected_statements_semantics: List[StatementSemantic]): method_ast = get_method_ast("SemanticExtractionTest.java", "SimpleMethods", method_name) - method_semantic = extract_method_statements_semantic(method_ast) + self._test_ast(method_ast, expected_statements_semantics) + + def _test_ast(self, ast: AST, expected_statements_semantics: List[StatementSemantic]): + method_semantic = extract_method_statements_semantic(ast) for ( comparison_index, (statement, actual_statement_semantic, expected_statement_semantic), From 50d60d5cb4f7daaba0a35ae8e89a0b4e520c36f1 Mon Sep 17 00:00:00 2001 From: Yaroslav Kishchenko Date: Thu, 5 Nov 2020 17:22:33 +0300 Subject: [PATCH 03/52] [86] Assume constructors in extraction semantic. Add test. 
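
Constructors cannot be looked up by name the way methods are, so the new
get_constructor_ast helper selects them by 1-based position with
itertools.islice. A minimal sketch of the lookup it performs (assuming
class_ast.get_root().constructors yields constructor declarations in source
order; nth_constructor is an illustrative name, not part of the patch):

    from itertools import islice

    def nth_constructor(class_ast, n):
        # n is 1-based, so skip the first n - 1 declarations;
        # next() raises StopIteration if fewer than n constructors exist
        return next(islice(class_ast.get_root().constructors, n - 1, None))

On the extraction side it is enough to add CONSTRUCTOR_DECLARATION next to
METHOD_DECLARATION in the set of node types whose bodies own statements.
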
--- test/baselines/semi/SemanticExtractionTest.java | 4 ++++ test/baselines/semi/test_extract_semantic.py | 13 ++++++++++++- test/baselines/semi/utils.py | 12 ++++++++++++ veniq/baselines/semi/extract_semantic.py | 1 + 4 files changed, 29 insertions(+), 1 deletion(-) diff --git a/test/baselines/semi/SemanticExtractionTest.java b/test/baselines/semi/SemanticExtractionTest.java index c12c670a..9807d1fd 100644 --- a/test/baselines/semi/SemanticExtractionTest.java +++ b/test/baselines/semi/SemanticExtractionTest.java @@ -155,4 +155,8 @@ void multilineStatement(int x, int y) { void multipleStatementsPerLine(int x, int y) { localMethod(x); localMethod(y); } + + SimpleMethods() { // first constructor + init(); + } } diff --git a/test/baselines/semi/test_extract_semantic.py b/test/baselines/semi/test_extract_semantic.py index 7848077d..3b305dd4 100644 --- a/test/baselines/semi/test_extract_semantic.py +++ b/test/baselines/semi/test_extract_semantic.py @@ -5,7 +5,7 @@ from veniq.baselines.semi.extract_semantic import extract_method_statements_semantic from veniq.baselines.semi._common_types import StatementSemantic from veniq.ast_framework import AST -from .utils import objects_semantic, get_method_ast +from .utils import objects_semantic, get_method_ast, get_constructor_ast class ExtractStatementSemanticTestCase(TestCase): @@ -168,10 +168,21 @@ def test_multiple_statements_per_line_method(self): ], ) + def test_constructor(self): + self._test_constructor(1, [StatementSemantic(used_methods={"init"})]) + def _test_method(self, method_name: str, expected_statements_semantics: List[StatementSemantic]): method_ast = get_method_ast("SemanticExtractionTest.java", "SimpleMethods", method_name) self._test_ast(method_ast, expected_statements_semantics) + def _test_constructor( + self, constructor_index: int, expected_statements_semantics: List[StatementSemantic] + ): + constructor_ast = get_constructor_ast( + "SemanticExtractionTest.java", "SimpleMethods", constructor_index + ) + self._test_ast(constructor_ast, expected_statements_semantics) + def _test_ast(self, ast: AST, expected_statements_semantics: List[StatementSemantic]): method_semantic = extract_method_statements_semantic(ast) for ( diff --git a/test/baselines/semi/utils.py b/test/baselines/semi/utils.py index 68f1246c..a6c31f3b 100644 --- a/test/baselines/semi/utils.py +++ b/test/baselines/semi/utils.py @@ -1,3 +1,4 @@ +from itertools import islice from pathlib import Path from typing import List, Tuple, Union @@ -58,3 +59,14 @@ def get_method_ast(filename: str, class_name: str, method_name: str) -> AST: return class_ast.get_subtree(method_declaration) +def get_constructor_ast(filename: str, class_name: str, constructor_index: int) -> AST: + class_ast, filepath = get_class_ast(filename, class_name) + + try: + constructor_declaration = next(islice(class_ast.get_root().constructors, constructor_index - 1, None)) + except StopIteration: + raise RuntimeError( + f"Failed to find {constructor_index}th constructor in class {class_name} in file {filepath}" + ) + + return class_ast.get_subtree(constructor_declaration) diff --git a/veniq/baselines/semi/extract_semantic.py b/veniq/baselines/semi/extract_semantic.py index fd73ffce..bcecae9d 100644 --- a/veniq/baselines/semi/extract_semantic.py +++ b/veniq/baselines/semi/extract_semantic.py @@ -63,6 +63,7 @@ def _on_statement_entering(self, statement: Statement) -> None: # "If" statements is handled separately in _on_block_entering due to "else if" construction if extraction_statement.node_type in { 
             ASTNodeType.METHOD_DECLARATION,
+            ASTNodeType.CONSTRUCTOR_DECLARATION,
             ASTNodeType.BLOCK_STATEMENT,
             ASTNodeType.TRY_STATEMENT,
             ASTNodeType.IF_STATEMENT,

From 1f22df453808c0fab009e1db686e2f811ae35328 Mon Sep 17 00:00:00 2001
From: Yaroslav Kishchenko
Date: Thu, 12 Nov 2020 13:48:07 +0300
Subject: [PATCH 04/52] [62] Make creation of extraction opportunities faster.

---
 .../semi/create_extraction_opportunities.py | 216 +++++++++++++-----
 1 file changed, 162 insertions(+), 54 deletions(-)

diff --git a/veniq/baselines/semi/create_extraction_opportunities.py b/veniq/baselines/semi/create_extraction_opportunities.py
index d068e6b2..e2d8e238 100644
--- a/veniq/baselines/semi/create_extraction_opportunities.py
+++ b/veniq/baselines/semi/create_extraction_opportunities.py
@@ -1,4 +1,5 @@
-from typing import Dict, List, Optional
+from collections import defaultdict
+from typing import Dict, List, Optional, NamedTuple
 
 from veniq.ast_framework import AST
 from .extract_semantic import extract_method_statements_semantic
@@ -9,66 +10,173 @@ def create_extraction_opportunities(
     statements_semantic: Dict[Statement, StatementSemantic]
 ) -> List[ExtractionOpportunity]:
-    extraction_opportunities: List[ExtractionOpportunity] = []
-    for step in range(1, len(statements_semantic) + 1):
-        for extraction_opportunity in _ExtractionOpportunityIterator(statements_semantic, step):
-            if extraction_opportunity and extraction_opportunity not in extraction_opportunities:
-                extraction_opportunities.append(extraction_opportunity)
-
-    return extraction_opportunities
-
-
-class _ExtractionOpportunityIterator:
-    def __init__(self, statements_semantic: Dict[Statement, StatementSemantic], step: int):
-        self._statements_semantic = statements_semantic
-        self._statements = list(statements_semantic.keys())
-        self._step = step
-
-        self._statement_index = 0
-
-    def __iter__(self):
-        return self
-
-    def __next__(self) -> ExtractionOpportunity:
-        if self._statement_index >= len(self._statements_semantic):
-            raise StopIteration
+    statements = list(statements_semantic.keys())
+    semantics = list(statements_semantic.values())
+
+    statements_similarity_provider = _StatementsSimilarityProvider(semantics)
+    statements_ranges = _StatementsRanges(statements)
+
+    extraction_opportunities = statements_ranges.create_initial_ranges(statements_similarity_provider)
+
+    similarity_gaps = statements_similarity_provider.get_similarity_gaps()
+    for gap in sorted(similarity_gaps.keys()):
+        # Create a separate list for the new extraction opportunities created
+        # while merging statement ranges for a **fixed** similarity gap,
+        # because those new opportunities may be overwritten below.
+        new_extraction_opportunities: List[ExtractionOpportunity] = []
+        for statement_index in similarity_gaps[gap]:
+            new_opportunity = statements_ranges.merge_ranges(statement_index, statement_index + gap)
+
+            # If, for a fixed similarity gap, a new extraction opportunity starts with the same
+            # statements as the previous one, then the last range of the first similarity gap is
+            # the first range of the next one, i.e. the gaps overlap, and in the final result both
+            # must end up in the same extraction opportunity. Notice that the opportunity resulting
+            # from the second merge simply extends the previous one, so we can take it instead of
+            # the previous opportunity.
+            if new_extraction_opportunities and new_extraction_opportunities[-1][0] == new_opportunity[0]:
+                new_extraction_opportunities[-1] = new_opportunity
+            else:
+                new_extraction_opportunities.append(new_opportunity)
+        extraction_opportunities.extend(new_extraction_opportunities)
 
-        fails_qty = 0
-        first_statement_index = self._statement_index
-        last_statement_index: Optional[int] = None
+    extraction_opportunities = [
+        tuple(filter(lambda node: not node.is_fake, extraction_opportunity))
+        for extraction_opportunity in extraction_opportunities
+        if any(not node.is_fake for node in extraction_opportunity)
+    ]
 
-        self._statement_index += 1
+    return extraction_opportunities
 
-        while self._statement_index < len(self._statements) and last_statement_index is None:
-            previous_statement_semantic = self._get_statement_semantic(self._statement_index - fails_qty - 1)
-            current_statement_semantic = self._get_statement_semantic(self._statement_index)
-            if current_statement_semantic.is_similar(previous_statement_semantic):
-                fails_qty = 0
-                self._statement_index += 1
+
+class _StatementsSimilarityProvider:
+    def __init__(self, statements_semantic: List[StatementSemantic]):
+        self._steps_to_next_similar: List[Optional[int]] = [
+            self._calculate_steps_to_next_similar(statements_semantic, statement_index)
+            for statement_index in range(len(statements_semantic))
+        ]
+
+    def has_next_similar_statement(self, statement_index: int) -> bool:
+        return self._steps_to_next_similar[statement_index] is not None
+
+    def get_steps_to_next_similar_statement(self, statement_index: int) -> int:
+        step = self._steps_to_next_similar[statement_index]
+        if step is None:
+            raise ValueError(f"All statements after {statement_index}th are not similar to it.")
+
+        return step
+
+    def get_similarity_gaps(self) -> Dict[int, List[int]]:
+        """
+        Finds all statements whose next similar statement does not directly follow them.
+        Returns a dict with steps as keys and lists of corresponding statement indexes as values.
+        For example, if the next similar statement to the 1st is the 3rd, to the 2nd is the 4th,
+        and to the 5th is the 8th, then the output will be: {2: [1, 2], 3: [5]}.
+        NOTICE: all statement index lists are sorted.
+        """
+        similarity_gaps_by_size: Dict[int, List[int]] = defaultdict(list)
+        for statement_index, step in enumerate(self._steps_to_next_similar):
+            if step and step > 1:
+                similarity_gaps_by_size[step].append(statement_index)
+
+        return similarity_gaps_by_size
+
+    @staticmethod
+    def _calculate_steps_to_next_similar(
+        statements_semantic: List[StatementSemantic], statement_index: int
+    ) -> Optional[int]:
+        step = 1
+        current_statement = statements_semantic[statement_index]
+        while statement_index + step < len(statements_semantic):
+            if current_statement.is_similar(statements_semantic[statement_index + step]):
+                return step
+            step += 1
+
+        return None
+
+
+class _StatementsRanges:
+    """
+    Represents a division of a sequence of statements into non-overlapping sorted ranges.
+    """
+
+    class _Range(NamedTuple):
+        begin: int
+        end: int  # ! NOTICE: Index past the last element in a range.
+
+    def __init__(self, statements: List[Statement]):
+        self._statements = statements
+        self._ranges: List[_StatementsRanges._Range] = []
+
+    def create_initial_ranges(
+        self, statements_similarity: _StatementsSimilarityProvider
+    ) -> List[ExtractionOpportunity]:
+        """
+        An initial statement range is a continuous range of statements where each statement,
+        except the first one, is similar to the previous one.
+        """
+
+        extraction_opportunities: List[ExtractionOpportunity] = []
+
+        range_begin = 0
+        range_end = 1  # ! NOTICE: Index past the last element in a range.
+
+        for index, statement in enumerate(self._statements):
+            if (
+                statements_similarity.has_next_similar_statement(index)
+                and statements_similarity.get_steps_to_next_similar_statement(index) == 1
+            ):
+                range_end += 1
             else:
-                fails_qty += 1
-                if fails_qty == self._step:
-                    self._statement_index -= self._step - 1
-                    last_statement_index = self._statement_index - 1
-                else:
-                    self._statement_index += 1
-
-        # self._statement_index has passed over self._statements
-        # put last_statement_index to the last statement before sequence of failures
-        if last_statement_index is None:
-            last_statement_index = len(self._statements) - fails_qty - 1
-
-        return tuple(
-            self._statements[i]
-            for i in range(first_statement_index, last_statement_index + 1)
-            if not self._statements[i].is_fake
+                self._ranges.append(self._Range(range_begin, range_end))
+                extraction_opportunities.append(tuple(self._statements[range_begin:range_end]))
+                range_begin = range_end
+                range_end += 1
+
+        if range_begin < len(self._statements):
+            self._ranges.append(self._Range(range_begin, len(self._statements)))
+            extraction_opportunities.append(tuple(self._statements[range_begin:]))
+
+        return extraction_opportunities
+
+    def merge_ranges(
+        self, first_range_statement_index: int, last_range_statement_index: int
+    ) -> ExtractionOpportunity:
+        """
+        Identifies the first and last ranges by the given statement indexes and
+        merges those two and all other ranges between them.
+        Returns the statements from the newly created range.
+        """
+        first_range_index = self._get_range_index(first_range_statement_index)
+        last_range_index = self._get_range_index(last_range_statement_index)
+
+        first_range = self._ranges[first_range_index]
+        last_range = self._ranges[last_range_index]
+
+        new_range = self._Range(first_range.begin, last_range.end)
+        self._ranges[first_range_index:last_range_index + 1] = [new_range]
+
+        return tuple(self._statements[new_range.begin:new_range.end])
+
+    def _get_range_index(self, statement_index: int) -> int:
+        if not self._ranges:
+            raise ValueError("No ranges were created.")
+
+        smallest_index_in_ranges = self._ranges[0].begin
+        if statement_index < smallest_index_in_ranges:
+            raise ValueError(
+                f"Element is before all the ranges. Element index = {statement_index}, "
+                f"smallest index among elements in ranges = {smallest_index_in_ranges}."
+            )
+
+        for range_index, range in enumerate(self._ranges):
+            if statement_index < range.end:
+                return range_index
+
+        largest_index_in_ranges = self._ranges[-1].end - 1
+        raise ValueError(
+            f"Element is past all the ranges. Element index = {statement_index}, "
+            f"greatest index among elements in ranges = {largest_index_in_ranges}."
) - def _get_statement_semantic(self, statement_index: int) -> StatementSemantic: - current_statement = self._statements[statement_index] - return self._statements_semantic[current_statement] - def _print_extraction_opportunities(method_ast: AST, filepath: str, class_name: str, method_name: str): statements_semantic = extract_method_statements_semantic(method_ast) From bc6dd8a6a5b7a51f4aca7b17421881fe14cf7477 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Mon, 23 Nov 2020 18:07:58 +0300 Subject: [PATCH 05/52] ADd refMIner execution --- refMiner.py | 79 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 refMiner.py diff --git a/refMiner.py b/refMiner.py new file mode 100644 index 00000000..6d28237c --- /dev/null +++ b/refMiner.py @@ -0,0 +1,79 @@ +from collections import OrderedDict +from functools import partial +from pathlib import Path +import os +import subprocess + +from pebble import ProcessPool +from tqdm import tqdm + +dir_to_analyze = [('rishabh115/Interview-Questions', 102), ('Snailclimb/JavaGuide', 4606), ('MisterBooo/LeetCodeAnimation', 8288), ('lihengming/spring-boot-api-project-seed', 30851), ('ouchuangxin/leave-sample', 51440), ('awsdocs/aws-lambda-developer-guide', 52103), ('GoogleLLP/SuperMarket', 66100), ('JunzhouLiu/BILIBILI-HELPER', 69551), ('arawn/building-modular-monoliths-using-spring', 81334), ('EiletXie/cloud2020', 86282), ('spring-projects/spring-petclinic', 89351), ('zo0r/react-native-push-notification', 104678), ('Snailclimb/guide-rpc-framework', 112343), ('medcl/elasticsearch-analysis-ik', 114714), ('lenve/vhr', 144624), ('react-native-community/react-native-video', 175864), ('intel-isl/OpenBot', 186423), ('nic-delhi/AarogyaSetu_Android', 202079), ('ityouknow/spring-boot-examples', 229057), ('fuzhengwei/itstack-demo-design', 244164), ('trojan-gfw/igniter', 250506), ('newbee-ltd/newbee-mall', 266433), ('alibaba/taokeeper', 305252), ('kekingcn/kkFileView', 313775), ('qq53182347/liugh-parent', 379931), ('Tencent/APIJSON', 393850), ('alibaba/COLA', 396781), ('firebase/quickstart-android', 435428), ('cats-oss/android-gpuimage', 465871), ('xuxueli/xxl-job', 470849), ('TheAlgorithms/Java', 506454), ('react-native-community/react-native-camera', 512604), ('google/grafika', 553156), ('termux/termux-app', 611291), ('DuGuQiuBai/Java', 652678), ('android/user-interface-samples', 665209), ('kdn251/interviews', 674404), ('xkcoding/spring-boot-demo', 675198), ('airbnb/lottie-android', 682626), ('Red5/red5-server', 715723), ('feast-dev/feast', 744875), ('elunez/eladmin', 745583), ('zhisheng17/flink-learning', 753262), ('careercup/CtCI-6th-Edition', 770012), ('jitsi/jitsi-videobridge', 806428), ('Notsfsssf/Pix-EzViewer', 875003), ('wix/react-native-navigation', 897164), ('yuliskov/SmartTubeNext', 911945), ('google/exposure-notifications-android', 929237), ('alibaba/easyexcel', 1041929), ('bigbluebutton/bigbluebutton', 1085022), ('mission-peace/interview', 1111425), ('square/retrofit', 1199658), ('alibaba/spring-cloud-alibaba', 1266374), ('MalitsPlus/ShizuruNotes', 1275918), ('zhangdaiscott/jeecg-boot', 1292101), ('cabaletta/baritone', 1308293), ('williamfiset/Algorithms', 1316427), ('PhilJay/MPAndroidChart', 1317015), ('dromara/soul', 1333632), ('halo-dev/halo', 1341126), ('CarGuo/GSYVideoPlayer', 1423273), ('zuihou/zuihou-admin-cloud', 1424116), ('FabricMC/fabric', 1491033), ('arduino/Arduino', 1524590), ('paascloud/paascloud-master', 1551821), ('square/okhttp', 1793311), ('metersphere/metersphere', 1878372), 
('YunaiV/SpringBoot-Labs', 1920961), ('intuit/karate', 1943008), ('didi/kafka-manager', 1964652), ('YunaiV/onemall', 2001309), ('alibaba/arthas', 2025697), ('GeyserMC/Geyser', 2026042), ('seven332/EhViewer', 2038173), ('dromara/hmily', 2056099), ('Netflix/eureka', 2089056), ('hyb1996/Auto.js', 2093478), ('TeamNewPipe/NewPipe', 2096544), ('eclipse/paho.mqtt.java', 2310603), ('Blankj/AndroidUtilCode', 2336083), ('alibaba/DataX', 2337300), ('GoogleCloudPlatform/DataflowTemplates', 2390881), ('zxing/zxing', 2394547), ('skylot/jadx', 2398208), ('gedoor/MyBookshelf', 2433150), ('ctripcorp/apollo', 2484217), ('open-telemetry/opentelemetry-java', 2497056), ('elastic/elasticsearch-hadoop', 2685341), ('macrozheng/mall', 2734011), ('macrozheng/mall-swarm', 2743678), ('antlr/antlr4', 2942372), ('bjmashibing/InternetArchitect', 3023831), ('mockito/mockito', 3100208), ('Anuken/Mindustry', 3165740), ('Tencent/QMUI_Android', 3283264), ('iluwatar/java-design-patterns', 3372026), ('spring-projects/spring-data-examples', 3434461), ('alibaba/Sentinel', 3515353), ('MinecraftForge/MinecraftForge', 3561793), ('macrozheng/mall-learning', 3634489), ('nextcloud/android', 3900547), ('CloudburstMC/Nukkit', 3915530), ('linlinjava/litemall', 3935420), ('alibaba/canal', 4096817), ('awslabs/djl', 4104489), ('apache/incubator-dolphinscheduler', 4344024), ('material-components/material-components-android', 4404295), ('seata/seata', 4609792), ('alibaba/Alink', 4875343), ('alibaba/nacos', 5292544), ('apache/zookeeper', 6622995), ('eclipse/eclipse.jdt.ls', 6635079), ('thingsboard/thingsboard', 6659511), ('alibaba/fastjson', 7307675), ('apache/jmeter', 8936436), ('eclipse/che', 8942195), ('grpc/grpc-java', 9624796), ('apache/shardingsphere', 9863162), ('javaparser/javaparser', 10166521), ('apache/dubbo', 10205555), ('apache/skywalking', 10503947), ('google/ExoPlayer', 11255771), ('runelite/runelite', 12005738), ('eclipse/milo', 12661330), ('Graylog2/graylog2-server', 12889377), ('linkedin/dagli', 13466753), ('eclipse/eclipse-collections', 14274298), ('checkstyle/checkstyle', 14632745), ('oracle/helidon', 14730527), ('eclipse/elk', 15312689), ('netty/netty', 16991168), ('quarkusio/quarkus', 18719448), ('eugenp/tutorials', 19116189), ('jitsi/jitsi', 19250523), ('spring-projects/spring-boot', 19700970), ('alibaba/druid', 19762494), ('dbeaver/dbeaver', 20613997), ('eclipse/jetty.project', 21207188), ('apache/pulsar', 22404445), ('google/guava', 26607422), ('ballerina-platform/ballerina-lang', 27349222), ('OpenAPITools/openapi-generator', 28150393), ('SonarSource/sonarqube', 30102895), ('eclipse/openj9', 31436930), ('openhab/openhab-addons', 36772862), ('eclipse/deeplearning4j', 37688626), ('spring-projects/spring-framework', 42944872), ('prestodb/presto', 43730138), ('bazelbuild/bazel', 44548383), ('androidx/androidx', 46353691), ('oracle/graal', 63148282), ('apache/flink', 64700358), ('NationalSecurityAgency/ghidra', 67423006), ('apache/hadoop', 94700218), ('elastic/elasticsearch', 112288504), ('apache/netbeans', 297452969), ('Azure/azure-sdk-for-java', 471640138)] + +new_dataset = set() +folder_to_analyze = r'/hdd/new_dataset/RefactoringMiner/RefactoringMiner/build/distributions/RefactoringMiner-2.0.1/bin/01' + +# for folder in Path(folder_to_analyze).iterdir(): +# if folder.is_dir(): +# # print(folder) +# for subfolder in folder.iterdir(): +# if subfolder.is_dir(): +# dir = subfolder.parts[-2] + '/' + subfolder.parts[-1] +# #dir = subfolder.parts[-1] +# new_dataset.add(dir) + + +# +# old_dataset = set() +# 
print('##############################################') +# for folder in Path('/dataset/01').iterdir(): +# if folder.is_dir(): +# # print(folder) +# for subfolder in folder.iterdir(): +# if subfolder.is_dir(): +# dir = subfolder.parts[-2] + '/' + subfolder.parts[-1] +# old_dataset.add(dir) +# # print(dir) +# +# miss = old_dataset.difference(new_dataset) +# os.chdir(Path('/hdd/new_dataset/02')) +# for dir in miss: +# repo_url = f'https://github.com/{dir}.git' +# print(repo_url) +# subprocess.Popen(['git', 'clone', repo_url]) +# # print(miss) + + +def run_ref_miner(folder: str): + p = Path(folder) + f_err = open(f"{'_'.join(p.parts)}.err.txt") + f_out = open(f"{'_'.join(p.parts)}.out.txt") + command = ['./RefactoringMiner', '-a', f"01/{folder}"] + print(command) + subprocess.Popen(command, stderr=f_err, stdout=f_out).wait() + + +system_cores_qty = 4 +# dir_to_analyze = {} +# for x in new_dataset: +# java_files = [x.stat().st_size for x in Path(folder_to_analyze, x).glob('**/*.java')] +# sum_size = sum(java_files) +# dir_to_analyze[x] = sum_size + +# dir_to_analyze = OrderedDict(sorted(dir_to_analyze.items(), key=lambda x: x[1])) + +dir_to_analyze = [x[0] for x in dir_to_analyze] +dir_to_analyze = [dir_to_analyze[0]] + +with ProcessPool(system_cores_qty) as executor: + # p_analyze = partial( + # run_ref_miner, + # 1 + # ) + future = executor.map(run_ref_miner, dir_to_analyze) + result = future.result() + for filename in tqdm(dir_to_analyze): + next(result) + # for folder in new_dataset: + # command = ['RefactorMiner', '-a', folder] + # # subprocess.Popen(command).wait() + # print(command) From 08af949438375c289b196707f394959d2c8f3dda Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Wed, 25 Nov 2020 17:57:48 +0300 Subject: [PATCH 06/52] ADd similarity --- refMiner.py | 79 ------------------------ test/similarity/__init__.py | 0 test/similarity/test_similarity.py | 27 ++++++++ veniq/dataset_collection/augmentation.py | 12 +++- veniq/dataset_mining/code_similarity.py | 58 +++++++++++++++++ veniq/dataset_mining/refMiner.py | 46 ++++++++++++++ 6 files changed, 140 insertions(+), 82 deletions(-) delete mode 100644 refMiner.py create mode 100644 test/similarity/__init__.py create mode 100644 test/similarity/test_similarity.py create mode 100644 veniq/dataset_mining/code_similarity.py create mode 100644 veniq/dataset_mining/refMiner.py diff --git a/refMiner.py b/refMiner.py deleted file mode 100644 index 6d28237c..00000000 --- a/refMiner.py +++ /dev/null @@ -1,79 +0,0 @@ -from collections import OrderedDict -from functools import partial -from pathlib import Path -import os -import subprocess - -from pebble import ProcessPool -from tqdm import tqdm - -dir_to_analyze = [('rishabh115/Interview-Questions', 102), ('Snailclimb/JavaGuide', 4606), ('MisterBooo/LeetCodeAnimation', 8288), ('lihengming/spring-boot-api-project-seed', 30851), ('ouchuangxin/leave-sample', 51440), ('awsdocs/aws-lambda-developer-guide', 52103), ('GoogleLLP/SuperMarket', 66100), ('JunzhouLiu/BILIBILI-HELPER', 69551), ('arawn/building-modular-monoliths-using-spring', 81334), ('EiletXie/cloud2020', 86282), ('spring-projects/spring-petclinic', 89351), ('zo0r/react-native-push-notification', 104678), ('Snailclimb/guide-rpc-framework', 112343), ('medcl/elasticsearch-analysis-ik', 114714), ('lenve/vhr', 144624), ('react-native-community/react-native-video', 175864), ('intel-isl/OpenBot', 186423), ('nic-delhi/AarogyaSetu_Android', 202079), ('ityouknow/spring-boot-examples', 229057), ('fuzhengwei/itstack-demo-design', 244164), 
('trojan-gfw/igniter', 250506), ('newbee-ltd/newbee-mall', 266433), ('alibaba/taokeeper', 305252), ('kekingcn/kkFileView', 313775), ('qq53182347/liugh-parent', 379931), ('Tencent/APIJSON', 393850), ('alibaba/COLA', 396781), ('firebase/quickstart-android', 435428), ('cats-oss/android-gpuimage', 465871), ('xuxueli/xxl-job', 470849), ('TheAlgorithms/Java', 506454), ('react-native-community/react-native-camera', 512604), ('google/grafika', 553156), ('termux/termux-app', 611291), ('DuGuQiuBai/Java', 652678), ('android/user-interface-samples', 665209), ('kdn251/interviews', 674404), ('xkcoding/spring-boot-demo', 675198), ('airbnb/lottie-android', 682626), ('Red5/red5-server', 715723), ('feast-dev/feast', 744875), ('elunez/eladmin', 745583), ('zhisheng17/flink-learning', 753262), ('careercup/CtCI-6th-Edition', 770012), ('jitsi/jitsi-videobridge', 806428), ('Notsfsssf/Pix-EzViewer', 875003), ('wix/react-native-navigation', 897164), ('yuliskov/SmartTubeNext', 911945), ('google/exposure-notifications-android', 929237), ('alibaba/easyexcel', 1041929), ('bigbluebutton/bigbluebutton', 1085022), ('mission-peace/interview', 1111425), ('square/retrofit', 1199658), ('alibaba/spring-cloud-alibaba', 1266374), ('MalitsPlus/ShizuruNotes', 1275918), ('zhangdaiscott/jeecg-boot', 1292101), ('cabaletta/baritone', 1308293), ('williamfiset/Algorithms', 1316427), ('PhilJay/MPAndroidChart', 1317015), ('dromara/soul', 1333632), ('halo-dev/halo', 1341126), ('CarGuo/GSYVideoPlayer', 1423273), ('zuihou/zuihou-admin-cloud', 1424116), ('FabricMC/fabric', 1491033), ('arduino/Arduino', 1524590), ('paascloud/paascloud-master', 1551821), ('square/okhttp', 1793311), ('metersphere/metersphere', 1878372), ('YunaiV/SpringBoot-Labs', 1920961), ('intuit/karate', 1943008), ('didi/kafka-manager', 1964652), ('YunaiV/onemall', 2001309), ('alibaba/arthas', 2025697), ('GeyserMC/Geyser', 2026042), ('seven332/EhViewer', 2038173), ('dromara/hmily', 2056099), ('Netflix/eureka', 2089056), ('hyb1996/Auto.js', 2093478), ('TeamNewPipe/NewPipe', 2096544), ('eclipse/paho.mqtt.java', 2310603), ('Blankj/AndroidUtilCode', 2336083), ('alibaba/DataX', 2337300), ('GoogleCloudPlatform/DataflowTemplates', 2390881), ('zxing/zxing', 2394547), ('skylot/jadx', 2398208), ('gedoor/MyBookshelf', 2433150), ('ctripcorp/apollo', 2484217), ('open-telemetry/opentelemetry-java', 2497056), ('elastic/elasticsearch-hadoop', 2685341), ('macrozheng/mall', 2734011), ('macrozheng/mall-swarm', 2743678), ('antlr/antlr4', 2942372), ('bjmashibing/InternetArchitect', 3023831), ('mockito/mockito', 3100208), ('Anuken/Mindustry', 3165740), ('Tencent/QMUI_Android', 3283264), ('iluwatar/java-design-patterns', 3372026), ('spring-projects/spring-data-examples', 3434461), ('alibaba/Sentinel', 3515353), ('MinecraftForge/MinecraftForge', 3561793), ('macrozheng/mall-learning', 3634489), ('nextcloud/android', 3900547), ('CloudburstMC/Nukkit', 3915530), ('linlinjava/litemall', 3935420), ('alibaba/canal', 4096817), ('awslabs/djl', 4104489), ('apache/incubator-dolphinscheduler', 4344024), ('material-components/material-components-android', 4404295), ('seata/seata', 4609792), ('alibaba/Alink', 4875343), ('alibaba/nacos', 5292544), ('apache/zookeeper', 6622995), ('eclipse/eclipse.jdt.ls', 6635079), ('thingsboard/thingsboard', 6659511), ('alibaba/fastjson', 7307675), ('apache/jmeter', 8936436), ('eclipse/che', 8942195), ('grpc/grpc-java', 9624796), ('apache/shardingsphere', 9863162), ('javaparser/javaparser', 10166521), ('apache/dubbo', 10205555), ('apache/skywalking', 10503947), 
('google/ExoPlayer', 11255771), ('runelite/runelite', 12005738), ('eclipse/milo', 12661330), ('Graylog2/graylog2-server', 12889377), ('linkedin/dagli', 13466753), ('eclipse/eclipse-collections', 14274298), ('checkstyle/checkstyle', 14632745), ('oracle/helidon', 14730527), ('eclipse/elk', 15312689), ('netty/netty', 16991168), ('quarkusio/quarkus', 18719448), ('eugenp/tutorials', 19116189), ('jitsi/jitsi', 19250523), ('spring-projects/spring-boot', 19700970), ('alibaba/druid', 19762494), ('dbeaver/dbeaver', 20613997), ('eclipse/jetty.project', 21207188), ('apache/pulsar', 22404445), ('google/guava', 26607422), ('ballerina-platform/ballerina-lang', 27349222), ('OpenAPITools/openapi-generator', 28150393), ('SonarSource/sonarqube', 30102895), ('eclipse/openj9', 31436930), ('openhab/openhab-addons', 36772862), ('eclipse/deeplearning4j', 37688626), ('spring-projects/spring-framework', 42944872), ('prestodb/presto', 43730138), ('bazelbuild/bazel', 44548383), ('androidx/androidx', 46353691), ('oracle/graal', 63148282), ('apache/flink', 64700358), ('NationalSecurityAgency/ghidra', 67423006), ('apache/hadoop', 94700218), ('elastic/elasticsearch', 112288504), ('apache/netbeans', 297452969), ('Azure/azure-sdk-for-java', 471640138)] - -new_dataset = set() -folder_to_analyze = r'/hdd/new_dataset/RefactoringMiner/RefactoringMiner/build/distributions/RefactoringMiner-2.0.1/bin/01' - -# for folder in Path(folder_to_analyze).iterdir(): -# if folder.is_dir(): -# # print(folder) -# for subfolder in folder.iterdir(): -# if subfolder.is_dir(): -# dir = subfolder.parts[-2] + '/' + subfolder.parts[-1] -# #dir = subfolder.parts[-1] -# new_dataset.add(dir) - - -# -# old_dataset = set() -# print('##############################################') -# for folder in Path('/dataset/01').iterdir(): -# if folder.is_dir(): -# # print(folder) -# for subfolder in folder.iterdir(): -# if subfolder.is_dir(): -# dir = subfolder.parts[-2] + '/' + subfolder.parts[-1] -# old_dataset.add(dir) -# # print(dir) -# -# miss = old_dataset.difference(new_dataset) -# os.chdir(Path('/hdd/new_dataset/02')) -# for dir in miss: -# repo_url = f'https://github.com/{dir}.git' -# print(repo_url) -# subprocess.Popen(['git', 'clone', repo_url]) -# # print(miss) - - -def run_ref_miner(folder: str): - p = Path(folder) - f_err = open(f"{'_'.join(p.parts)}.err.txt") - f_out = open(f"{'_'.join(p.parts)}.out.txt") - command = ['./RefactoringMiner', '-a', f"01/{folder}"] - print(command) - subprocess.Popen(command, stderr=f_err, stdout=f_out).wait() - - -system_cores_qty = 4 -# dir_to_analyze = {} -# for x in new_dataset: -# java_files = [x.stat().st_size for x in Path(folder_to_analyze, x).glob('**/*.java')] -# sum_size = sum(java_files) -# dir_to_analyze[x] = sum_size - -# dir_to_analyze = OrderedDict(sorted(dir_to_analyze.items(), key=lambda x: x[1])) - -dir_to_analyze = [x[0] for x in dir_to_analyze] -dir_to_analyze = [dir_to_analyze[0]] - -with ProcessPool(system_cores_qty) as executor: - # p_analyze = partial( - # run_ref_miner, - # 1 - # ) - future = executor.map(run_ref_miner, dir_to_analyze) - result = future.result() - for filename in tqdm(dir_to_analyze): - next(result) - # for folder in new_dataset: - # command = ['RefactorMiner', '-a', folder] - # # subprocess.Popen(command).wait() - # print(command) diff --git a/test/similarity/__init__.py b/test/similarity/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/test/similarity/test_similarity.py b/test/similarity/test_similarity.py new file mode 100644 index 00000000..47c192fa 
--- /dev/null +++ b/test/similarity/test_similarity.py @@ -0,0 +1,27 @@ +from pathlib import Path +from unittest import TestCase + +from dataset_mining.code_similarity import is_similar_functions + + +class TestSimilarity(TestCase): + current_directory = Path(__file__).absolute().parent + + def test_is_similar(self): + is_similar = is_similar_functions( + str(Path(self.current_directory, 'before\\EduStepicConnector.java')), + str(Path(self.current_directory, 'after\\EduStepicConnector.java')), + [142, 153], + [159, 171] + ) + self.assertEqual(is_similar, True) + + def test_is_not_similar(self): + # real EM, but too many changes + is_similar = is_similar_functions( + str(Path(self.current_directory, 'before\\FixedMembershipToken.java')), + str(Path(self.current_directory, 'after\\FixedMembershipToken.java')), + [55, 88], + [73, 82] + ) + self.assertEqual(is_similar, False) \ No newline at end of file diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index fd42df4b..6dc09c10 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -278,7 +278,8 @@ def insert_code_with_new_file_creation( invocation_node: ASTNode, file_path: Path, output_path: Path, - dict_original_invocations: Dict[str, List[ASTNode]] + dict_original_invocations: Dict[str, List[ASTNode]], + source_filepath: str ) -> Dict[str, Any]: """ If invocations of class methods were found, @@ -310,6 +311,7 @@ def insert_code_with_new_file_creation( algorithm_for_inlining = AlgorithmFactory().create_obj(algorithm_type) if algorithm_type != InlineTypesAlgorithms.DO_NOTHING: line_to_csv = { + 'project': source_filepath, 'input_filename': file_path, 'class_name': class_name, 'invocation_text_string': text_lines[invocation_node.line - 1].lstrip(), @@ -465,6 +467,7 @@ def analyze_file( method_invoked, method_node, output_path, + file_path, results ) except Exception as e: @@ -477,7 +480,7 @@ def analyze_file( def make_insertion(ast, class_declaration, dst_filename, found_method_decl, method_declarations, method_invoked, - method_node, output_path, results): + method_node, output_path, source_filepath, results): is_matched = is_match_to_the_conditions( ast, method_invoked, @@ -491,7 +494,8 @@ def make_insertion(ast, class_declaration, dst_filename, found_method_decl, meth method_invoked, dst_filename, output_path, - method_declarations) + method_declarations, + source_filepath) if log_of_inline: # change source filename, since it will be changed log_of_inline['input_filename'] = str(dst_filename.as_posix()) @@ -579,6 +583,7 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: df = pd.DataFrame( columns=[ + 'project', 'input_filename', 'class_name', 'invocation_text_string', @@ -612,6 +617,7 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: for i in single_file_features: # get local path for inlined filename i['output_filename'] = i['output_filename'].relative_to(os.getcwd()).as_posix() + print(i['output_filename'], filename) i['invocation_text_string'] = str(i['invocation_text_string']).encode('utf8') df = df.append(i, ignore_index=True) diff --git a/veniq/dataset_mining/code_similarity.py b/veniq/dataset_mining/code_similarity.py new file mode 100644 index 00000000..8937a35d --- /dev/null +++ b/veniq/dataset_mining/code_similarity.py @@ -0,0 +1,58 @@ +from collections import defaultdict +from typing import List + +import textdistance + +files = [ + ('EduStepicConnector', [142, 153], [159, 
171]), + ('FixedMembershipToken', [55, 88], [73, 82]) +] + + +def is_similar_functions( + file_before: str, + file_after: str, + ranges_before: List[int], + ranges_after: List[int]): + d = defaultdict(set) + exc = [' ', '{', '}', ''] + with open(file_before) as before: + before_text = before.read().split('\n') + start_before, end_before = ranges_before + before_lines = before_text[start_before: end_before] + with open(file_after) as after: + start_after, end_after = ranges_after + after_lines = after.read().split('\n')[start_after: end_after] + + for iteration_i, i in enumerate(after_lines, start=start_before): + for iteration_j, j in enumerate(before_lines, start=start_after): + i = i.strip() + j = j.strip() + if (i != '') and (j != '') and (i not in exc) and (j not in exc): + longest_subs = textdistance.ratcliff_obershelp(i, j) + hamm = textdistance.hamming.normalized_similarity(i, j) + d[j].add((longest_subs, hamm, iteration_i, iteration_j, i)) + + matched_strings_before = [] + + for string_before, lst in d.items(): + max_val = -1 + max_hamm = -1 + for subs_val, hamm, iterator_i, iteration_j, string_matched in lst: + if max_val < subs_val: + max_val = subs_val + max_hamm = hamm + if max_val > 0.7000000000000000000000000000000000000000001: + if max_hamm > 0.4: + matched_strings_before.append(string_before) + + lines_number_of_function_before = 0 + for i in before_lines: + if i.strip() not in exc: + lines_number_of_function_before += 1 + + ratio = len(matched_strings_before) / float(lines_number_of_function_before) + if ratio > 0.700000000000000000000000000000001: + return True + + return False diff --git a/veniq/dataset_mining/refMiner.py b/veniq/dataset_mining/refMiner.py new file mode 100644 index 00000000..1ecad79d --- /dev/null +++ b/veniq/dataset_mining/refMiner.py @@ -0,0 +1,46 @@ +from collections import OrderedDict +from functools import partial +from pathlib import Path +import os +import subprocess + +from pebble import ProcessPool +from tqdm import tqdm + +new_dataset = set() +folder_to_analyze = r'/hdd/new_dataset/RefactoringMiner/RefactoringMiner/build/distributions/RefactoringMiner-2.0.1/bin/01' + +for folder in Path(folder_to_analyze).iterdir(): + if folder.is_dir(): + # print(folder) + for subfolder in folder.iterdir(): + if subfolder.is_dir(): + dir = subfolder.parts[-2] + '/' + subfolder.parts[-1] + #dir = subfolder.parts[-1] + new_dataset.add(dir) + + +def run_ref_miner(folder: str): + p = Path(folder) + f_err = open(f"{'_'.join(p.parts)}.err.txt") + f_out = open(f"{'_'.join(p.parts)}.out.txt") + command = ['./RefactoringMiner', '-a', f"01/{folder}"] + print(command) + subprocess.Popen(command, stderr=f_err, stdout=f_out).wait() + + +system_cores_qty = 4 +dir_to_analyze = {} +for x in new_dataset: + java_files = [x.stat().st_size for x in Path(folder_to_analyze, x).glob('**/*.java')] + sum_size = sum(java_files) + dir_to_analyze[x] = sum_size + +dir_to_analyze = OrderedDict(sorted(dir_to_analyze.items(), key=lambda x: x[1])) +dir_to_analyze = [x[0] for x in dir_to_analyze] + +with ProcessPool(system_cores_qty) as executor: + future = executor.map(run_ref_miner, dir_to_analyze) + result = future.result() + for filename in tqdm(dir_to_analyze): + next(result) From fbd596620897f717fded2bc0971e5a54ba414b6d Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 26 Nov 2020 14:29:04 +0300 Subject: [PATCH 07/52] Create dataset function --- requirements.txt | 3 +- .../__init__.py | 0 .../test_functions_for_mining.py | 324 ++++++++++++++++++ test/similarity/test_similarity.py 
| 27 -- veniq/dataset_mining/create_dataset_json.py | 129 +++++++ 5 files changed, 455 insertions(+), 28 deletions(-) rename test/{similarity => dataset_mining}/__init__.py (100%) create mode 100644 test/dataset_mining/test_functions_for_mining.py delete mode 100644 test/similarity/test_similarity.py create mode 100644 veniq/dataset_mining/create_dataset_json.py diff --git a/requirements.txt b/requirements.txt index 7440cf1d..614cbae6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,4 +12,5 @@ typing-extensions; python_version<'3.8' tqdm == 4.32.1 bs4==0.0.1 pebble==4.5.3 -pandas==1.1.2 \ No newline at end of file +pandas==1.1.2 +sortedcontainers=2.3.0 \ No newline at end of file diff --git a/test/similarity/__init__.py b/test/dataset_mining/__init__.py similarity index 100% rename from test/similarity/__init__.py rename to test/dataset_mining/__init__.py diff --git a/test/dataset_mining/test_functions_for_mining.py b/test/dataset_mining/test_functions_for_mining.py new file mode 100644 index 00000000..270d19a9 --- /dev/null +++ b/test/dataset_mining/test_functions_for_mining.py @@ -0,0 +1,324 @@ +from pathlib import Path +from unittest import TestCase + +from veniq.dataset_mining.create_dataset_json import find_lines +from veniq.dataset_mining.code_similarity import is_similar_functions + + +class TestFunctionsForMining(TestCase): + current_directory = Path(__file__).absolute().parent + + def test_is_similar(self): + is_similar = is_similar_functions( + str(Path(self.current_directory, 'before/EduStepicConnector.java')), + str(Path(self.current_directory, 'after/EduStepicConnector.java')), + [142, 153], + [159, 171] + ) + self.assertEqual(is_similar, True) + + def test_is_not_similar(self): + # real EM, but too many changes + is_similar = is_similar_functions( + str(Path(self.current_directory, 'before/FixedMembershipToken.java')), + str(Path(self.current_directory, 'after/FixedMembershipToken.java')), + [55, 88], + [73, 82] + ) + self.assertEqual(is_similar, False) + + def test_find_lines_second_line_with_gap(self): + d = [{ + "startLine": 1, + "endLine": 1}, + { + "startLine": 3, + "endLine": 5, + }] + + lst = find_lines(d) + self.assertEqual(lst, ((1, 1), (3, 5))) + + def test_find_lines_second_line_no_gap(self): + d = [{ + "startLine": 1, + "endLine": 1 + }, + { + "startLine": 2, + "endLine": 5, + }] + lst = find_lines(d) + self.assertEqual(lst, ((1, 5), )) + + def test_find_lines_repetitions(self): + d = [{ + "startLine": 1, + "endLine": 4 + }, + { + "endLine": 2, + "startLine": 2, + }, + { + "startLine": 3, + "endLine": 3}, + { + + "startLine": 6, + "endLine": 10, + }, + { + "startLine": 9, + "endLine": 9}, + { + "endLine": 10, + "startLine": 10, + } + ] + lst = find_lines(d) + self.assertEqual(lst, ((1, 4), (6, 10))) + + def test_find_lines_last_wit_gap(self): + d = [{ + "startLine": 1, + "endLine": 4 + }, + { + + "startLine": 6, + "endLine": 10, + }, + { + "startLine": 15, + "endLine": 15 + } + ] + lst = find_lines(d) + self.assertEqual(lst, ((1, 4), (6, 10), (15, 15))) + + def test_large(self): + d = [ + { + "startLine": 327, + "endLine": 327 + }, + { + "startLine": 328, + "endLine": 328 + }, + { + "startLine": 331, + "endLine": 331 + }, + { + "startLine": 334, + "endLine": 334 + }, + { + "startLine": 336, + "endLine": 336 + }, + { + "startLine": 338, + "endLine": 338 + }, + { + "startLine": 340, + "endLine": 340 + }, + { + "startLine": 343, + "endLine": 343 + }, + { + "startLine": 346, + "endLine": 346 + }, + { + "startLine": 347, + "endLine": 347 + }, + { + 
"startLine": 349, + "endLine": 349 + }, + { + "startLine": 355, + "endLine": 355 + }, + { + "startLine": 358, + "endLine": 358 + }, + { + "startLine": 360, + "endLine": 360 + }, + { + "startLine": 362, + "endLine": 362 + }, + { + "startLine": 367, + "endLine": 367 + }, + { + "startLine": 380, + "endLine": 380 + }, + { + "startLine": 382, + "endLine": 382 + }, + { + "startLine": 387, + "endLine": 387 + }, + { + "startLine": 409, + "endLine": 409 + }, + { + "startLine": 394, + "endLine": 394 + }, + { + "startLine": 395, + "endLine": 395 + }, + { + "startLine": 397, + "endLine": 397 + }, + { + "startLine": 399, + "endLine": 399 + }, + { + "startLine": 390, + "endLine": 390 + }, + { + "startLine": 339, + "endLine": 341 + }, + { + "startLine": 339, + "endLine": 341 + }, + { + "startLine": 337, + "endLine": 342 + }, + { + "startLine": 337, + "endLine": 342 + }, + { + "startLine": 359, + "endLine": 361 + }, + { + "startLine": 359, + "endLine": 361 + }, + { + "startLine": 356, + "endLine": 364 + }, + { + "startLine": 357, + "endLine": 363 + }, + { + "startLine": 357, + "endLine": 363 + }, + { + "startLine": 356, + "endLine": 364 + }, + { + "startLine": 366, + "endLine": 368 + }, + { + "startLine": 366, + "endLine": 368 + }, + { + "startLine": 350, + "endLine": 370 + }, + { + "startLine": 352, + "endLine": 369 + }, + { + "startLine": 352, + "endLine": 369 + }, + { + "startLine": 350, + "endLine": 370 + }, + { + "startLine": 408, + "endLine": 410 + }, + { + "startLine": 324, + "endLine": 411 + }, + { + "startLine": 396, + "endLine": 398 + }, + { + "startLine": 398, + "endLine": 400 + }, + { + "startLine": 396, + "endLine": 400 + }, + { + "startLine": 383, + "endLine": 404 + }, + { + "startLine": 386, + "endLine": 401 + }, + { + "startLine": 384, + "endLine": 403 + }, + { + "startLine": 384, + "endLine": 403 + }, + { + "startLine": 386, + "endLine": 401 + }, + { + "startLine": 383, + "endLine": 404 + }, + { + "startLine": 388, + "endLine": 392 + }, + { + "startLine": 408, + "endLine": 410 + }, + { + "startLine": 389, + "endLine": 391 + } + ] + lst = find_lines(d) + self.assertEqual(lst, ((324, 411), )) diff --git a/test/similarity/test_similarity.py b/test/similarity/test_similarity.py deleted file mode 100644 index 47c192fa..00000000 --- a/test/similarity/test_similarity.py +++ /dev/null @@ -1,27 +0,0 @@ -from pathlib import Path -from unittest import TestCase - -from dataset_mining.code_similarity import is_similar_functions - - -class TestSimilarity(TestCase): - current_directory = Path(__file__).absolute().parent - - def test_is_similar(self): - is_similar = is_similar_functions( - str(Path(self.current_directory, 'before\\EduStepicConnector.java')), - str(Path(self.current_directory, 'after\\EduStepicConnector.java')), - [142, 153], - [159, 171] - ) - self.assertEqual(is_similar, True) - - def test_is_not_similar(self): - # real EM, but too many changes - is_similar = is_similar_functions( - str(Path(self.current_directory, 'before\\FixedMembershipToken.java')), - str(Path(self.current_directory, 'after\\FixedMembershipToken.java')), - [55, 88], - [73, 82] - ) - self.assertEqual(is_similar, False) \ No newline at end of file diff --git a/veniq/dataset_mining/create_dataset_json.py b/veniq/dataset_mining/create_dataset_json.py new file mode 100644 index 00000000..cfe9af78 --- /dev/null +++ b/veniq/dataset_mining/create_dataset_json.py @@ -0,0 +1,129 @@ +import os +from argparse import ArgumentParser +from functools import partial +from pathlib import Path +from typing import List, Dict, Tuple, 
Any +import pandas as pd +from sortedcontainers import SortedSet +from dataclasses import dataclass, field, asdict +from pebble import ProcessPool +from tqdm import tqdm + +import traceback + +from utils.encoding_detector import read_text_with_autodetected_encoding +import json + + +@dataclass +class RowResult: + filename: str + repository: str + lines: Tuple[Tuple[int]] + sha1: str + description: str + #url: str + + +def find_em_items(file: Path): + text = read_text_with_autodetected_encoding(str(file)) + json_dict = json.loads(text) + results = [] + + for x in json_dict['commits']: + refactorings = x.get('refactorings') + if refactorings: + for ref in refactorings: + if ref.get('type') == 'Extract Method': + res = RowResult( + filename='', + repository=x['repository'], + sha1=x['sha1'], + lines=tuple(), + description=ref.get('description') + #url=x['url'], + ) + ref_items = [ + x for x in ref.get('leftSideLocations', []) + if x.get('codeElementType') != "METHOD_DECLARATION" + ] + lines_list_of_lists = find_lines(ref_items) + res.lines = lines_list_of_lists + if ref_items: + res.filename = ref_items[0]['filePath'] + results.append(res) + + return results + + +def find_lines(ref_items: List[Dict[any, any]]) -> Tuple[Tuple[Any, ...], ...]: + + def add_to_list(small_list, global_list): + range_extraction = tuple([small_list[0], small_list[-1]]) + global_list.append(range_extraction) + + lines = SortedSet() + for ref_block in ref_items: + for j in range(ref_block['startLine'], ref_block['endLine'] + 1): + lines.add(j) + prev = lines[0] + cur_list = [prev] + lines_list_of_lists = [] + for x in lines[1:]: + diff = x - prev + prev = x + if diff > 1: + add_to_list(cur_list, lines_list_of_lists) + cur_list = [x] + else: + cur_list.append(x) + + add_to_list(cur_list, lines_list_of_lists) + return tuple(lines_list_of_lists) + + +if __name__ == '__main__': + + system_cores_qty = os.cpu_count() or 1 + parser = ArgumentParser() + parser.add_argument( + "-d", + "--dir", + required=True, + help="File path where json files of RefMiner2.0 are located" + ) + parser.add_argument( + "-o", "--csv_output", + help="File with output results", + default='ref_miner.csv' + ) + parser.add_argument( + "--jobs", + "-j", + type=int, + default=system_cores_qty - 1, + help="Number of processes to spawn. " + "By default one less than number of cores. 
" + "Be careful to raise it above, machine may stop responding while creating dataset.", + ) + args = parser.parse_args() + input_dir = Path(args.dir) + files = [x for x in input_dir.iterdir() if x.is_file() and x.name.endswith('out.txt')] + df = pd.DataFrame(columns=list(RowResult.__annotations__.keys())) + + with ProcessPool(1) as executor: + future = executor.map(find_em_items, files) + result = future.result() + + for filename in tqdm(files): + try: + single_file_features = next(result) + if single_file_features: + for i in single_file_features: + df = df.append(asdict(i), ignore_index=True) + df.to_csv(args.csv_output) + + except Exception as e: + traceback.print_exc() + df = df.drop_duplicates() + df.to_csv(args.csv_output) \ No newline at end of file From 028f02373af0b6580e310adb4a210c3a605d0da3 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 26 Nov 2020 17:02:06 +0300 Subject: [PATCH 08/52] Fix flake8 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 614cbae6..7041146e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,4 +13,4 @@ tqdm == 4.32.1 bs4==0.0.1 pebble==4.5.3 pandas==1.1.2 -sortedcontainers=2.3.0 \ No newline at end of file +sortedcontainers==2.3.0 \ No newline at end of file From 0b9ee56b49c5ef8452562b724411fbdf15040aec Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 26 Nov 2020 17:03:19 +0300 Subject: [PATCH 09/52] Fix tests --- .../test_functions_for_mining.py | 42 +++++----- veniq/dataset_mining/code_similarity.py | 24 +++--- veniq/dataset_mining/create_dataset_json.py | 21 +++-- veniq/dataset_mining/refMiner.py | 76 +++++++++++-------- 4 files changed, 87 insertions(+), 76 deletions(-) diff --git a/test/dataset_mining/test_functions_for_mining.py b/test/dataset_mining/test_functions_for_mining.py index 270d19a9..299b210d 100644 --- a/test/dataset_mining/test_functions_for_mining.py +++ b/test/dataset_mining/test_functions_for_mining.py @@ -28,41 +28,32 @@ def test_is_not_similar(self): self.assertEqual(is_similar, False) def test_find_lines_second_line_with_gap(self): - d = [{ - "startLine": 1, - "endLine": 1}, - { - "startLine": 3, - "endLine": 5, - }] + d = [{"startLine": 1, "endLine": 1}, + {"startLine": 3, "endLine": 5}] lst = find_lines(d) self.assertEqual(lst, ((1, 1), (3, 5))) def test_find_lines_second_line_no_gap(self): - d = [{ - "startLine": 1, - "endLine": 1 - }, - { - "startLine": 2, - "endLine": 5, - }] + d = [{"startLine": 1, "endLine": 1}, + {"startLine": 2, "endLine": 5, }] lst = find_lines(d) - self.assertEqual(lst, ((1, 5), )) + self.assertEqual(lst, ((1, 5),)) def test_find_lines_repetitions(self): - d = [{ - "startLine": 1, - "endLine": 4 + d = [ + { + "startLine": 1, + "endLine": 4 }, { "endLine": 2, - "startLine": 2, + "startLine": 2 }, { "startLine": 3, - "endLine": 3}, + "endLine": 3 + }, { "startLine": 6, @@ -80,9 +71,10 @@ def test_find_lines_repetitions(self): self.assertEqual(lst, ((1, 4), (6, 10))) def test_find_lines_last_wit_gap(self): - d = [{ - "startLine": 1, - "endLine": 4 + d = [ + { + "startLine": 1, + "endLine": 4 }, { @@ -321,4 +313,4 @@ def test_large(self): } ] lst = find_lines(d) - self.assertEqual(lst, ((324, 411), )) + self.assertEqual(lst, ((324, 411),)) diff --git a/veniq/dataset_mining/code_similarity.py b/veniq/dataset_mining/code_similarity.py index 8937a35d..ac848e60 100644 --- a/veniq/dataset_mining/code_similarity.py +++ b/veniq/dataset_mining/code_similarity.py @@ -35,16 +35,7 @@ def is_similar_functions( 
matched_strings_before = [] - for string_before, lst in d.items(): - max_val = -1 - max_hamm = -1 - for subs_val, hamm, iterator_i, iteration_j, string_matched in lst: - if max_val < subs_val: - max_val = subs_val - max_hamm = hamm - if max_val > 0.7000000000000000000000000000000000000000001: - if max_hamm > 0.4: - matched_strings_before.append(string_before) + find_similar_strings(d, matched_strings_before) lines_number_of_function_before = 0 for i in before_lines: @@ -56,3 +47,16 @@ def is_similar_functions( return True return False + + +def find_similar_strings(d, matched_strings_before): + for string_before, lst in d.items(): + max_val = -1 + max_hamm = -1 + for subs_val, hamm, iterator_i, iteration_j, string_matched in lst: + if max_val < subs_val: + max_val = subs_val + max_hamm = hamm + if max_val > 0.7000000000000000000000000000000000000000001: + if max_hamm > 0.4: + matched_strings_before.append(string_before) diff --git a/veniq/dataset_mining/create_dataset_json.py b/veniq/dataset_mining/create_dataset_json.py index cfe9af78..cb4202cd 100644 --- a/veniq/dataset_mining/create_dataset_json.py +++ b/veniq/dataset_mining/create_dataset_json.py @@ -1,18 +1,17 @@ +import json import os +import traceback from argparse import ArgumentParser -from functools import partial +from dataclasses import dataclass, asdict from pathlib import Path from typing import List, Dict, Tuple, Any + import pandas as pd -from sortedcontainers import SortedSet -from dataclasses import dataclass, field, asdict from pebble import ProcessPool +from sortedcontainers import SortedSet from tqdm import tqdm -import traceback - from utils.encoding_detector import read_text_with_autodetected_encoding -import json @dataclass @@ -22,7 +21,7 @@ class RowResult: lines: Tuple[Tuple[int]] sha1: str description: str - #url: str + url: str def find_em_items(file: Path): @@ -40,8 +39,8 @@ def find_em_items(file: Path): repository=x['repository'], sha1=x['sha1'], lines=tuple(), - description=ref.get('description') - #url=x['url'], + description=ref.get('description'), + url=x['url'] ) ref_items = [ x for x in ref.get('leftSideLocations', []) @@ -123,7 +122,7 @@ def add_to_list(small_list, global_list): df = df.append(asdict(i), ignore_index=True) df.to_csv(args.csv_output) - except Exception as e: + except Exception: traceback.print_exc() df = df.drop_duplicates() - df.to_csv(args.csv_output) \ No newline at end of file + df.to_csv(args.csv_output) diff --git a/veniq/dataset_mining/refMiner.py b/veniq/dataset_mining/refMiner.py index 1ecad79d..dcf66571 100644 --- a/veniq/dataset_mining/refMiner.py +++ b/veniq/dataset_mining/refMiner.py @@ -1,24 +1,12 @@ -from collections import OrderedDict -from functools import partial -from pathlib import Path import os import subprocess +from argparse import ArgumentParser +from collections import OrderedDict +from pathlib import Path from pebble import ProcessPool from tqdm import tqdm -new_dataset = set() -folder_to_analyze = r'/hdd/new_dataset/RefactoringMiner/RefactoringMiner/build/distributions/RefactoringMiner-2.0.1/bin/01' - -for folder in Path(folder_to_analyze).iterdir(): - if folder.is_dir(): - # print(folder) - for subfolder in folder.iterdir(): - if subfolder.is_dir(): - dir = subfolder.parts[-2] + '/' + subfolder.parts[-1] - #dir = subfolder.parts[-1] - new_dataset.add(dir) - def run_ref_miner(folder: str): p = Path(folder) @@ -29,18 +17,46 @@ def run_ref_miner(folder: str): subprocess.Popen(command, stderr=f_err, stdout=f_out).wait() -system_cores_qty = 4 -dir_to_analyze = {} 
-for x in new_dataset: - java_files = [x.stat().st_size for x in Path(folder_to_analyze, x).glob('**/*.java')] - sum_size = sum(java_files) - dir_to_analyze[x] = sum_size - -dir_to_analyze = OrderedDict(sorted(dir_to_analyze.items(), key=lambda x: x[1])) -dir_to_analyze = [x[0] for x in dir_to_analyze] - -with ProcessPool(system_cores_qty) as executor: - future = executor.map(run_ref_miner, dir_to_analyze) - result = future.result() - for filename in tqdm(dir_to_analyze): - next(result) +if __name__ == '__main__': # noqa: C901 + system_cores_qty = os.cpu_count() or 1 + parser = ArgumentParser() + parser.add_argument( + "-d", + "--dir", + required=True, + help="File path to JAVA projects" + ) + parser.add_argument( + "--jobs", + "-j", + type=int, + default=system_cores_qty - 1, + help="Number of processes to spawn. " + "By default one less than number of cores. " + "Be careful to raise it above, machine may stop responding while creating dataset.", + ) + + args = parser.parse_args() + new_dataset = set() + + for folder in Path(args.dir).iterdir(): + if folder.is_dir(): + for subfolder in folder.iterdir(): + if subfolder.is_dir(): + dir_name = subfolder.parts[-2] + '/' + subfolder.parts[-1] + new_dataset.add(dir_name) + + dir_to_analyze = {} + for x in new_dataset: + java_files = [x.stat().st_size for x in Path(args.dir, x).glob('**/*.java')] + sum_size = sum(java_files) + dir_to_analyze[x] = sum_size + + dir_to_analyze = OrderedDict(sorted(dir_to_analyze.items(), key=lambda x: x[1])) + dir_to_analyze = [x[0] for x in dir_to_analyze] + + with ProcessPool(system_cores_qty) as executor: + future = executor.map(run_ref_miner, dir_to_analyze) + result = future.result() + for filename in tqdm(dir_to_analyze): + next(result) From e3a091847c8d57174fb404b538d360ad36588386 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 26 Nov 2020 17:05:17 +0300 Subject: [PATCH 10/52] Fix --- requirements.txt | 3 ++- veniq/dataset_mining/create_dataset_json.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 7041146e..2fa09a6f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,4 +13,5 @@ tqdm == 4.32.1 bs4==0.0.1 pebble==4.5.3 pandas==1.1.2 -sortedcontainers==2.3.0 \ No newline at end of file +sortedcontainers==2.3.0 +textdistance=4.2.0 \ No newline at end of file diff --git a/veniq/dataset_mining/create_dataset_json.py b/veniq/dataset_mining/create_dataset_json.py index cb4202cd..139dcaf8 100644 --- a/veniq/dataset_mining/create_dataset_json.py +++ b/veniq/dataset_mining/create_dataset_json.py @@ -11,7 +11,7 @@ from sortedcontainers import SortedSet from tqdm import tqdm -from utils.encoding_detector import read_text_with_autodetected_encoding +from veniq.utils.encoding_detector import read_text_with_autodetected_encoding @dataclass From 0b2dc31abc2d3c17371c54f8e3f84f426fa1dada Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 26 Nov 2020 17:22:16 +0300 Subject: [PATCH 11/52] Fix requirements --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2fa09a6f..e5003f5e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,4 +14,4 @@ bs4==0.0.1 pebble==4.5.3 pandas==1.1.2 sortedcontainers==2.3.0 -textdistance=4.2.0 \ No newline at end of file +textdistance==4.2.0 \ No newline at end of file From fce2d6d1bfb343d3fdf46a4a438653ac9ddcfbad Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 26 Nov 2020 17:35:19 +0300 Subject: [PATCH 12/52] Fix unittest --- 
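Note: `python3 -m unittest discover` only collects files matching the default
`test*.py` pattern, so `test/integration/dataset_collection.py` is never picked
up by the first target and has to be invoked explicitly, as in the Makefile hunk
below. A minimal sketch of an equivalent single-command run (the dotted module
path is assumed from the layout in this patch and requires the usual
`__init__.py` files):

    import unittest

    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    # Default discovery only matches "test*.py", which skips the
    # integration module, so it is added by dotted name as well.
    suite.addTests(loader.discover("."))
    suite.addTests(loader.loadTestsFromName("test.integration.dataset_collection"))
    unittest.TextTestRunner(verbosity=2).run(suite)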
Makefile | 1 + test/integration/dataset_collection.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 523311b4..044d249a 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,7 @@ requirements: unittest: python3 -m unittest discover + python3 -m unittest test/integration/dataset_collection.py install: python3 -m pip install . diff --git a/test/integration/dataset_collection.py b/test/integration/dataset_collection.py index 2ef826b8..5c1260ee 100644 --- a/test/integration/dataset_collection.py +++ b/test/integration/dataset_collection.py @@ -1,3 +1,4 @@ +import json import tempfile from os import listdir from pathlib import Path @@ -55,6 +56,7 @@ def test_dataset_collection(self): for x in results_output: x['input_filename'] = str(Path(x['input_filename']).name).split('_')[0] + '.java' del x['output_filename'] + del x['project'] new_results = new_results.append(x, ignore_index=True) df = pd.DataFrame(new_results) From 8ac23aee7649e8731ecdab39de34d997698d2db3 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 26 Nov 2020 18:24:55 +0300 Subject: [PATCH 13/52] Fix --- test/integration/dataset_collection.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/integration/dataset_collection.py b/test/integration/dataset_collection.py index 5c1260ee..ed13fba6 100644 --- a/test/integration/dataset_collection.py +++ b/test/integration/dataset_collection.py @@ -1,4 +1,3 @@ -import json import tempfile from os import listdir from pathlib import Path From 0bd59e7e6d0a5cbe0c1222a6c99b35cf2b91e7d7 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Fri, 27 Nov 2020 16:01:32 +0300 Subject: [PATCH 14/52] Add tests --- .../after/EduStepicConnector.java | 422 ++++++++++++++++++ .../after/FixedMembershipToken.java | 134 ++++++ .../before/EduStepicConnector.java | 409 +++++++++++++++++ .../before/FixedMembershipToken.java | 137 ++++++ 4 files changed, 1102 insertions(+) create mode 100644 test/dataset_mining/after/EduStepicConnector.java create mode 100644 test/dataset_mining/after/FixedMembershipToken.java create mode 100644 test/dataset_mining/before/EduStepicConnector.java create mode 100644 test/dataset_mining/before/FixedMembershipToken.java diff --git a/test/dataset_mining/after/EduStepicConnector.java b/test/dataset_mining/after/EduStepicConnector.java new file mode 100644 index 00000000..c2157b2e --- /dev/null +++ b/test/dataset_mining/after/EduStepicConnector.java @@ -0,0 +1,422 @@ +package com.jetbrains.edu.stepic; + +import com.google.gson.FieldNamingPolicy; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.annotations.Expose; +import com.intellij.openapi.application.ApplicationManager; +import com.intellij.openapi.diagnostic.Logger; +import com.intellij.openapi.editor.Document; +import com.intellij.openapi.project.Project; +import com.intellij.openapi.util.text.StringUtil; +import com.intellij.openapi.vfs.VirtualFile; +import com.intellij.util.net.ssl.CertificateManager; +import com.jetbrains.edu.EduUtils; +import com.jetbrains.edu.courseFormat.Course; +import com.jetbrains.edu.courseFormat.Lesson; +import com.jetbrains.edu.courseFormat.Task; +import com.jetbrains.edu.courseFormat.TaskFile; +import org.apache.http.NameValuePair; +import org.apache.http.StatusLine; +import org.apache.http.client.entity.UrlEncodedFormEntity; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import 
org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.cookie.Cookie; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.BasicCookieStore; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.message.BasicHeader; +import org.apache.http.message.BasicNameValuePair; +import org.apache.http.protocol.HTTP; +import org.apache.http.util.EntityUtils; +import org.jetbrains.annotations.NotNull; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.util.*; + +public class EduStepicConnector { + private static final String stepicUrl = "https://stepic.org/"; + private static final String stepicApiUrl = stepicUrl + "api/"; + private static final Logger LOG = Logger.getInstance(EduStepicConnector.class.getName()); + private static final String ourDomain = "stepic.org"; + private static String ourCSRFToken = ""; + private static CloseableHttpClient ourClient; + + //this prefix indicates that course can be opened by educational plugin + public static final String PYCHARM_PREFIX = "pycharm"; + private static BasicCookieStore ourCookieStore; + + private EduStepicConnector() { + } + + public static boolean login(@NotNull final String user, @NotNull final String password) { + if (ourClient == null || ourCookieStore == null) + initializeClient(); + return postCredentials(user, password); + } + + private static void initializeClient() { + final HttpGet request = new HttpGet(stepicUrl); + request.addHeader(new BasicHeader("referer", "https://stepic.org")); + request.addHeader(new BasicHeader("content-type", "application/json")); + + HttpClientBuilder builder = HttpClients.custom().setSslcontext(CertificateManager.getInstance().getSslContext()).setMaxConnPerRoute(100); + ourCookieStore = new BasicCookieStore(); + ourClient = builder.setDefaultCookieStore(ourCookieStore).build(); + + try { + ourClient.execute(request); + saveCSRFToken(); + } + catch (IOException e) { + LOG.error(e.getMessage()); + } + } + + private static void saveCSRFToken() { + if (ourCookieStore == null) return; + final List cookies = ourCookieStore.getCookies(); + for (Cookie cookie : cookies) { + if (cookie.getName().equals("csrftoken")) { + ourCSRFToken = cookie.getValue(); + } + } + } + + private static boolean postCredentials(String user, String password) { + String url = stepicUrl + "accounts/login/"; + final HttpPost request = new HttpPost(url); + List nvps = new ArrayList (); + nvps.add(new BasicNameValuePair("csrfmiddlewaretoken", ourCSRFToken)); + nvps.add(new BasicNameValuePair("login", user)); + nvps.add(new BasicNameValuePair("next", "/")); + nvps.add(new BasicNameValuePair("password", password)); + nvps.add(new BasicNameValuePair("remember", "on")); + + try { + request.setEntity(new UrlEncodedFormEntity(nvps, HTTP.UTF_8)); + } + catch (UnsupportedEncodingException e) { + LOG.error(e.getMessage()); + return false; + } + + setHeaders(request, "application/x-www-form-urlencoded"); + + try { + final CloseableHttpResponse response = ourClient.execute(request); + saveCSRFToken(); + final StatusLine line = response.getStatusLine(); + if (line.getStatusCode() != 302) { + LOG.error("Failed to login " + EntityUtils.toString(response.getEntity())); + return false; + } + } + catch (IOException e) { + LOG.error(e.getMessage()); + return false; + } + return true; + } + + private static T 
getFromStepic(String link, final Class container) throws IOException { + final HttpGet request = new HttpGet(stepicApiUrl + link); + if (ourClient == null) { + initializeClient(); + } + setHeaders(request, "application/json"); + + final CloseableHttpResponse response = ourClient.execute(request); + final String responseString = EntityUtils.toString(response.getEntity()); + Gson gson = new GsonBuilder().setFieldNamingPolicy(FieldNamingPolicy.LOWER_CASE_WITH_UNDERSCORES).create(); + return gson.fromJson(responseString, container); + } + + @NotNull + public static List getCourses() { + try { + List result = new ArrayList(); + int pageNumber = 0; + boolean hasNext = addCoursesFromStepic(result, pageNumber); + while (hasNext) { + pageNumber += 1; + hasNext = addCoursesFromStepic(result, pageNumber); + } + return result; + } + catch (IOException e) { + LOG.error("Cannot load course list " + e.getMessage()); + } + return Collections.emptyList(); + } + + private static boolean addCoursesFromStepic(List result, int pageNumber) throws IOException { + final String url = pageNumber == 0 ? "courses" : "courses?page=" + String.valueOf(pageNumber); + final CoursesContainer coursesContainer = getFromStepic(url, CoursesContainer.class); + final List courseInfos = coursesContainer.courses; + for (CourseInfo info : courseInfos) { + final String courseType = info.getType(); + if (StringUtil.isEmptyOrSpaces(courseType)) continue; + final List typeLanguage = StringUtil.split(courseType, " "); + if (typeLanguage.size() == 2 && PYCHARM_PREFIX.equals(typeLanguage.get(0))) { + result.add(info); + } + } + return coursesContainer.meta.containsKey("has_next") && coursesContainer.meta.get("has_next") == Boolean.TRUE; + } + + public static Course getCourse(@NotNull final CourseInfo info) { + final Course course = new Course(); + course.setAuthors(info.getInstructors()); + course.setDescription(info.getDescription()); + course.setName(info.getName()); + String courseType = info.getType(); + course.setLanguage(courseType.substring(PYCHARM_PREFIX.length() + 1)); + course.setUpToDate(true); // TODO: get from stepic + try { + for (Integer section : info.sections) { + course.addLessons(getLessons(section)); + } + return course; + } + catch (IOException e) { + LOG.error("IOException " + e.getMessage()); + } + return null; + } + + public static List getLessons(int sectionId) throws IOException { + final SectionWrapper sectionWrapper = getFromStepic("sections/" + String.valueOf(sectionId), SectionWrapper.class); + List unitIds = sectionWrapper.sections.get(0).units; + final List lessons = new ArrayList(); + for (Integer unitId : unitIds) { + UnitWrapper unit = getFromStepic("units/" + String.valueOf(unitId), UnitWrapper.class); + int lessonID = unit.units.get(0).lesson; + LessonContainer lesson = getFromStepic("lessons/" + String.valueOf(lessonID), LessonContainer.class); + Lesson realLesson = lesson.lessons.get(0); + lessons.add(realLesson); + } + + for (Lesson lesson : lessons) { + lesson.taskList = new ArrayList(); + for (Integer s : lesson.steps) { + createTask(lesson, s); + } + } + return lessons; + } + + private static void createTask(Lesson lesson, Integer s) throws IOException { + final Step step = getStep(s); + final Task task = new Task(); + task.setName(step.options != null ? 
step.options.title : PYCHARM_PREFIX); + task.setText(step.text); + for (TestFileWrapper wrapper : step.options.test) { + task.setTestsTexts(wrapper.name, wrapper.text); + } + + task.taskFiles = new HashMap(); // TODO: it looks like we don't need taskFiles as map anymore + if (step.options.files != null) { + for (TaskFile taskFile : step.options.files) { + task.taskFiles.put(taskFile.name, taskFile); + } + } + lesson.taskList.add(task); + } + + public static Step getStep(Integer step) throws IOException { + return getFromStepic("steps/" + String.valueOf(step), StepContainer.class).steps.get(0).block; + } + + + public static void showLoginDialog() { + final LoginDialog dialog = new LoginDialog(); + dialog.show(); + } + + + public static void postLesson(Project project, @NotNull final Lesson lesson) { + final HttpPost request = new HttpPost(stepicApiUrl + "lessons"); + if (ourClient == null) { + showLoginDialog(); + } + + setHeaders(request, "application/json"); + String requestBody = new Gson().toJson(new LessonWrapper(lesson)); + request.setEntity(new StringEntity(requestBody, ContentType.APPLICATION_JSON)); + + try { + final CloseableHttpResponse response = ourClient.execute(request); + final String responseString = EntityUtils.toString(response.getEntity()); + final StatusLine line = response.getStatusLine(); + if (line.getStatusCode() != 201) { + LOG.error("Failed to push " + responseString); + return; + } + final Lesson postedLesson = new Gson().fromJson(responseString, Course.class).getLessons().get(0); + for (Task task : lesson.getTaskList()) { + postTask(project, task, postedLesson.id); + } + } + catch (IOException e) { + LOG.error(e.getMessage()); + } + } + + public static void postTask(Project project, @NotNull final Task task, int id) { + final HttpPost request = new HttpPost(stepicApiUrl + "step-sources"); + setHeaders(request, "application/json"); + final Gson gson = new GsonBuilder().setPrettyPrinting().excludeFieldsWithoutExposeAnnotation().create(); + String requestBody = gson.toJson(new StepSourceWrapper(project, task, id)); + request.setEntity(new StringEntity(requestBody, ContentType.APPLICATION_JSON)); + + try { + final CloseableHttpResponse response = ourClient.execute(request); + final StatusLine line = response.getStatusLine(); + if (line.getStatusCode() != 201) { + LOG.error("Failed to push " + EntityUtils.toString(response.getEntity())); + } + } + catch (IOException e) { + LOG.error(e.getMessage()); + } + } + + private static void setHeaders(@NotNull final HttpRequestBase request, String contentType) { + request.addHeader(new BasicHeader("referer", stepicUrl)); + request.addHeader(new BasicHeader("X-CSRFToken", ourCSRFToken)); + request.addHeader(new BasicHeader("content-type", contentType)); + } + + private static class StepContainer { + List steps; + } + + private static class Step { + @Expose StepOptions options; + @Expose String text; + @Expose String name = "pycharm"; + @Expose StepOptions source; + + public static Step fromTask(Project project, @NotNull final Task task) { + final Step step = new Step(); + step.text = task.getTaskText(project); + step.source = StepOptions.fromTask(project, task); + return step; + } + } + + private static class StepOptions { + @Expose List test; + @Expose String title; //HERE + @Expose List files; + @Expose String text; + + public static StepOptions fromTask(final Project project, @NotNull final Task task) { + final StepOptions source = new StepOptions(); + + final String text = task.getTestsText(project); + source.test = 
Collections.singletonList(new TestFileWrapper("tests.py", text)); + source.files = new ArrayList(); + source.title = task.getName(); + for (final Map.Entry entry : task.getTaskFiles().entrySet()) { + ApplicationManager.getApplication().runWriteAction(new Runnable() { + @Override + public void run() { + final VirtualFile taskDir = task.getTaskDir(project); + assert taskDir != null; + EduUtils.createStudentFileFromAnswer(project, taskDir, taskDir, entry); + } + }); + final TaskFile taskFile = entry.getValue(); + taskFile.name = entry.getKey(); + final Document document = task.getDocument(project, taskFile.name); + if (document != null) { + source.text = document.getImmutableCharSequence().toString(); + taskFile.text = document.getImmutableCharSequence().toString(); + } + source.files.add(taskFile); + } + return source; + } + } + + private static class CoursesContainer { + public List courses; + public Map meta; + } + + static class StepSourceWrapper { + @Expose + StepSource stepSource; + + public StepSourceWrapper(Project project, Task task, int id) { + stepSource = new StepSource(project, task, id); + } + } + + static class LessonWrapper { + Lesson lesson; + + public LessonWrapper(Lesson lesson) { + this.lesson = new Lesson(); + this.lesson.setName(lesson.getName()); + } + } + + static class LessonContainer { + List lessons; + } + + static class StepSource { + @Expose Step block; + @Expose int position = 0; + @Expose int lesson = 0; + + public StepSource(Project project, Task task, int id) { + lesson = id; + position = task.getIndex(); + block = Step.fromTask(project, task); + } + } + + static class TestFileWrapper { + @Expose private final String name; + @Expose private final String text; + + public TestFileWrapper(String name, String text) { + this.name = name; + this.text = text; + } + } + + static class SectionWrapper { + static class Section { + List units; + } + + List
sections; + List lessons; + + static class Unit { + int id; + int lesson; + } + + List units; + } + + static class UnitWrapper { + static class Unit { + int lesson; + } + + List units; + } +} diff --git a/test/dataset_mining/after/FixedMembershipToken.java b/test/dataset_mining/after/FixedMembershipToken.java new file mode 100644 index 00000000..df4fbe97 --- /dev/null +++ b/test/dataset_mining/after/FixedMembershipToken.java @@ -0,0 +1,134 @@ +package org.jgroups.auth; + +import org.jgroups.Event; +import org.jgroups.Message; +import org.jgroups.PhysicalAddress; +import org.jgroups.annotations.Property; +import org.jgroups.stack.IpAddress; +import org.jgroups.util.Bits; +import org.jgroups.util.Util; + +import java.io.DataInput; +import java.io.DataOutput; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.List; +import java.util.StringTokenizer; + +/** + *
+ * The FixedMemberShipToken object predefines a list of IP addresses and ports that can join the group.
+ * <p>
+ * Configuration parameters for this example are shown below:
+ * <ul>
+ * <li>fixed_members_value (required) = List of IP addresses & ports (optionally) - ports must be
+ * seperated by a '/' e.g. 127.0.0.1/1010*127.0.0.1/4567</li>
+ * <li>fixed_members_seperator (required) = The seperator used between IP addresses - e.g. *</li>
+ * </ul>
+ * + * @author Chris Mills (millsy@jboss.com) + */ +public class FixedMembershipToken extends AuthToken { + private final List memberList = new ArrayList<>(); + private String token = "emptyToken"; + + @Property + private String fixed_members_seperator = ","; + + public FixedMembershipToken() { + } + + public FixedMembershipToken(String token) { + this.token=token; + } + + public String getName() { + return "org.jgroups.auth.FixedMembershipToken"; + } + + @Property + public void setFixedMembersSeparator(String value) { + fixed_members_seperator = value; + } + + public boolean authenticate(AuthToken token, Message msg) { + if ((token != null) && (token instanceof FixedMembershipToken) && (this.memberList != null)) { + PhysicalAddress src = (PhysicalAddress) auth.down(new Event(Event.GET_PHYSICAL_ADDRESS, msg.getSrc())); + if (src == null) { + log.error("didn't find physical address for " + msg.getSrc()); + return false; + } + return isInMembersList((IpAddress)src); + } + + if (log.isWarnEnabled()) + log.warn("Invalid AuthToken instance - wrong type or null"); + return false; + } + + public boolean isInMembersList(IpAddress sender) { + if(memberList == null || sender == null) + return false; + + for(InetSocketAddress addr: memberList) { + if(match(sender, addr)) + return true; + } + return false; + } + + public static boolean match(IpAddress sender, InetSocketAddress addr) { + return !(sender == null || addr == null) + && addr.getAddress().equals(sender.getIpAddress()) + && (addr.getPort() == 0 || addr.getPort() == sender.getPort()); + } + + + private static boolean hasPort(String member) { + return member.contains(":"); + } + + @Property(name = "fixed_members_value") + public void setMemberList(String list) throws UnknownHostException { + memberList.clear(); + StringTokenizer memberListTokenizer = new StringTokenizer(list, fixed_members_seperator); + while (memberListTokenizer.hasMoreTokens()) { + String tmp=memberListTokenizer.nextToken().trim(); + int index=tmp.lastIndexOf('/'); + int port=index != -1? Integer.parseInt(tmp.substring(index+1)) : 0; + String addr_str=index != -1? 
tmp.substring(0, index) : tmp; + InetAddress addr=InetAddress.getByName(addr_str); + memberList.add(new InetSocketAddress(addr, port)); + } + } + + /** + * Required to serialize the object to pass across the wire + * @param out + * @throws java.io.IOException + */ + public void writeTo(DataOutput out) throws Exception { + if (log.isDebugEnabled()) + log.debug("SimpleToken writeTo()"); + Bits.writeString(this.token,out); + } + + /** + * Required to deserialize the object when read in from the wire + * @param in + * @throws Exception + */ + public void readFrom(DataInput in) throws Exception { + if (log.isDebugEnabled()) + log.debug("SimpleToken readFrom()"); + this.token = Bits.readString(in); + } + + public int size() { + return Util.size(token); + } +} diff --git a/test/dataset_mining/before/EduStepicConnector.java b/test/dataset_mining/before/EduStepicConnector.java new file mode 100644 index 00000000..a37c4680 --- /dev/null +++ b/test/dataset_mining/before/EduStepicConnector.java @@ -0,0 +1,409 @@ +package com.jetbrains.edu.stepic; + +import com.google.gson.FieldNamingPolicy; +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import com.google.gson.annotations.Expose; +import com.intellij.openapi.application.ApplicationManager; +import com.intellij.openapi.diagnostic.Logger; +import com.intellij.openapi.editor.Document; +import com.intellij.openapi.project.Project; +import com.intellij.openapi.util.text.StringUtil; +import com.intellij.openapi.vfs.VirtualFile; +import com.intellij.util.net.ssl.CertificateManager; +import com.jetbrains.edu.EduUtils; +import com.jetbrains.edu.courseFormat.Course; +import com.jetbrains.edu.courseFormat.Lesson; +import com.jetbrains.edu.courseFormat.Task; +import com.jetbrains.edu.courseFormat.TaskFile; +import org.apache.http.NameValuePair; +import org.apache.http.StatusLine; +import org.apache.http.client.entity.UrlEncodedFormEntity; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.cookie.Cookie; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.BasicCookieStore; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.message.BasicHeader; +import org.apache.http.message.BasicNameValuePair; +import org.apache.http.protocol.HTTP; +import org.apache.http.util.EntityUtils; +import org.jetbrains.annotations.NotNull; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.util.*; + +public class EduStepicConnector { + private static final String stepicUrl = "https://stepic.org/"; + private static final String stepicApiUrl = stepicUrl + "api/"; + private static final Logger LOG = Logger.getInstance(EduStepicConnector.class.getName()); + private static final String ourDomain = "stepic.org"; + private static String ourCSRFToken = ""; + private static CloseableHttpClient ourClient; + + //this prefix indicates that course can be opened by educational plugin + public static final String PYCHARM_PREFIX = "pycharm"; + private static BasicCookieStore ourCookieStore; + + private EduStepicConnector() { + } + + public static boolean login(@NotNull final String user, @NotNull final String password) { + if (ourClient == null || 
ourCookieStore == null) + initializeClient(); + return postCredentials(user, password); + } + + private static void initializeClient() { + final HttpGet request = new HttpGet(stepicUrl); + request.addHeader(new BasicHeader("referer", "https://stepic.org")); + request.addHeader(new BasicHeader("content-type", "application/json")); + + HttpClientBuilder builder = HttpClients.custom().setSslcontext(CertificateManager.getInstance().getSslContext()).setMaxConnPerRoute(100); + ourCookieStore = new BasicCookieStore(); + ourClient = builder.setDefaultCookieStore(ourCookieStore).build(); + + try { + ourClient.execute(request); + saveCSRFToken(); + } + catch (IOException e) { + LOG.error(e.getMessage()); + } + } + + private static void saveCSRFToken() { + if (ourCookieStore == null) return; + final List cookies = ourCookieStore.getCookies(); + for (Cookie cookie : cookies) { + if (cookie.getName().equals("csrftoken")) { + ourCSRFToken = cookie.getValue(); + } + } + } + + private static boolean postCredentials(String user, String password) { + String url = stepicUrl + "accounts/login/"; + final HttpPost request = new HttpPost(url); + List nvps = new ArrayList (); + nvps.add(new BasicNameValuePair("csrfmiddlewaretoken", ourCSRFToken)); + nvps.add(new BasicNameValuePair("login", user)); + nvps.add(new BasicNameValuePair("next", "/")); + nvps.add(new BasicNameValuePair("password", password)); + nvps.add(new BasicNameValuePair("remember", "on")); + + try { + request.setEntity(new UrlEncodedFormEntity(nvps, HTTP.UTF_8)); + } + catch (UnsupportedEncodingException e) { + LOG.error(e.getMessage()); + return false; + } + + setHeaders(request, "application/x-www-form-urlencoded"); + + try { + final CloseableHttpResponse response = ourClient.execute(request); + saveCSRFToken(); + final StatusLine line = response.getStatusLine(); + if (line.getStatusCode() != 302) { + LOG.error("Failed to login " + EntityUtils.toString(response.getEntity())); + return false; + } + } + catch (IOException e) { + LOG.error(e.getMessage()); + return false; + } + return true; + } + + private static T getFromStepic(String link, final Class container) throws IOException { + final HttpGet request = new HttpGet(stepicApiUrl + link); + if (ourClient == null) { + initializeClient(); + } + setHeaders(request, "application/json"); + + final CloseableHttpResponse response = ourClient.execute(request); + final String responseString = EntityUtils.toString(response.getEntity()); + Gson gson = new GsonBuilder().setFieldNamingPolicy(FieldNamingPolicy.LOWER_CASE_WITH_UNDERSCORES).create(); + return gson.fromJson(responseString, container); + } + + @NotNull + public static List getCourses() { + try { + List result = new ArrayList(); + final List courseInfos = getFromStepic("courses", CoursesContainer.class).courses; + for (CourseInfo info : courseInfos) { + final String courseType = info.getType(); + if (StringUtil.isEmptyOrSpaces(courseType)) continue; + final List typeLanguage = StringUtil.split(courseType, " "); + if (typeLanguage.size() == 2 && PYCHARM_PREFIX.equals(typeLanguage.get(0))) { + result.add(info); + } + } + return result; + } + catch (IOException e) { + LOG.error("Cannot load course list " + e.getMessage()); + } + return Collections.emptyList(); + } + + public static Course getCourse(@NotNull final CourseInfo info) { + final Course course = new Course(); + course.setAuthors(info.getInstructors()); + course.setDescription(info.getDescription()); + course.setName(info.getName()); + String courseType = info.getType(); + 
course.setLanguage(courseType.substring(PYCHARM_PREFIX.length() + 1)); + course.setUpToDate(true); // TODO: get from stepic + try { + for (Integer section : info.sections) { + course.addLessons(getLessons(section)); + } + return course; + } + catch (IOException e) { + LOG.error("IOException " + e.getMessage()); + } + return null; + } + + public static List getLessons(int sectionId) throws IOException { + final SectionWrapper sectionWrapper = getFromStepic("sections/" + String.valueOf(sectionId), SectionWrapper.class); + List unitIds = sectionWrapper.sections.get(0).units; + final List lessons = new ArrayList(); + for (Integer unitId : unitIds) { + UnitWrapper unit = getFromStepic("units/" + String.valueOf(unitId), UnitWrapper.class); + int lessonID = unit.units.get(0).lesson; + LessonContainer lesson = getFromStepic("lessons/" + String.valueOf(lessonID), LessonContainer.class); + Lesson realLesson = lesson.lessons.get(0); + lessons.add(realLesson); + } + + for (Lesson lesson : lessons) { + lesson.taskList = new ArrayList(); + for (Integer s : lesson.steps) { + createTask(lesson, s); + } + } + return lessons; + } + + private static void createTask(Lesson lesson, Integer s) throws IOException { + final Step step = getStep(s); + final Task task = new Task(); + task.setName(step.options != null ? step.options.title : PYCHARM_PREFIX); + task.setText(step.text); + for (TestFileWrapper wrapper : step.options.test) { + task.setTestsTexts(wrapper.name, wrapper.text); + } + + task.taskFiles = new HashMap(); // TODO: it looks like we don't need taskFiles as map anymore + if (step.options.files != null) { + for (TaskFile taskFile : step.options.files) { + task.taskFiles.put(taskFile.name, taskFile); + } + } + lesson.taskList.add(task); + } + + public static Step getStep(Integer step) throws IOException { + return getFromStepic("steps/" + String.valueOf(step), StepContainer.class).steps.get(0).block; + } + + + public static void showLoginDialog() { + final LoginDialog dialog = new LoginDialog(); + dialog.show(); + } + + + public static void postLesson(Project project, @NotNull final Lesson lesson) { + final HttpPost request = new HttpPost(stepicApiUrl + "lessons"); + if (ourClient == null) { + showLoginDialog(); + } + + setHeaders(request, "application/json"); + String requestBody = new Gson().toJson(new LessonWrapper(lesson)); + request.setEntity(new StringEntity(requestBody, ContentType.APPLICATION_JSON)); + + try { + final CloseableHttpResponse response = ourClient.execute(request); + final String responseString = EntityUtils.toString(response.getEntity()); + final StatusLine line = response.getStatusLine(); + if (line.getStatusCode() != 201) { + LOG.error("Failed to push " + responseString); + return; + } + final Lesson postedLesson = new Gson().fromJson(responseString, Course.class).getLessons().get(0); + for (Task task : lesson.getTaskList()) { + postTask(project, task, postedLesson.id); + } + } + catch (IOException e) { + LOG.error(e.getMessage()); + } + } + + public static void postTask(Project project, @NotNull final Task task, int id) { + final HttpPost request = new HttpPost(stepicApiUrl + "step-sources"); + setHeaders(request, "application/json"); + final Gson gson = new GsonBuilder().setPrettyPrinting().excludeFieldsWithoutExposeAnnotation().create(); + String requestBody = gson.toJson(new StepSourceWrapper(project, task, id)); + request.setEntity(new StringEntity(requestBody, ContentType.APPLICATION_JSON)); + + try { + final CloseableHttpResponse response = ourClient.execute(request); + 
final StatusLine line = response.getStatusLine(); + if (line.getStatusCode() != 201) { + LOG.error("Failed to push " + EntityUtils.toString(response.getEntity())); + } + } + catch (IOException e) { + LOG.error(e.getMessage()); + } + } + + private static void setHeaders(@NotNull final HttpRequestBase request, String contentType) { + request.addHeader(new BasicHeader("referer", stepicUrl)); + request.addHeader(new BasicHeader("X-CSRFToken", ourCSRFToken)); + request.addHeader(new BasicHeader("content-type", contentType)); + } + + private static class StepContainer { + List steps; + } + + private static class Step { + @Expose StepOptions options; + @Expose String text; + @Expose String name = "pycharm"; + @Expose StepOptions source; + + public static Step fromTask(Project project, @NotNull final Task task) { + final Step step = new Step(); + step.text = task.getTaskText(project); + step.source = StepOptions.fromTask(project, task); + return step; + } + } + + private static class StepOptions { + @Expose List test; + @Expose String title; //HERE + @Expose List files; + @Expose String text; + + public static StepOptions fromTask(final Project project, @NotNull final Task task) { + final StepOptions source = new StepOptions(); + + final String text = task.getTestsText(project); + source.test = Collections.singletonList(new TestFileWrapper("tests.py", text)); + source.files = new ArrayList(); + source.title = task.getName(); + for (final Map.Entry entry : task.getTaskFiles().entrySet()) { + ApplicationManager.getApplication().runWriteAction(new Runnable() { + @Override + public void run() { + final VirtualFile taskDir = task.getTaskDir(project); + assert taskDir != null; + EduUtils.createStudentFileFromAnswer(project, taskDir, taskDir, entry); + } + }); + final TaskFile taskFile = entry.getValue(); + taskFile.name = entry.getKey(); + final Document document = task.getDocument(project, taskFile.name); + if (document != null) { + source.text = document.getImmutableCharSequence().toString(); + taskFile.text = document.getImmutableCharSequence().toString(); + } + source.files.add(taskFile); + } + return source; + } + } + + private static class CoursesContainer { + public List courses; + } + + static class StepSourceWrapper { + @Expose + StepSource stepSource; + + public StepSourceWrapper(Project project, Task task, int id) { + stepSource = new StepSource(project, task, id); + } + } + + static class LessonWrapper { + Lesson lesson; + + public LessonWrapper(Lesson lesson) { + this.lesson = new Lesson(); + this.lesson.setName(lesson.getName()); + } + } + + static class LessonContainer { + List lessons; + } + + static class StepSource { + @Expose Step block; + @Expose int position = 0; + @Expose int lesson = 0; + + public StepSource(Project project, Task task, int id) { + lesson = id; + position = task.getIndex(); + block = Step.fromTask(project, task); + } + } + + static class TestFileWrapper { + @Expose private final String name; + @Expose private final String text; + + public TestFileWrapper(String name, String text) { + this.name = name; + this.text = text; + } + } + + static class SectionWrapper { + static class Section { + List units; + } + + List
sections; + List lessons; + + static class Unit { + int id; + int lesson; + } + + List units; + } + + static class UnitWrapper { + static class Unit { + int lesson; + } + + List units; + } +} diff --git a/test/dataset_mining/before/FixedMembershipToken.java b/test/dataset_mining/before/FixedMembershipToken.java new file mode 100644 index 00000000..111652c8 --- /dev/null +++ b/test/dataset_mining/before/FixedMembershipToken.java @@ -0,0 +1,137 @@ +package org.jgroups.auth; + +import org.jgroups.Event; +import org.jgroups.Message; +import org.jgroups.PhysicalAddress; +import org.jgroups.annotations.Property; +import org.jgroups.util.Bits; +import org.jgroups.util.Util; + +import java.io.DataInput; +import java.io.DataOutput; +import java.util.ArrayList; +import java.util.List; +import java.util.StringTokenizer; + +/** + *
+ * The FixedMemberShipToken object predefines a list of IP addresses and ports that can join the
+ * group.
+ * <p>
+ * Configuration parameters for this example are shown below:
+ * <ul>
+ * <li>fixed_members_value (required) = List of IP addresses & ports (optionally) - ports must be
+ * seperated by a '/' e.g. 127.0.0.1/1010*127.0.0.1/4567</li>
+ * <li>fixed_members_seperator (required) = The seperator used between IP addresses - e.g. *</li>
+ * </ul>
+ * + * @author Chris Mills (millsy@jboss.com) + */ +public class FixedMembershipToken extends AuthToken { + private List memberList = null; + private String token = "emptyToken"; + + @Property + private String fixed_members_seperator = ","; + + public FixedMembershipToken() { + } + + public FixedMembershipToken(String token) { + this.token=token; + } + + public String getName() { + return "org.jgroups.auth.FixedMembershipToken"; + } + + @Property + public void setFixedMembersSeparator(String value) { + fixed_members_seperator = value; + } + + public boolean authenticate(AuthToken token, Message msg) { + if ((token != null) && (token instanceof FixedMembershipToken) && (this.memberList != null)) { + PhysicalAddress src = (PhysicalAddress) auth.down(new Event(Event.GET_PHYSICAL_ADDRESS, + msg.getSrc())); + if (src == null) { + if (log.isErrorEnabled()) + log.error("didn't find physical address for " + msg.getSrc()); + return false; + } + + String sourceAddressWithPort = src.toString(); + String sourceAddressWithoutPort = sourceAddressWithPort.substring(0, + sourceAddressWithPort.indexOf(":")); + + if (log.isDebugEnabled()) { + log.debug("AUTHToken received from " + sourceAddressWithPort); + } + + for (String member : memberList) { + if (hasPort(member)) { + if (member.equals(sourceAddressWithPort)) + return true; + } else { + if (member.equals(sourceAddressWithoutPort)) + return true; + } + } + return false; + } + + if (log.isWarnEnabled()) { + log.warn("Invalid AuthToken instance - wrong type or null"); + } + return false; + } + + private static boolean hasPort(String member) { + return member.contains(":"); + } + + @Property(name = "fixed_members_value") + public void setMemberList(String list) { + memberList = new ArrayList<>(); + StringTokenizer memberListTokenizer = new StringTokenizer(list, fixed_members_seperator); + while (memberListTokenizer.hasMoreTokens()) { + memberList.add(memberListTokenizer.nextToken().replace('/', ':')); + } + } + + /** + * Required to serialize the object to pass across the wire + * + * + * + * @param out + * @throws java.io.IOException + */ + public void writeTo(DataOutput out) throws Exception { + if (log.isDebugEnabled()) { + log.debug("SimpleToken writeTo()"); + } + Bits.writeString(this.token,out); + } + + /** + * Required to deserialize the object when read in from the wire + * + * + * + * @param in + * @throws Exception + */ + public void readFrom(DataInput in) throws Exception { + if (log.isDebugEnabled()) { + log.debug("SimpleToken readFrom()"); + } + this.token = Bits.readString(in); + } + + public int size() { + return Util.size(token); + } +} From c8228783694765a35ed0b2584435207e7be81392 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Mon, 30 Nov 2020 18:41:32 +0300 Subject: [PATCH 15/52] Add similarity range + funtion end line --- veniq/dataset_mining/code_similarity.py | 64 ++++++++++++++------- veniq/dataset_mining/create_dataset_json.py | 2 +- 2 files changed, 44 insertions(+), 22 deletions(-) diff --git a/veniq/dataset_mining/code_similarity.py b/veniq/dataset_mining/code_similarity.py index ac848e60..8e3c18ce 100644 --- a/veniq/dataset_mining/code_similarity.py +++ b/veniq/dataset_mining/code_similarity.py @@ -1,55 +1,69 @@ from collections import defaultdict -from typing import List +from typing import List, Tuple +from dataclasses import dataclass, asdict import textdistance files = [ - ('EduStepicConnector', [142, 153], [159, 171]), - ('FixedMembershipToken', [55, 88], [73, 82]) + # ('EduStepicConnector', [142, 153], [159, 171]), + # 
('FixedMembershipToken', [55, 88], [73, 82]) + ((58, 58), (65, 65), (67, 67), (73, 73)) ] def is_similar_functions( file_before: str, file_after: str, - ranges_before: List[int], - ranges_after: List[int]): + ranges_list_before: List[int], + ranges_after: Tuple[int, int], + res): d = defaultdict(set) exc = [' ', '{', '}', ''] with open(file_before) as before: - before_text = before.read().split('\n') - start_before, end_before = ranges_before - before_lines = before_text[start_before: end_before] with open(file_after) as after: + before_text = before.read().split('\n') start_after, end_after = ranges_after - after_lines = after.read().split('\n')[start_after: end_after] - - for iteration_i, i in enumerate(after_lines, start=start_before): - for iteration_j, j in enumerate(before_lines, start=start_after): - i = i.strip() - j = j.strip() - if (i != '') and (j != '') and (i not in exc) and (j not in exc): - longest_subs = textdistance.ratcliff_obershelp(i, j) - hamm = textdistance.hamming.normalized_similarity(i, j) - d[j].add((longest_subs, hamm, iteration_i, iteration_j, i)) + for ranges_before in ranges_list_before: + start_before, end_before = ranges_before + # since the beginning in array start with 0 + before_lines = before_text[start_before - 1: end_before] + # since the beginning in array start with 0 and we + # do not need the function's name which is usually on the first line + after_lines = after.read().split('\n')[start_after: end_after] + for iteration_i, i in enumerate(after_lines, start=start_before): + for iteration_j, j in enumerate(before_lines, start=start_after): + i = i.strip() + j = j.strip() + if (i != '') and (j != '') and (i not in exc) and (j not in exc): + longest_subs = textdistance.ratcliff_obershelp(i, j) + hamm = textdistance.hamming.normalized_similarity(i, j) + d[j].add((longest_subs, hamm, iteration_i, iteration_j, i)) matched_strings_before = [] - find_similar_strings(d, matched_strings_before) + find_similar_strings(d, matched_strings_before, res) lines_number_of_function_before = 0 for i in before_lines: if i.strip() not in exc: lines_number_of_function_before += 1 - ratio = len(matched_strings_before) / float(lines_number_of_function_before) + if lines_number_of_function_before == 0: + ratio = 0 + else: + ratio = len(matched_strings_before) / float(lines_number_of_function_before) + res.function_lines = lines_number_of_function_before + res.lines_matched = len(matched_strings_before) + res.matched_percent = ratio + res.matched_strings = '\n'.join(matched_strings_before) + if ratio > 0.700000000000000000000000000000001: return True return False -def find_similar_strings(d, matched_strings_before): +def find_similar_strings(d, matched_strings_before, res): for string_before, lst in d.items(): max_val = -1 max_hamm = -1 @@ -57,6 +71,14 @@ def find_similar_strings(d, matched_strings_before): if max_val < subs_val: max_val = subs_val max_hamm = hamm + if max_val > 0.7000000000000000000000000000000000000000001: if max_hamm > 0.4: matched_strings_before.append(string_before) + +# is_similar_functions( +# r'D:\minining_similarity\0\NodeWithRange_before.java', +# r'D:\minining_similarity\0\NodeWithRange_after.java', +# ((58, 58), (65, 65), (67, 67), (73, 73)), +# (67, 70) +# ) \ No newline at end of file diff --git a/veniq/dataset_mining/create_dataset_json.py b/veniq/dataset_mining/create_dataset_json.py index 139dcaf8..b1f0121b 100644 --- a/veniq/dataset_mining/create_dataset_json.py +++ b/veniq/dataset_mining/create_dataset_json.py @@ -110,7 +110,7 @@ def 
add_to_list(small_list, global_list): files = [x for x in input_dir.iterdir() if x.is_file() and x.name.endswith('out.txt')] df = pd.DataFrame(columns=list(RowResult.__annotations__.keys())) - with ProcessPool(1) as executor: + with ProcessPool(system_cores_qty) as executor: future = executor.map(find_em_items, files) result = future.result() From eb11c564b14de697ec741325c4df7b3b43c95991 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Mon, 30 Nov 2020 19:03:29 +0300 Subject: [PATCH 16/52] Fixed file --- veniq/dataset_mining/code_similarity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/veniq/dataset_mining/code_similarity.py b/veniq/dataset_mining/code_similarity.py index 8e3c18ce..90e58949 100644 --- a/veniq/dataset_mining/code_similarity.py +++ b/veniq/dataset_mining/code_similarity.py @@ -52,7 +52,7 @@ def is_similar_functions( ratio = 0 else: ratio = len(matched_strings_before) / float(lines_number_of_function_before) - res.function_lines = lines_number_of_function_before + res.lines_number = lines_number_of_function_before res.lines_matched = len(matched_strings_before) res.matched_percent = ratio res.matched_strings = '\n'.join(matched_strings_before) From bbc980a4cb360a0367a13cd68b6b51fb1082c470 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Tue, 1 Dec 2020 18:52:37 +0300 Subject: [PATCH 17/52] Fix tests --- .../after/IOUringEventLoop.java | 295 +++++++++++++++++ .../before/IOUringEventLoop.java | 307 ++++++++++++++++++ .../test_functions_for_mining.py | 45 ++- veniq/dataset_collection/augmentation.py | 10 +- veniq/dataset_mining/__init__.py | 0 veniq/dataset_mining/code_similarity.py | 36 +- veniq/dataset_mining/refMiner.py | 4 +- 7 files changed, 665 insertions(+), 32 deletions(-) create mode 100644 test/dataset_mining/after/IOUringEventLoop.java create mode 100644 test/dataset_mining/before/IOUringEventLoop.java create mode 100644 veniq/dataset_mining/__init__.py diff --git a/test/dataset_mining/after/IOUringEventLoop.java b/test/dataset_mining/after/IOUringEventLoop.java new file mode 100644 index 00000000..2ce47f81 --- /dev/null +++ b/test/dataset_mining/after/IOUringEventLoop.java @@ -0,0 +1,295 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.channel.uring; + +import io.netty.channel.EventLoopGroup; +import io.netty.channel.SingleThreadEventLoop; +import io.netty.channel.unix.FileDescriptor; +import io.netty.util.collection.IntObjectHashMap; +import io.netty.util.collection.IntObjectMap; +import io.netty.util.internal.PlatformDependent; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import java.io.IOException; +import java.util.Queue; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicLong; + +final class IOUringEventLoop extends SingleThreadEventLoop implements + IOUringCompletionQueue.IOUringCompletionQueueCallback { + private static final InternalLogger logger = InternalLoggerFactory.getInstance(IOUringEventLoop.class); + + //Todo set config ring buffer size + private final int ringSize = 32; + private final int ENOENT = -2; + + //just temporary -> Todo use ErrorsStaticallyReferencedJniMethods like in Epoll + private static final int SOCKET_ERROR_EPIPE = -32; + private static final long ETIME = -62; + static final long ECANCELED = -125; + + private final IntObjectMap channels = new IntObjectHashMap(4096); + private final RingBuffer ringBuffer; + + private static final long AWAKE = -1L; + private static final long NONE = Long.MAX_VALUE; + + // nextWakeupNanos is: + // AWAKE when EL is awake + // NONE when EL is waiting with no wakeup scheduled + // other value T when EL is waiting with wakeup scheduled at time T + private final AtomicLong nextWakeupNanos = new AtomicLong(AWAKE); + private final FileDescriptor eventfd; + + private long prevDeadlineNanos = NONE; + private boolean pendingWakeup; + private IovecArrayPool iovecArrayPool; + + IOUringEventLoop(final EventLoopGroup parent, final Executor executor, final boolean addTaskWakesUp) { + super(parent, executor, addTaskWakesUp); + + ringBuffer = Native.createRingBuffer(ringSize); + eventfd = Native.newEventFd(); + logger.trace("New EventLoop: {}", this.toString()); + iovecArrayPool = new IovecArrayPool(); + } + + @Override + protected Queue newTaskQueue(int maxPendingTasks) { + return newTaskQueue0(maxPendingTasks); + } + + private static Queue newTaskQueue0(int maxPendingTasks) { + // This event loop never calls takeTask() + return maxPendingTasks == Integer.MAX_VALUE? PlatformDependent.newMpscQueue() + : PlatformDependent.newMpscQueue(maxPendingTasks); + } + + public void add(AbstractIOUringChannel ch) { + logger.trace("Add Channel: {} ", ch.socket.intValue()); + int fd = ch.socket.intValue(); + + channels.put(fd, ch); + } + + public void remove(AbstractIOUringChannel ch) { + logger.trace("Remove Channel: {}", ch.socket.intValue()); + int fd = ch.socket.intValue(); + + AbstractIOUringChannel old = channels.remove(fd); + if (old != null && old != ch) { + // The Channel mapping was already replaced due FD reuse, put back the stored Channel. + channels.put(fd, old); + + // If we found another Channel in the map that is mapped to the same FD the given Channel MUST be closed. + assert !ch.isOpen(); + } + } + + private void closeAll() { + logger.trace("CloseAll IOUringEvenloop"); + // Using the intermediate collection to prevent ConcurrentModificationException. + // In the `close()` method, the channel is deleted from `channels` map. 
+ AbstractIOUringChannel[] localChannels = channels.values().toArray(new AbstractIOUringChannel[0]); + + for (AbstractIOUringChannel ch : localChannels) { + ch.unsafe().close(ch.unsafe().voidPromise()); + } + } + + @Override + protected void run() { + final IOUringCompletionQueue completionQueue = ringBuffer.getIoUringCompletionQueue(); + final IOUringSubmissionQueue submissionQueue = ringBuffer.getIoUringSubmissionQueue(); + + // Lets add the eventfd related events before starting to do any real work. + submissionQueue.addPollIn(eventfd.intValue()); + submissionQueue.submit(); + + for (; ; ) { + logger.trace("Run IOUringEventLoop {}", this.toString()); + long curDeadlineNanos = nextScheduledTaskDeadlineNanos(); + if (curDeadlineNanos == -1L) { + curDeadlineNanos = NONE; // nothing on the calendar + } + nextWakeupNanos.set(curDeadlineNanos); + + // Only submit a timeout if there are no tasks to process and do a blocking operation + // on the completionQueue. + if (!hasTasks()) { + try { + if (curDeadlineNanos != prevDeadlineNanos) { + prevDeadlineNanos = curDeadlineNanos; + submissionQueue.addTimeout(deadlineToDelayNanos(curDeadlineNanos)); + submissionQueue.submit(); + } + + // Check there were any completion events to process + if (completionQueue.process(this) == -1) { + // Block if there is nothing to process after this try again to call process(....) + logger.trace("ioUringWaitCqe {}", this.toString()); + completionQueue.ioUringWaitCqe(); + } + } catch (Throwable t) { + //Todo handle exception + } finally { + if (nextWakeupNanos.get() == AWAKE || nextWakeupNanos.getAndSet(AWAKE) == AWAKE) { + pendingWakeup = true; + } + } + } + + completionQueue.process(this); + + if (hasTasks()) { + runAllTasks(); + } + + try { + if (isShuttingDown()) { + closeAll(); + if (confirmShutdown()) { + break; + } + } + } catch (Throwable t) { + logger.info("Exception error: {}", t); + } + } + } + + @Override + public boolean handle(int fd, int res, long flags, int op, int pollMask) { + IOUringSubmissionQueue submissionQueue = ringBuffer.getIoUringSubmissionQueue(); + switch (op) { + case IOUring.OP_ACCEPT: + //Todo error handle the res + if (res == ECANCELED) { + logger.trace("POLL_LINK canceled"); + break; + } + // Fall-through + + case IOUring.OP_READ: + AbstractIOUringChannel readChannel = channels.get(fd); + if (readChannel == null) { + break; + } + ((AbstractIOUringChannel.AbstractUringUnsafe) readChannel.unsafe()).readComplete(res); + break; + case IOUring.OP_WRITEV: + case IOUring.OP_WRITE: + AbstractIOUringChannel writeChannel = channels.get(fd); + if (writeChannel == null) { + break; + } + ((AbstractIOUringChannel.AbstractUringUnsafe) writeChannel.unsafe()).writeComplete(res); + break; + case IOUring.IO_TIMEOUT: + if (res == ETIME) { + prevDeadlineNanos = NONE; + } + break; + + case IOUring.IO_POLL: + //Todo error handle the res + if (res == ECANCELED) { + logger.trace("POLL_LINK canceled"); + break; + } + if (eventfd.intValue() == fd) { + pendingWakeup = false; + handleEventFd(submissionQueue); + } else { + AbstractIOUringChannel channel = channels.get(fd); + if (channel == null) { + break; + } + switch (pollMask) { + case IOUring.POLLMASK_IN: + ((AbstractIOUringChannel.AbstractUringUnsafe) channel.unsafe()).pollIn(res); + break; + case IOUring.POLLMASK_OUT: + ((AbstractIOUringChannel.AbstractUringUnsafe) channel.unsafe()).pollOut(res); + break; + case IOUring.POLLMASK_RDHUP: + ((AbstractIOUringChannel.AbstractUringUnsafe) channel.unsafe()).pollRdHup(res); + break; + default: + break; + } + } + 
break; + + case IOUring.OP_POLL_REMOVE: + if (res == ENOENT) { + System.out.println(("POLL_REMOVE OPERATION not permitted")); + } else if (res == 0) { + System.out.println(("POLL_REMOVE OPERATION successful")); + } + break; + + case IOUring.OP_CONNECT: + AbstractIOUringChannel channel = channels.get(fd); + if (channel != null) { + ((AbstractIOUringChannel.AbstractUringUnsafe) channel.unsafe()).connectComplete(res); + } + break; + default: + break; + } + return true; + } + + private void handleEventFd(IOUringSubmissionQueue submissionQueue) { + // We need to consume the data as otherwise we would see another event + // in the completionQueue without + // an extra eventfd_write(....) + Native.eventFdRead(eventfd.intValue()); + + submissionQueue.addPollIn(eventfd.intValue()); + // Submit so its picked up + submissionQueue.submit(); + } + + @Override + protected void cleanup() { + try { + eventfd.close(); + } catch (IOException e) { + e.printStackTrace(); + } + ringBuffer.close(); + iovecArrayPool.release(); + } + + public RingBuffer getRingBuffer() { + return ringBuffer; + } + + @Override + protected void wakeup(boolean inEventLoop) { + if (!inEventLoop && nextWakeupNanos.getAndSet(AWAKE) != AWAKE) { + // write to the evfd which will then wake-up epoll_wait(...) + Native.eventFdWrite(eventfd.intValue(), 1L); + } + } + + public IovecArrayPool getIovecArrayPool() { + return iovecArrayPool; + } +} diff --git a/test/dataset_mining/before/IOUringEventLoop.java b/test/dataset_mining/before/IOUringEventLoop.java new file mode 100644 index 00000000..9e8d0a25 --- /dev/null +++ b/test/dataset_mining/before/IOUringEventLoop.java @@ -0,0 +1,307 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */
+package io.netty.channel.uring;
+
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.SingleThreadEventLoop;
+import io.netty.channel.unix.FileDescriptor;
+import io.netty.util.collection.IntObjectHashMap;
+import io.netty.util.collection.IntObjectMap;
+import io.netty.util.internal.PlatformDependent;
+import io.netty.util.internal.logging.InternalLogger;
+import io.netty.util.internal.logging.InternalLoggerFactory;
+
+import java.io.IOException;
+import java.util.Queue;
+import java.util.concurrent.Executor;
+import java.util.concurrent.atomic.AtomicLong;
+
+final class IOUringEventLoop extends SingleThreadEventLoop implements
+        IOUringCompletionQueue.IOUringCompletionQueueCallback {
+    private static final InternalLogger logger = InternalLoggerFactory.getInstance(IOUringEventLoop.class);
+
+    //Todo set config ring buffer size
+    private final int ringSize = 32;
+    private final int ENOENT = -2;
+
+    //just temporary -> Todo use ErrorsStaticallyReferencedJniMethods like in Epoll
+    private static final int SOCKET_ERROR_EPIPE = -32;
+    private static final long ETIME = -62;
+    static final long ECANCELED = -125;
+
+    private final IntObjectMap<AbstractIOUringChannel> channels = new IntObjectHashMap<AbstractIOUringChannel>(4096);
+    private final RingBuffer ringBuffer;
+
+    private static final long AWAKE = -1L;
+    private static final long NONE = Long.MAX_VALUE;
+
+    // nextWakeupNanos is:
+    //    AWAKE            when EL is awake
+    //    NONE             when EL is waiting with no wakeup scheduled
+    //    other value T    when EL is waiting with wakeup scheduled at time T
+    private final AtomicLong nextWakeupNanos = new AtomicLong(AWAKE);
+    private final FileDescriptor eventfd;
+
+    private long prevDeadlineNanos = NONE;
+    private boolean pendingWakeup;
+    private IovecArrayPool iovecArrayPool;
+
+    IOUringEventLoop(final EventLoopGroup parent, final Executor executor, final boolean addTaskWakesUp) {
+        super(parent, executor, addTaskWakesUp);
+
+        ringBuffer = Native.createRingBuffer(ringSize);
+        eventfd = Native.newEventFd();
+        logger.trace("New EventLoop: {}", this.toString());
+        iovecArrayPool = new IovecArrayPool();
+    }
+
+    @Override
+    protected Queue<Runnable> newTaskQueue(int maxPendingTasks) {
+        return newTaskQueue0(maxPendingTasks);
+    }
+
+    private static Queue<Runnable> newTaskQueue0(int maxPendingTasks) {
+        // This event loop never calls takeTask()
+        return maxPendingTasks == Integer.MAX_VALUE ? PlatformDependent.<Runnable>newMpscQueue()
+                : PlatformDependent.<Runnable>newMpscQueue(maxPendingTasks);
+    }
+
+    public void add(AbstractIOUringChannel ch) {
+        logger.trace("Add Channel: {} ", ch.socket.intValue());
+        int fd = ch.socket.intValue();
+
+        channels.put(fd, ch);
+    }
+
+    public void remove(AbstractIOUringChannel ch) {
+        logger.trace("Remove Channel: {}", ch.socket.intValue());
+        int fd = ch.socket.intValue();
+
+        AbstractIOUringChannel old = channels.remove(fd);
+        if (old != null && old != ch) {
+            // The Channel mapping was already replaced due to FD reuse, put back the stored Channel.
+            channels.put(fd, old);
+
+            // If we found another Channel in the map that is mapped to the same FD, the given Channel MUST be closed.
+            assert !ch.isOpen();
+        }
+    }
+
+    private void closeAll() {
+        logger.trace("CloseAll IOUringEventLoop");
+        // Using the intermediate collection to prevent ConcurrentModificationException.
+        // In the `close()` method, the channel is deleted from the `channels` map.
+ AbstractIOUringChannel[] localChannels = channels.values().toArray(new AbstractIOUringChannel[0]); + + for (AbstractIOUringChannel ch : localChannels) { + ch.unsafe().close(ch.unsafe().voidPromise()); + } + } + + @Override + protected void run() { + final IOUringCompletionQueue completionQueue = ringBuffer.getIoUringCompletionQueue(); + final IOUringSubmissionQueue submissionQueue = ringBuffer.getIoUringSubmissionQueue(); + + // Lets add the eventfd related events before starting to do any real work. + submissionQueue.addPollIn(eventfd.intValue()); + submissionQueue.submit(); + + for (; ; ) { + logger.trace("Run IOUringEventLoop {}", this.toString()); + long curDeadlineNanos = nextScheduledTaskDeadlineNanos(); + if (curDeadlineNanos == -1L) { + curDeadlineNanos = NONE; // nothing on the calendar + } + nextWakeupNanos.set(curDeadlineNanos); + + // Only submit a timeout if there are no tasks to process and do a blocking operation + // on the completionQueue. + if (!hasTasks()) { + try { + if (curDeadlineNanos != prevDeadlineNanos) { + prevDeadlineNanos = curDeadlineNanos; + submissionQueue.addTimeout(deadlineToDelayNanos(curDeadlineNanos)); + submissionQueue.submit(); + } + + // Check there were any completion events to process + if (completionQueue.process(this) == -1) { + // Block if there is nothing to process after this try again to call process(....) + logger.trace("ioUringWaitCqe {}", this.toString()); + completionQueue.ioUringWaitCqe(); + } + } catch (Throwable t) { + //Todo handle exception + } finally { + if (nextWakeupNanos.get() == AWAKE || nextWakeupNanos.getAndSet(AWAKE) == AWAKE) { + pendingWakeup = true; + } + } + } + + try { + completionQueue.process(this); + } catch (Exception e) { + //Todo handle exception + } + + if (hasTasks()) { + runAllTasks(); + } + + try { + if (isShuttingDown()) { + closeAll(); + if (confirmShutdown()) { + break; + } + } + } catch (Throwable t) { + logger.info("Exception error: {}", t); + } + } + } + + @Override + public boolean handle(int fd, int res, long flags, int op, int pollMask) { + IOUringSubmissionQueue submissionQueue = ringBuffer.getIoUringSubmissionQueue(); + switch (op) { + case IOUring.OP_ACCEPT: + //Todo error handle the res + if (res == ECANCELED) { + logger.trace("POLL_LINK canceled"); + break; + } + // Fall-through + + case IOUring.OP_READ: + AbstractIOUringChannel readChannel = channels.get(fd); + if (readChannel == null) { + break; + } + ((AbstractIOUringChannel.AbstractUringUnsafe) readChannel.unsafe()).readComplete(res); + break; + case IOUring.OP_WRITEV: + case IOUring.OP_WRITE: + AbstractIOUringChannel writeChannel = channels.get(fd); + if (writeChannel == null) { + break; + } + //localFlushAmount -> res + logger.trace("EventLoop Write Res: {}", res); + logger.trace("EventLoop Fd: {}", fd); + + if (res == SOCKET_ERROR_EPIPE) { + writeChannel.shutdownInput(false); + } else { + ((AbstractIOUringChannel.AbstractUringUnsafe) writeChannel.unsafe()).writeComplete(res); + } + break; + case IOUring.IO_TIMEOUT: + if (res == ETIME) { + prevDeadlineNanos = NONE; + } + break; + + case IOUring.IO_POLL: + //Todo error handle the res + if (res == ECANCELED) { + logger.trace("POLL_LINK canceled"); + break; + } + if (eventfd.intValue() == fd) { + pendingWakeup = false; + // We need to consume the data as otherwise we would see another event + // in the completionQueue without + // an extra eventfd_write(....) 
+ Native.eventFdRead(eventfd.intValue()); + + submissionQueue.addPollIn(eventfd.intValue()); + // Submit so its picked up + submissionQueue.submit(); + } else { + AbstractIOUringChannel channel = channels.get(fd); + if (channel == null) { + break; + } + switch (pollMask) { + case IOUring.POLLMASK_IN: + ((AbstractIOUringChannel.AbstractUringUnsafe) channel.unsafe()).pollIn(res); + break; + case IOUring.POLLMASK_OUT: + ((AbstractIOUringChannel.AbstractUringUnsafe) channel.unsafe()).pollOut(res); + break; + case IOUring.POLLMASK_RDHUP: + if (!channel.isActive()) { + channel.shutdownInput(true); + } + break; + default: + break; + } + } + break; + + case IOUring.OP_POLL_REMOVE: + if (res == ENOENT) { + System.out.println(("POLL_REMOVE OPERATION not permitted")); + } else if (res == 0) { + System.out.println(("POLL_REMOVE OPERATION successful")); + } + break; + + case IOUring.OP_CONNECT: + AbstractIOUringChannel channel = channels.get(fd); + System.out.println("Connect res: " + res); + if (channel != null) { + ((AbstractIOUringChannel.AbstractUringUnsafe) channel.unsafe()).connectComplete(res); + } + break; + + default: + break; + } + return true; + } + + @Override + protected void cleanup() { + try { + eventfd.close(); + } catch (IOException e) { + e.printStackTrace(); + } + ringBuffer.close(); + iovecArrayPool.release(); + } + + public RingBuffer getRingBuffer() { + return ringBuffer; + } + + @Override + protected void wakeup(boolean inEventLoop) { + if (!inEventLoop && nextWakeupNanos.getAndSet(AWAKE) != AWAKE) { + // write to the evfd which will then wake-up epoll_wait(...) + Native.eventFdWrite(eventfd.intValue(), 1L); + } + } + + public IovecArrayPool getIovecArrayPool() { + return iovecArrayPool; + } +} diff --git a/test/dataset_mining/test_functions_for_mining.py b/test/dataset_mining/test_functions_for_mining.py index 299b210d..faa5fea2 100644 --- a/test/dataset_mining/test_functions_for_mining.py +++ b/test/dataset_mining/test_functions_for_mining.py @@ -3,17 +3,43 @@ from veniq.dataset_mining.create_dataset_json import find_lines from veniq.dataset_mining.code_similarity import is_similar_functions +from veniq.dataset_mining.mine_examples_with_similarity import Row class TestFunctionsForMining(TestCase): current_directory = Path(__file__).absolute().parent + res = Row( + filepath_saved='', + filename='', + class_name='', + repo_url='', + function_inlined='', + function_name_with_LM='', + commit_sha_before='', + commit_sha_after='', + function_target_start_line=-1, + function_target_end_line=-1, + real_extractions=(), + lines_number=-1, + lines_matched=-1, + matched_percent=-1.0, + matched_strings='', + is_similar=False, + error='', + url='', + downloaded_after=False, + downloaded_before=False, + found_class_before_in_java_file=False, + found_class_after_in_java_file=False + ) def test_is_similar(self): is_similar = is_similar_functions( str(Path(self.current_directory, 'before/EduStepicConnector.java')), str(Path(self.current_directory, 'after/EduStepicConnector.java')), - [142, 153], - [159, 171] + [[142, 153]], + (159, 171), + self.res ) self.assertEqual(is_similar, True) @@ -22,11 +48,22 @@ def test_is_not_similar(self): is_similar = is_similar_functions( str(Path(self.current_directory, 'before/FixedMembershipToken.java')), str(Path(self.current_directory, 'after/FixedMembershipToken.java')), - [55, 88], - [73, 82] + [[55, 88]], + (73, 82), + self.res ) self.assertEqual(is_similar, False) + def test_is_similar_with_non_sequential(self): + is_similar = is_similar_functions( + 
str(Path(self.current_directory, 'before/IOUringEventLoop.java')), + str(Path(self.current_directory, 'after/IOUringEventLoop.java')), + [[231, 231], [233, 233], [235, 235]], + (258, 267), + self.res + ) + self.assertEqual(is_similar, True) + def test_find_lines_second_line_with_gap(self): d = [{"startLine": 1, "endLine": 1}, {"startLine": 3, "endLine": 5}] diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 6dc09c10..274bf4f4 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -11,6 +11,7 @@ from pathlib import Path from typing import Tuple, Dict, List, Any, Set, Optional +import javalang import pandas as pd from pebble import ProcessPool from tqdm import tqdm @@ -385,7 +386,7 @@ def find_lines_in_changed_file( return {} -def get_ast_if_possible(file_path: Path) -> Optional[AST]: +def get_ast_if_possible(file_path: Path, res=None) -> Optional[AST]: """ Processing file in order to check that its original version can be parsed @@ -393,8 +394,11 @@ def get_ast_if_possible(file_path: Path) -> Optional[AST]: ast = None try: ast = AST.build_from_javalang(build_ast(str(file_path))) - except Exception: - print(f"Processing {file_path} is aborted due to parsing") + except javalang.parser.JavaSyntaxError: + res.error = 'JavaSyntaxError' + except Exception as e: + res.error = str(e) + return ast diff --git a/veniq/dataset_mining/__init__.py b/veniq/dataset_mining/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/veniq/dataset_mining/code_similarity.py b/veniq/dataset_mining/code_similarity.py index 90e58949..e42eb88f 100644 --- a/veniq/dataset_mining/code_similarity.py +++ b/veniq/dataset_mining/code_similarity.py @@ -1,35 +1,32 @@ from collections import defaultdict from typing import List, Tuple -from dataclasses import dataclass, asdict import textdistance -files = [ - # ('EduStepicConnector', [142, 153], [159, 171]), - # ('FixedMembershipToken', [55, 88], [73, 82]) - ((58, 58), (65, 65), (67, 67), (73, 73)) -] - def is_similar_functions( file_before: str, file_after: str, - ranges_list_before: List[int], + ranges_list_before: List[List[int]], ranges_after: Tuple[int, int], res): d = defaultdict(set) + before_lines_for_all_ranges = [] exc = [' ', '{', '}', ''] with open(file_before) as before: + before_text = before.read().split('\n') with open(file_after) as after: - before_text = before.read().split('\n') + after_text = after.read().split('\n') start_after, end_after = ranges_after + # since the beginning in array start with 0 and we + # do not need the function's name which is usually on the first line + after_lines = after_text[start_after: end_after] for ranges_before in ranges_list_before: start_before, end_before = ranges_before # since the beginning in array start with 0 before_lines = before_text[start_before - 1: end_before] - # since the beginning in array start with 0 and we - # do not need the function's name which is usually on the first line - after_lines = after.read().split('\n')[start_after: end_after] + before_lines_for_all_ranges.extend(before_lines) + for iteration_i, i in enumerate(after_lines, start=start_before): for iteration_j, j in enumerate(before_lines, start=start_after): i = i.strip() @@ -39,17 +36,17 @@ def is_similar_functions( hamm = textdistance.hamming.normalized_similarity(i, j) d[j].add((longest_subs, hamm, iteration_i, iteration_j, i)) - matched_strings_before = [] + matched_strings_before: List[str] = [] find_similar_strings(d, 
matched_strings_before, res) lines_number_of_function_before = 0 - for i in before_lines: + for i in before_lines_for_all_ranges: if i.strip() not in exc: lines_number_of_function_before += 1 if lines_number_of_function_before == 0: - ratio = 0 + ratio = 0.0 else: ratio = len(matched_strings_before) / float(lines_number_of_function_before) res.lines_number = lines_number_of_function_before @@ -57,7 +54,7 @@ def is_similar_functions( res.matched_percent = ratio res.matched_strings = '\n'.join(matched_strings_before) - if ratio > 0.700000000000000000000000000000001: + if ratio > 0.699999: return True return False @@ -75,10 +72,3 @@ def find_similar_strings(d, matched_strings_before, res): if max_val > 0.7000000000000000000000000000000000000000001: if max_hamm > 0.4: matched_strings_before.append(string_before) - -# is_similar_functions( -# r'D:\minining_similarity\0\NodeWithRange_before.java', -# r'D:\minining_similarity\0\NodeWithRange_after.java', -# ((58, 58), (65, 65), (67, 67), (73, 73)), -# (67, 70) -# ) \ No newline at end of file diff --git a/veniq/dataset_mining/refMiner.py b/veniq/dataset_mining/refMiner.py index dcf66571..35b4a23a 100644 --- a/veniq/dataset_mining/refMiner.py +++ b/veniq/dataset_mining/refMiner.py @@ -52,8 +52,8 @@ def run_ref_miner(folder: str): sum_size = sum(java_files) dir_to_analyze[x] = sum_size - dir_to_analyze = OrderedDict(sorted(dir_to_analyze.items(), key=lambda x: x[1])) - dir_to_analyze = [x[0] for x in dir_to_analyze] + dir_to_analyze = OrderedDict(sorted(dir_to_analyze.items(), key=lambda x: x[1])) # type: ignore + dir_to_analyze = [x[0] for x in dir_to_analyze] # type: ignore with ProcessPool(system_cores_qty) as executor: future = executor.map(run_ref_miner, dir_to_analyze) From 3db02229e2f0404dd7f7451299f7a98bda7eeb86 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Tue, 1 Dec 2020 18:53:22 +0300 Subject: [PATCH 18/52] Add files --- veniq/dataset_mining/create_dataset_json.py | 10 +- .../mine_examples_with_similarity.py | 376 ++++++++++++++++++ 2 files changed, 381 insertions(+), 5 deletions(-) create mode 100644 veniq/dataset_mining/mine_examples_with_similarity.py diff --git a/veniq/dataset_mining/create_dataset_json.py b/veniq/dataset_mining/create_dataset_json.py index b1f0121b..de0276e5 100644 --- a/veniq/dataset_mining/create_dataset_json.py +++ b/veniq/dataset_mining/create_dataset_json.py @@ -38,7 +38,7 @@ def find_em_items(file: Path): filename='', repository=x['repository'], sha1=x['sha1'], - lines=tuple(), + lines=tuple(), # type: ignore description=ref.get('description'), url=x['url'] ) @@ -47,7 +47,7 @@ def find_em_items(file: Path): if x.get('codeElementType') != "METHOD_DECLARATION" ] lines_list_of_lists = find_lines(ref_items) - res.lines = lines_list_of_lists + res.lines = lines_list_of_lists # type: ignore if ref_items: res.filename = ref_items[0]['filePath'] results.append(res) @@ -55,7 +55,7 @@ def find_em_items(file: Path): return results -def find_lines(ref_items: List[Dict[any, any]]) -> Tuple[Tuple[Any, ...], ...]: +def find_lines(ref_items: List[Dict[str, any]]) -> Tuple[Tuple[Any, ...], ...]: # type: ignore def add_to_list(small_list, global_list): range_extraction = tuple([small_list[0], small_list[-1]]) @@ -63,11 +63,11 @@ def add_to_list(small_list, global_list): lines = SortedSet() for ref_block in ref_items: - for j in range(ref_block['startLine'], ref_block['endLine'] + 1): + for j in range(ref_block['startLine'], ref_block['endLine'] + 1): # type: ignore lines.add(j) prev = lines[0] cur_list = [prev] - 
lines_list_of_lists = [] + lines_list_of_lists: List[Tuple[int, int]] = [] for x in lines[1:]: diff = x - prev prev = x diff --git a/veniq/dataset_mining/mine_examples_with_similarity.py b/veniq/dataset_mining/mine_examples_with_similarity.py new file mode 100644 index 00000000..d71cb383 --- /dev/null +++ b/veniq/dataset_mining/mine_examples_with_similarity.py @@ -0,0 +1,376 @@ +import json +import os +import random +import traceback +from argparse import ArgumentParser +from ast import literal_eval +from dataclasses import dataclass, asdict +from functools import partial +from multiprocessing import Manager +from pathlib import Path +from typing import Tuple, Optional + +import pandas as pd +from pebble import ProcessPool +from requests.auth import HTTPBasicAuth +from requests import Session +from tqdm import tqdm + +from veniq.ast_framework import ASTNodeType +# f77a2ddf76ca1e95be78c3808278b5a3cf7871d0 +from veniq.dataset_collection.augmentation import get_ast_if_possible, method_body_lines +from veniq.dataset_mining.code_similarity import is_similar_functions + + +@dataclass +class Row: + filename: str + filepath_saved: str + class_name: str + function_inlined: str + function_name_with_LM: str + # hamming: float + # ratcliff_obershelp: float + function_target_start_line: int + function_target_end_line: int + real_extractions: Optional[Tuple] + commit_sha_before: str + commit_sha_after: str + lines_number: int + lines_matched: int + matched_percent: float + matched_strings: Optional[str] + is_similar: bool + repo_url: str + downloaded_after: bool + downloaded_before: bool + found_class_before_in_java_file: bool + found_class_after_in_java_file: bool + error: str + url: str + + +def get_previous_commit( + sha: str, + file_path: str, + repo: str, + row_res: Row, + session: Session, + auth): + commit_sha_before = '' + url_with_params = f'https://api.github.com/repos/{repo}/commits?path={file_path}&sha={sha}' + # time.sleep(10) + + resp = session.get(url_with_params, auth=auth) + if resp.status_code == 200: + try: + resp_json = resp.json() + if len(resp_json) < 2: + row_res.error = 'No previous commit found. 
Files was moved' + else: + previous_commit_item = resp_json[1] + commit_sha_before = previous_commit_item['sha'] + + except Exception as e: + print(f'cannot get previous commit {str(e)}') + row_res.error = str(e) + else: + row_res.error = f'{str(resp.status_code)}: {str(resp.content)}' + + return commit_sha_before + + +def download_file( + repo_name: str, + commit_sha: str, + filename_raw: Path, + output_dir_for_saved_file: Path, + auth, + session: Session, + file_prefix: str, + row_res: Row): + commit_url = f'https://api.github.com/repos/{repo_name}/commits/{commit_sha}' + + resp = session.get(commit_url, auth=auth) + resp_json = resp.json() + files = resp_json.get('files', []) + if not files: + row_res.error = f'{resp.status_code}: {resp.content}' # type: ignore + + found_files_in_commit = find_filename(Path(filename_raw), files) + if not found_files_in_commit: + row_res.error = f'File{file_prefix} was not found in commit' + + file_name_for_csv = '' + for file_item in found_files_in_commit: + url = file_item['raw_url'] + file_content = session.get(url, auth=auth).content + file_name_for_csv = Path( + output_dir_for_saved_file, + Path(file_item['filename']).stem + file_prefix + '.java') # type:ignore + + with open(file_name_for_csv, 'wb') as w: + w.write(file_content) + + return file_name_for_csv + + +def sample_from_dict(d, sample=1): + keys = random.sample(list(d), sample) + values = [d[k] for k in keys] + return keys[0], values[0] + + +def handle_commit_example(sample, tokens, output_dir, classes_dict): + example_id, series = sample + results = [] + user, passwd = sample_from_dict(tokens) + repository_url = series['repository'] + commit_sha_after = series['sha1'] + repo_part_1 = Path(repository_url).parts[2:-1] + repo_part_2 = Path(repository_url).parts[-1].replace('.git', '') + repo_name = Path(*repo_part_1, repo_part_2).as_posix() + description = series.get('description') + filename_in_commit = series.get('filename') + filename_raw = Path(description.split('in class')[1].replace('.', '/').strip()) + class_name = filename_raw.stem + classes = classes_dict.get(commit_sha_after, set()) + if class_name not in classes: + add_to_dict_set(commit_sha_after, class_name, classes_dict) + unique_directory = output_dir / str(example_id) + if not unique_directory.exists(): + unique_directory.mkdir(parents=True) + + auth = HTTPBasicAuth(user, passwd) + s = Session() + res = Row( + filepath_saved='', + filename=filename_in_commit, + class_name=class_name, + repo_url=repository_url, + function_inlined='', + function_name_with_LM='', + commit_sha_before='', + commit_sha_after=commit_sha_after, + # hamming=-1, + # ratcliff_obershelp=-1, + function_target_start_line=-1, + function_target_end_line=-1, + real_extractions=(), + lines_number=-1, + lines_matched=-1, + matched_percent=-1.0, + matched_strings='', + is_similar=False, + error='', + url=series['url'], + downloaded_after=False, + downloaded_before=False, + found_class_before_in_java_file=False, + found_class_after_in_java_file=False + ) + file_after = download_file( + repo_name, commit_sha_after, filename_in_commit, + unique_directory, auth, s, '_after', res) + commit_sha_before = get_previous_commit( + commit_sha_after, filename_in_commit, repo_name, + res, s, auth + ) + if file_after: + res.downloaded_after = True + res.filepath_saved = file_after + if commit_sha_before: + res.commit_sha_before = commit_sha_before + file_before = download_file( + repo_name, + commit_sha_before, + filename_in_commit, + unique_directory, + auth, + s, + '_before', 
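+                # note: `res` (the shared Row) accumulates per-example diagnostics:
+                # download_file records HTTP/commit errors on it, and the later
+                # similarity check fills in the matched-line statistics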
+ res + ) + if file_before: + res.downloaded_before = True + run_similarity(class_name, description, file_after, file_before, res, series) + else: + res.error = 'File before was not downloaded' + else: + res.error = 'File after was not downloaded' + + results.append(res) + return results + + +def run_similarity(class_name, description, file_after, file_before, res, series): + list_of_lines_before = series['lines'] + res.real_extractions = list_of_lines_before + function_name_inlined = get_function_name_from_description(description, 'Extract Method') + res.function_inlined = function_name_inlined + function_name_with_LM = get_function_name_from_description(description, 'extracted from') + function_name_with_LM = function_name_with_LM.split('(')[0] + res.function_name_with_LM = function_name_with_LM + method_node_before = find_function_by_name_in_ast(file_before, class_name, function_name_with_LM, res) + if method_node_before: + res.found_class_before_in_java_file = True + method_node_after = find_function_by_name_in_ast(file_after, class_name, function_name_inlined, res) + + if method_node_after: + res.found_class_after_in_java_file = True + if method_node_before and method_node_after: + lines_after = method_body_lines(method_node_after, file_after) + lines_before = method_body_lines(method_node_before, file_before) + res.function_target_start_line = lines_before[0] + res.function_target_end_line = lines_before[1] + is_similar = is_similar_functions( + file_before, + file_after, + list_of_lines_before, + lines_after, + res + ) + res.is_similar = is_similar + + +def get_function_name_from_description(description, split_by): + function_string_in_after_file = ' '.join(description.split(split_by)[1].strip().split(' ')[1:]) + function_string_in_after_file = function_string_in_after_file.split(':')[0].strip() + function_name = function_string_in_after_file.split('(')[0] + return function_name + + +def find_function_by_name_in_ast(filename, class_name, function_name, res): + ast = get_ast_if_possible(filename, res) + + if ast: + classes_ast = [ + ast.get_subtree(x) for x in ast.get_proxy_nodes( + ASTNodeType.CLASS_DECLARATION, ASTNodeType.INTERFACE_DECLARATION, ASTNodeType.ENUM_DECLARATION) + if x.name == class_name] + + if classes_ast: + class_ast = classes_ast[0] + for method_node in class_ast.get_proxy_nodes( + ASTNodeType.METHOD_DECLARATION, ASTNodeType.CONSTRUCTOR_DECLARATION): + is_name_equal = method_node.name == function_name + if is_name_equal: + return method_node + + return None + + +def search_filenames_in_commit(filename_raw: Path, files): + """ + Searches filename in commit. 
If it is not found it tries + to find a subclass + :param filename_raw: file path of file in a commit + :param files: list of all items of a commit, given by github API + :return: list of found files + """ + filename_to_search = Path(*filename_raw.parts[:-1], filename_raw.parts[-1] + '.java') + files_after_arr = find_filename(filename_to_search, files) + if not files_after_arr: + # finds subclass + filename_to_search = Path(*filename_raw.parts[:-2], filename_raw.parts[-2] + '.java') + files_after_arr = find_filename(filename_to_search, files) + if not files_after_arr: + # finds subclass of subclass + filename_to_search = Path(*filename_raw.parts[:-3], filename_raw.parts[-3] + '.java') + files_after_arr = find_filename(filename_to_search, files) + return files_after_arr + + +def find_filename(filename_raw: Path, files): + return [x for x in files if x['filename'].find(filename_raw.as_posix()) > -1] + + +def add_to_dict_set(key, val, multi_dict): + if key not in multi_dict: + multi_dict[key] = set() + temp_set = multi_dict[key] + temp_set.add(val) + multi_dict[key] = temp_set + + +def filter_refactorings_by_em(dataset_samples): + handled_samples = [] + for sample in dataset_samples: + for x in sample['refactorings']: + if x['type'] == 'Extract Method': + new_item = sample.copy() + new_item['refactorings'] = x + handled_samples.append(new_item) + + return handled_samples + + +if __name__ == '__main__': + system_cores_qty = os.cpu_count() or 1 + parser = ArgumentParser() + parser.add_argument( + "-o", "--output_dir", + help="Path where commits with files will be saved", + required=True + ) + parser.add_argument( + "-t", "--token_file", + help="Json file with tokens for github API", + required=True + ) + parser.add_argument( + "--jobs", + "-j", + type=int, + default=system_cores_qty - 1, + help="Number of processes to spawn. " + "By default one less than number of cores. 
" + "Be careful to raise it above, machine may stop responding while creating dataset.", + ) + parser.add_argument( + "--csv", + help="csv with dataset", + required=True, + type=str, + ) + parser.add_argument( + "-of", "--output_file", + help="Output file for dataset json", + default='similarity.csv', + required=True + ) + args = parser.parse_args() + output_dir = Path(args.output_dir) + + if not output_dir.exists(): + output_dir.mkdir(parents=True) + + output_df = pd.DataFrame(columns=list(Row.__annotations__.keys())) + with open(args.token_file, 'r', encoding='utf-8') as f: + tokens = json.loads(f.read()) + + with open(args.csv, encoding='utf-8') as f: + with Manager() as manager: + d = manager.dict() # type: ignore + with ProcessPool(system_cores_qty) as executor: + dataset_samples = pd.read_csv(args.csv) + dataset_samples['lines'] = dataset_samples['lines'].apply(literal_eval) + func = partial(handle_commit_example, output_dir=output_dir, tokens=tokens, classes_dict=d) + future = executor.map(func, dataset_samples.iterrows(), timeout=10000, ) + result = future.result() + rows = dataset_samples.iterrows() + for sample in tqdm(rows, total=dataset_samples.shape[0]): + try: + results = next(result) + for res in results: + output_df = output_df.append(asdict(res), ignore_index=True) + output_df.to_csv(args.output_file) + except Exception: + sha1 = sample[1]['sha1'] + id = sample[1][0] + stack = traceback.format_exc() + print(f'Error for {id} {sha1} {stack}') + continue + + # output_df = output_df.drop_duplicates() + # output_df.to_csv('new_urls.csv') From 7ac92a065a68cbed0162957052aec8f9f33d2d28 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Tue, 1 Dec 2020 18:56:24 +0300 Subject: [PATCH 19/52] Add requirements --- requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e5003f5e..dc627e5e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,4 +14,5 @@ bs4==0.0.1 pebble==4.5.3 pandas==1.1.2 sortedcontainers==2.3.0 -textdistance==4.2.0 \ No newline at end of file +textdistance==4.2.0 +requests==2.25.0 \ No newline at end of file From a176986aa0b79c81467b190952b483e24b4811bf Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 10 Dec 2020 14:33:20 +0300 Subject: [PATCH 20/52] Fix bug --- veniq/dataset_collection/augmentation.py | 83 +++++++++++++++--------- 1 file changed, 51 insertions(+), 32 deletions(-) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index fd42df4b..207e4b9d 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -11,6 +11,7 @@ from pathlib import Path from typing import Tuple, Dict, List, Any, Set, Optional +import javalang import pandas as pd from pebble import ProcessPool from tqdm import tqdm @@ -278,7 +279,8 @@ def insert_code_with_new_file_creation( invocation_node: ASTNode, file_path: Path, output_path: Path, - dict_original_invocations: Dict[str, List[ASTNode]] + dict_original_invocations: Dict[str, List[ASTNode]], + source_filepath: str ) -> Dict[str, Any]: """ If invocations of class methods were found, @@ -310,6 +312,7 @@ def insert_code_with_new_file_creation( algorithm_for_inlining = AlgorithmFactory().create_obj(algorithm_type) if algorithm_type != InlineTypesAlgorithms.DO_NOTHING: line_to_csv = { + 'project': source_filepath, 'input_filename': file_path, 'class_name': class_name, 'invocation_text_string': text_lines[invocation_node.line - 1].lstrip(), @@ -365,13 +368,17 @@ def 
find_lines_in_changed_file( class_node_of_changed_file = [ x for x in changed_ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) if x.name == class_name][0] + if class_node_of_changed_file.name == 'PainlessParser' and method_node.name == 'rstatement': + print(1) class_subtree = changed_ast.get_subtree(class_node_of_changed_file) - node = [x for x in class_subtree.get_proxy_nodes( - ASTNodeType.METHOD_DECLARATION, - ASTNodeType.CONSTRUCTOR_DECLARATION) - if x.name == method_node.name][0] # type: ignore - original_func_changed = [x for x in class_subtree.get_proxy_nodes( - ASTNodeType.METHOD_DECLARATION) if x.name == original_func.name][0] + methods_and_constructors = \ + list(class_node_of_changed_file.methods) + list(class_subtree.get_proxy_nodes( + ASTNodeType.CONSTRUCTOR_DECLARATION)) + node = [x for x in methods_and_constructors + if x.name == method_node.name][0] # type: ignore + original_func_changed = [ + x for x in class_node_of_changed_file.methods + if x.name == original_func.name][0] body_start_line, body_end_line = method_body_lines(original_func_changed, new_full_filename) return { @@ -383,7 +390,7 @@ def find_lines_in_changed_file( return {} -def get_ast_if_possible(file_path: Path) -> Optional[AST]: +def get_ast_if_possible(file_path: Path, res=None) -> Optional[AST]: """ Processing file in order to check that its original version can be parsed @@ -391,8 +398,13 @@ def get_ast_if_possible(file_path: Path) -> Optional[AST]: ast = None try: ast = AST.build_from_javalang(build_ast(str(file_path))) - except Exception: - print(f"Processing {file_path} is aborted due to parsing") + except javalang.parser.JavaSyntaxError: + if res: + res.error = 'JavaSyntaxError' + except Exception as e: + if res: + res.error = str(e) + return ast @@ -449,26 +461,30 @@ def analyze_file( methods_list = list(class_declaration.methods) + list(class_declaration.constructors) for method_node in methods_list: - method_decl = ast.get_subtree(method_node) - for method_invoked in method_decl.get_proxy_nodes( - ASTNodeType.METHOD_INVOCATION): - found_method_decl = method_declarations.get(method_invoked.member, []) - # ignore overloaded functions - if len(found_method_decl) == 1: - try: - make_insertion( - ast, - class_declaration, - dst_filename, - found_method_decl, - method_declarations, - method_invoked, - method_node, - output_path, - results - ) - except Exception as e: - print('Error has happened during file analyze: ' + str(e)) + # Ignore overloaded target methods + found_methods_decl = method_declarations.get(method_node.name, []) + if len(found_methods_decl) == 1: + method_decl = ast.get_subtree(method_node) + for method_invoked in method_decl.get_proxy_nodes( + ASTNodeType.METHOD_INVOCATION): + found_method_decl = method_declarations.get(method_invoked.member, []) + # ignore overloaded extracted functions + if len(found_method_decl) == 1: + try: + make_insertion( + ast, + class_declaration, + dst_filename, + found_method_decl, + method_declarations, + method_invoked, + method_node, + output_path, + file_path, + results + ) + except Exception as e: + print('Error has happened during file analyze: ' + str(e)) if not results: dst_filename.unlink() @@ -477,7 +493,7 @@ def analyze_file( def make_insertion(ast, class_declaration, dst_filename, found_method_decl, method_declarations, method_invoked, - method_node, output_path, results): + method_node, output_path, source_filepath, results): is_matched = is_match_to_the_conditions( ast, method_invoked, @@ -491,7 +507,8 @@ def make_insertion(ast, 
class_declaration, dst_filename, found_method_decl, meth method_invoked, dst_filename, output_path, - method_declarations) + method_declarations, + source_filepath) if log_of_inline: # change source filename, since it will be changed log_of_inline['input_filename'] = str(dst_filename.as_posix()) @@ -579,6 +596,7 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: df = pd.DataFrame( columns=[ + 'project', 'input_filename', 'class_name', 'invocation_text_string', @@ -612,6 +630,7 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: for i in single_file_features: # get local path for inlined filename i['output_filename'] = i['output_filename'].relative_to(os.getcwd()).as_posix() + print(i['output_filename'], filename) i['invocation_text_string'] = str(i['invocation_text_string']).encode('utf8') df = df.append(i, ignore_index=True) From 815525a49043c79de3becf5be8b80560fdd6444a Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 10 Dec 2020 14:38:34 +0300 Subject: [PATCH 21/52] Fix print --- veniq/dataset_collection/augmentation.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 207e4b9d..8cc44123 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -368,8 +368,6 @@ def find_lines_in_changed_file( class_node_of_changed_file = [ x for x in changed_ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) if x.name == class_name][0] - if class_node_of_changed_file.name == 'PainlessParser' and method_node.name == 'rstatement': - print(1) class_subtree = changed_ast.get_subtree(class_node_of_changed_file) methods_and_constructors = \ list(class_node_of_changed_file.methods) + list(class_subtree.get_proxy_nodes( From 25242c0492af9113aac632d0a35b0e8e942b09c1 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Mon, 14 Dec 2020 12:56:44 +0300 Subject: [PATCH 22/52] Add tests --- .../test_dataset_collection.py | 26 ++++++++++++++++++- veniq/dataset_collection/augmentation.py | 7 +++-- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/test/dataset_collection/test_dataset_collection.py b/test/dataset_collection/test_dataset_collection.py index 622fabfd..e4ee2f17 100644 --- a/test/dataset_collection/test_dataset_collection.py +++ b/test/dataset_collection/test_dataset_collection.py @@ -5,7 +5,8 @@ from veniq.dataset_collection.augmentation import ( determine_algorithm_insertion_type, method_body_lines, - is_match_to_the_conditions) + is_match_to_the_conditions, + find_lines_in_changed_file) from veniq.ast_framework import AST, ASTNodeType from veniq.dataset_collection.types_identifier import ( InlineTypesAlgorithms, @@ -377,3 +378,26 @@ def test_start_end_inline_without_args(self): self.temp_filename) self.assertEqual([30, 34], pred_inline_rel_bounds, msg='Wrong inline bounds: {}'.format(pred_inline_rel_bounds)) + + def test_check(self): + old_filename = self.current_directory / 'InlineExamples/PainlessParser.java' + new_filename = self.current_directory / 'InlineTestExamples/PainlessParser.java' + ast = AST.build_from_javalang(build_ast(old_filename)) + class_decl = [ + x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) + if x.name == 'PainlessParser'][0] + inlined_function_declaration = [ + x for x in class_decl.methods + if x.name == 'trailer'][0] + target_function = [ + x for x in class_decl.methods + if x.name == 'rstatement'][0] + result = find_lines_in_changed_file( + 
new_full_filename=new_filename, + method_node=target_function, + original_func=inlined_function_declaration, + class_name='PainlessParser') + + self.assertEqual(result['invocation_method_start_line'], 1022) + self.assertEqual(result['invocation_method_end_line'], 1083) + self.assertEqual(result['start_line_of_function_where_invocation_occurred'], 544) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 8cc44123..c14f2985 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -370,8 +370,9 @@ def find_lines_in_changed_file( if x.name == class_name][0] class_subtree = changed_ast.get_subtree(class_node_of_changed_file) methods_and_constructors = \ - list(class_node_of_changed_file.methods) + list(class_subtree.get_proxy_nodes( - ASTNodeType.CONSTRUCTOR_DECLARATION)) + list(class_node_of_changed_file.methods) + list( + class_subtree.get_proxy_nodes( + ASTNodeType.CONSTRUCTOR_DECLARATION)) node = [x for x in methods_and_constructors if x.name == method_node.name][0] # type: ignore original_func_changed = [ @@ -420,6 +421,7 @@ def _replacer(match): return "" else: # otherwise, we will return the 1st group return match.group(1) # captured quoted-string + return regex.sub(_replacer, string) @@ -520,6 +522,7 @@ def collect_info_about_functions_without_params( if not method.parameters: method_declarations[method.name].append(method) + # def save_input_file(input_dir: Path, filename: Path) -> Path: # # need to avoid situation when filenames are the same # hash_path = hashlib.sha256(str(filename.parent).encode('utf-8')).hexdigest() From 82464ef94847545aad7d3ae455903b054e1ab746 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Mon, 14 Dec 2020 13:31:24 +0300 Subject: [PATCH 23/52] Fix bug and tests --- Makefile | 1 + .../InlineExamples/PainlessParser.java | 3832 +++++++++++++++++ .../InlineTestExamples/PainlessParser.java | 3832 +++++++++++++++++ .../test_dataset_collection.py | 5 - test/integration/dataset_collection.py | 21 +- test/integration/results_predefined.csv | 16 +- veniq/dataset_collection/augmentation.py | 60 +- 7 files changed, 7716 insertions(+), 51 deletions(-) create mode 100644 test/dataset_collection/InlineExamples/PainlessParser.java create mode 100644 test/dataset_collection/InlineTestExamples/PainlessParser.java diff --git a/Makefile b/Makefile index 523311b4..044d249a 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,7 @@ requirements: unittest: python3 -m unittest discover + python3 -m unittest test/integration/dataset_collection.py install: python3 -m pip install . 
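For reference, a minimal usage sketch of the mining helpers as they stand after this series. It is illustrative only, not part of any patch: the names, paths, line ranges and the all-default placeholder Row are taken directly from the TestFunctionsForMining cases above, and is_similar_functions fills the Row's lines_number, lines_matched, matched_percent and matched_strings fields as side effects:

    from pathlib import Path

    from veniq.dataset_mining.code_similarity import is_similar_functions
    from veniq.dataset_mining.mine_examples_with_similarity import Row

    # All-default Row, used purely as a diagnostics record here.
    res = Row(
        filepath_saved='', filename='', class_name='', repo_url='',
        function_inlined='', function_name_with_LM='',
        commit_sha_before='', commit_sha_after='',
        function_target_start_line=-1, function_target_end_line=-1,
        real_extractions=(), lines_number=-1, lines_matched=-1,
        matched_percent=-1.0, matched_strings='', is_similar=False,
        error='', url='', downloaded_after=False, downloaded_before=False,
        found_class_before_in_java_file=False,
        found_class_after_in_java_file=False)

    base = Path('test/dataset_mining')
    matched = is_similar_functions(
        str(base / 'before/IOUringEventLoop.java'),
        str(base / 'after/IOUringEventLoop.java'),
        [[231, 231], [233, 233], [235, 235]],  # disjoint extracted lines (before file)
        (258, 267),                            # extracted method body (after file)
        res)
    # matched is True once the ratio of matched lines exceeds roughly 0.7
    # (the `ratio > 0.699999` threshold in code_similarity.py)

Likewise, find_lines from create_dataset_json.py groups RefactoringMiner's per-block line annotations into contiguous (start, end) ranges, splitting wherever a line is skipped; the gap test above exercises exactly this input:

    from veniq.dataset_mining.create_dataset_json import find_lines

    blocks = [{"startLine": 1, "endLine": 1}, {"startLine": 3, "endLine": 5}]
    # Line 2 is not covered, so the covered lines {1, 3, 4, 5} are expected
    # to split into two ranges: ((1, 1), (3, 5)).
    print(find_lines(blocks))
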
diff --git a/test/dataset_collection/InlineExamples/PainlessParser.java b/test/dataset_collection/InlineExamples/PainlessParser.java new file mode 100644 index 00000000..98b2b13b --- /dev/null +++ b/test/dataset_collection/InlineExamples/PainlessParser.java @@ -0,0 +1,3832 @@ +package org.elasticsearch.painless.antlr; +import org.antlr.v4.runtime.FailedPredicateException; +import org.antlr.v4.runtime.NoViableAltException; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.RecognitionException; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.RuntimeMetaData; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ParserATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.tree.ParseTreeVisitor; +import org.antlr.v4.runtime.tree.TerminalNode; +import java.util.List; +@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) +class PainlessParser extends Parser { + static { RuntimeMetaData.checkVersion("4.5.3", RuntimeMetaData.VERSION); } + protected static final DFA[] _decisionToDFA; + protected static final PredictionContextCache _sharedContextCache = + new PredictionContextCache(); + public static final int + WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, + NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, + FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, + THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, + ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, + EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, + COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, + DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, + AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, + DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, + ID=82, DOTINTEGER=83, DOTID=84; + public static final int + RULE_source = 0, RULE_function = 1, RULE_parameters = 2, RULE_statement = 3, + RULE_rstatement = 4, RULE_dstatement = 5, RULE_trailer = 6, RULE_block = 7, + RULE_empty = 8, RULE_initializer = 9, RULE_afterthought = 10, RULE_declaration = 11, + RULE_decltype = 12, RULE_declvar = 13, RULE_trap = 14, RULE_expression = 15, + RULE_unary = 16, RULE_chain = 17, RULE_primary = 18, RULE_postfix = 19, + RULE_postdot = 20, RULE_callinvoke = 21, RULE_fieldaccess = 22, RULE_braceaccess = 23, + RULE_arrayinitializer = 24, RULE_listinitializer = 25, RULE_mapinitializer = 26, + RULE_maptoken = 27, RULE_arguments = 28, RULE_argument = 29, RULE_lambda = 30, + RULE_lamtype = 31, RULE_funcref = 32; + public static final String[] ruleNames = { + "source", "function", "parameters", "statement", "rstatement", "dstatement", + "trailer", "block", "empty", "initializer", "afterthought", "declaration", + "decltype", "declvar", "trap", "expression", "unary", "chain", "primary", + "postfix", "postdot", "callinvoke", "fieldaccess", "braceaccess", "arrayinitializer", + "listinitializer", "mapinitializer", "maptoken", "arguments", "argument", + "lambda", "lamtype", "funcref" + }; + private static final String[] _LITERAL_NAMES = { + null, null, 
null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'", + "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'", + "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'", + "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'", + "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='", + "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'", + "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='", + "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null, + null, null, null, null, null, "'true'", "'false'", "'null'" + }; + private static final String[] _SYMBOLIC_NAMES = { + null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP", + "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO", + "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW", + "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD", + "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE", + "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS", + "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB", + "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH", + "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE", + "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID" + }; + public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES); + @Deprecated + public static final String[] tokenNames; + static { + tokenNames = new String[_SYMBOLIC_NAMES.length]; + for (int i = 0; i < tokenNames.length; i++) { + tokenNames[i] = VOCABULARY.getLiteralName(i); + if (tokenNames[i] == null) { + tokenNames[i] = VOCABULARY.getSymbolicName(i); + } + if (tokenNames[i] == null) { + tokenNames[i] = ""; + } + } + } + @Override + @Deprecated + public String[] getTokenNames() { + return tokenNames; + } + @Override + public Vocabulary getVocabulary() { + return VOCABULARY; + } + @Override + public String getGrammarFileName() { return "PainlessParser.g4"; } + @Override + public String[] getRuleNames() { return ruleNames; } + @Override + public String getSerializedATN() { return _serializedATN; } + @Override + public ATN getATN() { return _ATN; } + public PainlessParser(TokenStream input) { + super(input); + _interp = new ParserATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache); + } + public static class SourceContext extends ParserRuleContext { + public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); } + public List function() { + return getRuleContexts(FunctionContext.class); + } + public FunctionContext function(int i) { + return getRuleContext(FunctionContext.class,i); + } + public List statement() { + return getRuleContexts(StatementContext.class); + } + public StatementContext statement(int i) { + return getRuleContext(StatementContext.class,i); + } + public SourceContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_source; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitSource(this); + else return visitor.visitChildren(this); + } + } + public final SourceContext source() throws RecognitionException { + SourceContext _localctx = new SourceContext(_ctx, getState()); + enterRule(_localctx, 0, RULE_source); + int 
_la;
+    try {
+      int _alt;
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(69);
+      _errHandler.sync(this);
+      _alt = getInterpreter().adaptivePredict(_input,0,_ctx);
+      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
+        if ( _alt==1 ) {
+          {
+          {
+          setState(66);
+          function();
+          }
+          }
+        }
+        setState(71);
+        _errHandler.sync(this);
+        _alt = getInterpreter().adaptivePredict(_input,0,_ctx);
+      }
+      setState(75);
+      _errHandler.sync(this);
+      _la = _input.LA(1);
+      while ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << IF) | (1L << WHILE) | (1L << DO) | (1L << FOR) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) {
+        {
+        {
+        setState(72);
+        statement();
+        }
+        }
+        setState(77);
+        _errHandler.sync(this);
+        _la = _input.LA(1);
+      }
+      setState(78);
+      match(EOF);
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class FunctionContext extends ParserRuleContext {
+    public DecltypeContext decltype() {
+      return getRuleContext(DecltypeContext.class,0);
+    }
+    public TerminalNode ID() { return getToken(PainlessParser.ID, 0); }
+    public ParametersContext parameters() {
+      return getRuleContext(ParametersContext.class,0);
+    }
+    public BlockContext block() {
+      return getRuleContext(BlockContext.class,0);
+    }
+    public FunctionContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_function; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitFunction(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final FunctionContext function() throws RecognitionException {
+    FunctionContext _localctx = new FunctionContext(_ctx, getState());
+    enterRule(_localctx, 2, RULE_function);
+    try {
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(80);
+      decltype();
+      setState(81);
+      match(ID);
+      setState(82);
+      parameters();
+      setState(83);
+      block();
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class ParametersContext extends ParserRuleContext {
+    public TerminalNode LP() { return getToken(PainlessParser.LP, 0); }
+    public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }
+    public List<DecltypeContext> decltype() {
+      return getRuleContexts(DecltypeContext.class);
+    }
+    public DecltypeContext decltype(int i) {
+      return getRuleContext(DecltypeContext.class,i);
+    }
+    public List<TerminalNode> ID() { return getTokens(PainlessParser.ID); }
+    public TerminalNode ID(int i) {
+      return getToken(PainlessParser.ID, i);
+    }
+    public List<TerminalNode> COMMA() { return getTokens(PainlessParser.COMMA); }
+    public TerminalNode COMMA(int i) {
+      return getToken(PainlessParser.COMMA, i);
+    }
+    public ParametersContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_parameters; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitParameters(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final ParametersContext parameters() throws RecognitionException {
+    ParametersContext _localctx = new ParametersContext(_ctx, getState());
+    enterRule(_localctx, 4, RULE_parameters);
+    int _la;
+    try {
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(85);
+      match(LP);
+      setState(97);
+      _la = _input.LA(1);
+      if (_la==TYPE) {
+        {
+        setState(86);
+        decltype();
+        setState(87);
+        match(ID);
+        setState(94);
+        _errHandler.sync(this);
+        _la = _input.LA(1);
+        while (_la==COMMA) {
+          {
+          {
+          setState(88);
+          match(COMMA);
+          setState(89);
+          decltype();
+          setState(90);
+          match(ID);
+          }
+          }
+          setState(96);
+          _errHandler.sync(this);
+          _la = _input.LA(1);
+        }
+        }
+      }
+      setState(99);
+      match(RP);
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class StatementContext extends ParserRuleContext {
+    public RstatementContext rstatement() {
+      return getRuleContext(RstatementContext.class,0);
+    }
+    public DstatementContext dstatement() {
+      return getRuleContext(DstatementContext.class,0);
+    }
+    public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); }
+    public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); }
+    public StatementContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_statement; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitStatement(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final StatementContext statement() throws RecognitionException {
+    StatementContext _localctx = new StatementContext(_ctx, getState());
+    enterRule(_localctx, 6, RULE_statement);
+    int _la;
+    try {
+      setState(105);
+      switch (_input.LA(1)) {
+      case IF:
+      case WHILE:
+      case FOR:
+      case TRY:
+        enterOuterAlt(_localctx, 1);
+        {
+        setState(101);
+        rstatement();
+        }
+        break;
+      case LBRACE:
+      case LP:
+      case DO:
+      case CONTINUE:
+      case BREAK:
+      case RETURN:
+      case NEW:
+      case THROW:
+      case BOOLNOT:
+      case BWNOT:
+      case ADD:
+      case SUB:
+      case INCR:
+      case DECR:
+      case OCTAL:
+      case HEX:
+      case INTEGER:
+      case DECIMAL:
+      case STRING:
+      case REGEX:
+      case TRUE:
+      case FALSE:
+      case NULL:
+      case TYPE:
+      case ID:
+        enterOuterAlt(_localctx, 2);
+        {
+        setState(102);
+        dstatement();
+        setState(103);
+        _la = _input.LA(1);
+        if ( !(_la==EOF || _la==SEMICOLON) ) {
+          _errHandler.recoverInline(this);
+        } else {
+          consume();
+        }
+        }
+        break;
+      default:
+        throw new NoViableAltException(this);
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class RstatementContext extends ParserRuleContext {
+    public RstatementContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_rstatement; }
+    public RstatementContext() { }
+    public void copyFrom(RstatementContext ctx) {
+      super.copyFrom(ctx);
+    }
+  }
+  public static class ForContext extends RstatementContext {
+    public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); }
+    public TerminalNode LP() { return getToken(PainlessParser.LP, 0); }
+    public List<TerminalNode> SEMICOLON() { return getTokens(PainlessParser.SEMICOLON); }
+    public TerminalNode SEMICOLON(int i) {
+      return getToken(PainlessParser.SEMICOLON, i);
+    }
+    public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }
+    public TrailerContext trailer() {
+      return getRuleContext(TrailerContext.class,0);
+    }
+    public EmptyContext empty() {
+      return getRuleContext(EmptyContext.class,0);
+    }
+    public InitializerContext initializer() {
+      return getRuleContext(InitializerContext.class,0);
+    }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public AfterthoughtContext afterthought() {
+      return getRuleContext(AfterthoughtContext.class,0);
+    }
+    public ForContext(RstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitFor(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class TryContext extends RstatementContext {
+    public TerminalNode TRY() { return getToken(PainlessParser.TRY, 0); }
+    public BlockContext block() {
+      return getRuleContext(BlockContext.class,0);
+    }
+    public List<TrapContext> trap() {
+      return getRuleContexts(TrapContext.class);
+    }
+    public TrapContext trap(int i) {
+      return getRuleContext(TrapContext.class,i);
+    }
+    public TryContext(RstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitTry(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class WhileContext extends RstatementContext {
+    public TerminalNode WHILE() { return getToken(PainlessParser.WHILE, 0); }
+    public TerminalNode LP() { return getToken(PainlessParser.LP, 0); }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }
+    public TrailerContext trailer() {
+      return getRuleContext(TrailerContext.class,0);
+    }
+    public EmptyContext empty() {
+      return getRuleContext(EmptyContext.class,0);
+    }
+    public WhileContext(RstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitWhile(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class IneachContext extends RstatementContext {
+    public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); }
+    public TerminalNode LP() { return getToken(PainlessParser.LP, 0); }
+    public TerminalNode ID() { return getToken(PainlessParser.ID, 0); }
+    public TerminalNode IN() { return getToken(PainlessParser.IN, 0); }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }
+    public TrailerContext trailer() {
+      return getRuleContext(TrailerContext.class,0);
+    }
+    public IneachContext(RstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return
((PainlessParserVisitor)visitor).visitIneach(this); + else return visitor.visitChildren(this); + } + } + public static class IfContext extends RstatementContext { + public TerminalNode IF() { return getToken(PainlessParser.IF, 0); } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public List trailer() { + return getRuleContexts(TrailerContext.class); + } + public TrailerContext trailer(int i) { + return getRuleContext(TrailerContext.class,i); + } + public TerminalNode ELSE() { return getToken(PainlessParser.ELSE, 0); } + public IfContext(RstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitIf(this); + else return visitor.visitChildren(this); + } + } + public static class EachContext extends RstatementContext { + public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public TerminalNode COLON() { return getToken(PainlessParser.COLON, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public TrailerContext trailer() { + return getRuleContext(TrailerContext.class,0); + } + public EachContext(RstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitEach(this); + else return visitor.visitChildren(this); + } + } + public final RstatementContext rstatement() throws RecognitionException { + RstatementContext _localctx = new RstatementContext(_ctx, getState()); + enterRule(_localctx, 8, RULE_rstatement); + int _la; + try { + int _alt; + setState(167); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) { + case 1: + _localctx = new IfContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(107); + match(IF); + setState(108); + match(LP); + setState(109); + expression(0); + setState(110); + match(RP); + setState(111); + trailer(); + setState(115); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) { + case 1: + { + setState(112); + match(ELSE); + setState(113); + trailer(); + } + break; + case 2: + { + setState(114); + if (!( _input.LA(1) != ELSE )) throw new FailedPredicateException(this, " _input.LA(1) != ELSE "); + } + break; + } + } + break; + case 2: + _localctx = new WhileContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(117); + match(WHILE); + setState(118); + match(LP); + setState(119); + expression(0); + setState(120); + match(RP); + setState(123); + switch (_input.LA(1)) { + case LBRACK: + case LBRACE: + case LP: + case IF: + case WHILE: + case DO: + case FOR: + case CONTINUE: + case BREAK: + case RETURN: + case NEW: + case TRY: + case THROW: + case BOOLNOT: + case BWNOT: + case ADD: + case SUB: + case INCR: + case DECR: + case OCTAL: + case HEX: + case INTEGER: + case DECIMAL: + case STRING: + case REGEX: + case TRUE: + case FALSE: + case NULL: + case TYPE: + case 
ID: + { + setState(121); + trailer(); + } + break; + case SEMICOLON: + { + setState(122); + empty(); + } + break; + default: + throw new NoViableAltException(this); + } + } + break; + case 3: + _localctx = new ForContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(125); + match(FOR); + setState(126); + match(LP); + setState(128); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { + { + setState(127); + initializer(); + } + } + setState(130); + match(SEMICOLON); + setState(132); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { + { + setState(131); + expression(0); + } + } + setState(134); + match(SEMICOLON); + setState(136); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { + { + setState(135); + afterthought(); + } + } + setState(138); + match(RP); + setState(141); + switch (_input.LA(1)) { + case LBRACK: + case LBRACE: + case LP: + case IF: + case WHILE: + case DO: + case FOR: + case CONTINUE: + case BREAK: + case RETURN: + case NEW: + case TRY: + case THROW: + case BOOLNOT: + case BWNOT: + case ADD: + case SUB: + case INCR: + case DECR: + case OCTAL: + case HEX: + case INTEGER: + case DECIMAL: + case STRING: + case REGEX: + case TRUE: + case FALSE: + case NULL: + case TYPE: + case ID: + { + setState(139); + trailer(); + } + break; + case SEMICOLON: + { + setState(140); + empty(); + } + break; + default: + throw new NoViableAltException(this); + } + } + break; + case 4: + _localctx = new EachContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(143); + match(FOR); + setState(144); + match(LP); + setState(145); + decltype(); + setState(146); + match(ID); + setState(147); + match(COLON); + setState(148); + expression(0); + setState(149); + match(RP); + setState(150); + trailer(); + } + break; + case 5: + _localctx = new IneachContext(_localctx); + enterOuterAlt(_localctx, 5); + { + setState(152); + match(FOR); + setState(153); + match(LP); + setState(154); + match(ID); + setState(155); + match(IN); + setState(156); + expression(0); + setState(157); + match(RP); + setState(158); + trailer(); + } + break; + case 6: + _localctx = new TryContext(_localctx); + 
enterOuterAlt(_localctx, 6); + { + setState(160); + match(TRY); + setState(161); + block(); + setState(163); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(162); + trap(); + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(165); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,11,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class DstatementContext extends ParserRuleContext { + public DstatementContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_dstatement; } + public DstatementContext() { } + public void copyFrom(DstatementContext ctx) { + super.copyFrom(ctx); + } + } + public static class DeclContext extends DstatementContext { + public DeclarationContext declaration() { + return getRuleContext(DeclarationContext.class,0); + } + public DeclContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitDecl(this); + else return visitor.visitChildren(this); + } + } + public static class BreakContext extends DstatementContext { + public TerminalNode BREAK() { return getToken(PainlessParser.BREAK, 0); } + public BreakContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitBreak(this); + else return visitor.visitChildren(this); + } + } + public static class ThrowContext extends DstatementContext { + public TerminalNode THROW() { return getToken(PainlessParser.THROW, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public ThrowContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitThrow(this); + else return visitor.visitChildren(this); + } + } + public static class ContinueContext extends DstatementContext { + public TerminalNode CONTINUE() { return getToken(PainlessParser.CONTINUE, 0); } + public ContinueContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitContinue(this); + else return visitor.visitChildren(this); + } + } + public static class ExprContext extends DstatementContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public ExprContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitExpr(this); + else return visitor.visitChildren(this); + } + } + public static class DoContext extends DstatementContext { + public TerminalNode DO() { return getToken(PainlessParser.DO, 0); } + public BlockContext block() { + return getRuleContext(BlockContext.class,0); + } + public TerminalNode WHILE() { return 
getToken(PainlessParser.WHILE, 0); } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public DoContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitDo(this); + else return visitor.visitChildren(this); + } + } + public static class ReturnContext extends DstatementContext { + public TerminalNode RETURN() { return getToken(PainlessParser.RETURN, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public ReturnContext(DstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitReturn(this); + else return visitor.visitChildren(this); + } + } + public final DstatementContext dstatement() throws RecognitionException { + DstatementContext _localctx = new DstatementContext(_ctx, getState()); + enterRule(_localctx, 10, RULE_dstatement); + int _la; + try { + setState(186); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) { + case 1: + _localctx = new DoContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(169); + match(DO); + setState(170); + block(); + setState(171); + match(WHILE); + setState(172); + match(LP); + setState(173); + expression(0); + setState(174); + match(RP); + } + break; + case 2: + _localctx = new DeclContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(176); + declaration(); + } + break; + case 3: + _localctx = new ContinueContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(177); + match(CONTINUE); + } + break; + case 4: + _localctx = new BreakContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(178); + match(BREAK); + } + break; + case 5: + _localctx = new ReturnContext(_localctx); + enterOuterAlt(_localctx, 5); + { + setState(179); + match(RETURN); + setState(181); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { + { + setState(180); + expression(0); + } + } + } + break; + case 6: + _localctx = new ThrowContext(_localctx); + enterOuterAlt(_localctx, 6); + { + setState(183); + match(THROW); + setState(184); + expression(0); + } + break; + case 7: + _localctx = new ExprContext(_localctx); + enterOuterAlt(_localctx, 7); + { + setState(185); + expression(0); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class TrailerContext extends ParserRuleContext { + public BlockContext block() { + return getRuleContext(BlockContext.class,0); + } + public StatementContext statement() { + return getRuleContext(StatementContext.class,0); + 
} + public TrailerContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_trailer; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitTrailer(this); + else return visitor.visitChildren(this); + } + } + public final TrailerContext trailer() throws RecognitionException { + TrailerContext _localctx = new TrailerContext(_ctx, getState()); + enterRule(_localctx, 12, RULE_trailer); + try { + setState(190); + switch (_input.LA(1)) { + case LBRACK: + enterOuterAlt(_localctx, 1); + { + setState(188); + block(); + } + break; + case LBRACE: + case LP: + case IF: + case WHILE: + case DO: + case FOR: + case CONTINUE: + case BREAK: + case RETURN: + case NEW: + case TRY: + case THROW: + case BOOLNOT: + case BWNOT: + case ADD: + case SUB: + case INCR: + case DECR: + case OCTAL: + case HEX: + case INTEGER: + case DECIMAL: + case STRING: + case REGEX: + case TRUE: + case FALSE: + case NULL: + case TYPE: + case ID: + enterOuterAlt(_localctx, 2); + { + setState(189); + statement(); + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class BlockContext extends ParserRuleContext { + public TerminalNode LBRACK() { return getToken(PainlessParser.LBRACK, 0); } + public TerminalNode RBRACK() { return getToken(PainlessParser.RBRACK, 0); } + public List statement() { + return getRuleContexts(StatementContext.class); + } + public StatementContext statement(int i) { + return getRuleContext(StatementContext.class,i); + } + public DstatementContext dstatement() { + return getRuleContext(DstatementContext.class,0); + } + public BlockContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_block; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitBlock(this); + else return visitor.visitChildren(this); + } + } + public final BlockContext block() throws RecognitionException { + BlockContext _localctx = new BlockContext(_ctx, getState()); + enterRule(_localctx, 14, RULE_block); + int _la; + try { + int _alt; + enterOuterAlt(_localctx, 1); + { + setState(192); + match(LBRACK); + setState(196); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,16,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(193); + statement(); + } + } + } + setState(198); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,16,_ctx); + } + setState(200); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DO) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 
72)))) != 0)) { + { + setState(199); + dstatement(); + } + } + setState(202); + match(RBRACK); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class EmptyContext extends ParserRuleContext { + public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); } + public EmptyContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_empty; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitEmpty(this); + else return visitor.visitChildren(this); + } + } + public final EmptyContext empty() throws RecognitionException { + EmptyContext _localctx = new EmptyContext(_ctx, getState()); + enterRule(_localctx, 16, RULE_empty); + try { + enterOuterAlt(_localctx, 1); + { + setState(204); + match(SEMICOLON); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class InitializerContext extends ParserRuleContext { + public DeclarationContext declaration() { + return getRuleContext(DeclarationContext.class,0); + } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public InitializerContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_initializer; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitInitializer(this); + else return visitor.visitChildren(this); + } + } + public final InitializerContext initializer() throws RecognitionException { + InitializerContext _localctx = new InitializerContext(_ctx, getState()); + enterRule(_localctx, 18, RULE_initializer); + try { + setState(208); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,18,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(206); + declaration(); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(207); + expression(0); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class AfterthoughtContext extends ParserRuleContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public AfterthoughtContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_afterthought; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitAfterthought(this); + else return visitor.visitChildren(this); + } + } + public final AfterthoughtContext afterthought() throws RecognitionException { + AfterthoughtContext _localctx = new AfterthoughtContext(_ctx, getState()); + enterRule(_localctx, 20, RULE_afterthought); + try { + enterOuterAlt(_localctx, 1); + { + setState(210); + expression(0); + } + } + catch (RecognitionException re) { + 
_localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class DeclarationContext extends ParserRuleContext { + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public List declvar() { + return getRuleContexts(DeclvarContext.class); + } + public DeclvarContext declvar(int i) { + return getRuleContext(DeclvarContext.class,i); + } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public DeclarationContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_declaration; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitDeclaration(this); + else return visitor.visitChildren(this); + } + } + public final DeclarationContext declaration() throws RecognitionException { + DeclarationContext _localctx = new DeclarationContext(_ctx, getState()); + enterRule(_localctx, 22, RULE_declaration); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(212); + decltype(); + setState(213); + declvar(); + setState(218); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(214); + match(COMMA); + setState(215); + declvar(); + } + } + setState(220); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class DecltypeContext extends ParserRuleContext { + public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); } + public List LBRACE() { return getTokens(PainlessParser.LBRACE); } + public TerminalNode LBRACE(int i) { + return getToken(PainlessParser.LBRACE, i); + } + public List RBRACE() { return getTokens(PainlessParser.RBRACE); } + public TerminalNode RBRACE(int i) { + return getToken(PainlessParser.RBRACE, i); + } + public DecltypeContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_decltype; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitDecltype(this); + else return visitor.visitChildren(this); + } + } + public final DecltypeContext decltype() throws RecognitionException { + DecltypeContext _localctx = new DecltypeContext(_ctx, getState()); + enterRule(_localctx, 24, RULE_decltype); + try { + int _alt; + enterOuterAlt(_localctx, 1); + { + setState(221); + match(TYPE); + setState(226); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,20,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(222); + match(LBRACE); + setState(223); + match(RBRACE); + } + } + } + setState(228); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,20,_ctx); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class DeclvarContext extends 
ParserRuleContext { + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public TerminalNode ASSIGN() { return getToken(PainlessParser.ASSIGN, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public DeclvarContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_declvar; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitDeclvar(this); + else return visitor.visitChildren(this); + } + } + public final DeclvarContext declvar() throws RecognitionException { + DeclvarContext _localctx = new DeclvarContext(_ctx, getState()); + enterRule(_localctx, 26, RULE_declvar); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(229); + match(ID); + setState(232); + _la = _input.LA(1); + if (_la==ASSIGN) { + { + setState(230); + match(ASSIGN); + setState(231); + expression(0); + } + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class TrapContext extends ParserRuleContext { + public TerminalNode CATCH() { return getToken(PainlessParser.CATCH, 0); } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); } + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public BlockContext block() { + return getRuleContext(BlockContext.class,0); + } + public TrapContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_trap; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitTrap(this); + else return visitor.visitChildren(this); + } + } + public final TrapContext trap() throws RecognitionException { + TrapContext _localctx = new TrapContext(_ctx, getState()); + enterRule(_localctx, 28, RULE_trap); + try { + enterOuterAlt(_localctx, 1); + { + setState(234); + match(CATCH); + setState(235); + match(LP); + setState(236); + match(TYPE); + setState(237); + match(ID); + setState(238); + match(RP); + setState(239); + block(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class ExpressionContext extends ParserRuleContext { + public ExpressionContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_expression; } + public ExpressionContext() { } + public void copyFrom(ExpressionContext ctx) { + super.copyFrom(ctx); + } + } + public static class SingleContext extends ExpressionContext { + public UnaryContext unary() { + return getRuleContext(UnaryContext.class,0); + } + public SingleContext(ExpressionContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitSingle(this); + else return visitor.visitChildren(this); + } + } + public static class CompContext 
extends ExpressionContext { + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode LT() { return getToken(PainlessParser.LT, 0); } + public TerminalNode LTE() { return getToken(PainlessParser.LTE, 0); } + public TerminalNode GT() { return getToken(PainlessParser.GT, 0); } + public TerminalNode GTE() { return getToken(PainlessParser.GTE, 0); } + public TerminalNode EQ() { return getToken(PainlessParser.EQ, 0); } + public TerminalNode EQR() { return getToken(PainlessParser.EQR, 0); } + public TerminalNode NE() { return getToken(PainlessParser.NE, 0); } + public TerminalNode NER() { return getToken(PainlessParser.NER, 0); } + public CompContext(ExpressionContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitComp(this); + else return visitor.visitChildren(this); + } + } + public static class BoolContext extends ExpressionContext { + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode BOOLAND() { return getToken(PainlessParser.BOOLAND, 0); } + public TerminalNode BOOLOR() { return getToken(PainlessParser.BOOLOR, 0); } + public BoolContext(ExpressionContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitBool(this); + else return visitor.visitChildren(this); + } + } + public static class ConditionalContext extends ExpressionContext { + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode COND() { return getToken(PainlessParser.COND, 0); } + public TerminalNode COLON() { return getToken(PainlessParser.COLON, 0); } + public ConditionalContext(ExpressionContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitConditional(this); + else return visitor.visitChildren(this); + } + } + public static class AssignmentContext extends ExpressionContext { + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode ASSIGN() { return getToken(PainlessParser.ASSIGN, 0); } + public TerminalNode AADD() { return getToken(PainlessParser.AADD, 0); } + public TerminalNode ASUB() { return getToken(PainlessParser.ASUB, 0); } + public TerminalNode AMUL() { return getToken(PainlessParser.AMUL, 0); } + public TerminalNode ADIV() { return getToken(PainlessParser.ADIV, 0); } + public TerminalNode AREM() { return getToken(PainlessParser.AREM, 0); } + public TerminalNode AAND() { return getToken(PainlessParser.AAND, 0); } + public TerminalNode AXOR() { return getToken(PainlessParser.AXOR, 0); } + public TerminalNode AOR() { return getToken(PainlessParser.AOR, 0); } + public TerminalNode ALSH() { return getToken(PainlessParser.ALSH, 0); } + public TerminalNode ARSH() { return getToken(PainlessParser.ARSH, 0); } + public TerminalNode AUSH() { 
return getToken(PainlessParser.AUSH, 0); } + public AssignmentContext(ExpressionContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitAssignment(this); + else return visitor.visitChildren(this); + } + } + public static class BinaryContext extends ExpressionContext { + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode MUL() { return getToken(PainlessParser.MUL, 0); } + public TerminalNode DIV() { return getToken(PainlessParser.DIV, 0); } + public TerminalNode REM() { return getToken(PainlessParser.REM, 0); } + public TerminalNode ADD() { return getToken(PainlessParser.ADD, 0); } + public TerminalNode SUB() { return getToken(PainlessParser.SUB, 0); } + public TerminalNode FIND() { return getToken(PainlessParser.FIND, 0); } + public TerminalNode MATCH() { return getToken(PainlessParser.MATCH, 0); } + public TerminalNode LSH() { return getToken(PainlessParser.LSH, 0); } + public TerminalNode RSH() { return getToken(PainlessParser.RSH, 0); } + public TerminalNode USH() { return getToken(PainlessParser.USH, 0); } + public TerminalNode BWAND() { return getToken(PainlessParser.BWAND, 0); } + public TerminalNode XOR() { return getToken(PainlessParser.XOR, 0); } + public TerminalNode BWOR() { return getToken(PainlessParser.BWOR, 0); } + public BinaryContext(ExpressionContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitBinary(this); + else return visitor.visitChildren(this); + } + } + public static class ElvisContext extends ExpressionContext { + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode ELVIS() { return getToken(PainlessParser.ELVIS, 0); } + public ElvisContext(ExpressionContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitElvis(this); + else return visitor.visitChildren(this); + } + } + public static class InstanceofContext extends ExpressionContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode INSTANCEOF() { return getToken(PainlessParser.INSTANCEOF, 0); } + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public InstanceofContext(ExpressionContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitInstanceof(this); + else return visitor.visitChildren(this); + } + } + public final ExpressionContext expression() throws RecognitionException { + return expression(0); + } + private ExpressionContext expression(int _p) throws RecognitionException { + ParserRuleContext _parentctx = _ctx; + int _parentState = getState(); + ExpressionContext _localctx = new ExpressionContext(_ctx, _parentState); + ExpressionContext _prevctx = _localctx; + int _startState = 30; + enterRecursionRule(_localctx, 30, RULE_expression, _p); + int _la; + try { + int _alt; + 
enterOuterAlt(_localctx, 1); + { + { + _localctx = new SingleContext(_localctx); + _ctx = _localctx; + _prevctx = _localctx; + setState(242); + unary(); + } + _ctx.stop = _input.LT(-1); + setState(294); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,23,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + if ( _parseListeners!=null ) triggerExitRuleEvent(); + _prevctx = _localctx; + { + setState(292); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,22,_ctx) ) { + case 1: + { + _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(244); + if (!(precpred(_ctx, 15))) throw new FailedPredicateException(this, "precpred(_ctx, 15)"); + setState(245); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << MUL) | (1L << DIV) | (1L << REM))) != 0)) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(246); + expression(16); + } + break; + case 2: + { + _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(247); + if (!(precpred(_ctx, 14))) throw new FailedPredicateException(this, "precpred(_ctx, 14)"); + setState(248); + _la = _input.LA(1); + if ( !(_la==ADD || _la==SUB) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(249); + expression(15); + } + break; + case 3: + { + _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(250); + if (!(precpred(_ctx, 13))) throw new FailedPredicateException(this, "precpred(_ctx, 13)"); + setState(251); + _la = _input.LA(1); + if ( !(_la==FIND || _la==MATCH) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(252); + expression(14); + } + break; + case 4: + { + _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(253); + if (!(precpred(_ctx, 12))) throw new FailedPredicateException(this, "precpred(_ctx, 12)"); + setState(254); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LSH) | (1L << RSH) | (1L << USH))) != 0)) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(255); + expression(13); + } + break; + case 5: + { + _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(256); + if (!(precpred(_ctx, 11))) throw new FailedPredicateException(this, "precpred(_ctx, 11)"); + setState(257); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LT) | (1L << LTE) | (1L << GT) | (1L << GTE))) != 0)) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(258); + expression(12); + } + break; + case 6: + { + _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(259); + if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)"); + setState(260); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << EQ) | (1L << EQR) | (1L << NE) | (1L << NER))) != 0)) ) { + _errHandler.recoverInline(this); + } 
else { + consume(); + } + setState(261); + expression(10); + } + break; + case 7: + { + _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(262); + if (!(precpred(_ctx, 8))) throw new FailedPredicateException(this, "precpred(_ctx, 8)"); + setState(263); + match(BWAND); + setState(264); + expression(9); + } + break; + case 8: + { + _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(265); + if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)"); + setState(266); + match(XOR); + setState(267); + expression(8); + } + break; + case 9: + { + _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(268); + if (!(precpred(_ctx, 6))) throw new FailedPredicateException(this, "precpred(_ctx, 6)"); + setState(269); + match(BWOR); + setState(270); + expression(7); + } + break; + case 10: + { + _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(271); + if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, "precpred(_ctx, 5)"); + setState(272); + match(BOOLAND); + setState(273); + expression(6); + } + break; + case 11: + { + _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(274); + if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)"); + setState(275); + match(BOOLOR); + setState(276); + expression(5); + } + break; + case 12: + { + _localctx = new ConditionalContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(277); + if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)"); + setState(278); + match(COND); + setState(279); + expression(0); + setState(280); + match(COLON); + setState(281); + expression(3); + } + break; + case 13: + { + _localctx = new ElvisContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(283); + if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)"); + setState(284); + match(ELVIS); + setState(285); + expression(2); + } + break; + case 14: + { + _localctx = new AssignmentContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + setState(286); + if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)"); + setState(287); + _la = _input.LA(1); + if ( !(((((_la - 60)) & ~0x3f) == 0 && ((1L << (_la - 60)) & ((1L << (ASSIGN - 60)) | (1L << (AADD - 60)) | (1L << (ASUB - 60)) | (1L << (AMUL - 60)) | (1L << (ADIV - 60)) | (1L << (AREM - 60)) | (1L << (AAND - 60)) | (1L << (AXOR - 60)) | (1L << (AOR - 60)) | (1L << (ALSH - 60)) | (1L << (ARSH - 60)) | (1L << (AUSH - 60)))) != 0)) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(288); + expression(1); + } + break; + case 15: + { + _localctx = new InstanceofContext(new ExpressionContext(_parentctx, _parentState)); + pushNewRecursionContext(_localctx, _startState, RULE_expression); + 
setState(289); + if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)"); + setState(290); + match(INSTANCEOF); + setState(291); + decltype(); + } + break; + } + } + } + setState(296); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,23,_ctx); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + unrollRecursionContexts(_parentctx); + } + return _localctx; + } + public static class UnaryContext extends ParserRuleContext { + public UnaryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_unary; } + public UnaryContext() { } + public void copyFrom(UnaryContext ctx) { + super.copyFrom(ctx); + } + } + public static class CastContext extends UnaryContext { + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public UnaryContext unary() { + return getRuleContext(UnaryContext.class,0); + } + public CastContext(UnaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitCast(this); + else return visitor.visitChildren(this); + } + } + public static class PreContext extends UnaryContext { + public ChainContext chain() { + return getRuleContext(ChainContext.class,0); + } + public TerminalNode INCR() { return getToken(PainlessParser.INCR, 0); } + public TerminalNode DECR() { return getToken(PainlessParser.DECR, 0); } + public PreContext(UnaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitPre(this); + else return visitor.visitChildren(this); + } + } + public static class ReadContext extends UnaryContext { + public ChainContext chain() { + return getRuleContext(ChainContext.class,0); + } + public ReadContext(UnaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitRead(this); + else return visitor.visitChildren(this); + } + } + public static class PostContext extends UnaryContext { + public ChainContext chain() { + return getRuleContext(ChainContext.class,0); + } + public TerminalNode INCR() { return getToken(PainlessParser.INCR, 0); } + public TerminalNode DECR() { return getToken(PainlessParser.DECR, 0); } + public PostContext(UnaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitPost(this); + else return visitor.visitChildren(this); + } + } + public static class OperatorContext extends UnaryContext { + public UnaryContext unary() { + return getRuleContext(UnaryContext.class,0); + } + public TerminalNode BOOLNOT() { return getToken(PainlessParser.BOOLNOT, 0); } + public TerminalNode BWNOT() { return getToken(PainlessParser.BWNOT, 0); } + public TerminalNode ADD() { return getToken(PainlessParser.ADD, 0); } + public TerminalNode SUB() { return getToken(PainlessParser.SUB, 0); } + public OperatorContext(UnaryContext ctx) { copyFrom(ctx); } + 
@Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitOperator(this); + else return visitor.visitChildren(this); + } + } + public final UnaryContext unary() throws RecognitionException { + UnaryContext _localctx = new UnaryContext(_ctx, getState()); + enterRule(_localctx, 32, RULE_unary); + int _la; + try { + setState(310); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) { + case 1: + _localctx = new PreContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(297); + _la = _input.LA(1); + if ( !(_la==INCR || _la==DECR) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(298); + chain(); + } + break; + case 2: + _localctx = new PostContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(299); + chain(); + setState(300); + _la = _input.LA(1); + if ( !(_la==INCR || _la==DECR) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + break; + case 3: + _localctx = new ReadContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(302); + chain(); + } + break; + case 4: + _localctx = new OperatorContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(303); + _la = _input.LA(1); + if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB))) != 0)) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(304); + unary(); + } + break; + case 5: + _localctx = new CastContext(_localctx); + enterOuterAlt(_localctx, 5); + { + setState(305); + match(LP); + setState(306); + decltype(); + setState(307); + match(RP); + setState(308); + unary(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class ChainContext extends ParserRuleContext { + public ChainContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_chain; } + public ChainContext() { } + public void copyFrom(ChainContext ctx) { + super.copyFrom(ctx); + } + } + public static class StaticContext extends ChainContext { + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public PostdotContext postdot() { + return getRuleContext(PostdotContext.class,0); + } + public List postfix() { + return getRuleContexts(PostfixContext.class); + } + public PostfixContext postfix(int i) { + return getRuleContext(PostfixContext.class,i); + } + public StaticContext(ChainContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitStatic(this); + else return visitor.visitChildren(this); + } + } + public static class DynamicContext extends ChainContext { + public PrimaryContext primary() { + return getRuleContext(PrimaryContext.class,0); + } + public List postfix() { + return getRuleContexts(PostfixContext.class); + } + public PostfixContext postfix(int i) { + return getRuleContext(PostfixContext.class,i); + } + public DynamicContext(ChainContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitDynamic(this); + else return 
visitor.visitChildren(this); + } + } + public static class NewarrayContext extends ChainContext { + public ArrayinitializerContext arrayinitializer() { + return getRuleContext(ArrayinitializerContext.class,0); + } + public NewarrayContext(ChainContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitNewarray(this); + else return visitor.visitChildren(this); + } + } + public final ChainContext chain() throws RecognitionException { + ChainContext _localctx = new ChainContext(_ctx, getState()); + enterRule(_localctx, 34, RULE_chain); + try { + int _alt; + setState(328); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) { + case 1: + _localctx = new DynamicContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(312); + primary(); + setState(316); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,25,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(313); + postfix(); + } + } + } + setState(318); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,25,_ctx); + } + } + break; + case 2: + _localctx = new StaticContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(319); + decltype(); + setState(320); + postdot(); + setState(324); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,26,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(321); + postfix(); + } + } + } + setState(326); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,26,_ctx); + } + } + break; + case 3: + _localctx = new NewarrayContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(327); + arrayinitializer(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class PrimaryContext extends ParserRuleContext { + public PrimaryContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_primary; } + public PrimaryContext() { } + public void copyFrom(PrimaryContext ctx) { + super.copyFrom(ctx); + } + } + public static class ListinitContext extends PrimaryContext { + public ListinitializerContext listinitializer() { + return getRuleContext(ListinitializerContext.class,0); + } + public ListinitContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitListinit(this); + else return visitor.visitChildren(this); + } + } + public static class RegexContext extends PrimaryContext { + public TerminalNode REGEX() { return getToken(PainlessParser.REGEX, 0); } + public RegexContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitRegex(this); + else return visitor.visitChildren(this); + } + } + public static class NullContext extends PrimaryContext { + public TerminalNode NULL() { return getToken(PainlessParser.NULL, 0); } + public NullContext(PrimaryContext ctx) { copyFrom(ctx); } + 
@Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitNull(this); + else return visitor.visitChildren(this); + } + } + public static class StringContext extends PrimaryContext { + public TerminalNode STRING() { return getToken(PainlessParser.STRING, 0); } + public StringContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitString(this); + else return visitor.visitChildren(this); + } + } + public static class MapinitContext extends PrimaryContext { + public MapinitializerContext mapinitializer() { + return getRuleContext(MapinitializerContext.class,0); + } + public MapinitContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitMapinit(this); + else return visitor.visitChildren(this); + } + } + public static class CalllocalContext extends PrimaryContext { + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public ArgumentsContext arguments() { + return getRuleContext(ArgumentsContext.class,0); + } + public CalllocalContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitCalllocal(this); + else return visitor.visitChildren(this); + } + } + public static class TrueContext extends PrimaryContext { + public TerminalNode TRUE() { return getToken(PainlessParser.TRUE, 0); } + public TrueContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitTrue(this); + else return visitor.visitChildren(this); + } + } + public static class FalseContext extends PrimaryContext { + public TerminalNode FALSE() { return getToken(PainlessParser.FALSE, 0); } + public FalseContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitFalse(this); + else return visitor.visitChildren(this); + } + } + public static class VariableContext extends PrimaryContext { + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public VariableContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitVariable(this); + else return visitor.visitChildren(this); + } + } + public static class NumericContext extends PrimaryContext { + public TerminalNode OCTAL() { return getToken(PainlessParser.OCTAL, 0); } + public TerminalNode HEX() { return getToken(PainlessParser.HEX, 0); } + public TerminalNode INTEGER() { return getToken(PainlessParser.INTEGER, 0); } + public TerminalNode DECIMAL() { return getToken(PainlessParser.DECIMAL, 0); } + public NumericContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitNumeric(this); + else return visitor.visitChildren(this); + } + } + public static class NewobjectContext extends 
PrimaryContext { + public TerminalNode NEW() { return getToken(PainlessParser.NEW, 0); } + public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); } + public ArgumentsContext arguments() { + return getRuleContext(ArgumentsContext.class,0); + } + public NewobjectContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitNewobject(this); + else return visitor.visitChildren(this); + } + } + public static class PrecedenceContext extends PrimaryContext { + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public PrecedenceContext(PrimaryContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitPrecedence(this); + else return visitor.visitChildren(this); + } + } + public final PrimaryContext primary() throws RecognitionException { + PrimaryContext _localctx = new PrimaryContext(_ctx, getState()); + enterRule(_localctx, 36, RULE_primary); + int _la; + try { + setState(348); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) { + case 1: + _localctx = new PrecedenceContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(330); + match(LP); + setState(331); + expression(0); + setState(332); + match(RP); + } + break; + case 2: + _localctx = new NumericContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(334); + _la = _input.LA(1); + if ( !(((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)))) != 0)) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + break; + case 3: + _localctx = new TrueContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(335); + match(TRUE); + } + break; + case 4: + _localctx = new FalseContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(336); + match(FALSE); + } + break; + case 5: + _localctx = new NullContext(_localctx); + enterOuterAlt(_localctx, 5); + { + setState(337); + match(NULL); + } + break; + case 6: + _localctx = new StringContext(_localctx); + enterOuterAlt(_localctx, 6); + { + setState(338); + match(STRING); + } + break; + case 7: + _localctx = new RegexContext(_localctx); + enterOuterAlt(_localctx, 7); + { + setState(339); + match(REGEX); + } + break; + case 8: + _localctx = new ListinitContext(_localctx); + enterOuterAlt(_localctx, 8); + { + setState(340); + listinitializer(); + } + break; + case 9: + _localctx = new MapinitContext(_localctx); + enterOuterAlt(_localctx, 9); + { + setState(341); + mapinitializer(); + } + break; + case 10: + _localctx = new VariableContext(_localctx); + enterOuterAlt(_localctx, 10); + { + setState(342); + match(ID); + } + break; + case 11: + _localctx = new CalllocalContext(_localctx); + enterOuterAlt(_localctx, 11); + { + setState(343); + match(ID); + setState(344); + arguments(); + } + break; + case 12: + _localctx = new NewobjectContext(_localctx); + enterOuterAlt(_localctx, 12); + { + setState(345); + match(NEW); + setState(346); + match(TYPE); + setState(347); + arguments(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + 
_errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class PostfixContext extends ParserRuleContext { + public CallinvokeContext callinvoke() { + return getRuleContext(CallinvokeContext.class,0); + } + public FieldaccessContext fieldaccess() { + return getRuleContext(FieldaccessContext.class,0); + } + public BraceaccessContext braceaccess() { + return getRuleContext(BraceaccessContext.class,0); + } + public PostfixContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_postfix; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitPostfix(this); + else return visitor.visitChildren(this); + } + } + public final PostfixContext postfix() throws RecognitionException { + PostfixContext _localctx = new PostfixContext(_ctx, getState()); + enterRule(_localctx, 38, RULE_postfix); + try { + setState(353); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(350); + callinvoke(); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(351); + fieldaccess(); + } + break; + case 3: + enterOuterAlt(_localctx, 3); + { + setState(352); + braceaccess(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class PostdotContext extends ParserRuleContext { + public CallinvokeContext callinvoke() { + return getRuleContext(CallinvokeContext.class,0); + } + public FieldaccessContext fieldaccess() { + return getRuleContext(FieldaccessContext.class,0); + } + public PostdotContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_postdot; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitPostdot(this); + else return visitor.visitChildren(this); + } + } + public final PostdotContext postdot() throws RecognitionException { + PostdotContext _localctx = new PostdotContext(_ctx, getState()); + enterRule(_localctx, 40, RULE_postdot); + try { + setState(357); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(355); + callinvoke(); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(356); + fieldaccess(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class CallinvokeContext extends ParserRuleContext { + public TerminalNode DOTID() { return getToken(PainlessParser.DOTID, 0); } + public ArgumentsContext arguments() { + return getRuleContext(ArgumentsContext.class,0); + } + public TerminalNode DOT() { return getToken(PainlessParser.DOT, 0); } + public TerminalNode NSDOT() { return getToken(PainlessParser.NSDOT, 0); } + public CallinvokeContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_callinvoke; } + @Override 
+ public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitCallinvoke(this); + else return visitor.visitChildren(this); + } + } + public final CallinvokeContext callinvoke() throws RecognitionException { + CallinvokeContext _localctx = new CallinvokeContext(_ctx, getState()); + enterRule(_localctx, 42, RULE_callinvoke); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(359); + _la = _input.LA(1); + if ( !(_la==DOT || _la==NSDOT) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(360); + match(DOTID); + setState(361); + arguments(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class FieldaccessContext extends ParserRuleContext { + public TerminalNode DOT() { return getToken(PainlessParser.DOT, 0); } + public TerminalNode NSDOT() { return getToken(PainlessParser.NSDOT, 0); } + public TerminalNode DOTID() { return getToken(PainlessParser.DOTID, 0); } + public TerminalNode DOTINTEGER() { return getToken(PainlessParser.DOTINTEGER, 0); } + public FieldaccessContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_fieldaccess; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitFieldaccess(this); + else return visitor.visitChildren(this); + } + } + public final FieldaccessContext fieldaccess() throws RecognitionException { + FieldaccessContext _localctx = new FieldaccessContext(_ctx, getState()); + enterRule(_localctx, 44, RULE_fieldaccess); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(363); + _la = _input.LA(1); + if ( !(_la==DOT || _la==NSDOT) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(364); + _la = _input.LA(1); + if ( !(_la==DOTINTEGER || _la==DOTID) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class BraceaccessContext extends ParserRuleContext { + public TerminalNode LBRACE() { return getToken(PainlessParser.LBRACE, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode RBRACE() { return getToken(PainlessParser.RBRACE, 0); } + public BraceaccessContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_braceaccess; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitBraceaccess(this); + else return visitor.visitChildren(this); + } + } + public final BraceaccessContext braceaccess() throws RecognitionException { + BraceaccessContext _localctx = new BraceaccessContext(_ctx, getState()); + enterRule(_localctx, 46, RULE_braceaccess); + try { + enterOuterAlt(_localctx, 1); + { + setState(366); + match(LBRACE); + setState(367); + expression(0); + setState(368); + match(RBRACE); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, 
re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class ArrayinitializerContext extends ParserRuleContext { + public ArrayinitializerContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_arrayinitializer; } + public ArrayinitializerContext() { } + public void copyFrom(ArrayinitializerContext ctx) { + super.copyFrom(ctx); + } + } + public static class NewstandardarrayContext extends ArrayinitializerContext { + public TerminalNode NEW() { return getToken(PainlessParser.NEW, 0); } + public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); } + public List LBRACE() { return getTokens(PainlessParser.LBRACE); } + public TerminalNode LBRACE(int i) { + return getToken(PainlessParser.LBRACE, i); + } + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public List RBRACE() { return getTokens(PainlessParser.RBRACE); } + public TerminalNode RBRACE(int i) { + return getToken(PainlessParser.RBRACE, i); + } + public PostdotContext postdot() { + return getRuleContext(PostdotContext.class,0); + } + public List postfix() { + return getRuleContexts(PostfixContext.class); + } + public PostfixContext postfix(int i) { + return getRuleContext(PostfixContext.class,i); + } + public NewstandardarrayContext(ArrayinitializerContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitNewstandardarray(this); + else return visitor.visitChildren(this); + } + } + public static class NewinitializedarrayContext extends ArrayinitializerContext { + public TerminalNode NEW() { return getToken(PainlessParser.NEW, 0); } + public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); } + public TerminalNode LBRACE() { return getToken(PainlessParser.LBRACE, 0); } + public TerminalNode RBRACE() { return getToken(PainlessParser.RBRACE, 0); } + public TerminalNode LBRACK() { return getToken(PainlessParser.LBRACK, 0); } + public TerminalNode RBRACK() { return getToken(PainlessParser.RBRACK, 0); } + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public List postfix() { + return getRuleContexts(PostfixContext.class); + } + public PostfixContext postfix(int i) { + return getRuleContext(PostfixContext.class,i); + } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public NewinitializedarrayContext(ArrayinitializerContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitNewinitializedarray(this); + else return visitor.visitChildren(this); + } + } + public final ArrayinitializerContext arrayinitializer() throws RecognitionException { + ArrayinitializerContext _localctx = new ArrayinitializerContext(_ctx, getState()); + enterRule(_localctx, 48, RULE_arrayinitializer); + int _la; + try { + int _alt; + setState(411); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,37,_ctx) ) { + case 1: + _localctx = new 
NewstandardarrayContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(370); + match(NEW); + setState(371); + match(TYPE); + setState(376); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(372); + match(LBRACE); + setState(373); + expression(0); + setState(374); + match(RBRACE); + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(378); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,31,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + setState(387); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,33,_ctx) ) { + case 1: + { + setState(380); + postdot(); + setState(384); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,32,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(381); + postfix(); + } + } + } + setState(386); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,32,_ctx); + } + } + break; + } + } + break; + case 2: + _localctx = new NewinitializedarrayContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(389); + match(NEW); + setState(390); + match(TYPE); + setState(391); + match(LBRACE); + setState(392); + match(RBRACE); + setState(393); + match(LBRACK); + setState(402); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { + { + setState(394); + expression(0); + setState(399); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(395); + match(COMMA); + setState(396); + expression(0); + } + } + setState(401); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + setState(404); + match(RBRACK); + setState(408); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,36,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(405); + postfix(); + } + } + } + setState(410); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,36,_ctx); + } + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class ListinitializerContext extends ParserRuleContext { + public TerminalNode LBRACE() { return getToken(PainlessParser.LBRACE, 0); } + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode RBRACE() { return getToken(PainlessParser.RBRACE, 0); } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public ListinitializerContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return 
RULE_listinitializer; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitListinitializer(this); + else return visitor.visitChildren(this); + } + } + public final ListinitializerContext listinitializer() throws RecognitionException { + ListinitializerContext _localctx = new ListinitializerContext(_ctx, getState()); + enterRule(_localctx, 50, RULE_listinitializer); + int _la; + try { + setState(426); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,39,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(413); + match(LBRACE); + setState(414); + expression(0); + setState(419); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(415); + match(COMMA); + setState(416); + expression(0); + } + } + setState(421); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(422); + match(RBRACE); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(424); + match(LBRACE); + setState(425); + match(RBRACE); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class MapinitializerContext extends ParserRuleContext { + public TerminalNode LBRACE() { return getToken(PainlessParser.LBRACE, 0); } + public List maptoken() { + return getRuleContexts(MaptokenContext.class); + } + public MaptokenContext maptoken(int i) { + return getRuleContext(MaptokenContext.class,i); + } + public TerminalNode RBRACE() { return getToken(PainlessParser.RBRACE, 0); } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public TerminalNode COLON() { return getToken(PainlessParser.COLON, 0); } + public MapinitializerContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_mapinitializer; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitMapinitializer(this); + else return visitor.visitChildren(this); + } + } + public final MapinitializerContext mapinitializer() throws RecognitionException { + MapinitializerContext _localctx = new MapinitializerContext(_ctx, getState()); + enterRule(_localctx, 52, RULE_mapinitializer); + int _la; + try { + setState(442); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,41,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(428); + match(LBRACE); + setState(429); + maptoken(); + setState(434); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(430); + match(COMMA); + setState(431); + maptoken(); + } + } + setState(436); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(437); + match(RBRACE); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(439); + match(LBRACE); + setState(440); + match(COLON); + setState(441); + match(RBRACE); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class MaptokenContext extends ParserRuleContext { + public List expression() { + 
return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode COLON() { return getToken(PainlessParser.COLON, 0); } + public MaptokenContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_maptoken; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitMaptoken(this); + else return visitor.visitChildren(this); + } + } + public final MaptokenContext maptoken() throws RecognitionException { + MaptokenContext _localctx = new MaptokenContext(_ctx, getState()); + enterRule(_localctx, 54, RULE_maptoken); + try { + enterOuterAlt(_localctx, 1); + { + setState(444); + expression(0); + setState(445); + match(COLON); + setState(446); + expression(0); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class ArgumentsContext extends ParserRuleContext { + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public List argument() { + return getRuleContexts(ArgumentContext.class); + } + public ArgumentContext argument(int i) { + return getRuleContext(ArgumentContext.class,i); + } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public ArgumentsContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_arguments; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitArguments(this); + else return visitor.visitChildren(this); + } + } + public final ArgumentsContext arguments() throws RecognitionException { + ArgumentsContext _localctx = new ArgumentsContext(_ctx, getState()); + enterRule(_localctx, 56, RULE_arguments); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + { + setState(448); + match(LP); + setState(457); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << THIS) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { + { + setState(449); + argument(); + setState(454); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(450); + match(COMMA); + setState(451); + argument(); + } + } + setState(456); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + setState(459); + match(RP); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class ArgumentContext extends ParserRuleContext { + public ExpressionContext expression() { + return 
getRuleContext(ExpressionContext.class,0); + } + public LambdaContext lambda() { + return getRuleContext(LambdaContext.class,0); + } + public FuncrefContext funcref() { + return getRuleContext(FuncrefContext.class,0); + } + public ArgumentContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_argument; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitArgument(this); + else return visitor.visitChildren(this); + } + } + public final ArgumentContext argument() throws RecognitionException { + ArgumentContext _localctx = new ArgumentContext(_ctx, getState()); + enterRule(_localctx, 58, RULE_argument); + try { + setState(464); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,44,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(461); + expression(0); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(462); + lambda(); + } + break; + case 3: + enterOuterAlt(_localctx, 3); + { + setState(463); + funcref(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class LambdaContext extends ParserRuleContext { + public TerminalNode ARROW() { return getToken(PainlessParser.ARROW, 0); } + public List lamtype() { + return getRuleContexts(LamtypeContext.class); + } + public LamtypeContext lamtype(int i) { + return getRuleContext(LamtypeContext.class,i); + } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public BlockContext block() { + return getRuleContext(BlockContext.class,0); + } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public LambdaContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_lambda; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitLambda(this); + else return visitor.visitChildren(this); + } + } + public final LambdaContext lambda() throws RecognitionException { + LambdaContext _localctx = new LambdaContext(_ctx, getState()); + enterRule(_localctx, 60, RULE_lambda); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(479); + switch (_input.LA(1)) { + case TYPE: + case ID: + { + setState(466); + lamtype(); + } + break; + case LP: + { + setState(467); + match(LP); + setState(476); + _la = _input.LA(1); + if (_la==TYPE || _la==ID) { + { + setState(468); + lamtype(); + setState(473); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(469); + match(COMMA); + setState(470); + lamtype(); + } + } + setState(475); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + setState(478); + match(RP); + } + break; + default: + throw new NoViableAltException(this); + } + setState(481); + match(ARROW); + setState(484); + switch (_input.LA(1)) { + case LBRACK: + { + setState(482); + block(); + } + break; + case LBRACE: + case LP: + case 
NEW: + case BOOLNOT: + case BWNOT: + case ADD: + case SUB: + case INCR: + case DECR: + case OCTAL: + case HEX: + case INTEGER: + case DECIMAL: + case STRING: + case REGEX: + case TRUE: + case FALSE: + case NULL: + case TYPE: + case ID: + { + setState(483); + expression(0); + } + break; + default: + throw new NoViableAltException(this); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class LamtypeContext extends ParserRuleContext { + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public LamtypeContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_lamtype; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitLamtype(this); + else return visitor.visitChildren(this); + } + } + public final LamtypeContext lamtype() throws RecognitionException { + LamtypeContext _localctx = new LamtypeContext(_ctx, getState()); + enterRule(_localctx, 62, RULE_lamtype); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(487); + _la = _input.LA(1); + if (_la==TYPE) { + { + setState(486); + decltype(); + } + } + setState(489); + match(ID); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class FuncrefContext extends ParserRuleContext { + public FuncrefContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_funcref; } + public FuncrefContext() { } + public void copyFrom(FuncrefContext ctx) { + super.copyFrom(ctx); + } + } + public static class ClassfuncrefContext extends FuncrefContext { + public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); } + public TerminalNode REF() { return getToken(PainlessParser.REF, 0); } + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public ClassfuncrefContext(FuncrefContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitClassfuncref(this); + else return visitor.visitChildren(this); + } + } + public static class CapturingfuncrefContext extends FuncrefContext { + public List ID() { return getTokens(PainlessParser.ID); } + public TerminalNode ID(int i) { + return getToken(PainlessParser.ID, i); + } + public TerminalNode REF() { return getToken(PainlessParser.REF, 0); } + public CapturingfuncrefContext(FuncrefContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitCapturingfuncref(this); + else return visitor.visitChildren(this); + } + } + public static class ConstructorfuncrefContext extends FuncrefContext { + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public TerminalNode REF() { return getToken(PainlessParser.REF, 0); } + public TerminalNode NEW() { return getToken(PainlessParser.NEW, 0); } + public 
ConstructorfuncrefContext(FuncrefContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitConstructorfuncref(this); + else return visitor.visitChildren(this); + } + } + public static class LocalfuncrefContext extends FuncrefContext { + public TerminalNode THIS() { return getToken(PainlessParser.THIS, 0); } + public TerminalNode REF() { return getToken(PainlessParser.REF, 0); } + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public LocalfuncrefContext(FuncrefContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitLocalfuncref(this); + else return visitor.visitChildren(this); + } + } + public final FuncrefContext funcref() throws RecognitionException { + FuncrefContext _localctx = new FuncrefContext(_ctx, getState()); + enterRule(_localctx, 64, RULE_funcref); + try { + setState(504); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,50,_ctx) ) { + case 1: + _localctx = new ClassfuncrefContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(491); + match(TYPE); + setState(492); + match(REF); + setState(493); + match(ID); + } + break; + case 2: + _localctx = new ConstructorfuncrefContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(494); + decltype(); + setState(495); + match(REF); + setState(496); + match(NEW); + } + break; + case 3: + _localctx = new CapturingfuncrefContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(498); + match(ID); + setState(499); + match(REF); + setState(500); + match(ID); + } + break; + case 4: + _localctx = new LocalfuncrefContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(501); + match(THIS); + setState(502); + match(REF); + setState(503); + match(ID); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { + switch (ruleIndex) { + case 4: + return rstatement_sempred((RstatementContext)_localctx, predIndex); + case 15: + return expression_sempred((ExpressionContext)_localctx, predIndex); + } + return true; + } + private boolean rstatement_sempred(RstatementContext _localctx, int predIndex) { + switch (predIndex) { + case 0: + return _input.LA(1) != ELSE ; + } + return true; + } + private boolean expression_sempred(ExpressionContext _localctx, int predIndex) { + switch (predIndex) { + case 1: + return precpred(_ctx, 15); + case 2: + return precpred(_ctx, 14); + case 3: + return precpred(_ctx, 13); + case 4: + return precpred(_ctx, 12); + case 5: + return precpred(_ctx, 11); + case 6: + return precpred(_ctx, 9); + case 7: + return precpred(_ctx, 8); + case 8: + return precpred(_ctx, 7); + case 9: + return precpred(_ctx, 6); + case 10: + return precpred(_ctx, 5); + case 11: + return precpred(_ctx, 4); + case 12: + return precpred(_ctx, 3); + case 13: + return precpred(_ctx, 2); + case 14: + return precpred(_ctx, 1); + case 15: + return precpred(_ctx, 10); + } + return true; + } + public static final String _serializedATN = + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3V\u01fd\4\2\t\2\4"+ + "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ + 
"\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ + "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ + "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+ + "\t!\4\"\t\"\3\2\7\2F\n\2\f\2\16\2I\13\2\3\2\7\2L\n\2\f\2\16\2O\13\2\3"+ + "\2\3\2\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4_\n\4\f\4\16"+ + "\4b\13\4\5\4d\n\4\3\4\3\4\3\5\3\5\3\5\3\5\5\5l\n\5\3\6\3\6\3\6\3\6\3\6"+ + "\3\6\3\6\3\6\5\6v\n\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6~\n\6\3\6\3\6\3\6\5\6"+ + "\u0083\n\6\3\6\3\6\5\6\u0087\n\6\3\6\3\6\5\6\u008b\n\6\3\6\3\6\3\6\5\6"+ + "\u0090\n\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6"+ + "\3\6\3\6\3\6\3\6\3\6\6\6\u00a6\n\6\r\6\16\6\u00a7\5\6\u00aa\n\6\3\7\3"+ + "\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u00b8\n\7\3\7\3\7\3\7\5"+ + "\7\u00bd\n\7\3\b\3\b\5\b\u00c1\n\b\3\t\3\t\7\t\u00c5\n\t\f\t\16\t\u00c8"+ + "\13\t\3\t\5\t\u00cb\n\t\3\t\3\t\3\n\3\n\3\13\3\13\5\13\u00d3\n\13\3\f"+ + "\3\f\3\r\3\r\3\r\3\r\7\r\u00db\n\r\f\r\16\r\u00de\13\r\3\16\3\16\3\16"+ + "\7\16\u00e3\n\16\f\16\16\16\u00e6\13\16\3\17\3\17\3\17\5\17\u00eb\n\17"+ + "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\7\21\u0127\n\21\f\21\16\21\u012a\13\21\3\22\3\22\3\22\3\22"+ + "\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\5\22\u0139\n\22\3\23\3\23"+ + "\7\23\u013d\n\23\f\23\16\23\u0140\13\23\3\23\3\23\3\23\7\23\u0145\n\23"+ + "\f\23\16\23\u0148\13\23\3\23\5\23\u014b\n\23\3\24\3\24\3\24\3\24\3\24"+ + "\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\5\24"+ + "\u015f\n\24\3\25\3\25\3\25\5\25\u0164\n\25\3\26\3\26\5\26\u0168\n\26\3"+ + "\27\3\27\3\27\3\27\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3"+ + "\32\3\32\3\32\6\32\u017b\n\32\r\32\16\32\u017c\3\32\3\32\7\32\u0181\n"+ + "\32\f\32\16\32\u0184\13\32\5\32\u0186\n\32\3\32\3\32\3\32\3\32\3\32\3"+ + "\32\3\32\3\32\7\32\u0190\n\32\f\32\16\32\u0193\13\32\5\32\u0195\n\32\3"+ + "\32\3\32\7\32\u0199\n\32\f\32\16\32\u019c\13\32\5\32\u019e\n\32\3\33\3"+ + "\33\3\33\3\33\7\33\u01a4\n\33\f\33\16\33\u01a7\13\33\3\33\3\33\3\33\3"+ + "\33\5\33\u01ad\n\33\3\34\3\34\3\34\3\34\7\34\u01b3\n\34\f\34\16\34\u01b6"+ + "\13\34\3\34\3\34\3\34\3\34\3\34\5\34\u01bd\n\34\3\35\3\35\3\35\3\35\3"+ + "\36\3\36\3\36\3\36\7\36\u01c7\n\36\f\36\16\36\u01ca\13\36\5\36\u01cc\n"+ + "\36\3\36\3\36\3\37\3\37\3\37\5\37\u01d3\n\37\3 \3 \3 \3 \3 \7 \u01da\n"+ + " \f \16 \u01dd\13 \5 \u01df\n \3 \5 \u01e2\n \3 \3 \3 \5 \u01e7\n \3!"+ + "\5!\u01ea\n!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\""+ + "\5\"\u01fb\n\"\3\"\2\3 #\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&("+ + "*,.\60\62\64\668:<>@B\2\17\3\3\16\16\3\2 \"\3\2#$\3\2:;\3\2%\'\3\2(+\3"+ + "\2,/\3\2>I\3\2<=\4\2\36\37#$\3\2JM\3\2\13\f\3\2UV\u0236\2G\3\2\2\2\4R"+ + "\3\2\2\2\6W\3\2\2\2\bk\3\2\2\2\n\u00a9\3\2\2\2\f\u00bc\3\2\2\2\16\u00c0"+ + "\3\2\2\2\20\u00c2\3\2\2\2\22\u00ce\3\2\2\2\24\u00d2\3\2\2\2\26\u00d4\3"+ + "\2\2\2\30\u00d6\3\2\2\2\32\u00df\3\2\2\2\34\u00e7\3\2\2\2\36\u00ec\3\2"+ + "\2\2 \u00f3\3\2\2\2\"\u0138\3\2\2\2$\u014a\3\2\2\2&\u015e\3\2\2\2(\u0163"+ + "\3\2\2\2*\u0167\3\2\2\2,\u0169\3\2\2\2.\u016d\3\2\2\2\60\u0170\3\2\2\2"+ + "\62\u019d\3\2\2\2\64\u01ac\3\2\2\2\66\u01bc\3\2\2\28\u01be\3\2\2\2:\u01c2"+ + 
"\3\2\2\2<\u01d2\3\2\2\2>\u01e1\3\2\2\2@\u01e9\3\2\2\2B\u01fa\3\2\2\2D"+ + "F\5\4\3\2ED\3\2\2\2FI\3\2\2\2GE\3\2\2\2GH\3\2\2\2HM\3\2\2\2IG\3\2\2\2"+ + "JL\5\b\5\2KJ\3\2\2\2LO\3\2\2\2MK\3\2\2\2MN\3\2\2\2NP\3\2\2\2OM\3\2\2\2"+ + "PQ\7\2\2\3Q\3\3\2\2\2RS\5\32\16\2ST\7T\2\2TU\5\6\4\2UV\5\20\t\2V\5\3\2"+ + "\2\2Wc\7\t\2\2XY\5\32\16\2Y`\7T\2\2Z[\7\r\2\2[\\\5\32\16\2\\]\7T\2\2]"+ + "_\3\2\2\2^Z\3\2\2\2_b\3\2\2\2`^\3\2\2\2`a\3\2\2\2ad\3\2\2\2b`\3\2\2\2"+ + "cX\3\2\2\2cd\3\2\2\2de\3\2\2\2ef\7\n\2\2f\7\3\2\2\2gl\5\n\6\2hi\5\f\7"+ + "\2ij\t\2\2\2jl\3\2\2\2kg\3\2\2\2kh\3\2\2\2l\t\3\2\2\2mn\7\17\2\2no\7\t"+ + "\2\2op\5 \21\2pq\7\n\2\2qu\5\16\b\2rs\7\21\2\2sv\5\16\b\2tv\6\6\2\2ur"+ + "\3\2\2\2ut\3\2\2\2v\u00aa\3\2\2\2wx\7\22\2\2xy\7\t\2\2yz\5 \21\2z}\7\n"+ + "\2\2{~\5\16\b\2|~\5\22\n\2}{\3\2\2\2}|\3\2\2\2~\u00aa\3\2\2\2\177\u0080"+ + "\7\24\2\2\u0080\u0082\7\t\2\2\u0081\u0083\5\24\13\2\u0082\u0081\3\2\2"+ + "\2\u0082\u0083\3\2\2\2\u0083\u0084\3\2\2\2\u0084\u0086\7\16\2\2\u0085"+ + "\u0087\5 \21\2\u0086\u0085\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0088\3\2"+ + "\2\2\u0088\u008a\7\16\2\2\u0089\u008b\5\26\f\2\u008a\u0089\3\2\2\2\u008a"+ + "\u008b\3\2\2\2\u008b\u008c\3\2\2\2\u008c\u008f\7\n\2\2\u008d\u0090\5\16"+ + "\b\2\u008e\u0090\5\22\n\2\u008f\u008d\3\2\2\2\u008f\u008e\3\2\2\2\u0090"+ + "\u00aa\3\2\2\2\u0091\u0092\7\24\2\2\u0092\u0093\7\t\2\2\u0093\u0094\5"+ + "\32\16\2\u0094\u0095\7T\2\2\u0095\u0096\7\66\2\2\u0096\u0097\5 \21\2\u0097"+ + "\u0098\7\n\2\2\u0098\u0099\5\16\b\2\u0099\u00aa\3\2\2\2\u009a\u009b\7"+ + "\24\2\2\u009b\u009c\7\t\2\2\u009c\u009d\7T\2\2\u009d\u009e\7\20\2\2\u009e"+ + "\u009f\5 \21\2\u009f\u00a0\7\n\2\2\u00a0\u00a1\5\16\b\2\u00a1\u00aa\3"+ + "\2\2\2\u00a2\u00a3\7\31\2\2\u00a3\u00a5\5\20\t\2\u00a4\u00a6\5\36\20\2"+ + "\u00a5\u00a4\3\2\2\2\u00a6\u00a7\3\2\2\2\u00a7\u00a5\3\2\2\2\u00a7\u00a8"+ + "\3\2\2\2\u00a8\u00aa\3\2\2\2\u00a9m\3\2\2\2\u00a9w\3\2\2\2\u00a9\177\3"+ + "\2\2\2\u00a9\u0091\3\2\2\2\u00a9\u009a\3\2\2\2\u00a9\u00a2\3\2\2\2\u00aa"+ + "\13\3\2\2\2\u00ab\u00ac\7\23\2\2\u00ac\u00ad\5\20\t\2\u00ad\u00ae\7\22"+ + "\2\2\u00ae\u00af\7\t\2\2\u00af\u00b0\5 \21\2\u00b0\u00b1\7\n\2\2\u00b1"+ + "\u00bd\3\2\2\2\u00b2\u00bd\5\30\r\2\u00b3\u00bd\7\25\2\2\u00b4\u00bd\7"+ + "\26\2\2\u00b5\u00b7\7\27\2\2\u00b6\u00b8\5 \21\2\u00b7\u00b6\3\2\2\2\u00b7"+ + "\u00b8\3\2\2\2\u00b8\u00bd\3\2\2\2\u00b9\u00ba\7\33\2\2\u00ba\u00bd\5"+ + " \21\2\u00bb\u00bd\5 \21\2\u00bc\u00ab\3\2\2\2\u00bc\u00b2\3\2\2\2\u00bc"+ + "\u00b3\3\2\2\2\u00bc\u00b4\3\2\2\2\u00bc\u00b5\3\2\2\2\u00bc\u00b9\3\2"+ + "\2\2\u00bc\u00bb\3\2\2\2\u00bd\r\3\2\2\2\u00be\u00c1\5\20\t\2\u00bf\u00c1"+ + "\5\b\5\2\u00c0\u00be\3\2\2\2\u00c0\u00bf\3\2\2\2\u00c1\17\3\2\2\2\u00c2"+ + "\u00c6\7\5\2\2\u00c3\u00c5\5\b\5\2\u00c4\u00c3\3\2\2\2\u00c5\u00c8\3\2"+ + "\2\2\u00c6\u00c4\3\2\2\2\u00c6\u00c7\3\2\2\2\u00c7\u00ca\3\2\2\2\u00c8"+ + "\u00c6\3\2\2\2\u00c9\u00cb\5\f\7\2\u00ca\u00c9\3\2\2\2\u00ca\u00cb\3\2"+ + "\2\2\u00cb\u00cc\3\2\2\2\u00cc\u00cd\7\6\2\2\u00cd\21\3\2\2\2\u00ce\u00cf"+ + "\7\16\2\2\u00cf\23\3\2\2\2\u00d0\u00d3\5\30\r\2\u00d1\u00d3\5 \21\2\u00d2"+ + "\u00d0\3\2\2\2\u00d2\u00d1\3\2\2\2\u00d3\25\3\2\2\2\u00d4\u00d5\5 \21"+ + "\2\u00d5\27\3\2\2\2\u00d6\u00d7\5\32\16\2\u00d7\u00dc\5\34\17\2\u00d8"+ + "\u00d9\7\r\2\2\u00d9\u00db\5\34\17\2\u00da\u00d8\3\2\2\2\u00db\u00de\3"+ + "\2\2\2\u00dc\u00da\3\2\2\2\u00dc\u00dd\3\2\2\2\u00dd\31\3\2\2\2\u00de"+ + "\u00dc\3\2\2\2\u00df\u00e4\7S\2\2\u00e0\u00e1\7\7\2\2\u00e1\u00e3\7\b"+ + "\2\2\u00e2\u00e0\3\2\2\2\u00e3\u00e6\3\2\2\2\u00e4\u00e2\3\2\2\2\u00e4"+ + 
"\u00e5\3\2\2\2\u00e5\33\3\2\2\2\u00e6\u00e4\3\2\2\2\u00e7\u00ea\7T\2\2"+ + "\u00e8\u00e9\7>\2\2\u00e9\u00eb\5 \21\2\u00ea\u00e8\3\2\2\2\u00ea\u00eb"+ + "\3\2\2\2\u00eb\35\3\2\2\2\u00ec\u00ed\7\32\2\2\u00ed\u00ee\7\t\2\2\u00ee"+ + "\u00ef\7S\2\2\u00ef\u00f0\7T\2\2\u00f0\u00f1\7\n\2\2\u00f1\u00f2\5\20"+ + "\t\2\u00f2\37\3\2\2\2\u00f3\u00f4\b\21\1\2\u00f4\u00f5\5\"\22\2\u00f5"+ + "\u0128\3\2\2\2\u00f6\u00f7\f\21\2\2\u00f7\u00f8\t\3\2\2\u00f8\u0127\5"+ + " \21\22\u00f9\u00fa\f\20\2\2\u00fa\u00fb\t\4\2\2\u00fb\u0127\5 \21\21"+ + "\u00fc\u00fd\f\17\2\2\u00fd\u00fe\t\5\2\2\u00fe\u0127\5 \21\20\u00ff\u0100"+ + "\f\16\2\2\u0100\u0101\t\6\2\2\u0101\u0127\5 \21\17\u0102\u0103\f\r\2\2"+ + "\u0103\u0104\t\7\2\2\u0104\u0127\5 \21\16\u0105\u0106\f\13\2\2\u0106\u0107"+ + "\t\b\2\2\u0107\u0127\5 \21\f\u0108\u0109\f\n\2\2\u0109\u010a\7\60\2\2"+ + "\u010a\u0127\5 \21\13\u010b\u010c\f\t\2\2\u010c\u010d\7\61\2\2\u010d\u0127"+ + "\5 \21\n\u010e\u010f\f\b\2\2\u010f\u0110\7\62\2\2\u0110\u0127\5 \21\t"+ + "\u0111\u0112\f\7\2\2\u0112\u0113\7\63\2\2\u0113\u0127\5 \21\b\u0114\u0115"+ + "\f\6\2\2\u0115\u0116\7\64\2\2\u0116\u0127\5 \21\7\u0117\u0118\f\5\2\2"+ + "\u0118\u0119\7\65\2\2\u0119\u011a\5 \21\2\u011a\u011b\7\66\2\2\u011b\u011c"+ + "\5 \21\5\u011c\u0127\3\2\2\2\u011d\u011e\f\4\2\2\u011e\u011f\7\67\2\2"+ + "\u011f\u0127\5 \21\4\u0120\u0121\f\3\2\2\u0121\u0122\t\t\2\2\u0122\u0127"+ + "\5 \21\3\u0123\u0124\f\f\2\2\u0124\u0125\7\35\2\2\u0125\u0127\5\32\16"+ + "\2\u0126\u00f6\3\2\2\2\u0126\u00f9\3\2\2\2\u0126\u00fc\3\2\2\2\u0126\u00ff"+ + "\3\2\2\2\u0126\u0102\3\2\2\2\u0126\u0105\3\2\2\2\u0126\u0108\3\2\2\2\u0126"+ + "\u010b\3\2\2\2\u0126\u010e\3\2\2\2\u0126\u0111\3\2\2\2\u0126\u0114\3\2"+ + "\2\2\u0126\u0117\3\2\2\2\u0126\u011d\3\2\2\2\u0126\u0120\3\2\2\2\u0126"+ + "\u0123\3\2\2\2\u0127\u012a\3\2\2\2\u0128\u0126\3\2\2\2\u0128\u0129\3\2"+ + "\2\2\u0129!\3\2\2\2\u012a\u0128\3\2\2\2\u012b\u012c\t\n\2\2\u012c\u0139"+ + "\5$\23\2\u012d\u012e\5$\23\2\u012e\u012f\t\n\2\2\u012f\u0139\3\2\2\2\u0130"+ + "\u0139\5$\23\2\u0131\u0132\t\13\2\2\u0132\u0139\5\"\22\2\u0133\u0134\7"+ + "\t\2\2\u0134\u0135\5\32\16\2\u0135\u0136\7\n\2\2\u0136\u0137\5\"\22\2"+ + "\u0137\u0139\3\2\2\2\u0138\u012b\3\2\2\2\u0138\u012d\3\2\2\2\u0138\u0130"+ + "\3\2\2\2\u0138\u0131\3\2\2\2\u0138\u0133\3\2\2\2\u0139#\3\2\2\2\u013a"+ + "\u013e\5&\24\2\u013b\u013d\5(\25\2\u013c\u013b\3\2\2\2\u013d\u0140\3\2"+ + "\2\2\u013e\u013c\3\2\2\2\u013e\u013f\3\2\2\2\u013f\u014b\3\2\2\2\u0140"+ + "\u013e\3\2\2\2\u0141\u0142\5\32\16\2\u0142\u0146\5*\26\2\u0143\u0145\5"+ + "(\25\2\u0144\u0143\3\2\2\2\u0145\u0148\3\2\2\2\u0146\u0144\3\2\2\2\u0146"+ + "\u0147\3\2\2\2\u0147\u014b\3\2\2\2\u0148\u0146\3\2\2\2\u0149\u014b\5\62"+ + "\32\2\u014a\u013a\3\2\2\2\u014a\u0141\3\2\2\2\u014a\u0149\3\2\2\2\u014b"+ + "%\3\2\2\2\u014c\u014d\7\t\2\2\u014d\u014e\5 \21\2\u014e\u014f\7\n\2\2"+ + "\u014f\u015f\3\2\2\2\u0150\u015f\t\f\2\2\u0151\u015f\7P\2\2\u0152\u015f"+ + "\7Q\2\2\u0153\u015f\7R\2\2\u0154\u015f\7N\2\2\u0155\u015f\7O\2\2\u0156"+ + "\u015f\5\64\33\2\u0157\u015f\5\66\34\2\u0158\u015f\7T\2\2\u0159\u015a"+ + "\7T\2\2\u015a\u015f\5:\36\2\u015b\u015c\7\30\2\2\u015c\u015d\7S\2\2\u015d"+ + "\u015f\5:\36\2\u015e\u014c\3\2\2\2\u015e\u0150\3\2\2\2\u015e\u0151\3\2"+ + "\2\2\u015e\u0152\3\2\2\2\u015e\u0153\3\2\2\2\u015e\u0154\3\2\2\2\u015e"+ + "\u0155\3\2\2\2\u015e\u0156\3\2\2\2\u015e\u0157\3\2\2\2\u015e\u0158\3\2"+ + "\2\2\u015e\u0159\3\2\2\2\u015e\u015b\3\2\2\2\u015f\'\3\2\2\2\u0160\u0164"+ + "\5,\27\2\u0161\u0164\5.\30\2\u0162\u0164\5\60\31\2\u0163\u0160\3\2\2\2"+ + 
"\u0163\u0161\3\2\2\2\u0163\u0162\3\2\2\2\u0164)\3\2\2\2\u0165\u0168\5"+ + ",\27\2\u0166\u0168\5.\30\2\u0167\u0165\3\2\2\2\u0167\u0166\3\2\2\2\u0168"+ + "+\3\2\2\2\u0169\u016a\t\r\2\2\u016a\u016b\7V\2\2\u016b\u016c\5:\36\2\u016c"+ + "-\3\2\2\2\u016d\u016e\t\r\2\2\u016e\u016f\t\16\2\2\u016f/\3\2\2\2\u0170"+ + "\u0171\7\7\2\2\u0171\u0172\5 \21\2\u0172\u0173\7\b\2\2\u0173\61\3\2\2"+ + "\2\u0174\u0175\7\30\2\2\u0175\u017a\7S\2\2\u0176\u0177\7\7\2\2\u0177\u0178"+ + "\5 \21\2\u0178\u0179\7\b\2\2\u0179\u017b\3\2\2\2\u017a\u0176\3\2\2\2\u017b"+ + "\u017c\3\2\2\2\u017c\u017a\3\2\2\2\u017c\u017d\3\2\2\2\u017d\u0185\3\2"+ + "\2\2\u017e\u0182\5*\26\2\u017f\u0181\5(\25\2\u0180\u017f\3\2\2\2\u0181"+ + "\u0184\3\2\2\2\u0182\u0180\3\2\2\2\u0182\u0183\3\2\2\2\u0183\u0186\3\2"+ + "\2\2\u0184\u0182\3\2\2\2\u0185\u017e\3\2\2\2\u0185\u0186\3\2\2\2\u0186"+ + "\u019e\3\2\2\2\u0187\u0188\7\30\2\2\u0188\u0189\7S\2\2\u0189\u018a\7\7"+ + "\2\2\u018a\u018b\7\b\2\2\u018b\u0194\7\5\2\2\u018c\u0191\5 \21\2\u018d"+ + "\u018e\7\r\2\2\u018e\u0190\5 \21\2\u018f\u018d\3\2\2\2\u0190\u0193\3\2"+ + "\2\2\u0191\u018f\3\2\2\2\u0191\u0192\3\2\2\2\u0192\u0195\3\2\2\2\u0193"+ + "\u0191\3\2\2\2\u0194\u018c\3\2\2\2\u0194\u0195\3\2\2\2\u0195\u0196\3\2"+ + "\2\2\u0196\u019a\7\6\2\2\u0197\u0199\5(\25\2\u0198\u0197\3\2\2\2\u0199"+ + "\u019c\3\2\2\2\u019a\u0198\3\2\2\2\u019a\u019b\3\2\2\2\u019b\u019e\3\2"+ + "\2\2\u019c\u019a\3\2\2\2\u019d\u0174\3\2\2\2\u019d\u0187\3\2\2\2\u019e"+ + "\63\3\2\2\2\u019f\u01a0\7\7\2\2\u01a0\u01a5\5 \21\2\u01a1\u01a2\7\r\2"+ + "\2\u01a2\u01a4\5 \21\2\u01a3\u01a1\3\2\2\2\u01a4\u01a7\3\2\2\2\u01a5\u01a3"+ + "\3\2\2\2\u01a5\u01a6\3\2\2\2\u01a6\u01a8\3\2\2\2\u01a7\u01a5\3\2\2\2\u01a8"+ + "\u01a9\7\b\2\2\u01a9\u01ad\3\2\2\2\u01aa\u01ab\7\7\2\2\u01ab\u01ad\7\b"+ + "\2\2\u01ac\u019f\3\2\2\2\u01ac\u01aa\3\2\2\2\u01ad\65\3\2\2\2\u01ae\u01af"+ + "\7\7\2\2\u01af\u01b4\58\35\2\u01b0\u01b1\7\r\2\2\u01b1\u01b3\58\35\2\u01b2"+ + "\u01b0\3\2\2\2\u01b3\u01b6\3\2\2\2\u01b4\u01b2\3\2\2\2\u01b4\u01b5\3\2"+ + "\2\2\u01b5\u01b7\3\2\2\2\u01b6\u01b4\3\2\2\2\u01b7\u01b8\7\b\2\2\u01b8"+ + "\u01bd\3\2\2\2\u01b9\u01ba\7\7\2\2\u01ba\u01bb\7\66\2\2\u01bb\u01bd\7"+ + "\b\2\2\u01bc\u01ae\3\2\2\2\u01bc\u01b9\3\2\2\2\u01bd\67\3\2\2\2\u01be"+ + "\u01bf\5 \21\2\u01bf\u01c0\7\66\2\2\u01c0\u01c1\5 \21\2\u01c19\3\2\2\2"+ + "\u01c2\u01cb\7\t\2\2\u01c3\u01c8\5<\37\2\u01c4\u01c5\7\r\2\2\u01c5\u01c7"+ + "\5<\37\2\u01c6\u01c4\3\2\2\2\u01c7\u01ca\3\2\2\2\u01c8\u01c6\3\2\2\2\u01c8"+ + "\u01c9\3\2\2\2\u01c9\u01cc\3\2\2\2\u01ca\u01c8\3\2\2\2\u01cb\u01c3\3\2"+ + "\2\2\u01cb\u01cc\3\2\2\2\u01cc\u01cd\3\2\2\2\u01cd\u01ce\7\n\2\2\u01ce"+ + ";\3\2\2\2\u01cf\u01d3\5 \21\2\u01d0\u01d3\5> \2\u01d1\u01d3\5B\"\2\u01d2"+ + "\u01cf\3\2\2\2\u01d2\u01d0\3\2\2\2\u01d2\u01d1\3\2\2\2\u01d3=\3\2\2\2"+ + "\u01d4\u01e2\5@!\2\u01d5\u01de\7\t\2\2\u01d6\u01db\5@!\2\u01d7\u01d8\7"+ + "\r\2\2\u01d8\u01da\5@!\2\u01d9\u01d7\3\2\2\2\u01da\u01dd\3\2\2\2\u01db"+ + "\u01d9\3\2\2\2\u01db\u01dc\3\2\2\2\u01dc\u01df\3\2\2\2\u01dd\u01db\3\2"+ + "\2\2\u01de\u01d6\3\2\2\2\u01de\u01df\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0"+ + "\u01e2\7\n\2\2\u01e1\u01d4\3\2\2\2\u01e1\u01d5\3\2\2\2\u01e2\u01e3\3\2"+ + "\2\2\u01e3\u01e6\79\2\2\u01e4\u01e7\5\20\t\2\u01e5\u01e7\5 \21\2\u01e6"+ + "\u01e4\3\2\2\2\u01e6\u01e5\3\2\2\2\u01e7?\3\2\2\2\u01e8\u01ea\5\32\16"+ + "\2\u01e9\u01e8\3\2\2\2\u01e9\u01ea\3\2\2\2\u01ea\u01eb\3\2\2\2\u01eb\u01ec"+ + "\7T\2\2\u01ecA\3\2\2\2\u01ed\u01ee\7S\2\2\u01ee\u01ef\78\2\2\u01ef\u01fb"+ + "\7T\2\2\u01f0\u01f1\5\32\16\2\u01f1\u01f2\78\2\2\u01f2\u01f3\7\30\2\2"+ + 
"\u01f3\u01fb\3\2\2\2\u01f4\u01f5\7T\2\2\u01f5\u01f6\78\2\2\u01f6\u01fb"+ + "\7T\2\2\u01f7\u01f8\7\34\2\2\u01f8\u01f9\78\2\2\u01f9\u01fb\7T\2\2\u01fa"+ + "\u01ed\3\2\2\2\u01fa\u01f0\3\2\2\2\u01fa\u01f4\3\2\2\2\u01fa\u01f7\3\2"+ + "\2\2\u01fbC\3\2\2\2\65GM`cku}\u0082\u0086\u008a\u008f\u00a7\u00a9\u00b7"+ + "\u00bc\u00c0\u00c6\u00ca\u00d2\u00dc\u00e4\u00ea\u0126\u0128\u0138\u013e"+ + "\u0146\u014a\u015e\u0163\u0167\u017c\u0182\u0185\u0191\u0194\u019a\u019d"+ + "\u01a5\u01ac\u01b4\u01bc\u01c8\u01cb\u01d2\u01db\u01de\u01e1\u01e6\u01e9"+ + "\u01fa"; + public static final ATN _ATN = + new ATNDeserializer().deserialize(_serializedATN.toCharArray()); + static { + _decisionToDFA = new DFA[_ATN.getNumberOfDecisions()]; + for (int i = 0; i < _ATN.getNumberOfDecisions(); i++) { + _decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i); + } + } +} \ No newline at end of file diff --git a/test/dataset_collection/InlineTestExamples/PainlessParser.java b/test/dataset_collection/InlineTestExamples/PainlessParser.java new file mode 100644 index 00000000..98b2b13b --- /dev/null +++ b/test/dataset_collection/InlineTestExamples/PainlessParser.java @@ -0,0 +1,3832 @@ +package org.elasticsearch.painless.antlr; +import org.antlr.v4.runtime.FailedPredicateException; +import org.antlr.v4.runtime.NoViableAltException; +import org.antlr.v4.runtime.Parser; +import org.antlr.v4.runtime.ParserRuleContext; +import org.antlr.v4.runtime.RecognitionException; +import org.antlr.v4.runtime.RuleContext; +import org.antlr.v4.runtime.RuntimeMetaData; +import org.antlr.v4.runtime.TokenStream; +import org.antlr.v4.runtime.Vocabulary; +import org.antlr.v4.runtime.VocabularyImpl; +import org.antlr.v4.runtime.atn.ATN; +import org.antlr.v4.runtime.atn.ATNDeserializer; +import org.antlr.v4.runtime.atn.ParserATNSimulator; +import org.antlr.v4.runtime.atn.PredictionContextCache; +import org.antlr.v4.runtime.dfa.DFA; +import org.antlr.v4.runtime.tree.ParseTreeVisitor; +import org.antlr.v4.runtime.tree.TerminalNode; +import java.util.List; +@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"}) +class PainlessParser extends Parser { + static { RuntimeMetaData.checkVersion("4.5.3", RuntimeMetaData.VERSION); } + protected static final DFA[] _decisionToDFA; + protected static final PredictionContextCache _sharedContextCache = + new PredictionContextCache(); + public static final int + WS=1, COMMENT=2, LBRACK=3, RBRACK=4, LBRACE=5, RBRACE=6, LP=7, RP=8, DOT=9, + NSDOT=10, COMMA=11, SEMICOLON=12, IF=13, IN=14, ELSE=15, WHILE=16, DO=17, + FOR=18, CONTINUE=19, BREAK=20, RETURN=21, NEW=22, TRY=23, CATCH=24, THROW=25, + THIS=26, INSTANCEOF=27, BOOLNOT=28, BWNOT=29, MUL=30, DIV=31, REM=32, + ADD=33, SUB=34, LSH=35, RSH=36, USH=37, LT=38, LTE=39, GT=40, GTE=41, + EQ=42, EQR=43, NE=44, NER=45, BWAND=46, XOR=47, BWOR=48, BOOLAND=49, BOOLOR=50, + COND=51, COLON=52, ELVIS=53, REF=54, ARROW=55, FIND=56, MATCH=57, INCR=58, + DECR=59, ASSIGN=60, AADD=61, ASUB=62, AMUL=63, ADIV=64, AREM=65, AAND=66, + AXOR=67, AOR=68, ALSH=69, ARSH=70, AUSH=71, OCTAL=72, HEX=73, INTEGER=74, + DECIMAL=75, STRING=76, REGEX=77, TRUE=78, FALSE=79, NULL=80, TYPE=81, + ID=82, DOTINTEGER=83, DOTID=84; + public static final int + RULE_source = 0, RULE_function = 1, RULE_parameters = 2, RULE_statement = 3, + RULE_rstatement = 4, RULE_dstatement = 5, RULE_trailer = 6, RULE_block = 7, + RULE_empty = 8, RULE_initializer = 9, RULE_afterthought = 10, RULE_declaration = 11, + RULE_decltype = 12, RULE_declvar = 13, RULE_trap = 14, RULE_expression = 15, + RULE_unary = 16, 
+    RULE_chain = 17, RULE_primary = 18, RULE_postfix = 19,
+    RULE_postdot = 20, RULE_callinvoke = 21, RULE_fieldaccess = 22, RULE_braceaccess = 23,
+    RULE_arrayinitializer = 24, RULE_listinitializer = 25, RULE_mapinitializer = 26,
+    RULE_maptoken = 27, RULE_arguments = 28, RULE_argument = 29, RULE_lambda = 30,
+    RULE_lamtype = 31, RULE_funcref = 32;
+  public static final String[] ruleNames = {
+    "source", "function", "parameters", "statement", "rstatement", "dstatement",
+    "trailer", "block", "empty", "initializer", "afterthought", "declaration",
+    "decltype", "declvar", "trap", "expression", "unary", "chain", "primary",
+    "postfix", "postdot", "callinvoke", "fieldaccess", "braceaccess", "arrayinitializer",
+    "listinitializer", "mapinitializer", "maptoken", "arguments", "argument",
+    "lambda", "lamtype", "funcref"
+  };
+  private static final String[] _LITERAL_NAMES = {
+    null, null, null, "'{'", "'}'", "'['", "']'", "'('", "')'", "'.'", "'?.'",
+    "','", "';'", "'if'", "'in'", "'else'", "'while'", "'do'", "'for'", "'continue'",
+    "'break'", "'return'", "'new'", "'try'", "'catch'", "'throw'", "'this'",
+    "'instanceof'", "'!'", "'~'", "'*'", "'/'", "'%'", "'+'", "'-'", "'<<'",
+    "'>>'", "'>>>'", "'<'", "'<='", "'>'", "'>='", "'=='", "'==='", "'!='",
+    "'!=='", "'&'", "'^'", "'|'", "'&&'", "'||'", "'?'", "':'", "'?:'", "'::'",
+    "'->'", "'=~'", "'==~'", "'++'", "'--'", "'='", "'+='", "'-='", "'*='",
+    "'/='", "'%='", "'&='", "'^='", "'|='", "'<<='", "'>>='", "'>>>='", null,
+    null, null, null, null, null, "'true'", "'false'", "'null'"
+  };
+  private static final String[] _SYMBOLIC_NAMES = {
+    null, "WS", "COMMENT", "LBRACK", "RBRACK", "LBRACE", "RBRACE", "LP", "RP",
+    "DOT", "NSDOT", "COMMA", "SEMICOLON", "IF", "IN", "ELSE", "WHILE", "DO",
+    "FOR", "CONTINUE", "BREAK", "RETURN", "NEW", "TRY", "CATCH", "THROW",
+    "THIS", "INSTANCEOF", "BOOLNOT", "BWNOT", "MUL", "DIV", "REM", "ADD",
+    "SUB", "LSH", "RSH", "USH", "LT", "LTE", "GT", "GTE", "EQ", "EQR", "NE",
+    "NER", "BWAND", "XOR", "BWOR", "BOOLAND", "BOOLOR", "COND", "COLON", "ELVIS",
+    "REF", "ARROW", "FIND", "MATCH", "INCR", "DECR", "ASSIGN", "AADD", "ASUB",
+    "AMUL", "ADIV", "AREM", "AAND", "AXOR", "AOR", "ALSH", "ARSH", "AUSH",
+    "OCTAL", "HEX", "INTEGER", "DECIMAL", "STRING", "REGEX", "TRUE", "FALSE",
+    "NULL", "TYPE", "ID", "DOTINTEGER", "DOTID"
+  };
+  public static final Vocabulary VOCABULARY = new VocabularyImpl(_LITERAL_NAMES, _SYMBOLIC_NAMES);
+  @Deprecated
+  public static final String[] tokenNames;
+  static {
+    tokenNames = new String[_SYMBOLIC_NAMES.length];
+    for (int i = 0; i < tokenNames.length; i++) {
+      tokenNames[i] = VOCABULARY.getLiteralName(i);
+      if (tokenNames[i] == null) {
+        tokenNames[i] = VOCABULARY.getSymbolicName(i);
+      }
+      if (tokenNames[i] == null) {
+        tokenNames[i] = "<INVALID>";
+      }
+    }
+  }
+  @Override
+  @Deprecated
+  public String[] getTokenNames() {
+    return tokenNames;
+  }
+  @Override
+  public Vocabulary getVocabulary() {
+    return VOCABULARY;
+  }
+  @Override
+  public String getGrammarFileName() { return "PainlessParser.g4"; }
+  @Override
+  public String[] getRuleNames() { return ruleNames; }
+  @Override
+  public String getSerializedATN() { return _serializedATN; }
+  @Override
+  public ATN getATN() { return _ATN; }
+  public PainlessParser(TokenStream input) {
+    super(input);
+    _interp = new ParserATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache);
+  }
+  public static class SourceContext extends ParserRuleContext {
+    public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); }
+    public List<FunctionContext> function() {
+      return getRuleContexts(FunctionContext.class);
+    }
+    public FunctionContext function(int i) {
+      return getRuleContext(FunctionContext.class,i);
+    }
+    public List<StatementContext> statement() {
+      return getRuleContexts(StatementContext.class);
+    }
+    public StatementContext statement(int i) {
+      return getRuleContext(StatementContext.class,i);
+    }
+    public SourceContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_source; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitSource(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final SourceContext source() throws RecognitionException {
+    SourceContext _localctx = new SourceContext(_ctx, getState());
+    enterRule(_localctx, 0, RULE_source);
+    int _la;
+    try {
+      int _alt;
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(69);
+      _errHandler.sync(this);
+      _alt = getInterpreter().adaptivePredict(_input,0,_ctx);
+      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
+        if ( _alt==1 ) {
+          {
+          {
+          setState(66);
+          function();
+          }
+          }
+        }
+        setState(71);
+        _errHandler.sync(this);
+        _alt = getInterpreter().adaptivePredict(_input,0,_ctx);
+      }
+      setState(75);
+      _errHandler.sync(this);
+      _la = _input.LA(1);
+      while ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << IF) | (1L << WHILE) | (1L << DO) | (1L << FOR) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << TRY) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) {
+        {
+        {
+        setState(72);
+        statement();
+        }
+        }
+        setState(77);
+        _errHandler.sync(this);
+        _la = _input.LA(1);
+      }
+      setState(78);
+      match(EOF);
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class FunctionContext extends ParserRuleContext {
+    public DecltypeContext decltype() {
+      return getRuleContext(DecltypeContext.class,0);
+    }
+    public TerminalNode ID() { return getToken(PainlessParser.ID, 0); }
+    public ParametersContext parameters() {
+      return getRuleContext(ParametersContext.class,0);
+    }
+    public BlockContext block() {
+      return getRuleContext(BlockContext.class,0);
+    }
+    public FunctionContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_function; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitFunction(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final FunctionContext function() throws RecognitionException {
+    FunctionContext _localctx = new FunctionContext(_ctx, getState());
+    enterRule(_localctx, 2, RULE_function);
+    try {
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(80);
+      decltype();
+      setState(81);
+      match(ID);
+      setState(82);
+      parameters();
+      setState(83);
+      block();
+      }
+    }
+    catch (RecognitionException re) {
_localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class ParametersContext extends ParserRuleContext { + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public List decltype() { + return getRuleContexts(DecltypeContext.class); + } + public DecltypeContext decltype(int i) { + return getRuleContext(DecltypeContext.class,i); + } + public List ID() { return getTokens(PainlessParser.ID); } + public TerminalNode ID(int i) { + return getToken(PainlessParser.ID, i); + } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public ParametersContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_parameters; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitParameters(this); + else return visitor.visitChildren(this); + } + } + public final ParametersContext parameters() throws RecognitionException { + ParametersContext _localctx = new ParametersContext(_ctx, getState()); + enterRule(_localctx, 4, RULE_parameters); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(85); + match(LP); + setState(97); + _la = _input.LA(1); + if (_la==TYPE) { + { + setState(86); + decltype(); + setState(87); + match(ID); + setState(94); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(88); + match(COMMA); + setState(89); + decltype(); + setState(90); + match(ID); + } + } + setState(96); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + setState(99); + match(RP); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class StatementContext extends ParserRuleContext { + public RstatementContext rstatement() { + return getRuleContext(RstatementContext.class,0); + } + public DstatementContext dstatement() { + return getRuleContext(DstatementContext.class,0); + } + public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); } + public TerminalNode EOF() { return getToken(PainlessParser.EOF, 0); } + public StatementContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_statement; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitStatement(this); + else return visitor.visitChildren(this); + } + } + public final StatementContext statement() throws RecognitionException { + StatementContext _localctx = new StatementContext(_ctx, getState()); + enterRule(_localctx, 6, RULE_statement); + int _la; + try { + setState(105); + switch (_input.LA(1)) { + case IF: + case WHILE: + case FOR: + case TRY: + enterOuterAlt(_localctx, 1); + { + setState(101); + rstatement(); + } + break; + case LBRACE: + case LP: + case DO: + case CONTINUE: + case BREAK: + case RETURN: + case NEW: + case THROW: + case BOOLNOT: + case BWNOT: + case ADD: + case SUB: + case INCR: + case DECR: + case OCTAL: + case HEX: + case INTEGER: + case 
DECIMAL: + case STRING: + case REGEX: + case TRUE: + case FALSE: + case NULL: + case TYPE: + case ID: + enterOuterAlt(_localctx, 2); + { + setState(102); + dstatement(); + setState(103); + _la = _input.LA(1); + if ( !(_la==EOF || _la==SEMICOLON) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + break; + default: + throw new NoViableAltException(this); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class RstatementContext extends ParserRuleContext { + public RstatementContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_rstatement; } + public RstatementContext() { } + public void copyFrom(RstatementContext ctx) { + super.copyFrom(ctx); + } + } + public static class ForContext extends RstatementContext { + public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public List SEMICOLON() { return getTokens(PainlessParser.SEMICOLON); } + public TerminalNode SEMICOLON(int i) { + return getToken(PainlessParser.SEMICOLON, i); + } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public TrailerContext trailer() { + return getRuleContext(TrailerContext.class,0); + } + public EmptyContext empty() { + return getRuleContext(EmptyContext.class,0); + } + public InitializerContext initializer() { + return getRuleContext(InitializerContext.class,0); + } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public AfterthoughtContext afterthought() { + return getRuleContext(AfterthoughtContext.class,0); + } + public ForContext(RstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitFor(this); + else return visitor.visitChildren(this); + } + } + public static class TryContext extends RstatementContext { + public TerminalNode TRY() { return getToken(PainlessParser.TRY, 0); } + public BlockContext block() { + return getRuleContext(BlockContext.class,0); + } + public List trap() { + return getRuleContexts(TrapContext.class); + } + public TrapContext trap(int i) { + return getRuleContext(TrapContext.class,i); + } + public TryContext(RstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitTry(this); + else return visitor.visitChildren(this); + } + } + public static class WhileContext extends RstatementContext { + public TerminalNode WHILE() { return getToken(PainlessParser.WHILE, 0); } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public TrailerContext trailer() { + return getRuleContext(TrailerContext.class,0); + } + public EmptyContext empty() { + return getRuleContext(EmptyContext.class,0); + } + public WhileContext(RstatementContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitWhile(this); 
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class IneachContext extends RstatementContext {
+    public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); }
+    public TerminalNode LP() { return getToken(PainlessParser.LP, 0); }
+    public TerminalNode ID() { return getToken(PainlessParser.ID, 0); }
+    public TerminalNode IN() { return getToken(PainlessParser.IN, 0); }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }
+    public TrailerContext trailer() {
+      return getRuleContext(TrailerContext.class,0);
+    }
+    public IneachContext(RstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitIneach(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class IfContext extends RstatementContext {
+    public TerminalNode IF() { return getToken(PainlessParser.IF, 0); }
+    public TerminalNode LP() { return getToken(PainlessParser.LP, 0); }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }
+    public List<TrailerContext> trailer() {
+      return getRuleContexts(TrailerContext.class);
+    }
+    public TrailerContext trailer(int i) {
+      return getRuleContext(TrailerContext.class,i);
+    }
+    public TerminalNode ELSE() { return getToken(PainlessParser.ELSE, 0); }
+    public IfContext(RstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitIf(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class EachContext extends RstatementContext {
+    public TerminalNode FOR() { return getToken(PainlessParser.FOR, 0); }
+    public TerminalNode LP() { return getToken(PainlessParser.LP, 0); }
+    public DecltypeContext decltype() {
+      return getRuleContext(DecltypeContext.class,0);
+    }
+    public TerminalNode ID() { return getToken(PainlessParser.ID, 0); }
+    public TerminalNode COLON() { return getToken(PainlessParser.COLON, 0); }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }
+    public TrailerContext trailer() {
+      return getRuleContext(TrailerContext.class,0);
+    }
+    public EachContext(RstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitEach(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final RstatementContext rstatement() throws RecognitionException {
+    RstatementContext _localctx = new RstatementContext(_ctx, getState());
+    enterRule(_localctx, 8, RULE_rstatement);
+    int _la;
+    try {
+      int _alt;
+      setState(167);
+      _errHandler.sync(this);
+      switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) {
+      case 1:
+        _localctx = new IfContext(_localctx);
+        enterOuterAlt(_localctx, 1);
+        {
+        setState(107);
+        match(IF);
+        setState(108);
+        match(LP);
+        setState(109);
+        expression(0);
+        setState(110);
+        match(RP);
+        setState(111);
+        trailer();
+        setState(115);
+        _errHandler.sync(this);
+        switch ( getInterpreter().adaptivePredict(_input,5,_ctx) ) {
+        case 1:
+          {
+          setState(112);
+          match(ELSE);
+          setState(113);
+          trailer();
+          }
+          break;
+        case 2:
+          {
+          setState(114);
+          if (!( _input.LA(1) != ELSE )) throw new FailedPredicateException(this, " _input.LA(1) != ELSE ");
+          }
+          break;
+        }
+        }
+        break;
+      case 2:
+        _localctx = new WhileContext(_localctx);
+        enterOuterAlt(_localctx, 2);
+        {
+        setState(117);
+        match(WHILE);
+        setState(118);
+        match(LP);
+        setState(119);
+        expression(0);
+        setState(120);
+        match(RP);
+        setState(123);
+        switch (_input.LA(1)) {
+        case LBRACK:
+        case LBRACE:
+        case LP:
+        case IF:
+        case WHILE:
+        case DO:
+        case FOR:
+        case CONTINUE:
+        case BREAK:
+        case RETURN:
+        case NEW:
+        case TRY:
+        case THROW:
+        case BOOLNOT:
+        case BWNOT:
+        case ADD:
+        case SUB:
+        case INCR:
+        case DECR:
+        case OCTAL:
+        case HEX:
+        case INTEGER:
+        case DECIMAL:
+        case STRING:
+        case REGEX:
+        case TRUE:
+        case FALSE:
+        case NULL:
+        case TYPE:
+        case ID:
+          {
+          setState(121);
+          trailer();
+          }
+          break;
+        case SEMICOLON:
+          {
+          setState(122);
+          empty();
+          }
+          break;
+        default:
+          throw new NoViableAltException(this);
+        }
+        }
+        break;
+      case 3:
+        _localctx = new ForContext(_localctx);
+        enterOuterAlt(_localctx, 3);
+        {
+        setState(125);
+        match(FOR);
+        setState(126);
+        match(LP);
+        setState(128);
+        _la = _input.LA(1);
+        if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) {
+          {
+          setState(127);
+          initializer();
+          }
+        }
+        setState(130);
+        match(SEMICOLON);
+        setState(132);
+        _la = _input.LA(1);
+        if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) {
+          {
+          setState(131);
+          expression(0);
+          }
+        }
+        setState(134);
+        match(SEMICOLON);
+        setState(136);
+        _la = _input.LA(1);
+        if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) {
+          {
+          setState(135);
+          afterthought();
+          }
+        }
+        setState(138);
+        match(RP);
+        setState(141);
+        switch (_input.LA(1)) {
+        case LBRACK:
+        case LBRACE:
+        case LP:
+        case IF:
+        case WHILE:
+        case DO:
+        case FOR:
+        case CONTINUE:
+        case BREAK:
+        case RETURN:
+        case NEW:
+        case TRY:
+        case THROW:
+        case BOOLNOT:
+        case BWNOT:
+        case ADD:
+        case SUB:
+        case INCR:
+        case DECR:
+        case OCTAL:
+        case HEX:
+        case INTEGER:
+        case DECIMAL:
+        case STRING:
+        case REGEX:
+        case TRUE:
+        case FALSE:
+        case NULL:
+        case TYPE:
+        case ID:
+          {
+          setState(139);
+          trailer();
+          }
+          break;
+        case SEMICOLON:
+          {
+          setState(140);
+          empty();
+          }
+          break;
+        default:
+          throw new NoViableAltException(this);
+        }
+        }
+        break;
+      case 4:
+        _localctx = new EachContext(_localctx);
+        enterOuterAlt(_localctx, 4);
+        {
+        setState(143);
+        match(FOR);
+        setState(144);
+        match(LP);
+        setState(145);
+        decltype();
+        setState(146);
+        match(ID);
+        setState(147);
+        match(COLON);
+        setState(148);
+        expression(0);
+        setState(149);
+        match(RP);
+        setState(150);
+        trailer();
+        }
+        break;
+      case 5:
+        _localctx = new IneachContext(_localctx);
+        enterOuterAlt(_localctx, 5);
+        {
+        setState(152);
+        match(FOR);
+        setState(153);
+        match(LP);
+        setState(154);
+        match(ID);
+        setState(155);
+        match(IN);
+        setState(156);
+        expression(0);
+        setState(157);
+        match(RP);
+        setState(158);
+        trailer();
+        }
+        break;
+      case 6:
+        _localctx = new TryContext(_localctx);
+        enterOuterAlt(_localctx, 6);
+        {
+        setState(160);
+        match(TRY);
+        setState(161);
+        block();
+        setState(163);
+        _errHandler.sync(this);
+        _alt = 1;
+        do {
+          switch (_alt) {
+          case 1:
+            {
+            {
+            setState(162);
+            trap();
+            }
+            }
+            break;
+          default:
+            throw new NoViableAltException(this);
+          }
+          setState(165);
+          _errHandler.sync(this);
+          _alt = getInterpreter().adaptivePredict(_input,11,_ctx);
+        } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER );
+        }
+        break;
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class DstatementContext extends ParserRuleContext {
+    public DstatementContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_dstatement; }
+    public DstatementContext() { }
+    public void copyFrom(DstatementContext ctx) {
+      super.copyFrom(ctx);
+    }
+  }
+  public static class DeclContext extends DstatementContext {
+    public DeclarationContext declaration() {
+      return getRuleContext(DeclarationContext.class,0);
+    }
+    public DeclContext(DstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitDecl(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class BreakContext extends DstatementContext {
+    public TerminalNode BREAK() { return getToken(PainlessParser.BREAK, 0); }
+    public BreakContext(DstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitBreak(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class ThrowContext extends DstatementContext {
+    public TerminalNode THROW() { return getToken(PainlessParser.THROW, 0); }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public ThrowContext(DstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitThrow(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class ContinueContext extends DstatementContext {
+    public TerminalNode CONTINUE() { return getToken(PainlessParser.CONTINUE, 0); }
+    public ContinueContext(DstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitContinue(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class ExprContext extends DstatementContext {
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public ExprContext(DstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitExpr(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class DoContext extends DstatementContext {
+    public TerminalNode DO() { return getToken(PainlessParser.DO, 0); }
+    public BlockContext block() {
+      return getRuleContext(BlockContext.class,0);
+    }
+    public TerminalNode WHILE() { return getToken(PainlessParser.WHILE, 0); }
+    public TerminalNode LP() { return getToken(PainlessParser.LP, 0); }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }
+    public DoContext(DstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitDo(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class ReturnContext extends DstatementContext {
+    public TerminalNode RETURN() { return getToken(PainlessParser.RETURN, 0); }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public ReturnContext(DstatementContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitReturn(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final DstatementContext dstatement() throws RecognitionException {
+    DstatementContext _localctx = new DstatementContext(_ctx, getState());
+    enterRule(_localctx, 10, RULE_dstatement);
+    int _la;
+    try {
+      setState(186);
+      _errHandler.sync(this);
+      switch ( getInterpreter().adaptivePredict(_input,14,_ctx) ) {
+      case 1:
+        _localctx = new DoContext(_localctx);
+        enterOuterAlt(_localctx, 1);
+        {
+        setState(169);
+        match(DO);
+        setState(170);
+        block();
+        setState(171);
+        match(WHILE);
+        setState(172);
+        match(LP);
+        setState(173);
+        expression(0);
+        setState(174);
+        match(RP);
+        }
+        break;
+      case 2:
+        _localctx = new DeclContext(_localctx);
+        enterOuterAlt(_localctx, 2);
+        {
+        setState(176);
+        declaration();
+        }
+        break;
+      case 3:
+        _localctx = new ContinueContext(_localctx);
+        enterOuterAlt(_localctx, 3);
+        {
+        setState(177);
+        match(CONTINUE);
+        }
+        break;
+      case 4:
+        _localctx = new BreakContext(_localctx);
+        enterOuterAlt(_localctx, 4);
+        {
+        setState(178);
+        match(BREAK);
+        }
+        break;
+      case 5:
+        _localctx = new ReturnContext(_localctx);
+        enterOuterAlt(_localctx, 5);
+        {
+        setState(179);
+        match(RETURN);
+        setState(181);
+        _la = _input.LA(1);
+        if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) {
+          {
+          setState(180);
+          expression(0);
+          }
+        }
+        }
+        break;
+      case 6:
+        _localctx = new ThrowContext(_localctx);
+        enterOuterAlt(_localctx, 6);
+        {
+        setState(183);
+        match(THROW);
+        setState(184);
+        expression(0);
+        }
+        break;
+      case 7:
+        _localctx = new ExprContext(_localctx);
+        enterOuterAlt(_localctx, 7);
+        {
+        setState(185);
+        expression(0);
+        }
+        break;
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class TrailerContext extends ParserRuleContext {
+    public BlockContext block() {
+      return getRuleContext(BlockContext.class,0);
+    }
+    public StatementContext statement() {
+      return getRuleContext(StatementContext.class,0);
+    }
+    public TrailerContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_trailer; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitTrailer(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final TrailerContext trailer() throws RecognitionException {
+    TrailerContext _localctx = new TrailerContext(_ctx, getState());
+    enterRule(_localctx, 12, RULE_trailer);
+    try {
+      setState(190);
+      switch (_input.LA(1)) {
+      case LBRACK:
+        enterOuterAlt(_localctx, 1);
+        {
+        setState(188);
+        block();
+        }
+        break;
+      case LBRACE:
+      case LP:
+      case IF:
+      case WHILE:
+      case DO:
+      case FOR:
+      case CONTINUE:
+      case BREAK:
+      case RETURN:
+      case NEW:
+      case TRY:
+      case THROW:
+      case BOOLNOT:
+      case BWNOT:
+      case ADD:
+      case SUB:
+      case INCR:
+      case DECR:
+      case OCTAL:
+      case HEX:
+      case INTEGER:
+      case DECIMAL:
+      case STRING:
+      case REGEX:
+      case TRUE:
+      case FALSE:
+      case NULL:
+      case TYPE:
+      case ID:
+        enterOuterAlt(_localctx, 2);
+        {
+        setState(189);
+        statement();
+        }
+        break;
+      default:
+        throw new NoViableAltException(this);
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class BlockContext extends ParserRuleContext {
+    public TerminalNode LBRACK() { return getToken(PainlessParser.LBRACK, 0); }
+    public TerminalNode RBRACK() { return getToken(PainlessParser.RBRACK, 0); }
+    public List<StatementContext> statement() {
+      return getRuleContexts(StatementContext.class);
+    }
+    public StatementContext statement(int i) {
+      return getRuleContext(StatementContext.class,i);
+    }
+    public DstatementContext dstatement() {
+      return getRuleContext(DstatementContext.class,0);
+    }
+    public BlockContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_block; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitBlock(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final BlockContext block() throws RecognitionException {
+    BlockContext _localctx = new BlockContext(_ctx, getState());
+    enterRule(_localctx, 14, RULE_block);
+    int _la;
+    try {
+      int _alt;
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(192);
+      match(LBRACK);
+      setState(196);
+      _errHandler.sync(this);
+      _alt = getInterpreter().adaptivePredict(_input,16,_ctx);
+      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
+        if ( _alt==1 ) {
+          {
+          {
+          setState(193);
+          statement();
+          }
+          }
+        }
+        setState(198);
+        _errHandler.sync(this);
+        _alt = getInterpreter().adaptivePredict(_input,16,_ctx);
+      }
+      setState(200);
+      _la = _input.LA(1);
+      if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << DO) | (1L << CONTINUE) | (1L << BREAK) | (1L << RETURN) | (1L << NEW) | (1L << THROW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) {
+        {
+        setState(199);
+        dstatement();
+        }
+      }
+      setState(202);
+      match(RBRACK);
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class EmptyContext extends ParserRuleContext {
+    public TerminalNode SEMICOLON() { return getToken(PainlessParser.SEMICOLON, 0); }
+    public EmptyContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_empty; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitEmpty(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final EmptyContext empty() throws RecognitionException {
+    EmptyContext _localctx = new EmptyContext(_ctx, getState());
+    enterRule(_localctx, 16, RULE_empty);
+    try {
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(204);
+      match(SEMICOLON);
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class InitializerContext extends ParserRuleContext {
+    public DeclarationContext declaration() {
+      return getRuleContext(DeclarationContext.class,0);
+    }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public InitializerContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_initializer; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitInitializer(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final InitializerContext initializer() throws RecognitionException {
+    InitializerContext _localctx = new InitializerContext(_ctx, getState());
+    enterRule(_localctx, 18, RULE_initializer);
+    try {
+      setState(208);
+      _errHandler.sync(this);
+      switch ( getInterpreter().adaptivePredict(_input,18,_ctx) ) {
+      case 1:
+        enterOuterAlt(_localctx, 1);
+        {
+        setState(206);
+        declaration();
+        }
+        break;
+      case 2:
+        enterOuterAlt(_localctx, 2);
+        {
+        setState(207);
+        expression(0);
+        }
+        break;
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class AfterthoughtContext extends ParserRuleContext {
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public AfterthoughtContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_afterthought; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitAfterthought(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final AfterthoughtContext afterthought() throws RecognitionException {
+    AfterthoughtContext _localctx = new AfterthoughtContext(_ctx, getState());
+    enterRule(_localctx, 20, RULE_afterthought);
+    try {
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(210);
+      expression(0);
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class DeclarationContext extends ParserRuleContext {
+    public DecltypeContext decltype() {
+      return getRuleContext(DecltypeContext.class,0);
+    }
+    public List<DeclvarContext> declvar() {
+      return getRuleContexts(DeclvarContext.class);
+    }
+    public DeclvarContext declvar(int i) {
+      return getRuleContext(DeclvarContext.class,i);
+    }
+    public List<TerminalNode> COMMA() { return getTokens(PainlessParser.COMMA); }
+    public TerminalNode COMMA(int i) {
+      return getToken(PainlessParser.COMMA, i);
+    }
+    public DeclarationContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_declaration; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitDeclaration(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final DeclarationContext declaration() throws RecognitionException {
+    DeclarationContext _localctx = new DeclarationContext(_ctx, getState());
+    enterRule(_localctx, 22, RULE_declaration);
+    int _la;
+    try {
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(212);
+      decltype();
+      setState(213);
+      declvar();
+      setState(218);
+      _errHandler.sync(this);
+      _la = _input.LA(1);
+      while (_la==COMMA) {
+        {
+        {
+        setState(214);
+        match(COMMA);
+        setState(215);
+        declvar();
+        }
+        }
+        setState(220);
+        _errHandler.sync(this);
+        _la = _input.LA(1);
+      }
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class DecltypeContext extends ParserRuleContext {
+    public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); }
+    public List<TerminalNode> LBRACE() { return getTokens(PainlessParser.LBRACE); }
+    public TerminalNode LBRACE(int i) {
+      return getToken(PainlessParser.LBRACE, i);
+    }
+    public List<TerminalNode> RBRACE() { return getTokens(PainlessParser.RBRACE); }
+    public TerminalNode RBRACE(int i) {
+      return getToken(PainlessParser.RBRACE, i);
+    }
+    public DecltypeContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_decltype; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitDecltype(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final DecltypeContext decltype() throws RecognitionException {
+    DecltypeContext _localctx = new DecltypeContext(_ctx, getState());
+    enterRule(_localctx, 24, RULE_decltype);
+    try {
+      int _alt;
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(221);
+      match(TYPE);
+      setState(226);
+      _errHandler.sync(this);
+      _alt = getInterpreter().adaptivePredict(_input,20,_ctx);
+      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
+        if ( _alt==1 ) {
+          {
+          {
+          setState(222);
+          match(LBRACE);
+          setState(223);
+          match(RBRACE);
+          }
+          }
+        }
+        setState(228);
+        _errHandler.sync(this);
+        _alt = getInterpreter().adaptivePredict(_input,20,_ctx);
+      }
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class DeclvarContext extends ParserRuleContext {
+    public TerminalNode ID() { return getToken(PainlessParser.ID, 0); }
+    public TerminalNode ASSIGN() { return getToken(PainlessParser.ASSIGN, 0); }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public DeclvarContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_declvar; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitDeclvar(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final DeclvarContext declvar() throws RecognitionException {
+    DeclvarContext _localctx = new DeclvarContext(_ctx, getState());
+    enterRule(_localctx, 26, RULE_declvar);
+    int _la;
+    try {
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(229);
+      match(ID);
+      setState(232);
+      _la = _input.LA(1);
+      if (_la==ASSIGN) {
+        {
+        setState(230);
+        match(ASSIGN);
+        setState(231);
+        expression(0);
+        }
+      }
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class TrapContext extends ParserRuleContext {
+    public TerminalNode CATCH() { return getToken(PainlessParser.CATCH, 0); }
+    public TerminalNode LP() { return getToken(PainlessParser.LP, 0); }
+    public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); }
+    public TerminalNode ID() { return getToken(PainlessParser.ID, 0); }
+    public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }
+    public BlockContext block() {
+      return getRuleContext(BlockContext.class,0);
+    }
+    public TrapContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_trap; }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitTrap(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final TrapContext trap() throws RecognitionException {
+    TrapContext _localctx = new TrapContext(_ctx, getState());
+    enterRule(_localctx, 28, RULE_trap);
+    try {
+      enterOuterAlt(_localctx, 1);
+      {
+      setState(234);
+      match(CATCH);
+      setState(235);
+      match(LP);
+      setState(236);
+      match(TYPE);
+      setState(237);
+      match(ID);
+      setState(238);
+      match(RP);
+      setState(239);
+      block();
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
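+  // Orientation comment (not generator output): the `expression` rule below is
+  // ANTLR's rewrite of the grammar's left-recursive operator alternatives into
+  // an iterative loop over `expression(int _p)`. Each alternative is guarded by
+  // precpred(_ctx, n), where n is the binding strength of the operator being
+  // appended -- verifiable below as 15 for MUL/DIV/REM down through 1 for the
+  // compound-assignment operators -- so the loop only extends the parsed left
+  // operand while the next operator binds at least as tightly.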
+  public static class ExpressionContext extends ParserRuleContext {
+    public ExpressionContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_expression; }
+    public ExpressionContext() { }
+    public void copyFrom(ExpressionContext ctx) {
+      super.copyFrom(ctx);
+    }
+  }
+  public static class SingleContext extends ExpressionContext {
+    public UnaryContext unary() {
+      return getRuleContext(UnaryContext.class,0);
+    }
+    public SingleContext(ExpressionContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitSingle(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class CompContext extends ExpressionContext {
+    public List<ExpressionContext> expression() {
+      return getRuleContexts(ExpressionContext.class);
+    }
+    public ExpressionContext expression(int i) {
+      return getRuleContext(ExpressionContext.class,i);
+    }
+    public TerminalNode LT() { return getToken(PainlessParser.LT, 0); }
+    public TerminalNode LTE() { return getToken(PainlessParser.LTE, 0); }
+    public TerminalNode GT() { return getToken(PainlessParser.GT, 0); }
+    public TerminalNode GTE() { return getToken(PainlessParser.GTE, 0); }
+    public TerminalNode EQ() { return getToken(PainlessParser.EQ, 0); }
+    public TerminalNode EQR() { return getToken(PainlessParser.EQR, 0); }
+    public TerminalNode NE() { return getToken(PainlessParser.NE, 0); }
+    public TerminalNode NER() { return getToken(PainlessParser.NER, 0); }
+    public CompContext(ExpressionContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitComp(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class BoolContext extends ExpressionContext {
+    public List<ExpressionContext> expression() {
+      return getRuleContexts(ExpressionContext.class);
+    }
+    public ExpressionContext expression(int i) {
+      return getRuleContext(ExpressionContext.class,i);
+    }
+    public TerminalNode BOOLAND() { return getToken(PainlessParser.BOOLAND, 0); }
+    public TerminalNode BOOLOR() { return getToken(PainlessParser.BOOLOR, 0); }
+    public BoolContext(ExpressionContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitBool(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class ConditionalContext extends ExpressionContext {
+    public List<ExpressionContext> expression() {
+      return getRuleContexts(ExpressionContext.class);
+    }
+    public ExpressionContext expression(int i) {
+      return getRuleContext(ExpressionContext.class,i);
+    }
+    public TerminalNode COND() { return getToken(PainlessParser.COND, 0); }
+    public TerminalNode COLON() { return getToken(PainlessParser.COLON, 0); }
+    public ConditionalContext(ExpressionContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitConditional(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class AssignmentContext extends ExpressionContext {
+    public List<ExpressionContext> expression() {
+      return getRuleContexts(ExpressionContext.class);
+    }
+    public ExpressionContext expression(int i) {
+      return getRuleContext(ExpressionContext.class,i);
+    }
+    public TerminalNode ASSIGN() { return getToken(PainlessParser.ASSIGN, 0); }
+    public TerminalNode AADD() { return getToken(PainlessParser.AADD, 0); }
+    public TerminalNode ASUB() { return getToken(PainlessParser.ASUB, 0); }
+    public TerminalNode AMUL() { return getToken(PainlessParser.AMUL, 0); }
+    public TerminalNode ADIV() { return getToken(PainlessParser.ADIV, 0); }
+    public TerminalNode AREM() { return getToken(PainlessParser.AREM, 0); }
+    public TerminalNode AAND() { return getToken(PainlessParser.AAND, 0); }
+    public TerminalNode AXOR() { return getToken(PainlessParser.AXOR, 0); }
+    public TerminalNode AOR() { return getToken(PainlessParser.AOR, 0); }
+    public TerminalNode ALSH() { return getToken(PainlessParser.ALSH, 0); }
+    public TerminalNode ARSH() { return getToken(PainlessParser.ARSH, 0); }
+    public TerminalNode AUSH() { return getToken(PainlessParser.AUSH, 0); }
+    public AssignmentContext(ExpressionContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitAssignment(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class BinaryContext extends ExpressionContext {
+    public List<ExpressionContext> expression() {
+      return getRuleContexts(ExpressionContext.class);
+    }
+    public ExpressionContext expression(int i) {
+      return getRuleContext(ExpressionContext.class,i);
+    }
+    public TerminalNode MUL() { return getToken(PainlessParser.MUL, 0); }
+    public TerminalNode DIV() { return getToken(PainlessParser.DIV, 0); }
+    public TerminalNode REM() { return getToken(PainlessParser.REM, 0); }
+    public TerminalNode ADD() { return getToken(PainlessParser.ADD, 0); }
+    public TerminalNode SUB() { return getToken(PainlessParser.SUB, 0); }
+    public TerminalNode FIND() { return getToken(PainlessParser.FIND, 0); }
+    public TerminalNode MATCH() { return getToken(PainlessParser.MATCH, 0); }
+    public TerminalNode LSH() { return getToken(PainlessParser.LSH, 0); }
+    public TerminalNode RSH() { return getToken(PainlessParser.RSH, 0); }
+    public TerminalNode USH() { return getToken(PainlessParser.USH, 0); }
+    public TerminalNode BWAND() { return getToken(PainlessParser.BWAND, 0); }
+    public TerminalNode XOR() { return getToken(PainlessParser.XOR, 0); }
+    public TerminalNode BWOR() { return getToken(PainlessParser.BWOR, 0); }
+    public BinaryContext(ExpressionContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitBinary(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class ElvisContext extends ExpressionContext {
+    public List<ExpressionContext> expression() {
+      return getRuleContexts(ExpressionContext.class);
+    }
+    public ExpressionContext expression(int i) {
+      return getRuleContext(ExpressionContext.class,i);
+    }
+    public TerminalNode ELVIS() { return getToken(PainlessParser.ELVIS, 0); }
+    public ElvisContext(ExpressionContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitElvis(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class InstanceofContext extends ExpressionContext {
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public TerminalNode INSTANCEOF() { return getToken(PainlessParser.INSTANCEOF, 0); }
+    public DecltypeContext decltype() {
+      return getRuleContext(DecltypeContext.class,0);
+    }
+    public InstanceofContext(ExpressionContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitInstanceof(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final ExpressionContext expression() throws RecognitionException {
+    return expression(0);
+  }
+  private ExpressionContext expression(int _p) throws RecognitionException {
+    ParserRuleContext _parentctx = _ctx;
+    int _parentState = getState();
+    ExpressionContext _localctx = new ExpressionContext(_ctx, _parentState);
+    ExpressionContext _prevctx = _localctx;
+    int _startState = 30;
+    enterRecursionRule(_localctx, 30, RULE_expression, _p);
+    int _la;
+    try {
+      int _alt;
+      enterOuterAlt(_localctx, 1);
+      {
+      {
+      _localctx = new SingleContext(_localctx);
+      _ctx = _localctx;
+      _prevctx = _localctx;
+      setState(242);
+      unary();
+      }
+      _ctx.stop = _input.LT(-1);
+      setState(294);
+      _errHandler.sync(this);
+      _alt = getInterpreter().adaptivePredict(_input,23,_ctx);
+      while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
+        if ( _alt==1 ) {
+          if ( _parseListeners!=null ) triggerExitRuleEvent();
+          _prevctx = _localctx;
+          {
+          setState(292);
+          _errHandler.sync(this);
+          switch ( getInterpreter().adaptivePredict(_input,22,_ctx) ) {
+          case 1:
+            {
+            _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(244);
+            if (!(precpred(_ctx, 15))) throw new FailedPredicateException(this, "precpred(_ctx, 15)");
+            setState(245);
+            _la = _input.LA(1);
+            if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << MUL) | (1L << DIV) | (1L << REM))) != 0)) ) {
+              _errHandler.recoverInline(this);
+            } else {
+              consume();
+            }
+            setState(246);
+            expression(16);
+            }
+            break;
+          case 2:
+            {
+            _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(247);
+            if (!(precpred(_ctx, 14))) throw new FailedPredicateException(this, "precpred(_ctx, 14)");
+            setState(248);
+            _la = _input.LA(1);
+            if ( !(_la==ADD || _la==SUB) ) {
+              _errHandler.recoverInline(this);
+            } else {
+              consume();
+            }
+            setState(249);
+            expression(15);
+            }
+            break;
+          case 3:
+            {
+            _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(250);
+            if (!(precpred(_ctx, 13))) throw new FailedPredicateException(this, "precpred(_ctx, 13)");
+            setState(251);
+            _la = _input.LA(1);
+            if ( !(_la==FIND || _la==MATCH) ) {
+              _errHandler.recoverInline(this);
+            } else {
+              consume();
+            }
+            setState(252);
+            expression(14);
+            }
+            break;
+          case 4:
+            {
+            _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(253);
+            if (!(precpred(_ctx, 12))) throw new FailedPredicateException(this, "precpred(_ctx, 12)");
+            setState(254);
+            _la = _input.LA(1);
+            if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LSH) | (1L << RSH) | (1L << USH))) != 0)) ) {
+              _errHandler.recoverInline(this);
+            } else {
+              consume();
+            }
+            setState(255);
+            expression(13);
+            }
+            break;
+          case 5:
+            {
+            _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(256);
+            if (!(precpred(_ctx, 11))) throw new FailedPredicateException(this, "precpred(_ctx, 11)");
+            setState(257);
+            _la = _input.LA(1);
+            if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LT) | (1L << LTE) | (1L << GT) | (1L << GTE))) != 0)) ) {
+              _errHandler.recoverInline(this);
+            } else {
+              consume();
+            }
+            setState(258);
+            expression(12);
+            }
+            break;
+          case 6:
+            {
+            _localctx = new CompContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(259);
+            if (!(precpred(_ctx, 9))) throw new FailedPredicateException(this, "precpred(_ctx, 9)");
+            setState(260);
+            _la = _input.LA(1);
+            if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << EQ) | (1L << EQR) | (1L << NE) | (1L << NER))) != 0)) ) {
+              _errHandler.recoverInline(this);
+            } else {
+              consume();
+            }
+            setState(261);
+            expression(10);
+            }
+            break;
+          case 7:
+            {
+            _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(262);
+            if (!(precpred(_ctx, 8))) throw new FailedPredicateException(this, "precpred(_ctx, 8)");
+            setState(263);
+            match(BWAND);
+            setState(264);
+            expression(9);
+            }
+            break;
+          case 8:
+            {
+            _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(265);
+            if (!(precpred(_ctx, 7))) throw new FailedPredicateException(this, "precpred(_ctx, 7)");
+            setState(266);
+            match(XOR);
+            setState(267);
+            expression(8);
+            }
+            break;
+          case 9:
+            {
+            _localctx = new BinaryContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(268);
+            if (!(precpred(_ctx, 6))) throw new FailedPredicateException(this, "precpred(_ctx, 6)");
+            setState(269);
+            match(BWOR);
+            setState(270);
+            expression(7);
+            }
+            break;
+          case 10:
+            {
+            _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(271);
+            if (!(precpred(_ctx, 5))) throw new FailedPredicateException(this, "precpred(_ctx, 5)");
+            setState(272);
+            match(BOOLAND);
+            setState(273);
+            expression(6);
+            }
+            break;
+          case 11:
+            {
+            _localctx = new BoolContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(274);
+            if (!(precpred(_ctx, 4))) throw new FailedPredicateException(this, "precpred(_ctx, 4)");
+            setState(275);
+            match(BOOLOR);
+            setState(276);
+            expression(5);
+            }
+            break;
+          case 12:
+            {
+            _localctx = new ConditionalContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(277);
+            if (!(precpred(_ctx, 3))) throw new FailedPredicateException(this, "precpred(_ctx, 3)");
+            setState(278);
+            match(COND);
+            setState(279);
+            expression(0);
+            setState(280);
+            match(COLON);
+            setState(281);
+            expression(3);
+            }
+            break;
+          case 13:
+            {
+            _localctx = new ElvisContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(283);
+            if (!(precpred(_ctx, 2))) throw new FailedPredicateException(this, "precpred(_ctx, 2)");
+            setState(284);
+            match(ELVIS);
+            setState(285);
+            expression(2);
+            }
+            break;
+          case 14:
+            {
+            _localctx = new AssignmentContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(286);
+            if (!(precpred(_ctx, 1))) throw new FailedPredicateException(this, "precpred(_ctx, 1)");
+            setState(287);
+            _la = _input.LA(1);
+            if ( !(((((_la - 60)) & ~0x3f) == 0 && ((1L << (_la - 60)) & ((1L << (ASSIGN - 60)) | (1L << (AADD - 60)) | (1L << (ASUB - 60)) | (1L << (AMUL - 60)) | (1L << (ADIV - 60)) | (1L << (AREM - 60)) | (1L << (AAND - 60)) | (1L << (AXOR - 60)) | (1L << (AOR - 60)) | (1L << (ALSH - 60)) | (1L << (ARSH - 60)) | (1L << (AUSH - 60)))) != 0)) ) {
+              _errHandler.recoverInline(this);
+            } else {
+              consume();
+            }
+            setState(288);
+            expression(1);
+            }
+            break;
+          case 15:
+            {
+            _localctx = new InstanceofContext(new ExpressionContext(_parentctx, _parentState));
+            pushNewRecursionContext(_localctx, _startState, RULE_expression);
+            setState(289);
+            if (!(precpred(_ctx, 10))) throw new FailedPredicateException(this, "precpred(_ctx, 10)");
+            setState(290);
+            match(INSTANCEOF);
+            setState(291);
+            decltype();
+            }
+            break;
+          }
+          }
+        }
+        setState(296);
+        _errHandler.sync(this);
+        _alt = getInterpreter().adaptivePredict(_input,23,_ctx);
+      }
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      unrollRecursionContexts(_parentctx);
+    }
+    return _localctx;
+  }
+  public static class UnaryContext extends ParserRuleContext {
+    public UnaryContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_unary; }
+    public UnaryContext() { }
+    public void copyFrom(UnaryContext ctx) {
+      super.copyFrom(ctx);
+    }
+  }
+  public static class CastContext extends UnaryContext {
+    public TerminalNode LP() { return getToken(PainlessParser.LP, 0); }
+    public DecltypeContext decltype() {
+      return getRuleContext(DecltypeContext.class,0);
+    }
+    public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }
+    public UnaryContext unary() {
+      return getRuleContext(UnaryContext.class,0);
+    }
+    public CastContext(UnaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitCast(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class PreContext extends UnaryContext {
+    public ChainContext chain() {
+      return getRuleContext(ChainContext.class,0);
+    }
+    public TerminalNode INCR() { return getToken(PainlessParser.INCR, 0); }
+    public TerminalNode DECR() { return getToken(PainlessParser.DECR, 0); }
+    public PreContext(UnaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitPre(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class ReadContext extends UnaryContext {
+    public ChainContext chain() {
+      return getRuleContext(ChainContext.class,0);
+    }
+    public ReadContext(UnaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitRead(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class PostContext extends UnaryContext {
+    public ChainContext chain() {
+      return getRuleContext(ChainContext.class,0);
+    }
+    public TerminalNode INCR() { return getToken(PainlessParser.INCR, 0); }
+    public TerminalNode DECR() { return getToken(PainlessParser.DECR, 0); }
+    public PostContext(UnaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitPost(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class OperatorContext extends UnaryContext {
+    public UnaryContext unary() {
+      return getRuleContext(UnaryContext.class,0);
+    }
+    public TerminalNode BOOLNOT() { return getToken(PainlessParser.BOOLNOT, 0); }
+    public TerminalNode BWNOT() { return getToken(PainlessParser.BWNOT, 0); }
+    public TerminalNode ADD() { return getToken(PainlessParser.ADD, 0); }
+    public TerminalNode SUB() { return getToken(PainlessParser.SUB, 0); }
+    public OperatorContext(UnaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitOperator(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final UnaryContext unary() throws RecognitionException {
+    UnaryContext _localctx = new UnaryContext(_ctx, getState());
+    enterRule(_localctx, 32, RULE_unary);
+    int _la;
+    try {
+      setState(310);
+      _errHandler.sync(this);
+      switch ( getInterpreter().adaptivePredict(_input,24,_ctx) ) {
+      case 1:
+        _localctx = new PreContext(_localctx);
+        enterOuterAlt(_localctx, 1);
+        {
+        setState(297);
+        _la = _input.LA(1);
+        if ( !(_la==INCR || _la==DECR) ) {
+          _errHandler.recoverInline(this);
+        } else {
+          consume();
+        }
+        setState(298);
+        chain();
+        }
+        break;
+      case 2:
+        _localctx = new PostContext(_localctx);
+        enterOuterAlt(_localctx, 2);
+        {
+        setState(299);
+        chain();
+        setState(300);
+        _la = _input.LA(1);
+        if ( !(_la==INCR || _la==DECR) ) {
+          _errHandler.recoverInline(this);
+        } else {
+          consume();
+        }
+        }
+        break;
+      case 3:
+        _localctx = new ReadContext(_localctx);
+        enterOuterAlt(_localctx, 3);
+        {
+        setState(302);
+        chain();
+        }
+        break;
+      case 4:
+        _localctx = new OperatorContext(_localctx);
+        enterOuterAlt(_localctx, 4);
+        {
+        setState(303);
+        _la = _input.LA(1);
+        if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB))) != 0)) ) {
+          _errHandler.recoverInline(this);
+        } else {
+          consume();
+        }
+        setState(304);
+        unary();
+        }
+        break;
+      case 5:
+        _localctx = new CastContext(_localctx);
+        enterOuterAlt(_localctx, 5);
+        {
+        setState(305);
+        match(LP);
+        setState(306);
+        decltype();
+        setState(307);
+        match(RP);
+        setState(308);
+        unary();
+        }
+        break;
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class ChainContext extends ParserRuleContext {
+    public ChainContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_chain; }
+    public ChainContext() { }
+    public void copyFrom(ChainContext ctx) {
+      super.copyFrom(ctx);
+    }
+  }
+  public static class StaticContext extends ChainContext {
+    public DecltypeContext decltype() {
+      return getRuleContext(DecltypeContext.class,0);
+    }
+    public PostdotContext postdot() {
+      return getRuleContext(PostdotContext.class,0);
+    }
+    public List<PostfixContext> postfix() {
+      return getRuleContexts(PostfixContext.class);
+    }
+    public PostfixContext postfix(int i) {
+      return getRuleContext(PostfixContext.class,i);
+    }
+    public StaticContext(ChainContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitStatic(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class DynamicContext extends ChainContext {
+    public PrimaryContext primary() {
+      return getRuleContext(PrimaryContext.class,0);
+    }
+    public List<PostfixContext> postfix() {
+      return getRuleContexts(PostfixContext.class);
+    }
+    public PostfixContext postfix(int i) {
+      return getRuleContext(PostfixContext.class,i);
+    }
+    public DynamicContext(ChainContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitDynamic(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class NewarrayContext extends ChainContext {
+    public ArrayinitializerContext arrayinitializer() {
+      return getRuleContext(ArrayinitializerContext.class,0);
+    }
+    public NewarrayContext(ChainContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitNewarray(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final ChainContext chain() throws RecognitionException {
+    ChainContext _localctx = new ChainContext(_ctx, getState());
+    enterRule(_localctx, 34, RULE_chain);
+    try {
+      int _alt;
+      setState(328);
+      _errHandler.sync(this);
+      switch ( getInterpreter().adaptivePredict(_input,27,_ctx) ) {
+      case 1:
+        _localctx = new DynamicContext(_localctx);
+        enterOuterAlt(_localctx, 1);
+        {
+        setState(312);
+        primary();
+        setState(316);
+        _errHandler.sync(this);
+        _alt = getInterpreter().adaptivePredict(_input,25,_ctx);
+        while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
+          if ( _alt==1 ) {
+            {
+            {
+            setState(313);
+            postfix();
+            }
+            }
+          }
+          setState(318);
+          _errHandler.sync(this);
+          _alt = getInterpreter().adaptivePredict(_input,25,_ctx);
+        }
+        }
+        break;
+      case 2:
+        _localctx = new StaticContext(_localctx);
+        enterOuterAlt(_localctx, 2);
+        {
+        setState(319);
+        decltype();
+        setState(320);
+        postdot();
+        setState(324);
+        _errHandler.sync(this);
+        _alt = getInterpreter().adaptivePredict(_input,26,_ctx);
+        while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) {
+          if ( _alt==1 ) {
+            {
+            {
+            setState(321);
+            postfix();
+            }
+            }
+          }
+          setState(326);
+          _errHandler.sync(this);
+          _alt = getInterpreter().adaptivePredict(_input,26,_ctx);
+        }
+        }
+        break;
+      case 3:
+        _localctx = new NewarrayContext(_localctx);
+        enterOuterAlt(_localctx, 3);
+        {
+        setState(327);
+        arrayinitializer();
+        }
+        break;
+      }
+    }
+    catch (RecognitionException re) {
+      _localctx.exception = re;
+      _errHandler.reportError(this, re);
+      _errHandler.recover(this, re);
+    }
+    finally {
+      exitRule();
+    }
+    return _localctx;
+  }
+  public static class PrimaryContext extends ParserRuleContext {
+    public PrimaryContext(ParserRuleContext parent, int invokingState) {
+      super(parent, invokingState);
+    }
+    @Override public int getRuleIndex() { return RULE_primary; }
+    public PrimaryContext() { }
+    public void copyFrom(PrimaryContext ctx) {
+      super.copyFrom(ctx);
+    }
+  }
+  public static class ListinitContext extends PrimaryContext {
+    public ListinitializerContext listinitializer() {
+      return getRuleContext(ListinitializerContext.class,0);
+    }
+    public ListinitContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitListinit(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class RegexContext extends PrimaryContext {
+    public TerminalNode REGEX() { return getToken(PainlessParser.REGEX, 0); }
+    public RegexContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitRegex(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class NullContext extends PrimaryContext {
+    public TerminalNode NULL() { return getToken(PainlessParser.NULL, 0); }
+    public NullContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitNull(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class StringContext extends PrimaryContext {
+    public TerminalNode STRING() { return getToken(PainlessParser.STRING, 0); }
+    public StringContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitString(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class MapinitContext extends PrimaryContext {
+    public MapinitializerContext mapinitializer() {
+      return getRuleContext(MapinitializerContext.class,0);
+    }
+    public MapinitContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitMapinit(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class CalllocalContext extends PrimaryContext {
+    public TerminalNode ID() { return getToken(PainlessParser.ID, 0); }
+    public ArgumentsContext arguments() {
+      return getRuleContext(ArgumentsContext.class,0);
+    }
+    public CalllocalContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitCalllocal(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class TrueContext extends PrimaryContext {
+    public TerminalNode TRUE() { return getToken(PainlessParser.TRUE, 0); }
+    public TrueContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitTrue(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class FalseContext extends PrimaryContext {
+    public TerminalNode FALSE() { return getToken(PainlessParser.FALSE, 0); }
+    public FalseContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitFalse(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class VariableContext extends PrimaryContext {
+    public TerminalNode ID() { return getToken(PainlessParser.ID, 0); }
+    public VariableContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitVariable(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class NumericContext extends PrimaryContext {
+    public TerminalNode OCTAL() { return getToken(PainlessParser.OCTAL, 0); }
+    public TerminalNode HEX() { return getToken(PainlessParser.HEX, 0); }
+    public TerminalNode INTEGER() { return getToken(PainlessParser.INTEGER, 0); }
+    public TerminalNode DECIMAL() { return getToken(PainlessParser.DECIMAL, 0); }
+    public NumericContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitNumeric(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class NewobjectContext extends PrimaryContext {
+    public TerminalNode NEW() { return getToken(PainlessParser.NEW, 0); }
+    public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); }
+    public ArgumentsContext arguments() {
+      return getRuleContext(ArgumentsContext.class,0);
+    }
+    public NewobjectContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitNewobject(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public static class PrecedenceContext extends PrimaryContext {
+    public TerminalNode LP() { return getToken(PainlessParser.LP, 0); }
+    public ExpressionContext expression() {
+      return getRuleContext(ExpressionContext.class,0);
+    }
+    public TerminalNode RP() { return getToken(PainlessParser.RP, 0); }
+    public PrecedenceContext(PrimaryContext ctx) { copyFrom(ctx); }
+    @Override
+    public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
+      if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor<? extends T>)visitor).visitPrecedence(this);
+      else return visitor.visitChildren(this);
+    }
+  }
+  public final PrimaryContext primary() throws RecognitionException {
+    PrimaryContext _localctx = new PrimaryContext(_ctx, getState());
+    enterRule(_localctx, 36, RULE_primary);
+    int _la;
+    try {
+      setState(348);
+      _errHandler.sync(this);
+      switch ( getInterpreter().adaptivePredict(_input,28,_ctx) ) {
+      case 1:
+        _localctx = new PrecedenceContext(_localctx);
+        enterOuterAlt(_localctx, 1);
+        {
+        setState(330);
+        match(LP);
+        setState(331);
+        expression(0);
+        setState(332);
+        match(RP);
+        }
+        break;
+      case 2:
+        _localctx = new NumericContext(_localctx);
+        enterOuterAlt(_localctx, 2);
+        {
+        setState(334);
+        _la = _input.LA(1);
+        if ( !(((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)))) != 0)) ) {
+          _errHandler.recoverInline(this);
+        } else {
+          consume();
+        }
+        }
+        break;
+      case 3:
+        _localctx = new TrueContext(_localctx);
+        enterOuterAlt(_localctx, 3);
+        {
+        setState(335);
+        match(TRUE);
+        }
+        break;
+      case 4:
+        _localctx = new FalseContext(_localctx);
+        enterOuterAlt(_localctx, 4);
+        {
+        setState(336);
+        match(FALSE);
+        }
+        break;
+      case 5:
+        _localctx = new NullContext(_localctx);
+        enterOuterAlt(_localctx, 5);
+        {
+        setState(337);
+        match(NULL);
+        }
+        break;
+      case 6:
+        _localctx = new StringContext(_localctx);
+        enterOuterAlt(_localctx, 6);
+        {
+        setState(338);
+        match(STRING);
+        }
+        break;
+      case 7:
+        _localctx = new RegexContext(_localctx);
+        enterOuterAlt(_localctx, 7);
+        {
+        setState(339);
+        match(REGEX);
+        }
+        break;
+      case 8:
+        _localctx = new
ListinitContext(_localctx); + enterOuterAlt(_localctx, 8); + { + setState(340); + listinitializer(); + } + break; + case 9: + _localctx = new MapinitContext(_localctx); + enterOuterAlt(_localctx, 9); + { + setState(341); + mapinitializer(); + } + break; + case 10: + _localctx = new VariableContext(_localctx); + enterOuterAlt(_localctx, 10); + { + setState(342); + match(ID); + } + break; + case 11: + _localctx = new CalllocalContext(_localctx); + enterOuterAlt(_localctx, 11); + { + setState(343); + match(ID); + setState(344); + arguments(); + } + break; + case 12: + _localctx = new NewobjectContext(_localctx); + enterOuterAlt(_localctx, 12); + { + setState(345); + match(NEW); + setState(346); + match(TYPE); + setState(347); + arguments(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class PostfixContext extends ParserRuleContext { + public CallinvokeContext callinvoke() { + return getRuleContext(CallinvokeContext.class,0); + } + public FieldaccessContext fieldaccess() { + return getRuleContext(FieldaccessContext.class,0); + } + public BraceaccessContext braceaccess() { + return getRuleContext(BraceaccessContext.class,0); + } + public PostfixContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_postfix; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitPostfix(this); + else return visitor.visitChildren(this); + } + } + public final PostfixContext postfix() throws RecognitionException { + PostfixContext _localctx = new PostfixContext(_ctx, getState()); + enterRule(_localctx, 38, RULE_postfix); + try { + setState(353); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,29,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(350); + callinvoke(); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(351); + fieldaccess(); + } + break; + case 3: + enterOuterAlt(_localctx, 3); + { + setState(352); + braceaccess(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class PostdotContext extends ParserRuleContext { + public CallinvokeContext callinvoke() { + return getRuleContext(CallinvokeContext.class,0); + } + public FieldaccessContext fieldaccess() { + return getRuleContext(FieldaccessContext.class,0); + } + public PostdotContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_postdot; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitPostdot(this); + else return visitor.visitChildren(this); + } + } + public final PostdotContext postdot() throws RecognitionException { + PostdotContext _localctx = new PostdotContext(_ctx, getState()); + enterRule(_localctx, 40, RULE_postdot); + try { + setState(357); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,30,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(355); + callinvoke(); + } + break; + case 2: + 
enterOuterAlt(_localctx, 2); + { + setState(356); + fieldaccess(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class CallinvokeContext extends ParserRuleContext { + public TerminalNode DOTID() { return getToken(PainlessParser.DOTID, 0); } + public ArgumentsContext arguments() { + return getRuleContext(ArgumentsContext.class,0); + } + public TerminalNode DOT() { return getToken(PainlessParser.DOT, 0); } + public TerminalNode NSDOT() { return getToken(PainlessParser.NSDOT, 0); } + public CallinvokeContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_callinvoke; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitCallinvoke(this); + else return visitor.visitChildren(this); + } + } + public final CallinvokeContext callinvoke() throws RecognitionException { + CallinvokeContext _localctx = new CallinvokeContext(_ctx, getState()); + enterRule(_localctx, 42, RULE_callinvoke); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(359); + _la = _input.LA(1); + if ( !(_la==DOT || _la==NSDOT) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(360); + match(DOTID); + setState(361); + arguments(); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class FieldaccessContext extends ParserRuleContext { + public TerminalNode DOT() { return getToken(PainlessParser.DOT, 0); } + public TerminalNode NSDOT() { return getToken(PainlessParser.NSDOT, 0); } + public TerminalNode DOTID() { return getToken(PainlessParser.DOTID, 0); } + public TerminalNode DOTINTEGER() { return getToken(PainlessParser.DOTINTEGER, 0); } + public FieldaccessContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_fieldaccess; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitFieldaccess(this); + else return visitor.visitChildren(this); + } + } + public final FieldaccessContext fieldaccess() throws RecognitionException { + FieldaccessContext _localctx = new FieldaccessContext(_ctx, getState()); + enterRule(_localctx, 44, RULE_fieldaccess); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(363); + _la = _input.LA(1); + if ( !(_la==DOT || _la==NSDOT) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + setState(364); + _la = _input.LA(1); + if ( !(_la==DOTINTEGER || _la==DOTID) ) { + _errHandler.recoverInline(this); + } else { + consume(); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class BraceaccessContext extends ParserRuleContext { + public TerminalNode LBRACE() { return getToken(PainlessParser.LBRACE, 0); } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public TerminalNode RBRACE() { return getToken(PainlessParser.RBRACE, 
0); } + public BraceaccessContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_braceaccess; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitBraceaccess(this); + else return visitor.visitChildren(this); + } + } + public final BraceaccessContext braceaccess() throws RecognitionException { + BraceaccessContext _localctx = new BraceaccessContext(_ctx, getState()); + enterRule(_localctx, 46, RULE_braceaccess); + try { + enterOuterAlt(_localctx, 1); + { + setState(366); + match(LBRACE); + setState(367); + expression(0); + setState(368); + match(RBRACE); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class ArrayinitializerContext extends ParserRuleContext { + public ArrayinitializerContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_arrayinitializer; } + public ArrayinitializerContext() { } + public void copyFrom(ArrayinitializerContext ctx) { + super.copyFrom(ctx); + } + } + public static class NewstandardarrayContext extends ArrayinitializerContext { + public TerminalNode NEW() { return getToken(PainlessParser.NEW, 0); } + public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); } + public List LBRACE() { return getTokens(PainlessParser.LBRACE); } + public TerminalNode LBRACE(int i) { + return getToken(PainlessParser.LBRACE, i); + } + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public List RBRACE() { return getTokens(PainlessParser.RBRACE); } + public TerminalNode RBRACE(int i) { + return getToken(PainlessParser.RBRACE, i); + } + public PostdotContext postdot() { + return getRuleContext(PostdotContext.class,0); + } + public List postfix() { + return getRuleContexts(PostfixContext.class); + } + public PostfixContext postfix(int i) { + return getRuleContext(PostfixContext.class,i); + } + public NewstandardarrayContext(ArrayinitializerContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitNewstandardarray(this); + else return visitor.visitChildren(this); + } + } + public static class NewinitializedarrayContext extends ArrayinitializerContext { + public TerminalNode NEW() { return getToken(PainlessParser.NEW, 0); } + public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); } + public TerminalNode LBRACE() { return getToken(PainlessParser.LBRACE, 0); } + public TerminalNode RBRACE() { return getToken(PainlessParser.RBRACE, 0); } + public TerminalNode LBRACK() { return getToken(PainlessParser.LBRACK, 0); } + public TerminalNode RBRACK() { return getToken(PainlessParser.RBRACK, 0); } + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public List postfix() { + return getRuleContexts(PostfixContext.class); + } + public PostfixContext postfix(int i) { + return getRuleContext(PostfixContext.class,i); + } + public List COMMA() { 
return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public NewinitializedarrayContext(ArrayinitializerContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitNewinitializedarray(this); + else return visitor.visitChildren(this); + } + } + public final ArrayinitializerContext arrayinitializer() throws RecognitionException { + ArrayinitializerContext _localctx = new ArrayinitializerContext(_ctx, getState()); + enterRule(_localctx, 48, RULE_arrayinitializer); + int _la; + try { + int _alt; + setState(411); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,37,_ctx) ) { + case 1: + _localctx = new NewstandardarrayContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(370); + match(NEW); + setState(371); + match(TYPE); + setState(376); + _errHandler.sync(this); + _alt = 1; + do { + switch (_alt) { + case 1: + { + { + setState(372); + match(LBRACE); + setState(373); + expression(0); + setState(374); + match(RBRACE); + } + } + break; + default: + throw new NoViableAltException(this); + } + setState(378); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,31,_ctx); + } while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ); + setState(387); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,33,_ctx) ) { + case 1: + { + setState(380); + postdot(); + setState(384); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,32,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(381); + postfix(); + } + } + } + setState(386); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,32,_ctx); + } + } + break; + } + } + break; + case 2: + _localctx = new NewinitializedarrayContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(389); + match(NEW); + setState(390); + match(TYPE); + setState(391); + match(LBRACE); + setState(392); + match(RBRACE); + setState(393); + match(LBRACK); + setState(402); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { + { + setState(394); + expression(0); + setState(399); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(395); + match(COMMA); + setState(396); + expression(0); + } + } + setState(401); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + setState(404); + match(RBRACK); + setState(408); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,36,_ctx); + while ( _alt!=2 && _alt!=org.antlr.v4.runtime.atn.ATN.INVALID_ALT_NUMBER ) { + if ( _alt==1 ) { + { + { + setState(405); + postfix(); + } + } + } + setState(410); + _errHandler.sync(this); + _alt = getInterpreter().adaptivePredict(_input,36,_ctx); + } + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + 
_errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class ListinitializerContext extends ParserRuleContext { + public TerminalNode LBRACE() { return getToken(PainlessParser.LBRACE, 0); } + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode RBRACE() { return getToken(PainlessParser.RBRACE, 0); } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public ListinitializerContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_listinitializer; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitListinitializer(this); + else return visitor.visitChildren(this); + } + } + public final ListinitializerContext listinitializer() throws RecognitionException { + ListinitializerContext _localctx = new ListinitializerContext(_ctx, getState()); + enterRule(_localctx, 50, RULE_listinitializer); + int _la; + try { + setState(426); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,39,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(413); + match(LBRACE); + setState(414); + expression(0); + setState(419); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(415); + match(COMMA); + setState(416); + expression(0); + } + } + setState(421); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(422); + match(RBRACE); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(424); + match(LBRACE); + setState(425); + match(RBRACE); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class MapinitializerContext extends ParserRuleContext { + public TerminalNode LBRACE() { return getToken(PainlessParser.LBRACE, 0); } + public List maptoken() { + return getRuleContexts(MaptokenContext.class); + } + public MaptokenContext maptoken(int i) { + return getRuleContext(MaptokenContext.class,i); + } + public TerminalNode RBRACE() { return getToken(PainlessParser.RBRACE, 0); } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public TerminalNode COLON() { return getToken(PainlessParser.COLON, 0); } + public MapinitializerContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_mapinitializer; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitMapinitializer(this); + else return visitor.visitChildren(this); + } + } + public final MapinitializerContext mapinitializer() throws RecognitionException { + MapinitializerContext _localctx = new MapinitializerContext(_ctx, getState()); + enterRule(_localctx, 52, RULE_mapinitializer); + int _la; + try { + setState(442); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,41,_ctx) 
) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(428); + match(LBRACE); + setState(429); + maptoken(); + setState(434); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(430); + match(COMMA); + setState(431); + maptoken(); + } + } + setState(436); + _errHandler.sync(this); + _la = _input.LA(1); + } + setState(437); + match(RBRACE); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(439); + match(LBRACE); + setState(440); + match(COLON); + setState(441); + match(RBRACE); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class MaptokenContext extends ParserRuleContext { + public List expression() { + return getRuleContexts(ExpressionContext.class); + } + public ExpressionContext expression(int i) { + return getRuleContext(ExpressionContext.class,i); + } + public TerminalNode COLON() { return getToken(PainlessParser.COLON, 0); } + public MaptokenContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_maptoken; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitMaptoken(this); + else return visitor.visitChildren(this); + } + } + public final MaptokenContext maptoken() throws RecognitionException { + MaptokenContext _localctx = new MaptokenContext(_ctx, getState()); + enterRule(_localctx, 54, RULE_maptoken); + try { + enterOuterAlt(_localctx, 1); + { + setState(444); + expression(0); + setState(445); + match(COLON); + setState(446); + expression(0); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class ArgumentsContext extends ParserRuleContext { + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public List argument() { + return getRuleContexts(ArgumentContext.class); + } + public ArgumentContext argument(int i) { + return getRuleContext(ArgumentContext.class,i); + } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public ArgumentsContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_arguments; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitArguments(this); + else return visitor.visitChildren(this); + } + } + public final ArgumentsContext arguments() throws RecognitionException { + ArgumentsContext _localctx = new ArgumentsContext(_ctx, getState()); + enterRule(_localctx, 56, RULE_arguments); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + { + setState(448); + match(LP); + setState(457); + _la = _input.LA(1); + if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << LBRACE) | (1L << LP) | (1L << NEW) | (1L << THIS) | (1L << BOOLNOT) | (1L << BWNOT) | (1L << ADD) | (1L << SUB) | (1L << INCR) | (1L << DECR))) != 0) || ((((_la - 72)) & ~0x3f) == 0 && ((1L << (_la - 72)) & ((1L << (OCTAL - 72)) | (1L << (HEX - 
72)) | (1L << (INTEGER - 72)) | (1L << (DECIMAL - 72)) | (1L << (STRING - 72)) | (1L << (REGEX - 72)) | (1L << (TRUE - 72)) | (1L << (FALSE - 72)) | (1L << (NULL - 72)) | (1L << (TYPE - 72)) | (1L << (ID - 72)))) != 0)) { + { + setState(449); + argument(); + setState(454); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(450); + match(COMMA); + setState(451); + argument(); + } + } + setState(456); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + setState(459); + match(RP); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class ArgumentContext extends ParserRuleContext { + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public LambdaContext lambda() { + return getRuleContext(LambdaContext.class,0); + } + public FuncrefContext funcref() { + return getRuleContext(FuncrefContext.class,0); + } + public ArgumentContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_argument; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitArgument(this); + else return visitor.visitChildren(this); + } + } + public final ArgumentContext argument() throws RecognitionException { + ArgumentContext _localctx = new ArgumentContext(_ctx, getState()); + enterRule(_localctx, 58, RULE_argument); + try { + setState(464); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,44,_ctx) ) { + case 1: + enterOuterAlt(_localctx, 1); + { + setState(461); + expression(0); + } + break; + case 2: + enterOuterAlt(_localctx, 2); + { + setState(462); + lambda(); + } + break; + case 3: + enterOuterAlt(_localctx, 3); + { + setState(463); + funcref(); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class LambdaContext extends ParserRuleContext { + public TerminalNode ARROW() { return getToken(PainlessParser.ARROW, 0); } + public List lamtype() { + return getRuleContexts(LamtypeContext.class); + } + public LamtypeContext lamtype(int i) { + return getRuleContext(LamtypeContext.class,i); + } + public TerminalNode LP() { return getToken(PainlessParser.LP, 0); } + public TerminalNode RP() { return getToken(PainlessParser.RP, 0); } + public BlockContext block() { + return getRuleContext(BlockContext.class,0); + } + public ExpressionContext expression() { + return getRuleContext(ExpressionContext.class,0); + } + public List COMMA() { return getTokens(PainlessParser.COMMA); } + public TerminalNode COMMA(int i) { + return getToken(PainlessParser.COMMA, i); + } + public LambdaContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_lambda; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitLambda(this); + else return visitor.visitChildren(this); + } + } + public final LambdaContext lambda() throws RecognitionException { + LambdaContext _localctx = new LambdaContext(_ctx, getState()); + 
enterRule(_localctx, 60, RULE_lambda); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(479); + switch (_input.LA(1)) { + case TYPE: + case ID: + { + setState(466); + lamtype(); + } + break; + case LP: + { + setState(467); + match(LP); + setState(476); + _la = _input.LA(1); + if (_la==TYPE || _la==ID) { + { + setState(468); + lamtype(); + setState(473); + _errHandler.sync(this); + _la = _input.LA(1); + while (_la==COMMA) { + { + { + setState(469); + match(COMMA); + setState(470); + lamtype(); + } + } + setState(475); + _errHandler.sync(this); + _la = _input.LA(1); + } + } + } + setState(478); + match(RP); + } + break; + default: + throw new NoViableAltException(this); + } + setState(481); + match(ARROW); + setState(484); + switch (_input.LA(1)) { + case LBRACK: + { + setState(482); + block(); + } + break; + case LBRACE: + case LP: + case NEW: + case BOOLNOT: + case BWNOT: + case ADD: + case SUB: + case INCR: + case DECR: + case OCTAL: + case HEX: + case INTEGER: + case DECIMAL: + case STRING: + case REGEX: + case TRUE: + case FALSE: + case NULL: + case TYPE: + case ID: + { + setState(483); + expression(0); + } + break; + default: + throw new NoViableAltException(this); + } + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class LamtypeContext extends ParserRuleContext { + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public LamtypeContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_lamtype; } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitLamtype(this); + else return visitor.visitChildren(this); + } + } + public final LamtypeContext lamtype() throws RecognitionException { + LamtypeContext _localctx = new LamtypeContext(_ctx, getState()); + enterRule(_localctx, 62, RULE_lamtype); + int _la; + try { + enterOuterAlt(_localctx, 1); + { + setState(487); + _la = _input.LA(1); + if (_la==TYPE) { + { + setState(486); + decltype(); + } + } + setState(489); + match(ID); + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public static class FuncrefContext extends ParserRuleContext { + public FuncrefContext(ParserRuleContext parent, int invokingState) { + super(parent, invokingState); + } + @Override public int getRuleIndex() { return RULE_funcref; } + public FuncrefContext() { } + public void copyFrom(FuncrefContext ctx) { + super.copyFrom(ctx); + } + } + public static class ClassfuncrefContext extends FuncrefContext { + public TerminalNode TYPE() { return getToken(PainlessParser.TYPE, 0); } + public TerminalNode REF() { return getToken(PainlessParser.REF, 0); } + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public ClassfuncrefContext(FuncrefContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitClassfuncref(this); + else return visitor.visitChildren(this); + } + } + public static class CapturingfuncrefContext 
extends FuncrefContext { + public List ID() { return getTokens(PainlessParser.ID); } + public TerminalNode ID(int i) { + return getToken(PainlessParser.ID, i); + } + public TerminalNode REF() { return getToken(PainlessParser.REF, 0); } + public CapturingfuncrefContext(FuncrefContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitCapturingfuncref(this); + else return visitor.visitChildren(this); + } + } + public static class ConstructorfuncrefContext extends FuncrefContext { + public DecltypeContext decltype() { + return getRuleContext(DecltypeContext.class,0); + } + public TerminalNode REF() { return getToken(PainlessParser.REF, 0); } + public TerminalNode NEW() { return getToken(PainlessParser.NEW, 0); } + public ConstructorfuncrefContext(FuncrefContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitConstructorfuncref(this); + else return visitor.visitChildren(this); + } + } + public static class LocalfuncrefContext extends FuncrefContext { + public TerminalNode THIS() { return getToken(PainlessParser.THIS, 0); } + public TerminalNode REF() { return getToken(PainlessParser.REF, 0); } + public TerminalNode ID() { return getToken(PainlessParser.ID, 0); } + public LocalfuncrefContext(FuncrefContext ctx) { copyFrom(ctx); } + @Override + public T accept(ParseTreeVisitor visitor) { + if ( visitor instanceof PainlessParserVisitor ) return ((PainlessParserVisitor)visitor).visitLocalfuncref(this); + else return visitor.visitChildren(this); + } + } + public final FuncrefContext funcref() throws RecognitionException { + FuncrefContext _localctx = new FuncrefContext(_ctx, getState()); + enterRule(_localctx, 64, RULE_funcref); + try { + setState(504); + _errHandler.sync(this); + switch ( getInterpreter().adaptivePredict(_input,50,_ctx) ) { + case 1: + _localctx = new ClassfuncrefContext(_localctx); + enterOuterAlt(_localctx, 1); + { + setState(491); + match(TYPE); + setState(492); + match(REF); + setState(493); + match(ID); + } + break; + case 2: + _localctx = new ConstructorfuncrefContext(_localctx); + enterOuterAlt(_localctx, 2); + { + setState(494); + decltype(); + setState(495); + match(REF); + setState(496); + match(NEW); + } + break; + case 3: + _localctx = new CapturingfuncrefContext(_localctx); + enterOuterAlt(_localctx, 3); + { + setState(498); + match(ID); + setState(499); + match(REF); + setState(500); + match(ID); + } + break; + case 4: + _localctx = new LocalfuncrefContext(_localctx); + enterOuterAlt(_localctx, 4); + { + setState(501); + match(THIS); + setState(502); + match(REF); + setState(503); + match(ID); + } + break; + } + } + catch (RecognitionException re) { + _localctx.exception = re; + _errHandler.reportError(this, re); + _errHandler.recover(this, re); + } + finally { + exitRule(); + } + return _localctx; + } + public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) { + switch (ruleIndex) { + case 4: + return rstatement_sempred((RstatementContext)_localctx, predIndex); + case 15: + return expression_sempred((ExpressionContext)_localctx, predIndex); + } + return true; + } + private boolean rstatement_sempred(RstatementContext _localctx, int predIndex) { + switch (predIndex) { + case 0: + return _input.LA(1) != ELSE ; + } + return true; + } + private boolean expression_sempred(ExpressionContext _localctx, 
int predIndex) { + switch (predIndex) { + case 1: + return precpred(_ctx, 15); + case 2: + return precpred(_ctx, 14); + case 3: + return precpred(_ctx, 13); + case 4: + return precpred(_ctx, 12); + case 5: + return precpred(_ctx, 11); + case 6: + return precpred(_ctx, 9); + case 7: + return precpred(_ctx, 8); + case 8: + return precpred(_ctx, 7); + case 9: + return precpred(_ctx, 6); + case 10: + return precpred(_ctx, 5); + case 11: + return precpred(_ctx, 4); + case 12: + return precpred(_ctx, 3); + case 13: + return precpred(_ctx, 2); + case 14: + return precpred(_ctx, 1); + case 15: + return precpred(_ctx, 10); + } + return true; + } + public static final String _serializedATN = + "\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3V\u01fd\4\2\t\2\4"+ + "\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t"+ + "\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22"+ + "\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31\t\31"+ + "\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!"+ + "\t!\4\"\t\"\3\2\7\2F\n\2\f\2\16\2I\13\2\3\2\7\2L\n\2\f\2\16\2O\13\2\3"+ + "\2\3\2\3\3\3\3\3\3\3\3\3\3\3\4\3\4\3\4\3\4\3\4\3\4\3\4\7\4_\n\4\f\4\16"+ + "\4b\13\4\5\4d\n\4\3\4\3\4\3\5\3\5\3\5\3\5\5\5l\n\5\3\6\3\6\3\6\3\6\3\6"+ + "\3\6\3\6\3\6\5\6v\n\6\3\6\3\6\3\6\3\6\3\6\3\6\5\6~\n\6\3\6\3\6\3\6\5\6"+ + "\u0083\n\6\3\6\3\6\5\6\u0087\n\6\3\6\3\6\5\6\u008b\n\6\3\6\3\6\3\6\5\6"+ + "\u0090\n\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6"+ + "\3\6\3\6\3\6\3\6\3\6\6\6\u00a6\n\6\r\6\16\6\u00a7\5\6\u00aa\n\6\3\7\3"+ + "\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\3\7\5\7\u00b8\n\7\3\7\3\7\3\7\5"+ + "\7\u00bd\n\7\3\b\3\b\5\b\u00c1\n\b\3\t\3\t\7\t\u00c5\n\t\f\t\16\t\u00c8"+ + "\13\t\3\t\5\t\u00cb\n\t\3\t\3\t\3\n\3\n\3\13\3\13\5\13\u00d3\n\13\3\f"+ + "\3\f\3\r\3\r\3\r\3\r\7\r\u00db\n\r\f\r\16\r\u00de\13\r\3\16\3\16\3\16"+ + "\7\16\u00e3\n\16\f\16\16\16\u00e6\13\16\3\17\3\17\3\17\5\17\u00eb\n\17"+ + "\3\20\3\20\3\20\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\21"+ + "\3\21\3\21\7\21\u0127\n\21\f\21\16\21\u012a\13\21\3\22\3\22\3\22\3\22"+ + "\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\5\22\u0139\n\22\3\23\3\23"+ + "\7\23\u013d\n\23\f\23\16\23\u0140\13\23\3\23\3\23\3\23\7\23\u0145\n\23"+ + "\f\23\16\23\u0148\13\23\3\23\5\23\u014b\n\23\3\24\3\24\3\24\3\24\3\24"+ + "\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\3\24\5\24"+ + "\u015f\n\24\3\25\3\25\3\25\5\25\u0164\n\25\3\26\3\26\5\26\u0168\n\26\3"+ + "\27\3\27\3\27\3\27\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3"+ + "\32\3\32\3\32\6\32\u017b\n\32\r\32\16\32\u017c\3\32\3\32\7\32\u0181\n"+ + "\32\f\32\16\32\u0184\13\32\5\32\u0186\n\32\3\32\3\32\3\32\3\32\3\32\3"+ + "\32\3\32\3\32\7\32\u0190\n\32\f\32\16\32\u0193\13\32\5\32\u0195\n\32\3"+ + "\32\3\32\7\32\u0199\n\32\f\32\16\32\u019c\13\32\5\32\u019e\n\32\3\33\3"+ + "\33\3\33\3\33\7\33\u01a4\n\33\f\33\16\33\u01a7\13\33\3\33\3\33\3\33\3"+ + "\33\5\33\u01ad\n\33\3\34\3\34\3\34\3\34\7\34\u01b3\n\34\f\34\16\34\u01b6"+ + "\13\34\3\34\3\34\3\34\3\34\3\34\5\34\u01bd\n\34\3\35\3\35\3\35\3\35\3"+ + "\36\3\36\3\36\3\36\7\36\u01c7\n\36\f\36\16\36\u01ca\13\36\5\36\u01cc\n"+ + "\36\3\36\3\36\3\37\3\37\3\37\5\37\u01d3\n\37\3 \3 \3 \3 \3 \7 \u01da\n"+ + " \f \16 \u01dd\13 \5 \u01df\n \3 \5 \u01e2\n \3 \3 \3 \5 
\u01e7\n \3!"+ + "\5!\u01ea\n!\3!\3!\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\"\3\""+ + "\5\"\u01fb\n\"\3\"\2\3 #\2\4\6\b\n\f\16\20\22\24\26\30\32\34\36 \"$&("+ + "*,.\60\62\64\668:<>@B\2\17\3\3\16\16\3\2 \"\3\2#$\3\2:;\3\2%\'\3\2(+\3"+ + "\2,/\3\2>I\3\2<=\4\2\36\37#$\3\2JM\3\2\13\f\3\2UV\u0236\2G\3\2\2\2\4R"+ + "\3\2\2\2\6W\3\2\2\2\bk\3\2\2\2\n\u00a9\3\2\2\2\f\u00bc\3\2\2\2\16\u00c0"+ + "\3\2\2\2\20\u00c2\3\2\2\2\22\u00ce\3\2\2\2\24\u00d2\3\2\2\2\26\u00d4\3"+ + "\2\2\2\30\u00d6\3\2\2\2\32\u00df\3\2\2\2\34\u00e7\3\2\2\2\36\u00ec\3\2"+ + "\2\2 \u00f3\3\2\2\2\"\u0138\3\2\2\2$\u014a\3\2\2\2&\u015e\3\2\2\2(\u0163"+ + "\3\2\2\2*\u0167\3\2\2\2,\u0169\3\2\2\2.\u016d\3\2\2\2\60\u0170\3\2\2\2"+ + "\62\u019d\3\2\2\2\64\u01ac\3\2\2\2\66\u01bc\3\2\2\28\u01be\3\2\2\2:\u01c2"+ + "\3\2\2\2<\u01d2\3\2\2\2>\u01e1\3\2\2\2@\u01e9\3\2\2\2B\u01fa\3\2\2\2D"+ + "F\5\4\3\2ED\3\2\2\2FI\3\2\2\2GE\3\2\2\2GH\3\2\2\2HM\3\2\2\2IG\3\2\2\2"+ + "JL\5\b\5\2KJ\3\2\2\2LO\3\2\2\2MK\3\2\2\2MN\3\2\2\2NP\3\2\2\2OM\3\2\2\2"+ + "PQ\7\2\2\3Q\3\3\2\2\2RS\5\32\16\2ST\7T\2\2TU\5\6\4\2UV\5\20\t\2V\5\3\2"+ + "\2\2Wc\7\t\2\2XY\5\32\16\2Y`\7T\2\2Z[\7\r\2\2[\\\5\32\16\2\\]\7T\2\2]"+ + "_\3\2\2\2^Z\3\2\2\2_b\3\2\2\2`^\3\2\2\2`a\3\2\2\2ad\3\2\2\2b`\3\2\2\2"+ + "cX\3\2\2\2cd\3\2\2\2de\3\2\2\2ef\7\n\2\2f\7\3\2\2\2gl\5\n\6\2hi\5\f\7"+ + "\2ij\t\2\2\2jl\3\2\2\2kg\3\2\2\2kh\3\2\2\2l\t\3\2\2\2mn\7\17\2\2no\7\t"+ + "\2\2op\5 \21\2pq\7\n\2\2qu\5\16\b\2rs\7\21\2\2sv\5\16\b\2tv\6\6\2\2ur"+ + "\3\2\2\2ut\3\2\2\2v\u00aa\3\2\2\2wx\7\22\2\2xy\7\t\2\2yz\5 \21\2z}\7\n"+ + "\2\2{~\5\16\b\2|~\5\22\n\2}{\3\2\2\2}|\3\2\2\2~\u00aa\3\2\2\2\177\u0080"+ + "\7\24\2\2\u0080\u0082\7\t\2\2\u0081\u0083\5\24\13\2\u0082\u0081\3\2\2"+ + "\2\u0082\u0083\3\2\2\2\u0083\u0084\3\2\2\2\u0084\u0086\7\16\2\2\u0085"+ + "\u0087\5 \21\2\u0086\u0085\3\2\2\2\u0086\u0087\3\2\2\2\u0087\u0088\3\2"+ + "\2\2\u0088\u008a\7\16\2\2\u0089\u008b\5\26\f\2\u008a\u0089\3\2\2\2\u008a"+ + "\u008b\3\2\2\2\u008b\u008c\3\2\2\2\u008c\u008f\7\n\2\2\u008d\u0090\5\16"+ + "\b\2\u008e\u0090\5\22\n\2\u008f\u008d\3\2\2\2\u008f\u008e\3\2\2\2\u0090"+ + "\u00aa\3\2\2\2\u0091\u0092\7\24\2\2\u0092\u0093\7\t\2\2\u0093\u0094\5"+ + "\32\16\2\u0094\u0095\7T\2\2\u0095\u0096\7\66\2\2\u0096\u0097\5 \21\2\u0097"+ + "\u0098\7\n\2\2\u0098\u0099\5\16\b\2\u0099\u00aa\3\2\2\2\u009a\u009b\7"+ + "\24\2\2\u009b\u009c\7\t\2\2\u009c\u009d\7T\2\2\u009d\u009e\7\20\2\2\u009e"+ + "\u009f\5 \21\2\u009f\u00a0\7\n\2\2\u00a0\u00a1\5\16\b\2\u00a1\u00aa\3"+ + "\2\2\2\u00a2\u00a3\7\31\2\2\u00a3\u00a5\5\20\t\2\u00a4\u00a6\5\36\20\2"+ + "\u00a5\u00a4\3\2\2\2\u00a6\u00a7\3\2\2\2\u00a7\u00a5\3\2\2\2\u00a7\u00a8"+ + "\3\2\2\2\u00a8\u00aa\3\2\2\2\u00a9m\3\2\2\2\u00a9w\3\2\2\2\u00a9\177\3"+ + "\2\2\2\u00a9\u0091\3\2\2\2\u00a9\u009a\3\2\2\2\u00a9\u00a2\3\2\2\2\u00aa"+ + "\13\3\2\2\2\u00ab\u00ac\7\23\2\2\u00ac\u00ad\5\20\t\2\u00ad\u00ae\7\22"+ + "\2\2\u00ae\u00af\7\t\2\2\u00af\u00b0\5 \21\2\u00b0\u00b1\7\n\2\2\u00b1"+ + "\u00bd\3\2\2\2\u00b2\u00bd\5\30\r\2\u00b3\u00bd\7\25\2\2\u00b4\u00bd\7"+ + "\26\2\2\u00b5\u00b7\7\27\2\2\u00b6\u00b8\5 \21\2\u00b7\u00b6\3\2\2\2\u00b7"+ + "\u00b8\3\2\2\2\u00b8\u00bd\3\2\2\2\u00b9\u00ba\7\33\2\2\u00ba\u00bd\5"+ + " \21\2\u00bb\u00bd\5 \21\2\u00bc\u00ab\3\2\2\2\u00bc\u00b2\3\2\2\2\u00bc"+ + "\u00b3\3\2\2\2\u00bc\u00b4\3\2\2\2\u00bc\u00b5\3\2\2\2\u00bc\u00b9\3\2"+ + "\2\2\u00bc\u00bb\3\2\2\2\u00bd\r\3\2\2\2\u00be\u00c1\5\20\t\2\u00bf\u00c1"+ + "\5\b\5\2\u00c0\u00be\3\2\2\2\u00c0\u00bf\3\2\2\2\u00c1\17\3\2\2\2\u00c2"+ + "\u00c6\7\5\2\2\u00c3\u00c5\5\b\5\2\u00c4\u00c3\3\2\2\2\u00c5\u00c8\3\2"+ + 
"\2\2\u00c6\u00c4\3\2\2\2\u00c6\u00c7\3\2\2\2\u00c7\u00ca\3\2\2\2\u00c8"+ + "\u00c6\3\2\2\2\u00c9\u00cb\5\f\7\2\u00ca\u00c9\3\2\2\2\u00ca\u00cb\3\2"+ + "\2\2\u00cb\u00cc\3\2\2\2\u00cc\u00cd\7\6\2\2\u00cd\21\3\2\2\2\u00ce\u00cf"+ + "\7\16\2\2\u00cf\23\3\2\2\2\u00d0\u00d3\5\30\r\2\u00d1\u00d3\5 \21\2\u00d2"+ + "\u00d0\3\2\2\2\u00d2\u00d1\3\2\2\2\u00d3\25\3\2\2\2\u00d4\u00d5\5 \21"+ + "\2\u00d5\27\3\2\2\2\u00d6\u00d7\5\32\16\2\u00d7\u00dc\5\34\17\2\u00d8"+ + "\u00d9\7\r\2\2\u00d9\u00db\5\34\17\2\u00da\u00d8\3\2\2\2\u00db\u00de\3"+ + "\2\2\2\u00dc\u00da\3\2\2\2\u00dc\u00dd\3\2\2\2\u00dd\31\3\2\2\2\u00de"+ + "\u00dc\3\2\2\2\u00df\u00e4\7S\2\2\u00e0\u00e1\7\7\2\2\u00e1\u00e3\7\b"+ + "\2\2\u00e2\u00e0\3\2\2\2\u00e3\u00e6\3\2\2\2\u00e4\u00e2\3\2\2\2\u00e4"+ + "\u00e5\3\2\2\2\u00e5\33\3\2\2\2\u00e6\u00e4\3\2\2\2\u00e7\u00ea\7T\2\2"+ + "\u00e8\u00e9\7>\2\2\u00e9\u00eb\5 \21\2\u00ea\u00e8\3\2\2\2\u00ea\u00eb"+ + "\3\2\2\2\u00eb\35\3\2\2\2\u00ec\u00ed\7\32\2\2\u00ed\u00ee\7\t\2\2\u00ee"+ + "\u00ef\7S\2\2\u00ef\u00f0\7T\2\2\u00f0\u00f1\7\n\2\2\u00f1\u00f2\5\20"+ + "\t\2\u00f2\37\3\2\2\2\u00f3\u00f4\b\21\1\2\u00f4\u00f5\5\"\22\2\u00f5"+ + "\u0128\3\2\2\2\u00f6\u00f7\f\21\2\2\u00f7\u00f8\t\3\2\2\u00f8\u0127\5"+ + " \21\22\u00f9\u00fa\f\20\2\2\u00fa\u00fb\t\4\2\2\u00fb\u0127\5 \21\21"+ + "\u00fc\u00fd\f\17\2\2\u00fd\u00fe\t\5\2\2\u00fe\u0127\5 \21\20\u00ff\u0100"+ + "\f\16\2\2\u0100\u0101\t\6\2\2\u0101\u0127\5 \21\17\u0102\u0103\f\r\2\2"+ + "\u0103\u0104\t\7\2\2\u0104\u0127\5 \21\16\u0105\u0106\f\13\2\2\u0106\u0107"+ + "\t\b\2\2\u0107\u0127\5 \21\f\u0108\u0109\f\n\2\2\u0109\u010a\7\60\2\2"+ + "\u010a\u0127\5 \21\13\u010b\u010c\f\t\2\2\u010c\u010d\7\61\2\2\u010d\u0127"+ + "\5 \21\n\u010e\u010f\f\b\2\2\u010f\u0110\7\62\2\2\u0110\u0127\5 \21\t"+ + "\u0111\u0112\f\7\2\2\u0112\u0113\7\63\2\2\u0113\u0127\5 \21\b\u0114\u0115"+ + "\f\6\2\2\u0115\u0116\7\64\2\2\u0116\u0127\5 \21\7\u0117\u0118\f\5\2\2"+ + "\u0118\u0119\7\65\2\2\u0119\u011a\5 \21\2\u011a\u011b\7\66\2\2\u011b\u011c"+ + "\5 \21\5\u011c\u0127\3\2\2\2\u011d\u011e\f\4\2\2\u011e\u011f\7\67\2\2"+ + "\u011f\u0127\5 \21\4\u0120\u0121\f\3\2\2\u0121\u0122\t\t\2\2\u0122\u0127"+ + "\5 \21\3\u0123\u0124\f\f\2\2\u0124\u0125\7\35\2\2\u0125\u0127\5\32\16"+ + "\2\u0126\u00f6\3\2\2\2\u0126\u00f9\3\2\2\2\u0126\u00fc\3\2\2\2\u0126\u00ff"+ + "\3\2\2\2\u0126\u0102\3\2\2\2\u0126\u0105\3\2\2\2\u0126\u0108\3\2\2\2\u0126"+ + "\u010b\3\2\2\2\u0126\u010e\3\2\2\2\u0126\u0111\3\2\2\2\u0126\u0114\3\2"+ + "\2\2\u0126\u0117\3\2\2\2\u0126\u011d\3\2\2\2\u0126\u0120\3\2\2\2\u0126"+ + "\u0123\3\2\2\2\u0127\u012a\3\2\2\2\u0128\u0126\3\2\2\2\u0128\u0129\3\2"+ + "\2\2\u0129!\3\2\2\2\u012a\u0128\3\2\2\2\u012b\u012c\t\n\2\2\u012c\u0139"+ + "\5$\23\2\u012d\u012e\5$\23\2\u012e\u012f\t\n\2\2\u012f\u0139\3\2\2\2\u0130"+ + "\u0139\5$\23\2\u0131\u0132\t\13\2\2\u0132\u0139\5\"\22\2\u0133\u0134\7"+ + "\t\2\2\u0134\u0135\5\32\16\2\u0135\u0136\7\n\2\2\u0136\u0137\5\"\22\2"+ + "\u0137\u0139\3\2\2\2\u0138\u012b\3\2\2\2\u0138\u012d\3\2\2\2\u0138\u0130"+ + "\3\2\2\2\u0138\u0131\3\2\2\2\u0138\u0133\3\2\2\2\u0139#\3\2\2\2\u013a"+ + "\u013e\5&\24\2\u013b\u013d\5(\25\2\u013c\u013b\3\2\2\2\u013d\u0140\3\2"+ + "\2\2\u013e\u013c\3\2\2\2\u013e\u013f\3\2\2\2\u013f\u014b\3\2\2\2\u0140"+ + "\u013e\3\2\2\2\u0141\u0142\5\32\16\2\u0142\u0146\5*\26\2\u0143\u0145\5"+ + "(\25\2\u0144\u0143\3\2\2\2\u0145\u0148\3\2\2\2\u0146\u0144\3\2\2\2\u0146"+ + "\u0147\3\2\2\2\u0147\u014b\3\2\2\2\u0148\u0146\3\2\2\2\u0149\u014b\5\62"+ + "\32\2\u014a\u013a\3\2\2\2\u014a\u0141\3\2\2\2\u014a\u0149\3\2\2\2\u014b"+ + 
"%\3\2\2\2\u014c\u014d\7\t\2\2\u014d\u014e\5 \21\2\u014e\u014f\7\n\2\2"+ + "\u014f\u015f\3\2\2\2\u0150\u015f\t\f\2\2\u0151\u015f\7P\2\2\u0152\u015f"+ + "\7Q\2\2\u0153\u015f\7R\2\2\u0154\u015f\7N\2\2\u0155\u015f\7O\2\2\u0156"+ + "\u015f\5\64\33\2\u0157\u015f\5\66\34\2\u0158\u015f\7T\2\2\u0159\u015a"+ + "\7T\2\2\u015a\u015f\5:\36\2\u015b\u015c\7\30\2\2\u015c\u015d\7S\2\2\u015d"+ + "\u015f\5:\36\2\u015e\u014c\3\2\2\2\u015e\u0150\3\2\2\2\u015e\u0151\3\2"+ + "\2\2\u015e\u0152\3\2\2\2\u015e\u0153\3\2\2\2\u015e\u0154\3\2\2\2\u015e"+ + "\u0155\3\2\2\2\u015e\u0156\3\2\2\2\u015e\u0157\3\2\2\2\u015e\u0158\3\2"+ + "\2\2\u015e\u0159\3\2\2\2\u015e\u015b\3\2\2\2\u015f\'\3\2\2\2\u0160\u0164"+ + "\5,\27\2\u0161\u0164\5.\30\2\u0162\u0164\5\60\31\2\u0163\u0160\3\2\2\2"+ + "\u0163\u0161\3\2\2\2\u0163\u0162\3\2\2\2\u0164)\3\2\2\2\u0165\u0168\5"+ + ",\27\2\u0166\u0168\5.\30\2\u0167\u0165\3\2\2\2\u0167\u0166\3\2\2\2\u0168"+ + "+\3\2\2\2\u0169\u016a\t\r\2\2\u016a\u016b\7V\2\2\u016b\u016c\5:\36\2\u016c"+ + "-\3\2\2\2\u016d\u016e\t\r\2\2\u016e\u016f\t\16\2\2\u016f/\3\2\2\2\u0170"+ + "\u0171\7\7\2\2\u0171\u0172\5 \21\2\u0172\u0173\7\b\2\2\u0173\61\3\2\2"+ + "\2\u0174\u0175\7\30\2\2\u0175\u017a\7S\2\2\u0176\u0177\7\7\2\2\u0177\u0178"+ + "\5 \21\2\u0178\u0179\7\b\2\2\u0179\u017b\3\2\2\2\u017a\u0176\3\2\2\2\u017b"+ + "\u017c\3\2\2\2\u017c\u017a\3\2\2\2\u017c\u017d\3\2\2\2\u017d\u0185\3\2"+ + "\2\2\u017e\u0182\5*\26\2\u017f\u0181\5(\25\2\u0180\u017f\3\2\2\2\u0181"+ + "\u0184\3\2\2\2\u0182\u0180\3\2\2\2\u0182\u0183\3\2\2\2\u0183\u0186\3\2"+ + "\2\2\u0184\u0182\3\2\2\2\u0185\u017e\3\2\2\2\u0185\u0186\3\2\2\2\u0186"+ + "\u019e\3\2\2\2\u0187\u0188\7\30\2\2\u0188\u0189\7S\2\2\u0189\u018a\7\7"+ + "\2\2\u018a\u018b\7\b\2\2\u018b\u0194\7\5\2\2\u018c\u0191\5 \21\2\u018d"+ + "\u018e\7\r\2\2\u018e\u0190\5 \21\2\u018f\u018d\3\2\2\2\u0190\u0193\3\2"+ + "\2\2\u0191\u018f\3\2\2\2\u0191\u0192\3\2\2\2\u0192\u0195\3\2\2\2\u0193"+ + "\u0191\3\2\2\2\u0194\u018c\3\2\2\2\u0194\u0195\3\2\2\2\u0195\u0196\3\2"+ + "\2\2\u0196\u019a\7\6\2\2\u0197\u0199\5(\25\2\u0198\u0197\3\2\2\2\u0199"+ + "\u019c\3\2\2\2\u019a\u0198\3\2\2\2\u019a\u019b\3\2\2\2\u019b\u019e\3\2"+ + "\2\2\u019c\u019a\3\2\2\2\u019d\u0174\3\2\2\2\u019d\u0187\3\2\2\2\u019e"+ + "\63\3\2\2\2\u019f\u01a0\7\7\2\2\u01a0\u01a5\5 \21\2\u01a1\u01a2\7\r\2"+ + "\2\u01a2\u01a4\5 \21\2\u01a3\u01a1\3\2\2\2\u01a4\u01a7\3\2\2\2\u01a5\u01a3"+ + "\3\2\2\2\u01a5\u01a6\3\2\2\2\u01a6\u01a8\3\2\2\2\u01a7\u01a5\3\2\2\2\u01a8"+ + "\u01a9\7\b\2\2\u01a9\u01ad\3\2\2\2\u01aa\u01ab\7\7\2\2\u01ab\u01ad\7\b"+ + "\2\2\u01ac\u019f\3\2\2\2\u01ac\u01aa\3\2\2\2\u01ad\65\3\2\2\2\u01ae\u01af"+ + "\7\7\2\2\u01af\u01b4\58\35\2\u01b0\u01b1\7\r\2\2\u01b1\u01b3\58\35\2\u01b2"+ + "\u01b0\3\2\2\2\u01b3\u01b6\3\2\2\2\u01b4\u01b2\3\2\2\2\u01b4\u01b5\3\2"+ + "\2\2\u01b5\u01b7\3\2\2\2\u01b6\u01b4\3\2\2\2\u01b7\u01b8\7\b\2\2\u01b8"+ + "\u01bd\3\2\2\2\u01b9\u01ba\7\7\2\2\u01ba\u01bb\7\66\2\2\u01bb\u01bd\7"+ + "\b\2\2\u01bc\u01ae\3\2\2\2\u01bc\u01b9\3\2\2\2\u01bd\67\3\2\2\2\u01be"+ + "\u01bf\5 \21\2\u01bf\u01c0\7\66\2\2\u01c0\u01c1\5 \21\2\u01c19\3\2\2\2"+ + "\u01c2\u01cb\7\t\2\2\u01c3\u01c8\5<\37\2\u01c4\u01c5\7\r\2\2\u01c5\u01c7"+ + "\5<\37\2\u01c6\u01c4\3\2\2\2\u01c7\u01ca\3\2\2\2\u01c8\u01c6\3\2\2\2\u01c8"+ + "\u01c9\3\2\2\2\u01c9\u01cc\3\2\2\2\u01ca\u01c8\3\2\2\2\u01cb\u01c3\3\2"+ + "\2\2\u01cb\u01cc\3\2\2\2\u01cc\u01cd\3\2\2\2\u01cd\u01ce\7\n\2\2\u01ce"+ + ";\3\2\2\2\u01cf\u01d3\5 \21\2\u01d0\u01d3\5> \2\u01d1\u01d3\5B\"\2\u01d2"+ + "\u01cf\3\2\2\2\u01d2\u01d0\3\2\2\2\u01d2\u01d1\3\2\2\2\u01d3=\3\2\2\2"+ + 
"\u01d4\u01e2\5@!\2\u01d5\u01de\7\t\2\2\u01d6\u01db\5@!\2\u01d7\u01d8\7"+ + "\r\2\2\u01d8\u01da\5@!\2\u01d9\u01d7\3\2\2\2\u01da\u01dd\3\2\2\2\u01db"+ + "\u01d9\3\2\2\2\u01db\u01dc\3\2\2\2\u01dc\u01df\3\2\2\2\u01dd\u01db\3\2"+ + "\2\2\u01de\u01d6\3\2\2\2\u01de\u01df\3\2\2\2\u01df\u01e0\3\2\2\2\u01e0"+ + "\u01e2\7\n\2\2\u01e1\u01d4\3\2\2\2\u01e1\u01d5\3\2\2\2\u01e2\u01e3\3\2"+ + "\2\2\u01e3\u01e6\79\2\2\u01e4\u01e7\5\20\t\2\u01e5\u01e7\5 \21\2\u01e6"+ + "\u01e4\3\2\2\2\u01e6\u01e5\3\2\2\2\u01e7?\3\2\2\2\u01e8\u01ea\5\32\16"+ + "\2\u01e9\u01e8\3\2\2\2\u01e9\u01ea\3\2\2\2\u01ea\u01eb\3\2\2\2\u01eb\u01ec"+ + "\7T\2\2\u01ecA\3\2\2\2\u01ed\u01ee\7S\2\2\u01ee\u01ef\78\2\2\u01ef\u01fb"+ + "\7T\2\2\u01f0\u01f1\5\32\16\2\u01f1\u01f2\78\2\2\u01f2\u01f3\7\30\2\2"+ + "\u01f3\u01fb\3\2\2\2\u01f4\u01f5\7T\2\2\u01f5\u01f6\78\2\2\u01f6\u01fb"+ + "\7T\2\2\u01f7\u01f8\7\34\2\2\u01f8\u01f9\78\2\2\u01f9\u01fb\7T\2\2\u01fa"+ + "\u01ed\3\2\2\2\u01fa\u01f0\3\2\2\2\u01fa\u01f4\3\2\2\2\u01fa\u01f7\3\2"+ + "\2\2\u01fbC\3\2\2\2\65GM`cku}\u0082\u0086\u008a\u008f\u00a7\u00a9\u00b7"+ + "\u00bc\u00c0\u00c6\u00ca\u00d2\u00dc\u00e4\u00ea\u0126\u0128\u0138\u013e"+ + "\u0146\u014a\u015e\u0163\u0167\u017c\u0182\u0185\u0191\u0194\u019a\u019d"+ + "\u01a5\u01ac\u01b4\u01bc\u01c8\u01cb\u01d2\u01db\u01de\u01e1\u01e6\u01e9"+ + "\u01fa"; + public static final ATN _ATN = + new ATNDeserializer().deserialize(_serializedATN.toCharArray()); + static { + _decisionToDFA = new DFA[_ATN.getNumberOfDecisions()]; + for (int i = 0; i < _ATN.getNumberOfDecisions(); i++) { + _decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i); + } + } +} \ No newline at end of file diff --git a/test/dataset_collection/test_dataset_collection.py b/test/dataset_collection/test_dataset_collection.py index e4ee2f17..eebaebd9 100644 --- a/test/dataset_collection/test_dataset_collection.py +++ b/test/dataset_collection/test_dataset_collection.py @@ -389,15 +389,10 @@ def test_check(self): inlined_function_declaration = [ x for x in class_decl.methods if x.name == 'trailer'][0] - target_function = [ - x for x in class_decl.methods - if x.name == 'rstatement'][0] result = find_lines_in_changed_file( new_full_filename=new_filename, - method_node=target_function, original_func=inlined_function_declaration, class_name='PainlessParser') self.assertEqual(result['invocation_method_start_line'], 1022) self.assertEqual(result['invocation_method_end_line'], 1083) - self.assertEqual(result['start_line_of_function_where_invocation_occurred'], 544) diff --git a/test/integration/dataset_collection.py b/test/integration/dataset_collection.py index 2ef826b8..605a533c 100644 --- a/test/integration/dataset_collection.py +++ b/test/integration/dataset_collection.py @@ -5,12 +5,22 @@ import pandas as pd from tqdm import tqdm +import difflib +import pprint from veniq.dataset_collection.augmentation import analyze_file - +import traceback class IntegrationDatasetCollection(TestCase): + def diff_dicts(self, a, b): + if a == b: + return '' + return '\n'.join( + difflib.ndiff(pprint.pformat(a, width=30).splitlines(), + pprint.pformat(b, width=30).splitlines()) + ) + def test_dataset_collection(self): samples_path = Path(__file__).absolute().parent / "dataset_collection" # ignore output filename, cause it is not so important @@ -40,6 +50,7 @@ def test_dataset_collection(self): ) from e new_results = pd.DataFrame(columns=[ + 'project' 'input_filename', 'class_name', 'invocation_text_string', @@ -55,6 +66,7 @@ def test_dataset_collection(self): for x in results_output: x['input_filename'] = 
str(Path(x['input_filename']).name).split('_')[0] + '.java' + del x['output_filename'] + del x['project'] + new_results = new_results.append(x, ignore_index=True) df = pd.DataFrame(new_results) @@ -65,4 +77,9 @@ def test_dataset_collection(self): df_diff = pd.concat([new_results, results_predefined]).drop_duplicates(keep=False) size_of_difference = df_diff.shape[0] print(f'Difference in dataframes: {size_of_difference} rows') - self.assertEqual(size_of_difference, 0) + + try: + self.assertEqual(size_of_difference, 0) + except AssertionError: + print(self.diff_dicts(new_results.to_dict(), results_predefined.to_dict())) + raise diff --git a/test/integration/results_predefined.csv b/test/integration/results_predefined.csv index ec331da2..1990a3e8 100644 --- a/test/integration/results_predefined.csv +++ b/test/integration/results_predefined.csv @@ -1,9 +1,9 @@ ,input_filename,class_name,invocation_text_string,method_where_invocation_occurred,start_line_of_function_where_invocation_occurred,invocation_method_name,invocation_method_start_line,invocation_method_end_line,can_be_parsed,inline_insertion_line_start,inline_insertion_line_end -0,GlobalShortcutConfigForm.java,GlobalShortcutConfigForm,"this.initComponents();",GlobalShortcutConfigForm,25,initComponents,113,196,TRUE,29,110 -2,HudFragment.java,HudFragment,"showLaunchConfirm();",addWaveTable,409,showLaunchConfirm,375,393,TRUE,471,487 -1,HudFragment.java,HudFragment,"toggleMenus();",build,36,toggleMenus,406,411,TRUE,107,110 -3,PlanetDialog.java,PlanetDialog,"makeBloom();",PlanetDialog,56,makeBloom,116,126,TRUE,58,66 -4,PlanetDialog.java,PlanetDialog,"updateSelected();",PlanetDialog,56,updateSelected,277,315,TRUE,100,136 -5,ReaderHandler.java,ReaderHandler,"receiveMessage();",onWebSocketConnect,132,receiveMessage,74,130,TRUE,134,188 -6,ToggleProfilingPointAction.java,ToggleProfilingPointAction,"nextFactory();",actionPerformed,165,nextFactory,266,271,TRUE,201,204 -7,ToggleProfilingPointAction.java,ToggleProfilingPointAction,"resetFactories();",actionPerformed,165,resetFactories,273,279,TRUE,206,210 +0,GlobalShortcutConfigForm.java,GlobalShortcutConfigForm,this.initComponents();,GlobalShortcutConfigForm,25,initComponents,113,196,True,29,110 +2,HudFragment.java,HudFragment,showLaunchConfirm();,addWaveTable,409,showLaunchConfirm,375,393,True,471,487 +1,HudFragment.java,HudFragment,toggleMenus();,build,36,toggleMenus,406,411,True,107,110 +3,PlanetDialog.java,PlanetDialog,makeBloom();,PlanetDialog,56,makeBloom,116,126,True,58,66 +4,PlanetDialog.java,PlanetDialog,updateSelected();,PlanetDialog,56,updateSelected,277,315,True,100,136 +5,ReaderHandler.java,ReaderHandler,receiveMessage();,onWebSocketConnect,132,receiveMessage,74,130,True,134,188 +6,ToggleProfilingPointAction.java,ToggleProfilingPointAction,nextFactory();,actionPerformed,165,nextFactory,266,271,True,201,204 +7,ToggleProfilingPointAction.java,ToggleProfilingPointAction,resetFactories();,actionPerformed,165,resetFactories,273,279,True,206,210 diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index c14f2985..f9884a58 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -318,7 +318,8 @@ def insert_code_with_new_file_creation( 'invocation_text_string': text_lines[invocation_node.line - 1].lstrip(), 'method_where_invocation_occurred': method_node.name, 'invocation_method_name': original_func.name, - 'output_filename': new_full_filename + 'output_filename': new_full_filename, + 
'start_line_of_function_where_invocation_occurred': method_node.line } inline_method_bounds = algorithm_for_inlining().inline_function( @@ -335,10 +336,8 @@ def insert_code_with_new_file_creation( if get_ast_if_possible(new_full_filename): rest_of_csv_row_for_changed_file = find_lines_in_changed_file( class_name=class_name, - method_node=method_node, new_full_filename=new_full_filename, original_func=original_func) - can_be_parsed = True line_to_csv.update(rest_of_csv_row_for_changed_file) else: @@ -352,7 +351,6 @@ def insert_code_with_new_file_creation( # type: ignore def find_lines_in_changed_file( new_full_filename: Path, - method_node: ASTNode, original_func: ASTNode, class_name: str) -> Dict[str, Any]: """ @@ -369,12 +367,6 @@ def find_lines_in_changed_file( x for x in changed_ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) if x.name == class_name][0] class_subtree = changed_ast.get_subtree(class_node_of_changed_file) - methods_and_constructors = \ - list(class_node_of_changed_file.methods) + list( - class_subtree.get_proxy_nodes( - ASTNodeType.CONSTRUCTOR_DECLARATION)) - node = [x for x in methods_and_constructors - if x.name == method_node.name][0] # type: ignore original_func_changed = [ x for x in class_node_of_changed_file.methods if x.name == original_func.name][0] @@ -382,8 +374,7 @@ def find_lines_in_changed_file( body_start_line, body_end_line = method_body_lines(original_func_changed, new_full_filename) return { 'invocation_method_start_line': body_start_line, - 'invocation_method_end_line': body_end_line, - 'start_line_of_function_where_invocation_occurred': node.line + 'invocation_method_end_line': body_end_line } else: return {} @@ -461,30 +452,27 @@ def analyze_file( methods_list = list(class_declaration.methods) + list(class_declaration.constructors) for method_node in methods_list: - # Ignore overloaded target methods - found_methods_decl = method_declarations.get(method_node.name, []) - if len(found_methods_decl) == 1: - method_decl = ast.get_subtree(method_node) - for method_invoked in method_decl.get_proxy_nodes( - ASTNodeType.METHOD_INVOCATION): - found_method_decl = method_declarations.get(method_invoked.member, []) - # ignore overloaded extracted functions - if len(found_method_decl) == 1: - try: - make_insertion( - ast, - class_declaration, - dst_filename, - found_method_decl, - method_declarations, - method_invoked, - method_node, - output_path, - file_path, - results - ) - except Exception as e: - print('Error has happened during file analyze: ' + str(e)) + method_decl = ast.get_subtree(method_node) + for method_invoked in method_decl.get_proxy_nodes( + ASTNodeType.METHOD_INVOCATION): + found_method_decl = method_declarations.get(method_invoked.member, []) + # ignore overloaded extracted functions + if len(found_method_decl) == 1: + try: + make_insertion( + ast, + class_declaration, + dst_filename, + found_method_decl, + method_declarations, + method_invoked, + method_node, + output_path, + file_path, + results + ) + except Exception as e: + print('Error has happened during file analyze: ' + str(e)) if not results: dst_filename.unlink() From c58a2755419ce71217f886f149604bce84666669 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Mon, 14 Dec 2020 13:37:33 +0300 Subject: [PATCH 24/52] Fix stupid flake --- test/integration/dataset_collection.py | 6 +++--- veniq/dataset_collection/augmentation.py | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/test/integration/dataset_collection.py b/test/integration/dataset_collection.py index 
605a533c..61b1ee63 100644 --- a/test/integration/dataset_collection.py +++ b/test/integration/dataset_collection.py @@ -1,3 +1,5 @@ +import difflib +import pprint import tempfile from os import listdir from pathlib import Path @@ -5,11 +7,9 @@ import pandas as pd from tqdm import tqdm -import difflib -import pprint from veniq.dataset_collection.augmentation import analyze_file -import traceback + class IntegrationDatasetCollection(TestCase): diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index f9884a58..8271ba78 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -366,7 +366,6 @@ def find_lines_in_changed_file( class_node_of_changed_file = [ x for x in changed_ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) if x.name == class_name][0] - class_subtree = changed_ast.get_subtree(class_node_of_changed_file) original_func_changed = [ x for x in class_node_of_changed_file.methods if x.name == original_func.name][0] From 6cff2b276969b624f22961756c3311f3aa4adbf0 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Mon, 14 Dec 2020 13:40:12 +0300 Subject: [PATCH 25/52] Fix print --- veniq/dataset_collection/augmentation.py | 1 - 1 file changed, 1 deletion(-) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 8271ba78..6f769b70 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -618,7 +618,6 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: for i in single_file_features: # get local path for inlined filename i['output_filename'] = i['output_filename'].relative_to(os.getcwd()).as_posix() - print(i['output_filename'], filename) i['invocation_text_string'] = str(i['invocation_text_string']).encode('utf8') df = df.append(i, ignore_index=True) From d6513873f78cb0f02ac4bb10db9952a936ec65bf Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Mon, 14 Dec 2020 18:19:38 +0300 Subject: [PATCH 26/52] Add script for statistics collection --- .../dataset_collection/check_synth_dataset.py | 255 ++++++++++++++++++ 1 file changed, 255 insertions(+) create mode 100644 veniq/dataset_collection/check_synth_dataset.py diff --git a/veniq/dataset_collection/check_synth_dataset.py b/veniq/dataset_collection/check_synth_dataset.py new file mode 100644 index 00000000..18d8ccf6 --- /dev/null +++ b/veniq/dataset_collection/check_synth_dataset.py @@ -0,0 +1,255 @@ +import os +from argparse import ArgumentParser +from functools import partial +from pathlib import Path + +import pandas as pd +from enum import Enum + +from pebble import ProcessPool +from tqdm import tqdm + +from veniq.utils.ast_builder import build_ast +from veniq.ast_framework import AST, ASTNodeType +from veniq.utils.encoding_detector import read_text_with_autodetected_encoding +from veniq.dataset_collection.augmentation import method_body_lines + +import traceback + + +class FunctionExist(Enum): + ERROR = -1 + CLASS_NOT_FOUND = 0 + OVERLOADED_FUNC = 1 + FUNCTION_NO_FOUND = 2 + FUNCTION_LINES_NOT_MATCHED = 3 + FUNCTION_LINES_MATCHED = 4 + + +def was_not_inlined(inline_insertion_line_start, invocation_text_string, output_filename): + text = read_text_with_autodetected_encoding(output_filename).split('\n') + invocation_text_line = text[inline_insertion_line_start - 1] + return invocation_text_line == invocation_text_string + + +def check_invocation_inside_target( + invocation_line: int, + start_line, + end_line): + if 
(invocation_line >= start_line) and (invocation_line <= end_line): + return True + + return False + + +def check_function_start_end_line( + filename: str, + class_name: str, + function_name: str, + start_line: int, + end_line=None): + ast = AST.build_from_javalang(build_ast(filename)) + class_decl = [ + x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) + if x.name == class_name] + + if len(class_decl) == 0: + return FunctionExist.CLASS_NOT_FOUND + else: + class_decl = class_decl[0] + ctrs = [x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION)] + + all_considered_methods = list([x for x in class_decl.methods]) + ctrs + functions = [x for x in all_considered_methods if x.name == function_name] + + if len(functions) == 0: + return (None, None), FunctionExist.FUNCTION_EXISTS.name + elif len(functions) > 1: + return (None, None), FunctionExist.OVERLOADED_FUNC.name + else: + original_func = functions[0] + body_start_line, body_end_line = method_body_lines(original_func, Path(filename)) + + if end_line is None: + if original_func.line == start_line: + return (original_func.line, body_end_line), FunctionExist.FUNCTION_LINES_MATCHED.name + else: + return (None, None), FunctionExist.FUNCTION_LINES_NOT_MATCHED.name + + elif (body_start_line == start_line) and (body_end_line == end_line): + return (body_start_line, body_end_line), FunctionExist.FUNCTION_LINES_MATCHED.name + elif (body_start_line != start_line) or (body_end_line != end_line): + return (None, None), FunctionExist.FUNCTION_LINES_NOT_MATCHED.name + + return (None, None), FunctionExist.ERROR.name + + +def make_check(series: pd.Series, output_path: str): + _, row = series + output_filename = str(Path(output_path, row['output_filename'])) + class_name = row['class_name'] + invocation_text_string = row['invocation_text_string'] + method_where_invocation_occurred = row['method_where_invocation_occurred'] + start_line_of_function_where_invocation_occurred = row['start_line_of_function_where_invocation_occurred'] + invocation_method_name = row['invocation_method_name'] + invocation_method_start_line = row['invocation_method_start_line'] + invocation_method_end_line = row['invocation_method_end_line'] + # can_be_parsed = row['can_be_parsed'] + inline_insertion_line_start = row['inline_insertion_line_start'] + inline_insertion_line_end = row['inline_insertion_line_end'] + + _, row['are_inlined_lines_matched'] = check_function_start_end_line( + filename=output_filename, + class_name=class_name, + function_name=invocation_method_name, + start_line=invocation_method_start_line, + end_line=invocation_method_end_line + ) + + lines, row['are_target_lines_matched'] = check_function_start_end_line( + filename=output_filename, + class_name=class_name, + function_name=method_where_invocation_occurred, + start_line=start_line_of_function_where_invocation_occurred + ) + + body_start_line, body_end_line = lines + + if body_start_line and body_end_line: + row['insertion_start_line_inside_target'] = check_invocation_inside_target( + inline_insertion_line_start, body_start_line, body_end_line) + row['insertion_end_line_inside_target'] = check_invocation_inside_target( + inline_insertion_line_end, body_start_line, body_end_line) + else: + row['insertion_start_line_inside_target'] = row['insertion_end_line_inside_target'] = False + + row['was_not_inlined'] = was_not_inlined( + inline_insertion_line_start, + invocation_text_string, + output_filename + ) + + return row + + +if __name__ == '__main__': # noqa: C901 + system_cores_qty = 
os.cpu_count() or 1 + parser = ArgumentParser() + parser.add_argument( + "-d", "--dir", + required=True, + help="File path to JAVA source code for methods augmentations" + ) + parser.add_argument( + "--jobs", + "-j", + type=int, + default=system_cores_qty - 1, + help="Number of processes to spawn. " + "By default one less than number of cores. " + "Be careful to raise it above, machine may stop responding while creating dataset.", + ) + parser.add_argument( + "--csv", + type=str, + required=True, + help='Csv file for dataset' + ) + + args = parser.parse_args() + + df = pd.read_csv(args.csv) + columns = [x for x in df.columns if x.find('Unnamed') < 0] \ + + ['are_target_lines_matched', + 'are_inlined_lines_matched', + 'was_not_inlined', + 'insertion_start_line_inside_target', + 'insertion_end_line_inside_target' + ] + new_df = pd.DataFrame(columns=columns) + + with ProcessPool(system_cores_qty) as executor: + p_check = partial( + make_check, + output_path=args.dir, + ) + future = executor.map(p_check, df.iterrows()) + result = future.result() + + for _, row in tqdm(df.iterrows(), total=df.shape[0]): + try: + res = next(result) + new_df = new_df.append(res[columns], ignore_index=True) + + new_df.to_csv('checked.csv') + except Exception as e: + print(f'Exception in {_} case') + traceback.print_exc() + + duplicateRowsDF = new_df[new_df.duplicated()] + print(f'Duplicated rows: {duplicateRowsDF.shape[0]}') + was_not_inlined_df = new_df[new_df['was_not_inlined']] + print(f'Was not inlined {was_not_inlined_df.shape[0]}') + + insertion_start_line_inside_target = \ + new_df[~(new_df['insertion_start_line_inside_target'] & new_df['insertion_end_line_inside_target'])] + + print(f'Samples where insertion were outside target function {insertion_start_line_inside_target.shape[0]}') + + negative_insertion = new_df[ + new_df['invocation_method_start_line'] - new_df['invocation_method_end_line'] + ] + print(f'Negative insertions: {negative_insertion.shape[0]}') + ################################################################################################################# + are_inlined_lines_matched = new_df[ + new_df['are_inlined_lines_matched'] == FunctionExist.ERROR.name + ] + print(f'Samples where error happened ' + f'when checking inlined\'s range {are_inlined_lines_matched.shape[0]}') + are_inlined_lines_matched = new_df[ + new_df['are_inlined_lines_matched'] == FunctionExist.CLASS_NOT_FOUND.name + ] + print(f'Samples where class was not found ' + f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') + are_inlined_lines_matched = new_df[ + new_df['are_target_lines_matched'] == FunctionExist.OVERLOADED_FUNC.name + ] + print(f'Samples where inlined function is overloaded' + f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') + are_inlined_lines_matched = new_df[ + new_df['are_inlined_lines_matched'] == FunctionExist.FUNCTION_NO_FOUND.name + ] + print(f'Samples where inlined function was not found' + f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') + are_inlined_lines_matched = new_df[ + new_df['are_inlined_lines_matched'] == FunctionExist.FUNCTION_LINES_MATCHED.name + ] + print(f'Samples where lines of inlined function matched' + f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') + + ######################################################################################### + target_lines_matched = new_df[ + new_df['are_target_lines_matched'] == FunctionExist.ERROR.name + ] + print(f'Samples where error happened ' + 
f'when checking target\'s range {target_lines_matched.shape[0]}') + target_lines_matched = new_df[ + new_df['are_target_lines_matched'] == FunctionExist.CLASS_NOT_FOUND.name + ] + print(f'Samples where class was not found ' + f'when checking target\'s range {target_lines_matched.shape[0]}') + target_lines_matched = new_df[ + new_df['are_target_lines_matched'] == FunctionExist.OVERLOADED_FUNC.name + ] + print(f'Samples where target function is overloaded' + f'when checking target\'s range {target_lines_matched.shape[0]}') + target_lines_matched = new_df[ + new_df['are_target_lines_matched'] == FunctionExist.FUNCTION_NO_FOUND.name + ] + print(f'Samples where target function was not found' + f'when checking target\'s range {target_lines_matched.shape[0]}') + target_lines_matched = new_df[ + new_df['are_target_lines_matched'] == FunctionExist.FUNCTION_LINES_MATCHED.name + ] + print(f'Samples where lines of target function matched' + f'when checking target\'s range {target_lines_matched.shape[0]}') From f852bf9fc0c0c7592e05cb021c23d347067a7b82 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Tue, 15 Dec 2020 11:06:41 +0300 Subject: [PATCH 27/52] Fix some code --- veniq/dataset_collection/check_synth_dataset.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/veniq/dataset_collection/check_synth_dataset.py b/veniq/dataset_collection/check_synth_dataset.py index 18d8ccf6..c89d01df 100644 --- a/veniq/dataset_collection/check_synth_dataset.py +++ b/veniq/dataset_collection/check_synth_dataset.py @@ -158,7 +158,8 @@ def make_check(series: pd.Series, output_path: str): args = parser.parse_args() - df = pd.read_csv(args.csv) + full_df = pd.read_csv(args.csv) + df = full_df[full_df['can_be_parsed']] columns = [x for x in df.columns if x.find('Unnamed') < 0] \ + ['are_target_lines_matched', 'are_inlined_lines_matched', @@ -196,10 +197,13 @@ def make_check(series: pd.Series, output_path: str): print(f'Samples where insertion were outside target function {insertion_start_line_inside_target.shape[0]}') - negative_insertion = new_df[ - new_df['invocation_method_start_line'] - new_df['invocation_method_end_line'] - ] - print(f'Negative insertions: {negative_insertion.shape[0]}') + new_df['score_diff'] = new_df['invocation_method_start_line'].sub(new_df['invocation_method_end_line'], axis=0) + negative_insertions = new_df[new_df['score_diff'] < 0] + print(f'Negative insertions: {negative_insertions.shape[0]}') + + can_be_parsed = full_df['can_be_parsed'].shape[0] + print(f'Cases when insertion was made ' + f'but it cannot be parsed {can_be_parsed}') ################################################################################################################# are_inlined_lines_matched = new_df[ new_df['are_inlined_lines_matched'] == FunctionExist.ERROR.name From 72dd3c0c9884cf8b8bf430ca0d73fcd8eb5da2e9 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Tue, 15 Dec 2020 12:13:01 +0300 Subject: [PATCH 28/52] Fix integration tests and add overloaded filter --- test/integration/dataset_collection.py | 5 +- veniq/dataset_collection/augmentation.py | 71 +++++++++++------------- 2 files changed, 37 insertions(+), 39 deletions(-) diff --git a/test/integration/dataset_collection.py b/test/integration/dataset_collection.py index 61b1ee63..8364ab0e 100644 --- a/test/integration/dataset_collection.py +++ b/test/integration/dataset_collection.py @@ -1,4 +1,5 @@ import difflib +import json import pprint import tempfile from os import listdir @@ -50,7 +51,6 @@ def 
test_dataset_collection(self): ) from e new_results = pd.DataFrame(columns=[ - 'project' 'input_filename', 'class_name', 'invocation_text_string', @@ -65,6 +65,7 @@ def test_dataset_collection(self): ]) for x in results_output: x['input_filename'] = str(Path(x['input_filename']).name).split('_')[0] + '.java' + x['invocation_text_string'] = x['invocation_text_string'].strip() del x['output_filename'] del x['project'] new_results = new_results.append(x, ignore_index=True) @@ -74,6 +75,8 @@ def test_dataset_collection(self): df = pd.read_csv(Path(__file__).absolute().parent / 'results_predefined.csv', index_col=0) results_predefined = df.sort_values(by=df.columns.to_list()) + # json.dump(new_results.to_dict(), open('new_results.json', 'w')) + # json.dump(results_predefined.to_dict(), open('results_predefined.json', 'w')) df_diff = pd.concat([new_results, results_predefined]).drop_duplicates(keep=False) size_of_difference = df_diff.shape[0] print(f'Difference in dataframes: {size_of_difference} rows') diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 6f769b70..77391b4e 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -447,31 +447,38 @@ def analyze_file( ] for class_ast in classes_declaration: class_declaration = class_ast.get_root() - collect_info_about_functions_without_params(class_declaration, method_declarations) + methods_list: List[ASTNode] = \ + list(class_declaration.methods) \ + + list(class_declaration.constructors) + collect_info_about_functions_without_params(method_declarations, methods_list) - methods_list = list(class_declaration.methods) + list(class_declaration.constructors) for method_node in methods_list: method_decl = ast.get_subtree(method_node) - for method_invoked in method_decl.get_proxy_nodes( - ASTNodeType.METHOD_INVOCATION): - found_method_decl = method_declarations.get(method_invoked.member, []) - # ignore overloaded extracted functions - if len(found_method_decl) == 1: - try: - make_insertion( - ast, - class_declaration, - dst_filename, - found_method_decl, - method_declarations, - method_invoked, - method_node, - output_path, - file_path, - results - ) - except Exception as e: - print('Error has happened during file analyze: ' + str(e)) + found_functions = method_declarations.get(method_node.name, []) + # we do not consider overloaded constructors and functions + # as target functions + if len(found_functions) == 1: + for invocation_node in method_decl.get_proxy_nodes( + ASTNodeType.METHOD_INVOCATION): + extracted_function = method_declarations.get(invocation_node.member, []) + # ignore overloaded extracted functions + if len(extracted_function) == 1: + if not extracted_function[0].parameters: + try: + make_insertion( + ast, + class_declaration, + dst_filename, + extracted_function, + method_declarations, + invocation_node, + method_node, + output_path, + file_path, + results + ) + except Exception as e: + print('Error has happened during file analyze: ' + str(e)) if not results: dst_filename.unlink() @@ -503,22 +510,10 @@ def make_insertion(ast, class_declaration, dst_filename, found_method_decl, meth def collect_info_about_functions_without_params( - class_declaration: ASTNode, - method_declarations: Dict[str, List[ASTNode]]) -> None: - for method in class_declaration.methods: - if not method.parameters: - method_declarations[method.name].append(method) - - -# def save_input_file(input_dir: Path, filename: Path) -> Path: -# # need to avoid situation when 
filenames are the same -# hash_path = hashlib.sha256(str(filename.parent).encode('utf-8')).hexdigest() -# dst_filename = input_dir / f'{filename.stem}_{hash_path}.java' -# if not dst_filename.parent.exists(): -# dst_filename.parent.mkdir(parents=True) -# if not dst_filename.exists(): -# shutil.copyfile(filename, dst_filename) -# return dst_filename + method_declarations: Dict[str, List[ASTNode]], + list_of_considered_nodes: List[ASTNode]) -> None: + for node in list_of_considered_nodes: + method_declarations[node.name].append(node) def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: From 0ac171944f50407f855cfd8c19c91a0d5cbbe473 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Tue, 15 Dec 2020 12:24:39 +0300 Subject: [PATCH 29/52] Fix issue-111 --- test/integration/dataset_collection.py | 5 +---- veniq/dataset_collection/augmentation.py | 3 ++- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/test/integration/dataset_collection.py b/test/integration/dataset_collection.py index 8364ab0e..6dedd94a 100644 --- a/test/integration/dataset_collection.py +++ b/test/integration/dataset_collection.py @@ -1,5 +1,4 @@ import difflib -import json import pprint import tempfile from os import listdir @@ -75,8 +74,6 @@ def test_dataset_collection(self): df = pd.read_csv(Path(__file__).absolute().parent / 'results_predefined.csv', index_col=0) results_predefined = df.sort_values(by=df.columns.to_list()) - # json.dump(new_results.to_dict(), open('new_results.json', 'w')) - # json.dump(results_predefined.to_dict(), open('results_predefined.json', 'w')) df_diff = pd.concat([new_results, results_predefined]).drop_duplicates(keep=False) size_of_difference = df_diff.shape[0] print(f'Difference in dataframes: {size_of_difference} rows') @@ -85,4 +82,4 @@ def test_dataset_collection(self): self.assertEqual(size_of_difference, 0) except AssertionError as e: print(self.diff_dicts(new_results.to_dict(), results_predefined.to_dict())) - raise e + raise e \ No newline at end of file diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 77391b4e..075909e0 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -415,6 +415,7 @@ def _replacer(match): return regex.sub(_replacer, string) +# flake8: noqa: C901 def analyze_file( file_path: Path, output_path: Path, @@ -654,4 +655,4 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: shutil.rmtree(full_dataset_folder) if small_dataset_folder.exists(): - shutil.rmtree(small_dataset_folder) + shutil.rmtree(small_dataset_folder) \ No newline at end of file From 375690fe4deaeb7486ae6cedc11be73cbf62be2a Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Tue, 15 Dec 2020 12:36:31 +0300 Subject: [PATCH 30/52] Fix flake8 --- test/integration/dataset_collection.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/dataset_collection.py b/test/integration/dataset_collection.py index 6dedd94a..8b717150 100644 --- a/test/integration/dataset_collection.py +++ b/test/integration/dataset_collection.py @@ -82,4 +82,4 @@ def test_dataset_collection(self): self.assertEqual(size_of_difference, 0) except AssertionError as e: print(self.diff_dicts(new_results.to_dict(), results_predefined.to_dict())) - raise e \ No newline at end of file + raise e From 4e725a7585090b8dc18b79cf1ba365c2e7426a95 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Tue, 15 Dec 2020 18:33:21 +0300 Subject: [PATCH 31/52] Add 
fixes --- .../dataset_collection/check_synth_dataset.py | 74 ++++++++++++------- 1 file changed, 47 insertions(+), 27 deletions(-) diff --git a/veniq/dataset_collection/check_synth_dataset.py b/veniq/dataset_collection/check_synth_dataset.py index c89d01df..24969aaf 100644 --- a/veniq/dataset_collection/check_synth_dataset.py +++ b/veniq/dataset_collection/check_synth_dataset.py @@ -32,16 +32,6 @@ def was_not_inlined(inline_insertion_line_start, invocation_text_string, output_ return invocation_text_line == invocation_text_string -def check_invocation_inside_target( - invocation_line: int, - start_line, - end_line): - if (invocation_line >= start_line) and (invocation_line <= end_line): - return True - - return False - - def check_function_start_end_line( filename: str, class_name: str, @@ -116,12 +106,16 @@ def make_check(series: pd.Series, output_path: str): body_start_line, body_end_line = lines if body_start_line and body_end_line: - row['insertion_start_line_inside_target'] = check_invocation_inside_target( - inline_insertion_line_start, body_start_line, body_end_line) - row['insertion_end_line_inside_target'] = check_invocation_inside_target( - inline_insertion_line_end, body_start_line, body_end_line) + is_inside_start = 1 if \ + inline_insertion_line_start < start_line_of_function_where_invocation_occurred \ + else 0 + row['insertion_start_line_inside_target'] = is_inside_start + is_inside_end = 1 if \ + inline_insertion_line_end > body_end_line \ + else 0 + row['insertion_end_line_inside_target'] = is_inside_end else: - row['insertion_start_line_inside_target'] = row['insertion_end_line_inside_target'] = False + row['insertion_start_line_inside_target'] = row['insertion_end_line_inside_target'] = -1 row['was_not_inlined'] = was_not_inlined( inline_insertion_line_start, @@ -187,73 +181,99 @@ def make_check(series: pd.Series, output_path: str): print(f'Exception in {_} case') traceback.print_exc() + filtered_df = new_df.copy() + + + def remove_indices(df_to_filter: pd.DataFrame): + rows = filtered_df.index[df_to_filter.index] + filtered_df.drop(rows, inplace=True) + duplicateRowsDF = new_df[new_df.duplicated()] print(f'Duplicated rows: {duplicateRowsDF.shape[0]}') + remove_indices(duplicateRowsDF) was_not_inlined_df = new_df[new_df['was_not_inlined']] + remove_indices(was_not_inlined_df) print(f'Was not inlined {was_not_inlined_df.shape[0]}') - insertion_start_line_inside_target = \ - new_df[~(new_df['insertion_start_line_inside_target'] & new_df['insertion_end_line_inside_target'])] - - print(f'Samples where insertion were outside target function {insertion_start_line_inside_target.shape[0]}') + insertion_start_line_inside_target = new_df[new_df['insertion_start_line_inside_target'] == 1] + remove_indices(insertion_start_line_inside_target) + print(f'Samples where insertion start line ' + f'was outside target function {insertion_start_line_inside_target.shape[0]}') + insertion_end_line_inside_target = new_df[new_df['insertion_end_line_inside_target'] == 1] + remove_indices(insertion_end_line_inside_target) + print(f'Samples where insertion end line ' + f'was outside target function {insertion_end_line_inside_target.shape[0]}') - new_df['score_diff'] = new_df['invocation_method_start_line'].sub(new_df['invocation_method_end_line'], axis=0) + new_df['score_diff'] = new_df['invocation_method_end_line'].sub(new_df['invocation_method_start_line'], axis=0) negative_insertions = new_df[new_df['score_diff'] < 0] + remove_indices(negative_insertions) print(f'Negative insertions: 
{negative_insertions.shape[0]}') - can_be_parsed = full_df['can_be_parsed'].shape[0] + can_be_parsed = full_df[~full_df['can_be_parsed']] print(f'Cases when insertion was made ' - f'but it cannot be parsed {can_be_parsed}') + f'but it cannot be parsed {can_be_parsed.shape[0]}') + remove_indices(can_be_parsed) ################################################################################################################# are_inlined_lines_matched = new_df[ new_df['are_inlined_lines_matched'] == FunctionExist.ERROR.name ] + remove_indices(are_inlined_lines_matched) print(f'Samples where error happened ' f'when checking inlined\'s range {are_inlined_lines_matched.shape[0]}') are_inlined_lines_matched = new_df[ new_df['are_inlined_lines_matched'] == FunctionExist.CLASS_NOT_FOUND.name ] + remove_indices(are_inlined_lines_matched) print(f'Samples where class was not found ' f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') are_inlined_lines_matched = new_df[ new_df['are_target_lines_matched'] == FunctionExist.OVERLOADED_FUNC.name ] - print(f'Samples where inlined function is overloaded' + remove_indices(are_inlined_lines_matched) + print(f'Samples where inlined function is overloaded ' f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') are_inlined_lines_matched = new_df[ new_df['are_inlined_lines_matched'] == FunctionExist.FUNCTION_NO_FOUND.name ] - print(f'Samples where inlined function was not found' + remove_indices(are_inlined_lines_matched) + print(f'Samples where inlined function was not found ' f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') are_inlined_lines_matched = new_df[ new_df['are_inlined_lines_matched'] == FunctionExist.FUNCTION_LINES_MATCHED.name ] - print(f'Samples where lines of inlined function matched' + print(f'Samples where lines of inlined function matched ' f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') - ######################################################################################### target_lines_matched = new_df[ new_df['are_target_lines_matched'] == FunctionExist.ERROR.name ] + remove_indices(target_lines_matched) print(f'Samples where error happened ' f'when checking target\'s range {target_lines_matched.shape[0]}') target_lines_matched = new_df[ new_df['are_target_lines_matched'] == FunctionExist.CLASS_NOT_FOUND.name ] + remove_indices(target_lines_matched) print(f'Samples where class was not found ' f'when checking target\'s range {target_lines_matched.shape[0]}') target_lines_matched = new_df[ new_df['are_target_lines_matched'] == FunctionExist.OVERLOADED_FUNC.name ] + remove_indices(target_lines_matched) print(f'Samples where target function is overloaded' f'when checking target\'s range {target_lines_matched.shape[0]}') target_lines_matched = new_df[ new_df['are_target_lines_matched'] == FunctionExist.FUNCTION_NO_FOUND.name ] + remove_indices(target_lines_matched) print(f'Samples where target function was not found' f'when checking target\'s range {target_lines_matched.shape[0]}') target_lines_matched = new_df[ new_df['are_target_lines_matched'] == FunctionExist.FUNCTION_LINES_MATCHED.name ] - print(f'Samples where lines of target function matched' + print(f'Samples where lines of target function matched ' f'when checking target\'s range {target_lines_matched.shape[0]}') + + print(f'After filtering we\'ve got {filtered_df.shape[0]} of {new_df.shape[0]}') + ratio = float(filtered_df.shape[0])/new_df.shape[0] + print(f'We have {ratio}% of all dataset') \ No 
newline at end of file From 7d06a83495a15cc7c119981636b748202f1c0e5f Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Wed, 16 Dec 2020 12:38:12 +0300 Subject: [PATCH 32/52] Fix contructor declaration --- veniq/dataset_collection/check_synth_dataset.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/veniq/dataset_collection/check_synth_dataset.py b/veniq/dataset_collection/check_synth_dataset.py index 24969aaf..d37220f7 100644 --- a/veniq/dataset_collection/check_synth_dataset.py +++ b/veniq/dataset_collection/check_synth_dataset.py @@ -47,7 +47,7 @@ def check_function_start_end_line( return FunctionExist.CLASS_NOT_FOUND else: class_decl = class_decl[0] - ctrs = [x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION)] + ctrs = [x for x in ast.get_proxy_nodes(ASTNodeType.CONSTRUCTOR_DECLARATION)] all_considered_methods = list([x for x in class_decl.methods]) + ctrs functions = [x for x in all_considered_methods if x.name == function_name] @@ -163,7 +163,7 @@ def make_check(series: pd.Series, output_path: str): ] new_df = pd.DataFrame(columns=columns) - with ProcessPool(system_cores_qty) as executor: + with ProcessPool(1) as executor: p_check = partial( make_check, output_path=args.dir, From 03d563899d38b7cfd06ba669b420e1751fc8ee19 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Wed, 16 Dec 2020 13:06:30 +0300 Subject: [PATCH 33/52] Fix bug --- .../dataset_collection/check_synth_dataset.py | 92 +++++++++++-------- 1 file changed, 53 insertions(+), 39 deletions(-) diff --git a/veniq/dataset_collection/check_synth_dataset.py b/veniq/dataset_collection/check_synth_dataset.py index d37220f7..38441654 100644 --- a/veniq/dataset_collection/check_synth_dataset.py +++ b/veniq/dataset_collection/check_synth_dataset.py @@ -29,7 +29,7 @@ class FunctionExist(Enum): def was_not_inlined(inline_insertion_line_start, invocation_text_string, output_filename): text = read_text_with_autodetected_encoding(output_filename).split('\n') invocation_text_line = text[inline_insertion_line_start - 1] - return invocation_text_line == invocation_text_string + return invocation_text_line.strip() == invocation_text_string.strip() def check_function_start_end_line( @@ -183,11 +183,11 @@ def make_check(series: pd.Series, output_path: str): filtered_df = new_df.copy() - def remove_indices(df_to_filter: pd.DataFrame): - rows = filtered_df.index[df_to_filter.index] - filtered_df.drop(rows, inplace=True) + rows = filtered_df[filtered_df.index.isin(df_to_filter.index)] + filtered_df.drop(rows.index, inplace=True) + print(f'Total lines: {new_df.shape[0]}') duplicateRowsDF = new_df[new_df.duplicated()] print(f'Duplicated rows: {duplicateRowsDF.shape[0]}') remove_indices(duplicateRowsDF) @@ -212,68 +212,82 @@ def remove_indices(df_to_filter: pd.DataFrame): can_be_parsed = full_df[~full_df['can_be_parsed']] print(f'Cases when insertion was made ' f'but it cannot be parsed {can_be_parsed.shape[0]}') - remove_indices(can_be_parsed) ################################################################################################################# - are_inlined_lines_matched = new_df[ + temp_df = new_df[ new_df['are_inlined_lines_matched'] == FunctionExist.ERROR.name ] - remove_indices(are_inlined_lines_matched) + remove_indices(temp_df) print(f'Samples where error happened ' - f'when checking inlined\'s range {are_inlined_lines_matched.shape[0]}') - are_inlined_lines_matched = new_df[ + f'when checking inlined\'s range {temp_df.shape[0]}') + temp_df = new_df[ new_df['are_inlined_lines_matched'] == 
FunctionExist.CLASS_NOT_FOUND.name ] - remove_indices(are_inlined_lines_matched) + remove_indices(temp_df) print(f'Samples where class was not found ' - f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') - are_inlined_lines_matched = new_df[ + f'when checking inlines\'s range {temp_df.shape[0]}') + temp_df = new_df[ new_df['are_target_lines_matched'] == FunctionExist.OVERLOADED_FUNC.name ] - remove_indices(are_inlined_lines_matched) + remove_indices(temp_df) print(f'Samples where inlined function is overloaded ' - f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') - are_inlined_lines_matched = new_df[ + f'when checking inlines\'s range {temp_df.shape[0]}') + temp_df = new_df[ new_df['are_inlined_lines_matched'] == FunctionExist.FUNCTION_NO_FOUND.name ] - remove_indices(are_inlined_lines_matched) + remove_indices(temp_df) print(f'Samples where inlined function was not found ' - f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') - are_inlined_lines_matched = new_df[ + f'when checking inlines\'s range {temp_df.shape[0]}') + temp_df = new_df[ + new_df['are_inlined_lines_matched'] == FunctionExist.FUNCTION_LINES_NOT_MATCHED.name + ] + print(f'Samples where lines of inlined function were not matched ' + f'when checking inlines\'s range {temp_df.shape[0]}') + remove_indices(temp_df) + temp_df = new_df[ new_df['are_inlined_lines_matched'] == FunctionExist.FUNCTION_LINES_MATCHED.name ] - print(f'Samples where lines of inlined function matched ' - f'when checking inlines\'s range {are_inlined_lines_matched.shape[0]}') + print(f'Samples where lines of inlined function were matched ' + f'when checking inlines\'s range {temp_df.shape[0]}') ######################################################################################### - target_lines_matched = new_df[ + temp_df = new_df[ new_df['are_target_lines_matched'] == FunctionExist.ERROR.name ] - remove_indices(target_lines_matched) + remove_indices(temp_df) print(f'Samples where error happened ' - f'when checking target\'s range {target_lines_matched.shape[0]}') - target_lines_matched = new_df[ + f'when checking target\'s range {temp_df.shape[0]}') + temp_df = new_df[ new_df['are_target_lines_matched'] == FunctionExist.CLASS_NOT_FOUND.name ] - remove_indices(target_lines_matched) + remove_indices(temp_df) print(f'Samples where class was not found ' - f'when checking target\'s range {target_lines_matched.shape[0]}') - target_lines_matched = new_df[ + f'when checking target\'s range {temp_df.shape[0]}') + temp_df = new_df[ new_df['are_target_lines_matched'] == FunctionExist.OVERLOADED_FUNC.name ] - remove_indices(target_lines_matched) - print(f'Samples where target function is overloaded' - f'when checking target\'s range {target_lines_matched.shape[0]}') + remove_indices(temp_df) + print(f'Samples where target function is overloaded ' + f'when checking target\'s range {temp_df.shape[0]}') target_lines_matched = new_df[ new_df['are_target_lines_matched'] == FunctionExist.FUNCTION_NO_FOUND.name ] - remove_indices(target_lines_matched) - print(f'Samples where target function was not found' - f'when checking target\'s range {target_lines_matched.shape[0]}') - target_lines_matched = new_df[ + remove_indices(temp_df) + print(f'Samples where target function was not found ' + f'when checking target\'s range {temp_df.shape[0]}') + + temp_df = new_df[ + new_df['are_target_lines_matched'] == FunctionExist.FUNCTION_LINES_NOT_MATCHED.name + ] + print(f'Samples where lines of target function were not 
matched ' + f'when checking inlines\'s range {temp_df.shape[0]}') + remove_indices(temp_df) + temp_df = new_df[ new_df['are_target_lines_matched'] == FunctionExist.FUNCTION_LINES_MATCHED.name ] - print(f'Samples where lines of target function matched ' - f'when checking target\'s range {target_lines_matched.shape[0]}') - print(f'After filtering we\'ve got {filtered_df.shape[0]} of {new_df.shape[0]}') - ratio = float(filtered_df.shape[0])/new_df.shape[0] - print(f'We have {ratio}% of all dataset') \ No newline at end of file + print(f'Samples where lines of target function were matched ' + f'when checking target\'s range {temp_df.shape[0]}') + + filtered_size = filtered_df.shape[0] - can_be_parsed.shape[0] + print(f'After filtering we\'ve got {filtered_size} of {new_df.shape[0]}') + ratio = float(filtered_size)/new_df.shape[0] + print(f'We have {ratio}% correct samples of all dataset') From 3879e6d258615e90e19baa253ea45b64223af5c2 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Wed, 16 Dec 2020 13:28:53 +0300 Subject: [PATCH 34/52] Fix sys_Cores --- veniq/dataset_collection/check_synth_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/veniq/dataset_collection/check_synth_dataset.py b/veniq/dataset_collection/check_synth_dataset.py index 38441654..6c749d3d 100644 --- a/veniq/dataset_collection/check_synth_dataset.py +++ b/veniq/dataset_collection/check_synth_dataset.py @@ -163,7 +163,7 @@ def make_check(series: pd.Series, output_path: str): ] new_df = pd.DataFrame(columns=columns) - with ProcessPool(1) as executor: + with ProcessPool(system_cores_qty) as executor: p_check = partial( make_check, output_path=args.dir, From d6f28384e8db6c86c910d0bd62727f4c277a3b2b Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Wed, 16 Dec 2020 17:10:15 +0300 Subject: [PATCH 35/52] Fix was_not_inlined_function --- veniq/dataset_collection/check_synth_dataset.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/veniq/dataset_collection/check_synth_dataset.py b/veniq/dataset_collection/check_synth_dataset.py index 6c749d3d..6f3e5ea5 100644 --- a/veniq/dataset_collection/check_synth_dataset.py +++ b/veniq/dataset_collection/check_synth_dataset.py @@ -2,7 +2,7 @@ from argparse import ArgumentParser from functools import partial from pathlib import Path - +import ast import pandas as pd from enum import Enum @@ -29,7 +29,7 @@ class FunctionExist(Enum): def was_not_inlined(inline_insertion_line_start, invocation_text_string, output_filename): text = read_text_with_autodetected_encoding(output_filename).split('\n') invocation_text_line = text[inline_insertion_line_start - 1] - return invocation_text_line.strip() == invocation_text_string.strip() + return invocation_text_line.strip() == invocation_text_string.decode('utf-8').strip() def check_function_start_end_line( @@ -154,6 +154,8 @@ def make_check(series: pd.Series, output_path: str): full_df = pd.read_csv(args.csv) df = full_df[full_df['can_be_parsed']] + df['invocation_text_string'] = df['invocation_text_string'].apply(ast.literal_eval) + columns = [x for x in df.columns if x.find('Unnamed') < 0] \ + ['are_target_lines_matched', 'are_inlined_lines_matched', @@ -163,7 +165,7 @@ def make_check(series: pd.Series, output_path: str): ] new_df = pd.DataFrame(columns=columns) - with ProcessPool(system_cores_qty) as executor: + with ProcessPool(1) as executor: p_check = partial( make_check, output_path=args.dir, @@ -287,7 +289,7 @@ def remove_indices(df_to_filter: pd.DataFrame): print(f'Samples where lines of 
target function were matched ' f'when checking target\'s range {temp_df.shape[0]}') - filtered_size = filtered_df.shape[0] - can_be_parsed.shape[0] + filtered_size = filtered_df.shape[0] print(f'After filtering we\'ve got {filtered_size} of {new_df.shape[0]}') ratio = float(filtered_size)/new_df.shape[0] print(f'We have {ratio}% correct samples of all dataset') From 0e5e9fde18aaf8089c11133afc970d37122605ed Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Wed, 16 Dec 2020 17:25:31 +0300 Subject: [PATCH 36/52] Fix gain sys_core --- veniq/dataset_collection/check_synth_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/veniq/dataset_collection/check_synth_dataset.py b/veniq/dataset_collection/check_synth_dataset.py index 6f3e5ea5..34d2a0d5 100644 --- a/veniq/dataset_collection/check_synth_dataset.py +++ b/veniq/dataset_collection/check_synth_dataset.py @@ -165,7 +165,7 @@ def make_check(series: pd.Series, output_path: str): ] new_df = pd.DataFrame(columns=columns) - with ProcessPool(1) as executor: + with ProcessPool(system_cores_qty) as executor: p_check = partial( make_check, output_path=args.dir, From fb0e3e6e24d454c2352a4f37b1479ae7ed009d8e Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 17 Dec 2020 12:15:55 +0300 Subject: [PATCH 37/52] Fix f flake8 --- .../dataset_collection/check_synth_dataset.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/veniq/dataset_collection/check_synth_dataset.py b/veniq/dataset_collection/check_synth_dataset.py index 34d2a0d5..495d50a4 100644 --- a/veniq/dataset_collection/check_synth_dataset.py +++ b/veniq/dataset_collection/check_synth_dataset.py @@ -156,13 +156,13 @@ def make_check(series: pd.Series, output_path: str): df = full_df[full_df['can_be_parsed']] df['invocation_text_string'] = df['invocation_text_string'].apply(ast.literal_eval) - columns = [x for x in df.columns if x.find('Unnamed') < 0] \ - + ['are_target_lines_matched', - 'are_inlined_lines_matched', - 'was_not_inlined', - 'insertion_start_line_inside_target', - 'insertion_end_line_inside_target' - ] + columns = [x for x in df.columns if x.find('Unnamed') < 0] + [ + 'are_target_lines_matched', + 'are_inlined_lines_matched', + 'was_not_inlined', + 'insertion_start_line_inside_target', + 'insertion_end_line_inside_target' + ] new_df = pd.DataFrame(columns=columns) with ProcessPool(system_cores_qty) as executor: @@ -179,7 +179,7 @@ def make_check(series: pd.Series, output_path: str): new_df = new_df.append(res[columns], ignore_index=True) new_df.to_csv('checked.csv') - except Exception as e: + except Exception: print(f'Exception in {_} case') traceback.print_exc() @@ -241,7 +241,7 @@ def remove_indices(df_to_filter: pd.DataFrame): f'when checking inlines\'s range {temp_df.shape[0]}') temp_df = new_df[ new_df['are_inlined_lines_matched'] == FunctionExist.FUNCTION_LINES_NOT_MATCHED.name - ] + ] print(f'Samples where lines of inlined function were not matched ' f'when checking inlines\'s range {temp_df.shape[0]}') remove_indices(temp_df) @@ -278,7 +278,7 @@ def remove_indices(df_to_filter: pd.DataFrame): temp_df = new_df[ new_df['are_target_lines_matched'] == FunctionExist.FUNCTION_LINES_NOT_MATCHED.name - ] + ] print(f'Samples where lines of target function were not matched ' f'when checking inlines\'s range {temp_df.shape[0]}') remove_indices(temp_df) @@ -291,5 +291,5 @@ def remove_indices(df_to_filter: pd.DataFrame): filtered_size = filtered_df.shape[0] print(f'After filtering we\'ve got {filtered_size} of 
{new_df.shape[0]}') - ratio = float(filtered_size)/new_df.shape[0] + ratio = float(filtered_size) / new_df.shape[0] print(f'We have {ratio}% correct samples of all dataset') From 417432b1d3cc18cde042c70b15d27559cc70e1bc Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 17 Dec 2020 12:42:41 +0300 Subject: [PATCH 38/52] Fix bug --- veniq/dataset_collection/check_synth_dataset.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/veniq/dataset_collection/check_synth_dataset.py b/veniq/dataset_collection/check_synth_dataset.py index 495d50a4..35026313 100644 --- a/veniq/dataset_collection/check_synth_dataset.py +++ b/veniq/dataset_collection/check_synth_dataset.py @@ -21,7 +21,7 @@ class FunctionExist(Enum): ERROR = -1 CLASS_NOT_FOUND = 0 OVERLOADED_FUNC = 1 - FUNCTION_NO_FOUND = 2 + FUNCTION_NOT_FOUND = 2 FUNCTION_LINES_NOT_MATCHED = 3 FUNCTION_LINES_MATCHED = 4 @@ -46,14 +46,14 @@ def check_function_start_end_line( if len(class_decl) == 0: return FunctionExist.CLASS_NOT_FOUND else: - class_decl = class_decl[0] + class_decl = class_decl[0] # type: ignore ctrs = [x for x in ast.get_proxy_nodes(ASTNodeType.CONSTRUCTOR_DECLARATION)] - all_considered_methods = list([x for x in class_decl.methods]) + ctrs + all_considered_methods = list([x for x in class_decl.methods]) + ctrs # type: ignore functions = [x for x in all_considered_methods if x.name == function_name] if len(functions) == 0: - return (None, None), FunctionExist.FUNCTION_EXISTS.name + return (None, None), FunctionExist.FUNCTION_NOT_FOUND.name elif len(functions) > 1: return (None, None), FunctionExist.OVERLOADED_FUNC.name else: @@ -234,7 +234,7 @@ def remove_indices(df_to_filter: pd.DataFrame): print(f'Samples where inlined function is overloaded ' f'when checking inlines\'s range {temp_df.shape[0]}') temp_df = new_df[ - new_df['are_inlined_lines_matched'] == FunctionExist.FUNCTION_NO_FOUND.name + new_df['are_inlined_lines_matched'] == FunctionExist.FUNCTION_NOT_FOUND.name ] remove_indices(temp_df) print(f'Samples where inlined function was not found ' @@ -270,7 +270,7 @@ def remove_indices(df_to_filter: pd.DataFrame): print(f'Samples where target function is overloaded ' f'when checking target\'s range {temp_df.shape[0]}') target_lines_matched = new_df[ - new_df['are_target_lines_matched'] == FunctionExist.FUNCTION_NO_FOUND.name + new_df['are_target_lines_matched'] == FunctionExist.FUNCTION_NOT_FOUND.name ] remove_indices(temp_df) print(f'Samples where target function was not found ' From 19b831a254c7ec8f7cb12cb1383e41de5fd372e4 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Wed, 13 Jan 2021 20:05:29 +0300 Subject: [PATCH 39/52] First version of stats --- veniq/dataset_collection/augmentation.py | 245 ++++++++++++----------- 1 file changed, 133 insertions(+), 112 deletions(-) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 9e2f25d0..82de8d98 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -136,7 +136,9 @@ class InvocationType(Enum): SEVERAL_RETURNS = 15 IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS = 16 IS_NOT_PARENT_MEMBER_REF = 17 - METHOD_WITH_ARGUMENTS = 18 + EXTRACTED_NCSS_LARGE = 18 + METHOD_WITH_ARGUMENTS = 19 + @classmethod def list_types(cls): @@ -257,43 +259,43 @@ def get_stats_for_pruned_cases( is_not_several_returns, is_not_ternary, method_invoked) -> List[str]: invocation_types_to_ignore: List[str] = [] - if is_not_chain_before: + if not is_not_chain_before: 
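# Each `is_not_*` flag passed into get_stats_for_pruned_cases is True when the
# invocation is SAFE to inline, so a pruning reason must be recorded when the
# flag is False -- hence the `if X:` -> `if not X:` inversion applied to every
# branch of this function. A minimal sketch of the corrected pattern, reusing
# names from this hunk:
#
#     if not is_not_chain_before:
#         invocation_types_to_ignore.append(InvocationType.METHOD_CHAIN_BEFORE.name)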
invocation_types_to_ignore.append(InvocationType.METHOD_CHAIN_BEFORE.name) - if is_actual_parameter_simple: + if not is_actual_parameter_simple: invocation_types_to_ignore.append(InvocationType.SIMPLE_ACTUAL_PARAMETER.name) - if is_not_chain_after: + if not is_not_chain_after: invocation_types_to_ignore.append(InvocationType.METHOD_CHAIN_AFTER.name) - if is_not_inside_if: + if not is_not_inside_if: invocation_types_to_ignore.append(InvocationType.INSIDE_IF.name) - if is_not_inside_while: + if not is_not_inside_while: invocation_types_to_ignore.append(InvocationType.INSIDE_WHILE.name) - if is_not_binary_operation: + if not is_not_binary_operation: invocation_types_to_ignore.append(InvocationType.INSIDE_BINARY_OPERATION.name) - if is_not_ternary: + if not is_not_ternary: invocation_types_to_ignore.append(InvocationType.INSIDE_TERNARY.name) - if is_not_class_creator: + if not is_not_class_creator: invocation_types_to_ignore.append(InvocationType.INSIDE_CLASS_CREATOR.name) - if is_not_cast: + if not is_not_cast: invocation_types_to_ignore.append(InvocationType.TYPE_CAST.name) - if is_not_array_creator: + if not is_not_array_creator: invocation_types_to_ignore.append(InvocationType.INSIDE_ARRAY_CREATOR.name) - if is_not_parent_member_ref: + if not is_not_parent_member_ref: invocation_types_to_ignore.append(InvocationType.IS_NOT_PARENT_MEMBER_REF.name) - if is_not_inside_for: + if not is_not_inside_for: invocation_types_to_ignore.append(InvocationType.INSIDE_FOR.name) - if is_not_enhanced_for_control: + if not is_not_enhanced_for_control: invocation_types_to_ignore.append(InvocationType.INSIDE_FOREACH.name) - if is_not_lambda: + if not is_not_lambda: invocation_types_to_ignore.append(InvocationType.INSIDE_LAMBDA.name) - if is_not_method_inv_single_statement_in_if: + if not is_not_method_inv_single_statement_in_if: invocation_types_to_ignore.append(InvocationType.SINGLE_STATEMENT_IN_IF.name) - if is_not_assign_value_with_return_type: + if not is_not_assign_value_with_return_type: invocation_types_to_ignore.append(InvocationType.IS_NOT_ASSIGN_VALUE_WITH_RETURN_TYPE.name) - if is_not_several_returns: + if not is_not_several_returns: invocation_types_to_ignore.append(InvocationType.SEVERAL_RETURNS.name) - if is_not_at_the_same_line_as_prohibited_stats: + if not is_not_at_the_same_line_as_prohibited_stats: invocation_types_to_ignore.append(InvocationType.IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS.name) - if not method_invoked.argument: + if method_invoked.arguments: invocation_types_to_ignore.append(InvocationType.METHOD_WITH_ARGUMENTS.name) return invocation_types_to_ignore @@ -399,60 +401,57 @@ def insert_code_with_new_file_creation( new_full_filename = Path(output_path, f'{file_name}_{method_node.name}_{invocation_node.line}.java') original_func = dict_original_invocations.get(invocation_node.member)[0] # type: ignore - ncss_extracted = NCSSMetric().value(ast.get_subtree(original_func)) + # ncss_extracted = NCSSMetric().value(ast.get_subtree(original_func)) line_to_csv = {} # @acheshkov asked to consider only methods with ncss > 3, that's all. 
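# The ncss > 3 rule from the comment above now lives in make_insertion()
# rather than here; a minimal sketch of the equivalent gate, assuming the
# NCSSMetric class already imported by this module (NCSS counts
# non-commenting source statements):
#
#     ncss_extracted = NCSSMetric().value(ast.get_subtree(original_func))
#     if ncss_extracted < 3:
#         return {}  # extracted method too small to be a useful sample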
- if ncss_extracted >= 3: - ncss_target = NCSSMetric().value(ast.get_subtree(method_node)) - body_start_line, body_end_line = method_body_lines(original_func, file_path) - text_lines = read_text_with_autodetected_encoding(str(file_path)).split('\n') - # we do not inline one-line methods like - # public String getRemainingString() {return str.substring(index);} - if body_start_line != body_end_line: - algorithm_type = determine_algorithm_insertion_type( - ast, - method_node, - invocation_node, - dict_original_invocations + # ncss_target = NCSSMetric().value(ast.get_subtree(method_node)) + body_start_line, body_end_line = method_body_lines(original_func, file_path) + text_lines = read_text_with_autodetected_encoding(str(file_path)).split('\n') + # we do not inline one-line methods like + # public String getRemainingString() {return str.substring(index);} + if body_start_line != body_end_line: + algorithm_type = determine_algorithm_insertion_type( + ast, + method_node, + invocation_node, + dict_original_invocations + ) + algorithm_for_inlining = AlgorithmFactory().create_obj(algorithm_type) + if algorithm_type != InlineTypesAlgorithms.DO_NOTHING: + line_to_csv = { + 'original_filename': file_path, + 'class_name': class_name, + 'invocation_line_string': text_lines[invocation_node.line - 1].lstrip().encode('utf-8').decode('utf-8'), + 'target_method': method_node.name, + 'extract_method_name': original_func.name, + 'output_filename': Path(new_full_filename).name, + 'target_method_start_line': method_node.line, + # 'ncss_extracted': ncss_extracted, + # 'ncss_target': ncss_target + } + + inline_method_bounds = algorithm_for_inlining().inline_function( + file_path, + invocation_node.line, + body_start_line, + body_end_line, + new_full_filename, ) - algorithm_for_inlining = AlgorithmFactory().create_obj(algorithm_type) - if algorithm_type != InlineTypesAlgorithms.DO_NOTHING: - Path(source_filepath) - line_to_csv = { - 'project_id': source_filepath, - 'original_filename': file_path, - 'class_name': class_name, - 'invocation_line_string': text_lines[invocation_node.line - 1].lstrip(), - 'target_method': method_node.name, - 'extract_method_name': original_func.name, - 'output_filename': new_full_filename, - 'target_method_start_line': method_node.line, - 'ncss_extracted': ncss_extracted, - 'ncss_target': ncss_extracted - } - - inline_method_bounds = algorithm_for_inlining().inline_function( - file_path, - invocation_node.line, - body_start_line, - body_end_line, - new_full_filename, - ) - if inline_method_bounds is not None: - line_to_csv['insertion_start'] = inline_method_bounds[0] - line_to_csv['insertion_end'] = inline_method_bounds[1] - - if get_ast_if_possible(new_full_filename): - rest_of_csv_row_for_changed_file = find_lines_in_changed_file( - class_name=class_name, - new_full_filename=new_full_filename, - original_func=original_func) - is_valid_ast = True - line_to_csv.update(rest_of_csv_row_for_changed_file) - else: - is_valid_ast = False - - line_to_csv['is_valid_ast'] = is_valid_ast + if inline_method_bounds is not None: + line_to_csv['insertion_start'] = inline_method_bounds[0] + line_to_csv['insertion_end'] = inline_method_bounds[1] + + if get_ast_if_possible(new_full_filename): + rest_of_csv_row_for_changed_file = find_lines_in_changed_file( + class_name=class_name, + new_full_filename=new_full_filename, + original_func=original_func) + is_valid_ast = True + line_to_csv.update(rest_of_csv_row_for_changed_file) + else: + is_valid_ast = False + + line_to_csv['is_valid_ast'] = is_valid_ast 
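# The dict assembled above becomes one row of the output CSV; a minimal
# sketch of the consuming side as it appears later in this module (df and
# csv_output are defined in the __main__ block; append is the pre-2.0
# pandas API used throughout this file):
#
#     df = df.append(line_to_csv, ignore_index=True)
#     df.to_csv(csv_output)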
return line_to_csv @@ -528,7 +527,8 @@ def _replacer(match): def analyze_file( file_path: Path, output_path: Path, - input_dir: Path + input_dir: Path, + dataset_dir: str ) -> List[Any]: """ In this function we process each file. @@ -544,10 +544,6 @@ def analyze_file( text = "\n".join([ll.rstrip() for ll in text_without_comments.splitlines() if ll.strip()]) dst_filename = save_text_to_new_file(input_dir, text, file_path) - # remove full_dataset/input prefix - # real_input_dataset_path = Path('/'.join(Path(input_dir).absolute().parts[:-2])) - project_id = Path(dst_filename.absolute()).relative_to(input_dir.absolute()).parts[:2] - print(project_id) ast = get_ast_if_possible(dst_filename) if ast is None: dst_filename.unlink() @@ -574,26 +570,25 @@ def analyze_file( if len(found_functions) == 1: for invocation_node in method_decl.get_proxy_nodes( ASTNodeType.METHOD_INVOCATION): - extracted_function = method_declarations.get(invocation_node.member, []) + extracted_function_method_decl = method_declarations.get(invocation_node.member, []) # ignore overloaded extracted functions - if len(extracted_function) == 1: - if not extracted_function[0].parameters: - try: - make_insertion( - ast, - class_declaration, - dst_filename, - extracted_function, - method_declarations, - invocation_node, - method_node, - output_path, - file_path, - results, - input_dir - ) - except Exception as e: - print('Error has happened during file analyze: ' + str(e)) + if len(extracted_function_method_decl) == 1: + try: + make_insertion( + ast, + class_declaration, + dst_filename, + extracted_function_method_decl, + method_declarations, + invocation_node, + method_node, + output_path, + file_path, + results, + dataset_dir + ) + except Exception as e: + print('Error has happened during file analyze: ' + str(e)) if not results: dst_filename.unlink() @@ -602,28 +597,54 @@ def analyze_file( def make_insertion(ast, class_declaration, dst_filename, found_method_decl, method_declarations, method_invoked, - method_node, output_path, source_filepath, results, input_dir): + method_node, output_path, source_filepath, results, dataset_dir): ignored_cases = is_match_to_the_conditions( ast, method_invoked, found_method_decl[0] ) - # if ignored_cases: - log_of_inline = insert_code_with_new_file_creation( - class_declaration.name, - ast, - method_node, - method_invoked, - dst_filename, - output_path, - method_declarations, - source_filepath) + + original_func = method_declarations.get(method_invoked.member)[0] # type: ignore + ncss_extracted = NCSSMetric().value(ast.get_subtree(original_func)) + ncss_target = NCSSMetric().value(ast.get_subtree(method_node)) + log_of_inline = { + 'extracted_method': method_invoked.member, + 'target_method': method_node.name, + 'ncss_extracted': ncss_extracted, + 'ncss_target': ncss_target, + } + + if not ignored_cases: + if ncss_extracted >= 3: + log_of_inline['EXTRACTED_NCSS_LARGE'] = True + log_of_inline = insert_code_with_new_file_creation( + class_declaration.name, + ast, + method_node, + method_invoked, + dst_filename, + output_path, + method_declarations, + source_filepath) + + if log_of_inline: + log_of_inline['OK'] = True + # default initialization + for case_name in InvocationType.list_types(): + log_of_inline[case_name] = False + + # found ignored cases for case_name in ignored_cases: log_of_inline[case_name] = True if log_of_inline: # change source filename, since it will be changed - log_of_inline['original_filename'] = str(dst_filename.as_posix()) + log_of_inline['original_filename'] = 
dst_filename.name + # remove full_dataset/input prefix + # real_input_dataset_path = Path('/'.join(Path(input_dir).absolute().parts[:-2])) + project_id = '/'.join(Path(source_filepath.absolute()).relative_to(Path(dataset_dir).absolute()).parts[:2]) + # print(dst_filename.absolute(), input_dir.absolute(), project_id) + log_of_inline['project_id'] = project_id results.append(log_of_inline) @@ -721,13 +742,14 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: p_analyze = partial( analyze_file, output_path=output_dir.absolute(), - input_dir=input_dir + input_dir=input_dir, + dataset_dir=args.dir ) future = executor.map(p_analyze, files_without_tests, timeout=1000, ) result = future.result() # each 100 cycles we dump the results - iteration_cycle = 1000 + iteration_cycle = 10 iteration_number = 0 for filename in tqdm(files_without_tests): try: @@ -736,10 +758,7 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: for i in single_file_features: # get local path for inlined filename # i['output_filename'] = i['output_filename'].relative_to(os.getcwd()).as_posix() - i['output_filename'] = i['output_filename'].name - i['original_filename'] = i['original_filename'].name # print(i['output_filename'], filename) - i['invocation_line_string'] = str(i['invocation_line_string']).encode('utf8') df = df.append(i, ignore_index=True) if (iteration_number % iteration_cycle) == 0: @@ -747,6 +766,8 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: iteration_number += 1 except Exception as e: print(str(e)) + import traceback + traceback.print_exc() df.to_csv(csv_output) if args.zip: From e2dbb5dda67a992a084240bcb1872c0e59ed0716 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 14 Jan 2021 14:10:40 +0300 Subject: [PATCH 40/52] Fix some filters --- veniq/dataset_collection/augmentation.py | 199 +++++++++++++---------- veniq/dataset_collection/full_stats.py | 56 +++++++ 2 files changed, 172 insertions(+), 83 deletions(-) create mode 100644 veniq/dataset_collection/full_stats.py diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 82de8d98..7f21cf3b 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -89,9 +89,9 @@ def check_nesting_statements( at the same line as prohibited statements. 
""" prohibited_statements = [ - ASTNodeType.IF_STATEMENT, - ASTNodeType.WHILE_STATEMENT, - ASTNodeType.FOR_STATEMENT, + # ASTNodeType.IF_STATEMENT, + # ASTNodeType.WHILE_STATEMENT, + # ASTNodeType.FOR_STATEMENT, ASTNodeType.SYNCHRONIZED_STATEMENT, ASTNodeType.CATCH_CLAUSE, ASTNodeType.SUPER_CONSTRUCTOR_INVOCATION, @@ -117,27 +117,30 @@ def check_nesting_statements( class InvocationType(Enum): - OK = 0 + # OK = 0 METHOD_CHAIN_BEFORE = 1 - SIMPLE_ACTUAL_PARAMETER = 2 + NOT_SIMPLE_ACTUAL_PARAMETER = 2 METHOD_CHAIN_AFTER = 3 - INSIDE_IF = 3 - INSIDE_WHILE = 4 - INSIDE_FOR = 5 - INSIDE_FOREACH = 6 - INSIDE_BINARY_OPERATION = 7 - INSIDE_TERNARY = 8 - INSIDE_CLASS_CREATOR = 9 - TYPE_CAST = 10 - INSIDE_ARRAY_CREATOR = 11 - SINGLE_STATEMENT_IN_IF = 12 - INSIDE_LAMBDA = 13 - IS_NOT_ASSIGN_VALUE_WITH_RETURN_TYPE = 14 - SEVERAL_RETURNS = 15 - IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS = 16 - IS_NOT_PARENT_MEMBER_REF = 17 - EXTRACTED_NCSS_LARGE = 18 - METHOD_WITH_ARGUMENTS = 19 + INSIDE_IF = 4 + INSIDE_WHILE = 5 + INSIDE_FOR = 6 + INSIDE_FOREACH = 7 + INSIDE_BINARY_OPERATION = 8 + INSIDE_TERNARY = 9 + INSIDE_CLASS_CREATOR = 10 + CAST_OF_RETURN_TYPE = 11 + INSIDE_ARRAY_CREATOR = 12 + SINGLE_STATEMENT_IN_IF = 13 + INSIDE_LAMBDA = 14 + ALREADY_ASSIGNED_VALUE_IN_INVOCATION = 15 + SEVERAL_RETURNS = 16 + IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS = 17 + IS_NOT_PARENT_MEMBER_REF = 18 + # EXTRACTED_NCSS_SMALL = 19 + CROSSED_VAR_NAMES = 20 + CAST_IN_ACTUAL_PARAMS = 21 + ABSTRACT_METHOD = 22 + METHOD_WITH_ARGUMENTS = 999 @classmethod @@ -161,6 +164,10 @@ def is_match_to_the_conditions( parent = method_invoked.parent no_children = True + is_not_is_extract_method_abstract = True + if 'abstract' in found_method_decl.modifiers: + is_not_is_extract_method_abstract = False + maybe_if = parent.parent is_not_method_inv_single_statement_in_if = True if maybe_if.node_type == ASTNodeType.IF_STATEMENT: @@ -193,8 +200,14 @@ def is_match_to_the_conditions( # if a parameter is any expression, we ignore it, # since it is difficult to extract with AST is_actual_parameter_simple = all([hasattr(x, 'member') for x in method_invoked.arguments]) + is_not_actual_param_cast = True + if not is_actual_parameter_simple: + found_casts = [x for x in method_invoked.arguments if x.node_type == ASTNodeType.CAST] + if len(found_casts) > 0: + is_not_actual_param_cast = False + is_not_class_creator = not (parent.node_type == ASTNodeType.CLASS_CREATOR) - is_not_cast = not (parent.node_type == ASTNodeType.CAST) + is_not_cast_of_return_type = not (parent.node_type == ASTNodeType.CAST) is_not_array_creator = not (parent.node_type == ASTNodeType.ARRAY_CREATOR) is_not_lambda = not (parent.node_type == ASTNodeType.LAMBDA_EXPRESSION) is_not_at_the_same_line_as_prohibited_stats = check_nesting_statements(method_invoked) @@ -205,7 +218,7 @@ def is_match_to_the_conditions( is_not_assign_value_with_return_type, is_not_at_the_same_line_as_prohibited_stats, is_not_binary_operation, - is_not_cast, + is_not_cast_of_return_type, is_not_chain_after, is_not_chain_before, is_not_class_creator, @@ -218,6 +231,8 @@ def is_match_to_the_conditions( is_not_parent_member_ref, is_not_several_returns, is_not_ternary, + is_not_actual_param_cast, + is_not_is_extract_method_abstract, method_invoked ) @@ -244,25 +259,27 @@ def is_match_to_the_conditions( # if (not method_invoked.qualifier and other_requirements) or \ # (method_invoked.qualifier == 'this' and other_requirements): - if (not method_invoked.qualifier) or (method_invoked.qualifier == 'this'): - return 
ignored_cases - else: - return [] + return ignored_cases def get_stats_for_pruned_cases( is_actual_parameter_simple, is_not_array_creator, is_not_assign_value_with_return_type, - is_not_at_the_same_line_as_prohibited_stats, is_not_binary_operation, is_not_cast, - is_not_chain_after, is_not_chain_before, is_not_class_creator, + is_not_at_the_same_line_as_prohibited_stats, is_not_binary_operation, + is_not_cast_of_return_type, is_not_chain_after, is_not_chain_before, is_not_class_creator, is_not_enhanced_for_control, is_not_inside_for, is_not_inside_if, is_not_inside_while, is_not_lambda, is_not_method_inv_single_statement_in_if, is_not_parent_member_ref, - is_not_several_returns, is_not_ternary, method_invoked) -> List[str]: + is_not_several_returns, is_not_ternary, is_not_actual_param_cast, + is_not_is_extract_method_abstract, method_invoked) -> List[str]: invocation_types_to_ignore: List[str] = [] + if not is_not_is_extract_method_abstract: + invocation_types_to_ignore.append(InvocationType.ABSTRACT_METHOD.name) if not is_not_chain_before: invocation_types_to_ignore.append(InvocationType.METHOD_CHAIN_BEFORE.name) if not is_actual_parameter_simple: - invocation_types_to_ignore.append(InvocationType.SIMPLE_ACTUAL_PARAMETER.name) + invocation_types_to_ignore.append(InvocationType.NOT_SIMPLE_ACTUAL_PARAMETER.name) + if not is_not_actual_param_cast: + invocation_types_to_ignore.append(InvocationType.CAST_IN_ACTUAL_PARAMS.name) if not is_not_chain_after: invocation_types_to_ignore.append(InvocationType.METHOD_CHAIN_AFTER.name) if not is_not_inside_if: @@ -275,8 +292,8 @@ def get_stats_for_pruned_cases( invocation_types_to_ignore.append(InvocationType.INSIDE_TERNARY.name) if not is_not_class_creator: invocation_types_to_ignore.append(InvocationType.INSIDE_CLASS_CREATOR.name) - if not is_not_cast: - invocation_types_to_ignore.append(InvocationType.TYPE_CAST.name) + if not is_not_cast_of_return_type: + invocation_types_to_ignore.append(InvocationType.CAST_OF_RETURN_TYPE.name) if not is_not_array_creator: invocation_types_to_ignore.append(InvocationType.INSIDE_ARRAY_CREATOR.name) if not is_not_parent_member_ref: @@ -290,7 +307,7 @@ def get_stats_for_pruned_cases( if not is_not_method_inv_single_statement_in_if: invocation_types_to_ignore.append(InvocationType.SINGLE_STATEMENT_IN_IF.name) if not is_not_assign_value_with_return_type: - invocation_types_to_ignore.append(InvocationType.IS_NOT_ASSIGN_VALUE_WITH_RETURN_TYPE.name) + invocation_types_to_ignore.append(InvocationType.ALREADY_ASSIGNED_VALUE_IN_INVOCATION.name) if not is_not_several_returns: invocation_types_to_ignore.append(InvocationType.SEVERAL_RETURNS.name) if not is_not_at_the_same_line_as_prohibited_stats: @@ -303,9 +320,11 @@ def get_stats_for_pruned_cases( def check_whether_method_has_return_type( method_decl: AST, - var_decls: Set[str]) -> InlineTypesAlgorithms: + var_decls: Set[str], + line_to_csv: Dict[str, Any]) -> InlineTypesAlgorithms: """ Run function to check whether Method declaration can be inlined + :param line_to_csv: dict of insertion result as row for DataFrame :param method_decl: method, where invocation occurred :param var_decls: set of variables for found invoked method :return: enum InlineTypesAlgorithms @@ -319,6 +338,9 @@ def check_whether_method_has_return_type( if not var_decls or not intersected_names: return InlineTypesAlgorithms.WITHOUT_RETURN_WITHOUT_ARGUMENTS + if intersected_names: + line_to_csv[InvocationType.CROSSED_VAR_NAMES.name] = True + return InlineTypesAlgorithms.DO_NOTHING @@ -347,7 +369,8 @@ def 
determine_algorithm_insertion_type( ast: AST, method_node: ASTNode, invocation_node: ASTNode, - dict_original_nodes: Dict[str, List[ASTNode]] + dict_original_nodes: Dict[str, List[ASTNode]], + line_to_csv ) -> InlineTypesAlgorithms: """ @@ -371,7 +394,8 @@ def determine_algorithm_insertion_type( var_decls = set(get_variables_decl_in_node(ast.get_subtree(original_method))) return check_whether_method_has_return_type( ast.get_subtree(method_node), - var_decls + var_decls, + line_to_csv ) else: return InlineTypesAlgorithms.WITH_RETURN_WITHOUT_ARGUMENTS @@ -387,7 +411,7 @@ def insert_code_with_new_file_creation( file_path: Path, output_path: Path, dict_original_invocations: Dict[str, List[ASTNode]], - source_filepath: str + row_dict: Dict[str, Any] ) -> Dict[str, Any]: """ If invocations of class methods were found, @@ -402,7 +426,7 @@ def insert_code_with_new_file_creation( new_full_filename = Path(output_path, f'{file_name}_{method_node.name}_{invocation_node.line}.java') original_func = dict_original_invocations.get(invocation_node.member)[0] # type: ignore # ncss_extracted = NCSSMetric().value(ast.get_subtree(original_func)) - line_to_csv = {} + # line_to_csv = {} # @acheshkov asked to consider only methods with ncss > 3, that's all. # ncss_target = NCSSMetric().value(ast.get_subtree(method_node)) body_start_line, body_end_line = method_body_lines(original_func, file_path) @@ -414,21 +438,23 @@ def insert_code_with_new_file_creation( ast, method_node, invocation_node, - dict_original_invocations + dict_original_invocations, + row_dict ) algorithm_for_inlining = AlgorithmFactory().create_obj(algorithm_type) if algorithm_type != InlineTypesAlgorithms.DO_NOTHING: - line_to_csv = { + row_dict.update({ 'original_filename': file_path, 'class_name': class_name, 'invocation_line_string': text_lines[invocation_node.line - 1].lstrip().encode('utf-8').decode('utf-8'), 'target_method': method_node.name, - 'extract_method_name': original_func.name, + 'extract_method': original_func.name, 'output_filename': Path(new_full_filename).name, 'target_method_start_line': method_node.line, + 'do_nothing': False, # 'ncss_extracted': ncss_extracted, # 'ncss_target': ncss_target - } + }) inline_method_bounds = algorithm_for_inlining().inline_function( file_path, @@ -438,8 +464,8 @@ def insert_code_with_new_file_creation( new_full_filename, ) if inline_method_bounds is not None: - line_to_csv['insertion_start'] = inline_method_bounds[0] - line_to_csv['insertion_end'] = inline_method_bounds[1] + row_dict['insertion_start'] = inline_method_bounds[0] + row_dict['insertion_end'] = inline_method_bounds[1] if get_ast_if_possible(new_full_filename): rest_of_csv_row_for_changed_file = find_lines_in_changed_file( @@ -447,13 +473,15 @@ def insert_code_with_new_file_creation( new_full_filename=new_full_filename, original_func=original_func) is_valid_ast = True - line_to_csv.update(rest_of_csv_row_for_changed_file) + row_dict.update(rest_of_csv_row_for_changed_file) else: is_valid_ast = False - line_to_csv['is_valid_ast'] = is_valid_ast + row_dict['is_valid_ast'] = is_valid_ast + else: + row_dict['do_nothing'] = True - return line_to_csv + return row_dict # type: ignore @@ -562,6 +590,8 @@ def analyze_file( + list(class_declaration.constructors) collect_info_about_functions_without_params(method_declarations, methods_list) + if file_path.name.endswith('RedisRegistry.java'): + print(1) for method_node in methods_list: method_decl = ast.get_subtree(method_node) found_functions = method_declarations.get(method_node.name, []) @@ 
-598,26 +628,30 @@ def analyze_file( def make_insertion(ast, class_declaration, dst_filename, found_method_decl, method_declarations, method_invoked, method_node, output_path, source_filepath, results, dataset_dir): - ignored_cases = is_match_to_the_conditions( - ast, - method_invoked, - found_method_decl[0] - ) - original_func = method_declarations.get(method_invoked.member)[0] # type: ignore - ncss_extracted = NCSSMetric().value(ast.get_subtree(original_func)) - ncss_target = NCSSMetric().value(ast.get_subtree(method_node)) - log_of_inline = { - 'extracted_method': method_invoked.member, - 'target_method': method_node.name, - 'ncss_extracted': ncss_extracted, - 'ncss_target': ncss_target, - } - - if not ignored_cases: - if ncss_extracted >= 3: - log_of_inline['EXTRACTED_NCSS_LARGE'] = True - log_of_inline = insert_code_with_new_file_creation( + if (not method_invoked.qualifier) or (method_invoked.qualifier == 'this'): + ignored_cases = is_match_to_the_conditions( + ast, + method_invoked, + found_method_decl[0] + ) + + original_func = method_declarations.get(method_invoked.member)[0] # type: ignore + ncss_extracted = NCSSMetric().value(ast.get_subtree(original_func)) + ncss_target = NCSSMetric().value(ast.get_subtree(method_node)) + log_of_inline = { + 'extract_method': method_invoked.member, + 'target_method': method_node.name, + 'ncss_extracted': ncss_extracted, + 'ncss_target': ncss_target, + 'invocation_line_number_in_original_file': method_invoked.line + } + # default init + for case_name in InvocationType.list_types(): + log_of_inline[case_name] = False + + if not ignored_cases: + insert_code_with_new_file_creation( class_declaration.name, ast, method_node, @@ -625,19 +659,15 @@ def make_insertion(ast, class_declaration, dst_filename, found_method_decl, meth dst_filename, output_path, method_declarations, - source_filepath) - - if log_of_inline: - log_of_inline['OK'] = True - # default initialization - for case_name in InvocationType.list_types(): - log_of_inline[case_name] = False + log_of_inline) + log_of_inline['NO_IGNORED_CASES'] = True + else: + log_of_inline['NO_IGNORED_CASES'] = False - # found ignored cases - for case_name in ignored_cases: - log_of_inline[case_name] = True + # found ignored cases + for case_name in ignored_cases: + log_of_inline[case_name] = True - if log_of_inline: # change source filename, since it will be changed log_of_inline['original_filename'] = dst_filename.name # remove full_dataset/input prefix @@ -724,9 +754,10 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: 'original_filename', 'class_name', 'invocation_line_string', + 'invocation_line_number_in_original_file', 'target_method', 'target_method_start_line', - 'extract_method_name', + 'extract_method', 'extract_method_start_line', 'extract_method_end_line', 'output_filename', @@ -734,11 +765,13 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: 'insertion_start', 'insertion_end', 'ncss_target', - 'ncss_extracted' + 'ncss_extracted', + 'do_nothing', + 'NO_IGNORED_CASES' ] + [x for x in InvocationType.list_types()] df = pd.DataFrame(columns=columns) - with ProcessPool(1) as executor: + with ProcessPool(system_cores_qty) as executor: p_analyze = partial( analyze_file, output_path=output_dir.absolute(), @@ -749,7 +782,7 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: result = future.result() # each 100 cycles we dump the results - iteration_cycle = 10 + iteration_cycle = 100 iteration_number = 0 for 
filename in tqdm(files_without_tests): try: diff --git a/veniq/dataset_collection/full_stats.py b/veniq/dataset_collection/full_stats.py new file mode 100644 index 00000000..6b4105b3 --- /dev/null +++ b/veniq/dataset_collection/full_stats.py @@ -0,0 +1,56 @@ +import pandas as pd + +df = pd.read_csv('out.csv') +immutable_df = df.copy() + + +def remove_indices(df_to_filter: pd.DataFrame): + rows = df[df.index.isin(df_to_filter.index)] + df.drop(rows.index, inplace=True) + + +print(f'Total lines: {df.shape[0]}') +duplicateRowsDF = immutable_df[immutable_df.duplicated()] +print(f'Duplicated rows: {duplicateRowsDF.shape[0]}') +remove_indices(duplicateRowsDF) +ncss_target = immutable_df[immutable_df['ncss_target'] > 2] +remove_indices(ncss_target) +print(f'ncss_target > 2 {ncss_target.shape[0]}') + +# REMOVE METHOD CHAINING SINCE IT IS not correct +# to inline them, we have different type objects, +# it's not a function of the original class +method_chain_before = immutable_df[immutable_df['METHOD_CHAIN_BEFORE'] == True] +remove_indices(method_chain_before) +method_chain_after = immutable_df[immutable_df['METHOD_CHAIN_AFTER'] == True] +remove_indices(method_chain_after) + +method_with_arguments = immutable_df[immutable_df['METHOD_WITH_ARGUMENTS'] == True] +print(f'Samples where extracted method has parameters: ' + f'{method_with_arguments.shape[0]}. We prune such methods') +remove_indices(method_with_arguments) + +crossed_var_names = immutable_df[immutable_df['CROSSED_VAR_NAMES'] == True] +remove_indices(crossed_var_names) +print(f'Samples where var names of extracted function is crossed with target method' + f'{crossed_var_names.shape[0]}') + +do_nothing = immutable_df[immutable_df['do_nothing'] == True] +remove_indices(do_nothing) +print(f'Samples where we rejected to inline by some reason' + f'{do_nothing.shape[0]}') + + +not_simple_actual_parameter = immutable_df[immutable_df['NOT_SIMPLE_ACTUAL_PARAMETER'] == True] +remove_indices(not_simple_actual_parameter) +print(f'Samples where actual parameter in invocation is not simple. 
Sometimes it matches the cast typing' + f'{not_simple_actual_parameter.shape[0]}') + + +# immutable_df['score_diff'] = immutable_df['invocation_method_end_line'].sub(immutable_df['invocation_method_start_line'], axis=0) +# negative_insertions = immutable_df[immutable_df['score_diff'] < 0] +# remove_indices(negative_insertions) +# print(f'Negative insertions: {negative_insertions.shape[0]}') +# +# print(f'Total cases: {df.shape[0]}') +# print(f'Target ncss 3: {df.shape[0]}') From 266f66db5def509fed8e63a3bcb5aea6f125a120 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Thu, 14 Jan 2021 17:51:12 +0300 Subject: [PATCH 41/52] Some fixes + collect stats --- veniq/dataset_collection/augmentation.py | 44 +++++--- veniq/dataset_collection/full_stats.py | 132 +++++++++++++++++++---- 2 files changed, 139 insertions(+), 37 deletions(-) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 7f21cf3b..a4c96dd1 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -177,14 +177,15 @@ def is_match_to_the_conditions( is_not_assign_value_with_return_type = True is_not_several_returns = True - if found_method_decl.return_type: - if parent.node_type == ASTNodeType.VARIABLE_DECLARATOR: - is_not_assign_value_with_return_type = False + if hasattr(found_method_decl, 'return_type'): + if found_method_decl.return_type: + if parent.node_type == ASTNodeType.VARIABLE_DECLARATOR: + is_not_assign_value_with_return_type = False - ast_subtree = ast.get_subtree(found_method_decl) - stats = [x for x in ast_subtree.get_proxy_nodes(ASTNodeType.RETURN_STATEMENT)] - if len(stats) > 1: - is_not_several_returns = False + ast_subtree = ast.get_subtree(found_method_decl) + stats = [x for x in ast_subtree.get_proxy_nodes(ASTNodeType.RETURN_STATEMENT)] + if len(stats) > 1: + is_not_several_returns = False is_not_parent_member_ref = not (method_invoked.parent.node_type == ASTNodeType.MEMBER_REFERENCE) is_not_chain_before = not (parent.node_type == ASTNodeType.METHOD_INVOCATION) and no_children @@ -389,20 +390,31 @@ def determine_algorithm_insertion_type( else: original_method = original_invoked_method[0] if not original_method.parameters: - if not original_method.return_type: - # Find the original method declaration by the name of method invocation - var_decls = set(get_variables_decl_in_node(ast.get_subtree(original_method))) - return check_whether_method_has_return_type( - ast.get_subtree(method_node), - var_decls, - line_to_csv - ) + + has_attr_return_type = hasattr(original_method, 'return_type') + if has_attr_return_type: + if not original_method.return_type: + return run_var_crossing_check(ast, line_to_csv, method_node, original_method) + else: + return InlineTypesAlgorithms.WITH_RETURN_WITHOUT_ARGUMENTS + # Else if we have constructor, it doesn't have return type else: - return InlineTypesAlgorithms.WITH_RETURN_WITHOUT_ARGUMENTS + return run_var_crossing_check(ast, line_to_csv, method_node, original_method) + else: return InlineTypesAlgorithms.DO_NOTHING +def run_var_crossing_check(ast, line_to_csv, method_node, original_method): + # Find the original method declaration by the name of method invocation + var_decls = set(get_variables_decl_in_node(ast.get_subtree(original_method))) + return check_whether_method_has_return_type( + ast.get_subtree(method_node), + var_decls, + line_to_csv + ) + + def insert_code_with_new_file_creation( class_name: str, ast: AST, diff --git a/veniq/dataset_collection/full_stats.py 
b/veniq/dataset_collection/full_stats.py index 6b4105b3..68af0627 100644 --- a/veniq/dataset_collection/full_stats.py +++ b/veniq/dataset_collection/full_stats.py @@ -1,51 +1,141 @@ import pandas as pd -df = pd.read_csv('out.csv') +df = pd.read_csv(r'd:\git\veniq\veniq\dataset_collection\new_dataset\full_dataset\out.csv') immutable_df = df.copy() -def remove_indices(df_to_filter: pd.DataFrame): - rows = df[df.index.isin(df_to_filter.index)] - df.drop(rows.index, inplace=True) +def remove_indices(df_to_filter: pd.DataFrame, src_df): + rows = src_df[src_df.index.isin(df_to_filter.index)] + src_df.drop(rows.index, inplace=True) print(f'Total lines: {df.shape[0]}') duplicateRowsDF = immutable_df[immutable_df.duplicated()] print(f'Duplicated rows: {duplicateRowsDF.shape[0]}') -remove_indices(duplicateRowsDF) +remove_indices(duplicateRowsDF, df) ncss_target = immutable_df[immutable_df['ncss_target'] > 2] -remove_indices(ncss_target) +remove_indices(ncss_target, df) print(f'ncss_target > 2 {ncss_target.shape[0]}') +# exclude abstract extract methods +abstract_method = immutable_df[immutable_df['ABSTRACT_METHOD'] == True] +remove_indices(abstract_method, df) + # REMOVE METHOD CHAINING SINCE IT IS not correct # to inline them, we have different type objects, # it's not a function of the original class method_chain_before = immutable_df[immutable_df['METHOD_CHAIN_BEFORE'] == True] -remove_indices(method_chain_before) +remove_indices(method_chain_before, df) method_chain_after = immutable_df[immutable_df['METHOD_CHAIN_AFTER'] == True] -remove_indices(method_chain_after) +remove_indices(method_chain_after, df) method_with_arguments = immutable_df[immutable_df['METHOD_WITH_ARGUMENTS'] == True] print(f'Samples where extracted method has parameters: ' f'{method_with_arguments.shape[0]}. 
We prune such methods') -remove_indices(method_with_arguments) +with_arguments_df = df[df['METHOD_WITH_ARGUMENTS'] == True] +remove_indices(method_with_arguments, df) + + +def count_filters_for_df(immutable_df, df_changed): + crossed_var_names = immutable_df[immutable_df['CROSSED_VAR_NAMES'] == True] + remove_indices(crossed_var_names, df_changed) + print(f'Samples where var names of extracted function is crossed with target method' + f'{crossed_var_names.shape[0]}') + + do_nothing = immutable_df[immutable_df['do_nothing'] == True] + remove_indices(do_nothing, df_changed) + print(f'Samples where we rejected to inline by some reason' + f'{do_nothing.shape[0]}') + + not_simple_actual_parameter = immutable_df[immutable_df['NOT_SIMPLE_ACTUAL_PARAMETER'] == True] + remove_indices(not_simple_actual_parameter, df_changed) + print(f'Samples where actual parameter in invocation is not simple' + f'{not_simple_actual_parameter.shape[0]}') + + inside_if = immutable_df[immutable_df['INSIDE_IF'] == True] + remove_indices(inside_if, df_changed) + print(f'Samples where invocation was inside if condition' + f'{inside_if.shape[0]}') + + inside_while = immutable_df[immutable_df['INSIDE_WHILE'] == True] + remove_indices(inside_while, df_changed) + print(f'Samples where invocation was inside while condition' + f'{inside_while.shape[0]}') + + not_simple_actual_parameter = immutable_df[immutable_df['INSIDE_FOR'] == True] + remove_indices(not_simple_actual_parameter, df_changed) + print(f'Samples where invocation was inside for condition' + f'{not_simple_actual_parameter.shape[0]}') + + inside_foreach = immutable_df[immutable_df['INSIDE_FOREACH'] == True] + remove_indices(inside_foreach, df_changed) + print(f'Samples where invocation was inside foreach condition' + f'{inside_foreach.shape[0]}') + + inside_binary_operation = immutable_df[immutable_df['INSIDE_BINARY_OPERATION'] == True] + remove_indices(inside_binary_operation, df_changed) + print(f'Samples where invocation was inside binary operation' + f'{inside_binary_operation.shape[0]}') + + inside_ternary = immutable_df[immutable_df['INSIDE_TERNARY'] == True] + remove_indices(inside_ternary, df_changed) + print(f'Samples where invocation was inside ternary operation' + f'{inside_ternary.shape[0]}') + + inside_class_creator = immutable_df[immutable_df['INSIDE_CLASS_CREATOR'] == True] + remove_indices(inside_class_creator, df_changed) + print(f'Samples where invocation was inside class creator' + f'{inside_class_creator.shape[0]}') + + cast_of_return_type = immutable_df[immutable_df['CAST_OF_RETURN_TYPE'] == True] + remove_indices(cast_of_return_type, df_changed) + print(f'Samples where return parameter was casted' + f'{cast_of_return_type.shape[0]}') + + cast_in_actual_params = immutable_df[immutable_df['CAST_IN_ACTUAL_PARAMS'] == True] + remove_indices(cast_in_actual_params, df_changed) + print(f'Samples where actual parameter in invocation was casted' + f'{cast_in_actual_params.shape[0]}') + + inside_array_creator = immutable_df[immutable_df['INSIDE_ARRAY_CREATOR'] == True] + remove_indices(inside_array_creator, df_changed) + print(f'Samples where invocation was in array creator' + f'{inside_array_creator.shape[0]}') + + single_statement_in_if = immutable_df[immutable_df['SINGLE_STATEMENT_IN_IF'] == True] + remove_indices(single_statement_in_if, df_changed) + print(f'Samples where invocation was in if block with 1 statement' + f'{single_statement_in_if.shape[0]}') + + inside_lambda = immutable_df[immutable_df['INSIDE_LAMBDA'] == True] + 
remove_indices(inside_lambda, df_changed) + print(f'Samples where invocation was in lambda' + f'{inside_lambda.shape[0]}') + + already_assigned_value_in_invocation = immutable_df[immutable_df['ALREADY_ASSIGNED_VALUE_IN_INVOCATION'] == True] + remove_indices(already_assigned_value_in_invocation, df_changed) + print(f'Samples where already assigned value was in invocation' + f'{already_assigned_value_in_invocation.shape[0]}') -crossed_var_names = immutable_df[immutable_df['CROSSED_VAR_NAMES'] == True] -remove_indices(crossed_var_names) -print(f'Samples where var names of extracted function is crossed with target method' - f'{crossed_var_names.shape[0]}') + several_returns = immutable_df[immutable_df['SEVERAL_RETURNS'] == True] + remove_indices(several_returns, df_changed) + print(f'Samples where there are several returns in extracted method' + f'{several_returns.shape[0]}') -do_nothing = immutable_df[immutable_df['do_nothing'] == True] -remove_indices(do_nothing) -print(f'Samples where we rejected to inline by some reason' - f'{do_nothing.shape[0]}') + is_not_at_the_same_line_as_prohibited_stats = immutable_df[immutable_df['IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS'] == True] + remove_indices(is_not_at_the_same_line_as_prohibited_stats, df_changed) + print(f'Samples where invocation was inside try-statement, synchronized statement,' + 'catch clause, super constructor invocation' + f'{is_not_at_the_same_line_as_prohibited_stats.shape[0]}') + is_not_parent_member_ref = immutable_df[immutable_df['IS_NOT_PARENT_MEMBER_REF'] == True] + remove_indices(is_not_parent_member_ref, df_changed) + print(f'Samples where is_not_parent_member_ref:' + f'{is_not_parent_member_ref.shape[0]}') -not_simple_actual_parameter = immutable_df[immutable_df['NOT_SIMPLE_ACTUAL_PARAMETER'] == True] -remove_indices(not_simple_actual_parameter) -print(f'Samples where actual parameter in invocation is not simple. 
Sometimes it matches the cast typing' - f'{not_simple_actual_parameter.shape[0]}') +count_filters_for_df(immutable_df, df) +count_filters_for_df(immutable_df, with_arguments_df) # immutable_df['score_diff'] = immutable_df['invocation_method_end_line'].sub(immutable_df['invocation_method_start_line'], axis=0) # negative_insertions = immutable_df[immutable_df['score_diff'] < 0] From 47ce5816923697bc683b3d05022e9a0e7d722b9d Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Fri, 15 Jan 2021 12:49:35 +0300 Subject: [PATCH 42/52] ADd function for filter --- veniq/dataset_collection/full_stats.py | 84 +++++++++++++++----------- 1 file changed, 49 insertions(+), 35 deletions(-) diff --git a/veniq/dataset_collection/full_stats.py b/veniq/dataset_collection/full_stats.py index 68af0627..ae53223c 100644 --- a/veniq/dataset_collection/full_stats.py +++ b/veniq/dataset_collection/full_stats.py @@ -1,42 +1,62 @@ import pandas as pd -df = pd.read_csv(r'd:\git\veniq\veniq\dataset_collection\new_dataset\full_dataset\out.csv') -immutable_df = df.copy() - def remove_indices(df_to_filter: pd.DataFrame, src_df): rows = src_df[src_df.index.isin(df_to_filter.index)] src_df.drop(rows.index, inplace=True) -print(f'Total lines: {df.shape[0]}') -duplicateRowsDF = immutable_df[immutable_df.duplicated()] -print(f'Duplicated rows: {duplicateRowsDF.shape[0]}') -remove_indices(duplicateRowsDF, df) -ncss_target = immutable_df[immutable_df['ncss_target'] > 2] -remove_indices(ncss_target, df) -print(f'ncss_target > 2 {ncss_target.shape[0]}') +def make_filtration(): + df = pd.read_csv(r'D:\temp\dataset_colelction_refactoring\01_15\out.csv') + immutable_df = df.copy() + print(f'Total lines: {df.shape[0]}') + duplicateRowsDF = immutable_df[immutable_df.duplicated()] + print(f'Duplicated rows: {duplicateRowsDF.shape[0]}') + remove_indices(duplicateRowsDF, df) + + method_with_arguments = immutable_df[immutable_df['METHOD_WITH_ARGUMENTS'] == True] + percent_without = (method_with_arguments.shape[0] / float(immutable_df.shape[0])) * 100 + print(f'Samples where extracted method has parameters: ' + f'{method_with_arguments.shape[0]}; {percent_without}') + + without_arguments_df = immutable_df[immutable_df['METHOD_WITH_ARGUMENTS'] == False] + percent_with = (without_arguments_df.shape[0] / float(immutable_df.shape[0])) * 100 + print(f'Samples where extracted method doesn\'t parameters: ' + f'{without_arguments_df.shape[0]}; {percent_with}') + + print('Analyzing methods without arguments') + must_have_filtration(without_arguments_df.copy(), df.copy()) + print('Analyzing methods with arguments') + must_have_filtration(method_with_arguments.copy(), df.copy()) + -# exclude abstract extract methods -abstract_method = immutable_df[immutable_df['ABSTRACT_METHOD'] == True] -remove_indices(abstract_method, df) +def must_have_filtration(immutable_df, df): + filter_with_indices_exclusion(df, immutable_df, immutable_df['ncss_target'] > 2, 'ncss_target > 2') + # exclude abstract extract methods + abstract_method = immutable_df[immutable_df['ABSTRACT_METHOD'] == True] + remove_indices(abstract_method, df) + # REMOVE METHOD CHAINING SINCE IT IS not correct + # to inline them, we have different type objects, + # it's not a function of the original class + method_chain_before = immutable_df[immutable_df['METHOD_CHAIN_BEFORE'] == True] + remove_indices(method_chain_before, df) + method_chain_after = immutable_df[immutable_df['METHOD_CHAIN_AFTER'] == True] + remove_indices(method_chain_after, df) -# REMOVE METHOD CHAINING SINCE IT IS not correct -# to 
inline them, we have different type objects, -# it's not a function of the original class -method_chain_before = immutable_df[immutable_df['METHOD_CHAIN_BEFORE'] == True] -remove_indices(method_chain_before, df) -method_chain_after = immutable_df[immutable_df['METHOD_CHAIN_AFTER'] == True] -remove_indices(method_chain_after, df) + count_filters_for_df(immutable_df, df) -method_with_arguments = immutable_df[immutable_df['METHOD_WITH_ARGUMENTS'] == True] -print(f'Samples where extracted method has parameters: ' - f'{method_with_arguments.shape[0]}. We prune such methods') -with_arguments_df = df[df['METHOD_WITH_ARGUMENTS'] == True] -remove_indices(method_with_arguments, df) + +def filter_with_indices_exclusion(df, immutable_df, lambda_f, str_to_print): + filtered_df = immutable_df[lambda_f] + remove_indices(filtered_df, df) + percent = (filtered_df.shape[0] / float(immutable_df.shape[0])) * 100 + print(f'{str_to_print} {filtered_df.shape[0]}; {percent}') def count_filters_for_df(immutable_df, df_changed): + + is_valid_ast = immutable_df[immutable_df['is_valid_ast'] == True] + crossed_var_names = immutable_df[immutable_df['CROSSED_VAR_NAMES'] == True] remove_indices(crossed_var_names, df_changed) print(f'Samples where var names of extracted function is crossed with target method' @@ -133,14 +153,8 @@ def count_filters_for_df(immutable_df, df_changed): print(f'Samples where is_not_parent_member_ref:' f'{is_not_parent_member_ref.shape[0]}') + print(f'Remained cases: {df_changed.shape[0]}') + print(f'is_valid_ast cases: {is_valid_ast.shape[0]}') -count_filters_for_df(immutable_df, df) -count_filters_for_df(immutable_df, with_arguments_df) - -# immutable_df['score_diff'] = immutable_df['invocation_method_end_line'].sub(immutable_df['invocation_method_start_line'], axis=0) -# negative_insertions = immutable_df[immutable_df['score_diff'] < 0] -# remove_indices(negative_insertions) -# print(f'Negative insertions: {negative_insertions.shape[0]}') -# -# print(f'Total cases: {df.shape[0]}') -# print(f'Target ncss 3: {df.shape[0]}') +if __name__ == '__main__': + make_filtration() From 09adebd2233739aa393093b982c3baf16a312ec7 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Fri, 15 Jan 2021 17:03:43 +0300 Subject: [PATCH 43/52] Fix ncss > 2 --- veniq/dataset_collection/augmentation.py | 6 +- veniq/dataset_collection/full_stats.py | 261 ++++++++++++++--------- 2 files changed, 158 insertions(+), 109 deletions(-) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index a4c96dd1..f155838f 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -492,7 +492,8 @@ def insert_code_with_new_file_creation( row_dict['is_valid_ast'] = is_valid_ast else: row_dict['do_nothing'] = True - + else: + row_dict['ONE_LINE_FUNCTION'] = True return row_dict @@ -602,8 +603,6 @@ def analyze_file( + list(class_declaration.constructors) collect_info_about_functions_without_params(method_declarations, methods_list) - if file_path.name.endswith('RedisRegistry.java'): - print(1) for method_node in methods_list: method_decl = ast.get_subtree(method_node) found_functions = method_declarations.get(method_node.name, []) @@ -779,6 +778,7 @@ def save_text_to_new_file(input_dir: Path, text: str, filename: Path) -> Path: 'ncss_target', 'ncss_extracted', 'do_nothing', + 'ONE_LINE_FUNCTION', 'NO_IGNORED_CASES' ] + [x for x in InvocationType.list_types()] df = pd.DataFrame(columns=columns) diff --git a/veniq/dataset_collection/full_stats.py 
b/veniq/dataset_collection/full_stats.py index ae53223c..726983d8 100644 --- a/veniq/dataset_collection/full_stats.py +++ b/veniq/dataset_collection/full_stats.py @@ -3,7 +3,9 @@ def remove_indices(df_to_filter: pd.DataFrame, src_df): rows = src_df[src_df.index.isin(df_to_filter.index)] + print(f'dropped {rows.shape[0]}') src_df.drop(rows.index, inplace=True) + print(f'remained {src_df.shape[0]}') def make_filtration(): @@ -17,144 +19,191 @@ def make_filtration(): method_with_arguments = immutable_df[immutable_df['METHOD_WITH_ARGUMENTS'] == True] percent_without = (method_with_arguments.shape[0] / float(immutable_df.shape[0])) * 100 print(f'Samples where extracted method has parameters: ' - f'{method_with_arguments.shape[0]}; {percent_without}') + f'{method_with_arguments.shape[0]}; {percent_without:.2f}') without_arguments_df = immutable_df[immutable_df['METHOD_WITH_ARGUMENTS'] == False] percent_with = (without_arguments_df.shape[0] / float(immutable_df.shape[0])) * 100 print(f'Samples where extracted method doesn\'t parameters: ' - f'{without_arguments_df.shape[0]}; {percent_with}') + f'{without_arguments_df.shape[0]}; {percent_with:.2f}') print('Analyzing methods without arguments') - must_have_filtration(without_arguments_df.copy(), df.copy()) + must_have_filtration(without_arguments_df.__deepcopy__(), without_arguments_df.__deepcopy__()) print('Analyzing methods with arguments') - must_have_filtration(method_with_arguments.copy(), df.copy()) + must_have_filtration(method_with_arguments.__deepcopy__(), df.__deepcopy__()) def must_have_filtration(immutable_df, df): - filter_with_indices_exclusion(df, immutable_df, immutable_df['ncss_target'] > 2, 'ncss_target > 2') + + print('Must-have filters') + filter_with_indices_exclusion(df, immutable_df, immutable_df['ncss_target'] < 3, 'ncss_target < 3') # exclude abstract extract methods - abstract_method = immutable_df[immutable_df['ABSTRACT_METHOD'] == True] - remove_indices(abstract_method, df) + filter_with_indices_exclusion(df, immutable_df, immutable_df['ABSTRACT_METHOD'] == True, 'abstract methods') # REMOVE METHOD CHAINING SINCE IT IS not correct # to inline them, we have different type objects, # it's not a function of the original class - method_chain_before = immutable_df[immutable_df['METHOD_CHAIN_BEFORE'] == True] - remove_indices(method_chain_before, df) - method_chain_after = immutable_df[immutable_df['METHOD_CHAIN_AFTER'] == True] - remove_indices(method_chain_after, df) - count_filters_for_df(immutable_df, df) + filter_with_indices_exclusion( + df, immutable_df, + immutable_df['METHOD_CHAIN_BEFORE'] == True, 'method_chain_before') + + filter_with_indices_exclusion( + df, immutable_df, + immutable_df['METHOD_CHAIN_AFTER'] == True, 'method_chain_after') + + count_filters_for_df(df.__deepcopy__(), df.__deepcopy__()) def filter_with_indices_exclusion(df, immutable_df, lambda_f, str_to_print): filtered_df = immutable_df[lambda_f] remove_indices(filtered_df, df) percent = (filtered_df.shape[0] / float(immutable_df.shape[0])) * 100 - print(f'{str_to_print} {filtered_df.shape[0]}; {percent}') + print(f'{str_to_print} {filtered_df.shape[0]}; {percent:.2f}%') def count_filters_for_df(immutable_df, df_changed): + print('Filters with invocation types classification') is_valid_ast = immutable_df[immutable_df['is_valid_ast'] == True] - crossed_var_names = immutable_df[immutable_df['CROSSED_VAR_NAMES'] == True] - remove_indices(crossed_var_names, df_changed) - print(f'Samples where var names of extracted function is crossed with target 
method' - f'{crossed_var_names.shape[0]}') - - do_nothing = immutable_df[immutable_df['do_nothing'] == True] - remove_indices(do_nothing, df_changed) - print(f'Samples where we rejected to inline by some reason' - f'{do_nothing.shape[0]}') - - not_simple_actual_parameter = immutable_df[immutable_df['NOT_SIMPLE_ACTUAL_PARAMETER'] == True] - remove_indices(not_simple_actual_parameter, df_changed) - print(f'Samples where actual parameter in invocation is not simple' - f'{not_simple_actual_parameter.shape[0]}') - - inside_if = immutable_df[immutable_df['INSIDE_IF'] == True] - remove_indices(inside_if, df_changed) - print(f'Samples where invocation was inside if condition' - f'{inside_if.shape[0]}') - - inside_while = immutable_df[immutable_df['INSIDE_WHILE'] == True] - remove_indices(inside_while, df_changed) - print(f'Samples where invocation was inside while condition' - f'{inside_while.shape[0]}') - - not_simple_actual_parameter = immutable_df[immutable_df['INSIDE_FOR'] == True] - remove_indices(not_simple_actual_parameter, df_changed) - print(f'Samples where invocation was inside for condition' - f'{not_simple_actual_parameter.shape[0]}') - - inside_foreach = immutable_df[immutable_df['INSIDE_FOREACH'] == True] - remove_indices(inside_foreach, df_changed) - print(f'Samples where invocation was inside foreach condition' - f'{inside_foreach.shape[0]}') - - inside_binary_operation = immutable_df[immutable_df['INSIDE_BINARY_OPERATION'] == True] - remove_indices(inside_binary_operation, df_changed) - print(f'Samples where invocation was inside binary operation' - f'{inside_binary_operation.shape[0]}') - - inside_ternary = immutable_df[immutable_df['INSIDE_TERNARY'] == True] - remove_indices(inside_ternary, df_changed) - print(f'Samples where invocation was inside ternary operation' - f'{inside_ternary.shape[0]}') - - inside_class_creator = immutable_df[immutable_df['INSIDE_CLASS_CREATOR'] == True] - remove_indices(inside_class_creator, df_changed) - print(f'Samples where invocation was inside class creator' - f'{inside_class_creator.shape[0]}') - - cast_of_return_type = immutable_df[immutable_df['CAST_OF_RETURN_TYPE'] == True] - remove_indices(cast_of_return_type, df_changed) - print(f'Samples where return parameter was casted' - f'{cast_of_return_type.shape[0]}') - - cast_in_actual_params = immutable_df[immutable_df['CAST_IN_ACTUAL_PARAMS'] == True] - remove_indices(cast_in_actual_params, df_changed) - print(f'Samples where actual parameter in invocation was casted' - f'{cast_in_actual_params.shape[0]}') - - inside_array_creator = immutable_df[immutable_df['INSIDE_ARRAY_CREATOR'] == True] - remove_indices(inside_array_creator, df_changed) - print(f'Samples where invocation was in array creator' - f'{inside_array_creator.shape[0]}') - - single_statement_in_if = immutable_df[immutable_df['SINGLE_STATEMENT_IN_IF'] == True] - remove_indices(single_statement_in_if, df_changed) - print(f'Samples where invocation was in if block with 1 statement' - f'{single_statement_in_if.shape[0]}') - - inside_lambda = immutable_df[immutable_df['INSIDE_LAMBDA'] == True] - remove_indices(inside_lambda, df_changed) - print(f'Samples where invocation was in lambda' - f'{inside_lambda.shape[0]}') - - already_assigned_value_in_invocation = immutable_df[immutable_df['ALREADY_ASSIGNED_VALUE_IN_INVOCATION'] == True] - remove_indices(already_assigned_value_in_invocation, df_changed) - print(f'Samples where already assigned value was in invocation' - f'{already_assigned_value_in_invocation.shape[0]}') - - several_returns = 
immutable_df[immutable_df['SEVERAL_RETURNS'] == True] - remove_indices(several_returns, df_changed) - print(f'Samples where there are several returns in extracted method' - f'{several_returns.shape[0]}') - - is_not_at_the_same_line_as_prohibited_stats = immutable_df[immutable_df['IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS'] == True] - remove_indices(is_not_at_the_same_line_as_prohibited_stats, df_changed) - print(f'Samples where invocation was inside try-statement, synchronized statement,' + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['CROSSED_VAR_NAMES'] == True, + 'Samples where var names of extracted function is crossed with target method' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['do_nothing'] == True, + 'Samples where we rejected to inline by some reason' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['NOT_SIMPLE_ACTUAL_PARAMETER'] == True, + 'Samples where actual parameter in invocation is not simple' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['INSIDE_IF'] == True, + 'Samples where invocation was inside if condition' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['INSIDE_WHILE'] == True, + 'Samples where invocation was inside while condition' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['INSIDE_FOR'] == True, + 'Samples where invocation was inside for condition' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['INSIDE_FOREACH'] == True, + 'Samples where invocation was inside foreach condition' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['INSIDE_BINARY_OPERATION'] == True, + 'Samples where invocation was inside binary operation' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['INSIDE_TERNARY'] == True, + 'Samples where invocation was inside ternary operation' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['INSIDE_CLASS_CREATOR'] == True, + 'Samples where invocation was inside class creator' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['CAST_OF_RETURN_TYPE'] == True, + 'Samples where return parameter was casted' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['CAST_IN_ACTUAL_PARAMS'] == True, + 'Samples where actual parameter in invocation was casted' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['INSIDE_ARRAY_CREATOR'] == True, + 'Samples where invocation was in array creator' + ) + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['SINGLE_STATEMENT_IN_IF'] == True, + 'Samples where invocation was in if block with 1 statement' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['INSIDE_LAMBDA'] == True, + 'Samples where invocation was in lambda' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['ALREADY_ASSIGNED_VALUE_IN_INVOCATION'] == True, + 'Samples where already assigned value was in invocation' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['SEVERAL_RETURNS'] == True, + 'Samples where there are several returns in extracted method' + ) + + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS'] == True, + 
'Samples where invocation was inside try-statement, synchronized statement,' 'catch clause, super constructor invocation' - f'{is_not_at_the_same_line_as_prohibited_stats.shape[0]}') + ) - is_not_parent_member_ref = immutable_df[immutable_df['IS_NOT_PARENT_MEMBER_REF'] == True] - remove_indices(is_not_parent_member_ref, df_changed) - print(f'Samples where is_not_parent_member_ref:' - f'{is_not_parent_member_ref.shape[0]}') + filter_with_indices_exclusion( + df_changed, + immutable_df, + immutable_df['IS_NOT_PARENT_MEMBER_REF'] == True, + 'Samples where is_not_parent_member_ref' + ) print(f'Remained cases: {df_changed.shape[0]}') + df_changed.to_csv('remained.csv') print(f'is_valid_ast cases: {is_valid_ast.shape[0]}') + is_valid_ast.to_csv('is_valid_ast.csv') + print(f'cases where filters didn\'t work: {is_valid_ast.shape[0]}') + if __name__ == '__main__': make_filtration() From 38a83cfc19b42e3b7b7310903cf7b610b9056c50 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Mon, 18 Jan 2021 16:30:53 +0300 Subject: [PATCH 44/52] Remove cross varialbes --- veniq/dataset_collection/augmentation.py | 189 ++++++++++++----------- 1 file changed, 101 insertions(+), 88 deletions(-) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index f155838f..a8c3339f 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -132,15 +132,16 @@ class InvocationType(Enum): INSIDE_ARRAY_CREATOR = 12 SINGLE_STATEMENT_IN_IF = 13 INSIDE_LAMBDA = 14 - ALREADY_ASSIGNED_VALUE_IN_INVOCATION = 15 + #ALREADY_ASSIGNED_VALUE_IN_INVOCATION = 15 SEVERAL_RETURNS = 16 IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS = 17 IS_NOT_PARENT_MEMBER_REF = 18 # EXTRACTED_NCSS_SMALL = 19 - CROSSED_VAR_NAMES = 20 + #CROSSED_VAR_NAMES_INSIDE_FUNCTION = 20 CAST_IN_ACTUAL_PARAMS = 21 ABSTRACT_METHOD = 22 - METHOD_WITH_ARGUMENTS = 999 + #CROSSED_FUNC_NAMES = 23 + #METHOD_WITH_ARGUMENTS_VAR_CROSSED = 999 @classmethod @@ -213,6 +214,11 @@ def is_match_to_the_conditions( is_not_lambda = not (parent.node_type == ASTNodeType.LAMBDA_EXPRESSION) is_not_at_the_same_line_as_prohibited_stats = check_nesting_statements(method_invoked) + are_crossed_func_params = True + if is_actual_parameter_simple: + if are_not_params_crossed(method_invoked, found_method_decl): + are_crossed_func_params = False + ignored_cases = get_stats_for_pruned_cases( is_actual_parameter_simple, is_not_array_creator, @@ -234,43 +240,50 @@ def is_match_to_the_conditions( is_not_ternary, is_not_actual_param_cast, is_not_is_extract_method_abstract, + are_crossed_func_params, method_invoked ) - # other_requirements = all([ - # is_not_chain_before, - # is_actual_parameter_simple, - # is_not_chain_after, - # is_not_inside_if, - # is_not_inside_while, - # is_not_binary_operation, - # is_not_ternary, - # is_not_class_creator, - # is_not_cast, - # is_not_array_creator, - # is_not_parent_member_ref, - # is_not_inside_for, - # is_not_enhanced_for_control, - # is_not_lambda, - # is_not_method_inv_single_statement_in_if, - # is_not_assign_value_with_return_type, - # is_not_several_returns, - # is_not_at_the_same_line_as_prohibited_stats, - # not method_invoked.arguments]) - - # if (not method_invoked.qualifier and other_requirements) or \ - # (method_invoked.qualifier == 'this' and other_requirements): return ignored_cases +def are_not_params_crossed( + invocaton_node: ASTNode, + method_declaration: ASTNode) -> bool: + """ + Check if names of params of invocation are matched with + params of method declaration: + 
+ Matched: + func(a, b); + public void func(int a, int b) + + Not matched + func(a, e); + public void func(int a, int b) + + :param invocaton_node: invocation of function + :param method_declaration: method declaration of invoked function + :return: + """ + m_decl_names = set([x.name for x in method_declaration.parameters]) + m_inv_names = set([x.member for x in invocaton_node.arguments]) + intersection = m_inv_names.difference(m_decl_names) + if not intersection: + return True + else: + return False + + def get_stats_for_pruned_cases( is_actual_parameter_simple, is_not_array_creator, is_not_assign_value_with_return_type, is_not_at_the_same_line_as_prohibited_stats, is_not_binary_operation, - is_not_cast_of_return_type, is_not_chain_after, is_not_chain_before, is_not_class_creator, - is_not_enhanced_for_control, is_not_inside_for, is_not_inside_if, is_not_inside_while, - is_not_lambda, is_not_method_inv_single_statement_in_if, is_not_parent_member_ref, + is_not_cast_of_return_type, is_not_chain_after, is_not_chain_before, + is_not_class_creator, is_not_enhanced_for_control, is_not_inside_for, + is_not_inside_if, is_not_inside_while, is_not_lambda, + is_not_method_inv_single_statement_in_if, is_not_parent_member_ref, is_not_several_returns, is_not_ternary, is_not_actual_param_cast, - is_not_is_extract_method_abstract, method_invoked) -> List[str]: + is_not_is_extract_method_abstract, are_crossed_func_params, method_invoked) -> List[str]: invocation_types_to_ignore: List[str] = [] if not is_not_is_extract_method_abstract: @@ -307,14 +320,12 @@ def get_stats_for_pruned_cases( invocation_types_to_ignore.append(InvocationType.INSIDE_LAMBDA.name) if not is_not_method_inv_single_statement_in_if: invocation_types_to_ignore.append(InvocationType.SINGLE_STATEMENT_IN_IF.name) - if not is_not_assign_value_with_return_type: - invocation_types_to_ignore.append(InvocationType.ALREADY_ASSIGNED_VALUE_IN_INVOCATION.name) + # if not is_not_assign_value_with_return_type: + # invocation_types_to_ignore.append(InvocationType.ALREADY_ASSIGNED_VALUE_IN_INVOCATION.name) if not is_not_several_returns: invocation_types_to_ignore.append(InvocationType.SEVERAL_RETURNS.name) if not is_not_at_the_same_line_as_prohibited_stats: invocation_types_to_ignore.append(InvocationType.IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS.name) - if method_invoked.arguments: - invocation_types_to_ignore.append(InvocationType.METHOD_WITH_ARGUMENTS.name) return invocation_types_to_ignore @@ -340,7 +351,7 @@ def check_whether_method_has_return_type( return InlineTypesAlgorithms.WITHOUT_RETURN_WITHOUT_ARGUMENTS if intersected_names: - line_to_csv[InvocationType.CROSSED_VAR_NAMES.name] = True + line_to_csv[InvocationType.CROSSED_VAR_NAMES_INSIDE_FUNCTION.name] = True return InlineTypesAlgorithms.DO_NOTHING @@ -389,20 +400,15 @@ def determine_algorithm_insertion_type( return InlineTypesAlgorithms.DO_NOTHING else: original_method = original_invoked_method[0] - if not original_method.parameters: - - has_attr_return_type = hasattr(original_method, 'return_type') - if has_attr_return_type: - if not original_method.return_type: - return run_var_crossing_check(ast, line_to_csv, method_node, original_method) - else: - return InlineTypesAlgorithms.WITH_RETURN_WITHOUT_ARGUMENTS - # Else if we have constructor, it doesn't have return type + has_attr_return_type = hasattr(original_method, 'return_type') + if has_attr_return_type: + if not original_method.return_type: + return InlineTypesAlgorithms.WITHOUT_RETURN_WITHOUT_ARGUMENTS else: - return 
run_var_crossing_check(ast, line_to_csv, method_node, original_method) - + return InlineTypesAlgorithms.WITH_RETURN_WITHOUT_ARGUMENTS + # Else if we have constructor, it doesn't have return type else: - return InlineTypesAlgorithms.DO_NOTHING + return InlineTypesAlgorithms.WITHOUT_RETURN_WITHOUT_ARGUMENTS def run_var_crossing_check(ast, line_to_csv, method_node, original_method): @@ -611,10 +617,13 @@ def analyze_file( if len(found_functions) == 1: for invocation_node in method_decl.get_proxy_nodes( ASTNodeType.METHOD_INVOCATION): + print(f'Method: {method_node.name} inv: {invocation_node.member}') + extracted_function_method_decl = method_declarations.get(invocation_node.member, []) # ignore overloaded extracted functions if len(extracted_function_method_decl) == 1: try: + # print(f'Method: {method_node.name} inv: {invocation_node.member}') make_insertion( ast, class_declaration, @@ -637,56 +646,60 @@ def analyze_file( return results -def make_insertion(ast, class_declaration, dst_filename, found_method_decl, method_declarations, method_invoked, - method_node, output_path, source_filepath, results, dataset_dir): +def make_insertion( + ast, class_declaration, dst_filename, + found_method_decl, method_declarations, method_invoked, + method_node, output_path, source_filepath, results, + dataset_dir): if (not method_invoked.qualifier) or (method_invoked.qualifier == 'this'): ignored_cases = is_match_to_the_conditions( ast, method_invoked, - found_method_decl[0] - ) + found_method_decl[0]) original_func = method_declarations.get(method_invoked.member)[0] # type: ignore ncss_extracted = NCSSMetric().value(ast.get_subtree(original_func)) ncss_target = NCSSMetric().value(ast.get_subtree(method_node)) - log_of_inline = { - 'extract_method': method_invoked.member, - 'target_method': method_node.name, - 'ncss_extracted': ncss_extracted, - 'ncss_target': ncss_target, - 'invocation_line_number_in_original_file': method_invoked.line - } - # default init - for case_name in InvocationType.list_types(): - log_of_inline[case_name] = False - - if not ignored_cases: - insert_code_with_new_file_creation( - class_declaration.name, - ast, - method_node, - method_invoked, - dst_filename, - output_path, - method_declarations, - log_of_inline) - log_of_inline['NO_IGNORED_CASES'] = True - else: - log_of_inline['NO_IGNORED_CASES'] = False - - # found ignored cases - for case_name in ignored_cases: - log_of_inline[case_name] = True - - # change source filename, since it will be changed - log_of_inline['original_filename'] = dst_filename.name - # remove full_dataset/input prefix - # real_input_dataset_path = Path('/'.join(Path(input_dir).absolute().parts[:-2])) - project_id = '/'.join(Path(source_filepath.absolute()).relative_to(Path(dataset_dir).absolute()).parts[:2]) - # print(dst_filename.absolute(), input_dir.absolute(), project_id) - log_of_inline['project_id'] = project_id - results.append(log_of_inline) + + if ncss_extracted > 2: + log_of_inline = { + 'extract_method': method_invoked.member, + 'target_method': method_node.name, + 'ncss_extracted': ncss_extracted, + 'ncss_target': ncss_target, + 'invocation_line_number_in_original_file': method_invoked.line + } + # default init + for case_name in InvocationType.list_types(): + log_of_inline[case_name] = False + + if not ignored_cases: + insert_code_with_new_file_creation( + class_declaration.name, + ast, + method_node, + method_invoked, + dst_filename, + output_path, + method_declarations, + log_of_inline) + log_of_inline['NO_IGNORED_CASES'] = True + else: + 
log_of_inline['NO_IGNORED_CASES'] = False + + # found ignored cases + for case_name in ignored_cases: + log_of_inline[case_name] = True + + # change source filename, since it will be changed + log_of_inline['original_filename'] = dst_filename.name + # remove full_dataset/input prefix + # real_input_dataset_path = Path('/'.join(Path(input_dir).absolute().parts[:-2])) + project_id = '/'.join(Path(source_filepath.absolute()).relative_to(Path(dataset_dir).absolute()).parts[:2]) + # print(dst_filename.absolute(), input_dir.absolute(), project_id) + log_of_inline['project_id'] = project_id + results.append(log_of_inline) def collect_info_about_functions_without_params( From 15bf7756b1b595c54defe89763160a1936257ac0 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Mon, 18 Jan 2021 17:57:56 +0300 Subject: [PATCH 45/52] Remove all var cross --- test/dataset_collection/test_dataset_collection.py | 12 ++++++++++++ veniq/dataset_collection/augmentation.py | 4 ++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/test/dataset_collection/test_dataset_collection.py b/test/dataset_collection/test_dataset_collection.py index 99672b64..ddd4bc2b 100644 --- a/test/dataset_collection/test_dataset_collection.py +++ b/test/dataset_collection/test_dataset_collection.py @@ -394,3 +394,15 @@ def test_check(self): self.assertEqual(result['invocation_method_start_line'], 1022) self.assertEqual(result['invocation_method_end_line'], 1083) + + def testInlineWithParamsWithIntersectedVars(self): + self.assertEqual(True, True) + + def testInlineWithParamsWithoutIntersectedVars(self): + self.assertEqual(True, True) + + def testInlineWithParamsWithIntersectedVarsWithReturn(self): + self.assertEqual(True, True) + + def testInlineWithComplexParams(self): + self.assertEqual(True, True) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index a8c3339f..281e6a4c 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -617,7 +617,7 @@ def analyze_file( if len(found_functions) == 1: for invocation_node in method_decl.get_proxy_nodes( ASTNodeType.METHOD_INVOCATION): - print(f'Method: {method_node.name} inv: {invocation_node.member}') + # print(f'Method: {method_node.name} inv: {invocation_node.member}') extracted_function_method_decl = method_declarations.get(invocation_node.member, []) # ignore overloaded extracted functions @@ -662,7 +662,7 @@ def make_insertion( ncss_extracted = NCSSMetric().value(ast.get_subtree(original_func)) ncss_target = NCSSMetric().value(ast.get_subtree(method_node)) - if ncss_extracted > 2: + if ncss_extracted > 3: log_of_inline = { 'extract_method': method_invoked.member, 'target_method': method_node.name, From ada0f23636bc70c60966e3d6891c0e76498900b8 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Wed, 20 Jan 2021 16:52:36 +0300 Subject: [PATCH 46/52] Fix complex --- .../InlineExamples/InlineWithParams.java | 57 ++++++++++ .../test_dataset_collection.py | 80 +++++++++++-- veniq/dataset_collection/augmentation.py | 105 ++++++++++++------ veniq/dataset_collection/full_stats.py | 54 ++++----- 4 files changed, 226 insertions(+), 70 deletions(-) create mode 100644 test/dataset_collection/InlineExamples/InlineWithParams.java diff --git a/test/dataset_collection/InlineExamples/InlineWithParams.java b/test/dataset_collection/InlineExamples/InlineWithParams.java new file mode 100644 index 00000000..f41cef95 --- /dev/null +++ b/test/dataset_collection/InlineExamples/InlineWithParams.java @@ -0,0 +1,57 
@@
+class InlineWithParams
+{
+    public void target() {
+        int a = 0;
+        int b = 0;
+        extracted(a, b);
+        int e = 0;
+        extracted_not_intersected(a, e);
+    }
+    private void extracted(int a, int b) {
+        for (int i = 0; i < 10; i++) { int a = 5; int b = 6; }
+        do
+        {
+            var1 = 2;
+        }
+        while (var2 == 2);
+    }
+
+    private void extracted_not_intersected(int g, int c) {
+        for (int i = 0; i < 10; i++) { int g = 5; int b = 6; }
+        do
+        {
+            var1 = 2;
+        }
+        while (var2 == 2);
+    }
+
+    private int extracted_return(int a, int b) {
+        int u = 0;
+        for (int i = 0; i < 10; i++) { int a = 5; int b = 6; }
+        do
+        {
+            var1 = 2;
+        }
+        while (var2 == 2);
+
+        return u;
+    }
+
+    public int target_return() {
+        int a = 0;
+        int b = 0;
+        int e = extracted_return(a, b);
+        return e;
+    }
+
+    public int return_int() {return 0;}
+
+    public int target_complex() {
+        Integer a = 0;
+        int b = 0;
+        int e = extracted_return(a.intValue(), return_int());
+        return e;
+    }
+
+
+}
\ No newline at end of file
diff --git a/test/dataset_collection/test_dataset_collection.py b/test/dataset_collection/test_dataset_collection.py
index ddd4bc2b..9c54eeab 100644
--- a/test/dataset_collection/test_dataset_collection.py
+++ b/test/dataset_collection/test_dataset_collection.py
@@ -6,7 +6,7 @@
     determine_algorithm_insertion_type,
     method_body_lines,
     is_match_to_the_conditions,
-    find_lines_in_changed_file)
+    find_lines_in_changed_file, are_functional_arguments_equal)
 from veniq.ast_framework import AST, ASTNodeType
 from veniq.dataset_collection.types_identifier import (
     InlineTypesAlgorithms,
@@ -395,14 +395,78 @@ def test_check(self):
         self.assertEqual(result['invocation_method_start_line'], 1022)
         self.assertEqual(result['invocation_method_end_line'], 1083)

-    def testInlineWithParamsWithIntersectedVars(self):
-        self.assertEqual(True, True)
+    def testFunctionParamsEqualToArguments(self):
+        filename = self.current_directory / 'InlineExamples/InlineWithParams.java'
+        ast = AST.build_from_javalang(build_ast(filename))
+        class_decl = [
+            x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION)
+            if x.name == 'InlineWithParams'][0]
+        target = [
+            x for x in class_decl.methods
+            if x.name == 'target'][0]
+        extracted = [
+            x for x in class_decl.methods
+            if x.name == 'extracted'][0]
+        extracted_invocation = [
+            x for x in ast.get_subtree(target).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION)
+            if x.member == 'extracted'][0]
+
+        res = are_functional_arguments_equal(extracted_invocation, extracted)
+        self.assertEqual(res, True)
+
+    def testFunctionParamsNotEqualToArguments(self):
+        filename = self.current_directory / 'InlineExamples/InlineWithParams.java'
+        ast = AST.build_from_javalang(build_ast(filename))
+        class_decl = [
+            x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION)
+            if x.name == 'InlineWithParams'][0]
+        target = [
+            x for x in class_decl.methods
+            if x.name == 'target'][0]
+        extracted = [
+            x for x in class_decl.methods
+            if x.name == 'extracted_not_intersected'][0]
+        extracted_invocation = [
+            x for x in ast.get_subtree(target).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION)
+            if x.member == 'extracted_not_intersected'][0]

-    def testInlineWithParamsWithoutIntersectedVars(self):
-        self.assertEqual(True, True)
+        res = are_functional_arguments_equal(extracted_invocation, extracted)
+        self.assertEqual(res, False)

     def testInlineWithParamsWithIntersectedVarsWithReturn(self):
-        self.assertEqual(True, True)
+        filename = self.current_directory / 'InlineExamples/InlineWithParams.java'
+        ast = AST.build_from_javalang(build_ast(filename))
+
class_decl = [ + x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) + if x.name == 'InlineWithParams'][0] + target = [ + x for x in class_decl.methods + if x.name == 'target_return'][0] + extracted = [ + x for x in class_decl.methods + if x.name == 'extracted_return'][0] + extracted_invocation = [ + x for x in ast.get_subtree(target).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) + if x.member == 'extracted_return'][0] + + res = are_functional_arguments_equal(extracted_invocation, extracted) + self.assertEqual(res, True) + + def testInlineWithComplexParamsNotEqual(self): + filename = self.current_directory / 'InlineExamples/InlineWithParams.java' + ast = AST.build_from_javalang(build_ast(filename)) + class_decl = [ + x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) + if x.name == 'InlineWithParams'][0] + target = [ + x for x in class_decl.methods + if x.name == 'target_complex'][0] + extracted = [ + x for x in class_decl.methods + if x.name == 'extracted_return'][0] + extracted_invocation = [ + x for x in ast.get_subtree(target).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) + if x.member == 'extracted_return'][0] - def testInlineWithComplexParams(self): - self.assertEqual(True, True) + res = are_functional_arguments_equal(extracted_invocation, extracted) + self.assertEqual(res, False) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 281e6a4c..5f65c37b 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -123,7 +123,7 @@ class InvocationType(Enum): METHOD_CHAIN_AFTER = 3 INSIDE_IF = 4 INSIDE_WHILE = 5 - INSIDE_FOR = 6 + #INSIDE_FOR = 6 INSIDE_FOREACH = 7 INSIDE_BINARY_OPERATION = 8 INSIDE_TERNARY = 9 @@ -137,10 +137,10 @@ class InvocationType(Enum): IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS = 17 IS_NOT_PARENT_MEMBER_REF = 18 # EXTRACTED_NCSS_SMALL = 19 - #CROSSED_VAR_NAMES_INSIDE_FUNCTION = 20 + CROSSED_VAR_NAMES_INSIDE_FUNCTION = 20 CAST_IN_ACTUAL_PARAMS = 21 ABSTRACT_METHOD = 22 - #CROSSED_FUNC_NAMES = 23 + NOT_CROSSED_FUNC_PARAMS = 23 #METHOD_WITH_ARGUMENTS_VAR_CROSSED = 999 @@ -150,11 +150,40 @@ def list_types(cls): return types +def are_functional_arguments_equal( + invocaton_node: ASTNode, + method_declaration: ASTNode) -> bool: + """ + Check if names of params of invocation are matched with + params of method declaration: + + Matched: + func(a, b); + public void func(int a, int b) + + Not matched + func(a, e); + public void func(int a, int b) + + :param invocaton_node: invocation of function + :param method_declaration: method declaration of invoked function + :return: + """ + m_decl_names = set([x.name for x in method_declaration.parameters]) + m_inv_names = set([x.member for x in invocaton_node.arguments]) + intersection = m_inv_names.difference(m_decl_names) + if not intersection: + return True + else: + return False + + @typing.no_type_check def is_match_to_the_conditions( ast: AST, method_invoked: ASTNode, - found_method_decl=None) -> List[str]: + found_method_decl=None, + target=None) -> List[str]: if method_invoked.parent.node_type == ASTNodeType.THIS: parent = method_invoked.parent.parent class_names = [x for x in method_invoked.parent.children if hasattr(x, 'string')] @@ -194,7 +223,7 @@ def is_match_to_the_conditions( is_not_chain_after = not chains_after is_not_inside_if = not (parent.node_type == ASTNodeType.IF_STATEMENT) is_not_inside_while = not (parent.node_type == ASTNodeType.WHILE_STATEMENT) - is_not_inside_for = not (parent.node_type == 
ASTNodeType.FOR_STATEMENT) + # is_not_inside_for = not (parent.node_type == ASTNodeType.FOR_STATEMENT) is_not_enhanced_for_control = not (parent.node_type == ASTNodeType.ENHANCED_FOR_CONTROL) # ignore case else if (getServiceInterface() != null) { is_not_binary_operation = not (parent.node_type == ASTNodeType.BINARY_OPERATION) @@ -214,10 +243,13 @@ def is_match_to_the_conditions( is_not_lambda = not (parent.node_type == ASTNodeType.LAMBDA_EXPRESSION) is_not_at_the_same_line_as_prohibited_stats = check_nesting_statements(method_invoked) - are_crossed_func_params = True + are_functional_arguments_eq = False + are_var_crossed_inside_extracted = True if is_actual_parameter_simple: - if are_not_params_crossed(method_invoked, found_method_decl): - are_crossed_func_params = False + are_functional_arguments_eq = are_functional_arguments_equal(method_invoked, found_method_decl) + if are_functional_arguments_eq: + if are_not_var_crossed(method_invoked, found_method_decl, target): + are_var_crossed_inside_extracted = False ignored_cases = get_stats_for_pruned_cases( is_actual_parameter_simple, @@ -230,7 +262,8 @@ def is_match_to_the_conditions( is_not_chain_before, is_not_class_creator, is_not_enhanced_for_control, - is_not_inside_for, + # is_not_inside_for, + are_var_crossed_inside_extracted, is_not_inside_if, is_not_inside_while, is_not_lambda, @@ -240,46 +273,37 @@ def is_match_to_the_conditions( is_not_ternary, is_not_actual_param_cast, is_not_is_extract_method_abstract, - are_crossed_func_params, + are_functional_arguments_eq, method_invoked ) return ignored_cases -def are_not_params_crossed( +def are_not_var_crossed( invocaton_node: ASTNode, - method_declaration: ASTNode) -> bool: - """ - Check if names of params of invocation are matched with - params of method declaration: - - Matched: - func(a, b); - public void func(int a, int b) - - Not matched - func(a, e); - public void func(int a, int b) - - :param invocaton_node: invocation of function - :param method_declaration: method declaration of invoked function - :return: - """ - m_decl_names = set([x.name for x in method_declaration.parameters]) + method_declaration: ASTNode, + target: ASTNode) -> bool: + # m_decl_names = set([x.name for x in method_declaration.parameters]) m_inv_names = set([x.member for x in invocaton_node.arguments]) - intersection = m_inv_names.difference(m_decl_names) - if not intersection: + var_names_in_extracted = set(get_variables_decl_in_node(method_declaration)) + var_names_in_target = set(get_variables_decl_in_node(target)) + + var_names_in_extracted = var_names_in_extracted.difference(m_inv_names) + intersected_names = var_names_in_target & var_names_in_extracted + if not intersected_names: return True - else: - return False + + return False def get_stats_for_pruned_cases( is_actual_parameter_simple, is_not_array_creator, is_not_assign_value_with_return_type, is_not_at_the_same_line_as_prohibited_stats, is_not_binary_operation, is_not_cast_of_return_type, is_not_chain_after, is_not_chain_before, - is_not_class_creator, is_not_enhanced_for_control, is_not_inside_for, + is_not_class_creator, is_not_enhanced_for_control, + # is_not_inside_for, + are_var_crossed_inside_extracted, is_not_inside_if, is_not_inside_while, is_not_lambda, is_not_method_inv_single_statement_in_if, is_not_parent_member_ref, is_not_several_returns, is_not_ternary, is_not_actual_param_cast, @@ -312,8 +336,8 @@ def get_stats_for_pruned_cases( invocation_types_to_ignore.append(InvocationType.INSIDE_ARRAY_CREATOR.name) if not 
is_not_parent_member_ref: invocation_types_to_ignore.append(InvocationType.IS_NOT_PARENT_MEMBER_REF.name) - if not is_not_inside_for: - invocation_types_to_ignore.append(InvocationType.INSIDE_FOR.name) + # if not is_not_inside_for: + # invocation_types_to_ignore.append(InvocationType.INSIDE_FOR.name) if not is_not_enhanced_for_control: invocation_types_to_ignore.append(InvocationType.INSIDE_FOREACH.name) if not is_not_lambda: @@ -327,6 +351,11 @@ def get_stats_for_pruned_cases( if not is_not_at_the_same_line_as_prohibited_stats: invocation_types_to_ignore.append(InvocationType.IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS.name) + if not are_crossed_func_params: + invocation_types_to_ignore.append(InvocationType.NOT_CROSSED_FUNC_PARAMS) + if are_var_crossed_inside_extracted: + invocation_types_to_ignore.append(InvocationType.CROSSED_VAR_NAMES_INSIDE_FUNCTION) + return invocation_types_to_ignore @@ -656,7 +685,9 @@ def make_insertion( ignored_cases = is_match_to_the_conditions( ast, method_invoked, - found_method_decl[0]) + found_method_decl[0], + method_node + ) original_func = method_declarations.get(method_invoked.member)[0] # type: ignore ncss_extracted = NCSSMetric().value(ast.get_subtree(original_func)) diff --git a/veniq/dataset_collection/full_stats.py b/veniq/dataset_collection/full_stats.py index 726983d8..a3628704 100644 --- a/veniq/dataset_collection/full_stats.py +++ b/veniq/dataset_collection/full_stats.py @@ -9,27 +9,31 @@ def remove_indices(df_to_filter: pd.DataFrame, src_df): def make_filtration(): - df = pd.read_csv(r'D:\temp\dataset_colelction_refactoring\01_15\out.csv') + # df = pd.read_csv(r'D:\temp\dataset_colelction_refactoring\01_15\out.csv') + + df = pd.read_csv(r'D:\temp\dataset_colelction_refactoring\01_15\01_19\out.csv') immutable_df = df.copy() print(f'Total lines: {df.shape[0]}') duplicateRowsDF = immutable_df[immutable_df.duplicated()] print(f'Duplicated rows: {duplicateRowsDF.shape[0]}') remove_indices(duplicateRowsDF, df) - method_with_arguments = immutable_df[immutable_df['METHOD_WITH_ARGUMENTS'] == True] - percent_without = (method_with_arguments.shape[0] / float(immutable_df.shape[0])) * 100 - print(f'Samples where extracted method has parameters: ' - f'{method_with_arguments.shape[0]}; {percent_without:.2f}') + # method_with_arguments = immutable_df[immutable_df['METHOD_WITH_ARGUMENTS'] == True] + # percent_without = (method_with_arguments.shape[0] / float(immutable_df.shape[0])) * 100 + # print(f'Samples where extracted method has parameters: ' + # f'{method_with_arguments.shape[0]}; {percent_without:.2f}') - without_arguments_df = immutable_df[immutable_df['METHOD_WITH_ARGUMENTS'] == False] - percent_with = (without_arguments_df.shape[0] / float(immutable_df.shape[0])) * 100 - print(f'Samples where extracted method doesn\'t parameters: ' - f'{without_arguments_df.shape[0]}; {percent_with:.2f}') + # without_arguments_df = immutable_df[immutable_df['METHOD_WITH_ARGUMENTS'] == False] + # percent_with = (without_arguments_df.shape[0] / float(immutable_df.shape[0])) * 100 + # print(f'Samples where extracted method doesn\'t parameters: ' + # f'{without_arguments_df.shape[0]}; {percent_with:.2f}') + # + # print('Analyzing methods without arguments') + # must_have_filtration(without_arguments_df.__deepcopy__(), without_arguments_df.__deepcopy__()) + # print('Analyzing methods with arguments') + # must_have_filtration(method_with_arguments.__deepcopy__(), df.__deepcopy__()) - print('Analyzing methods without arguments') - 
must_have_filtration(without_arguments_df.__deepcopy__(), without_arguments_df.__deepcopy__()) - print('Analyzing methods with arguments') - must_have_filtration(method_with_arguments.__deepcopy__(), df.__deepcopy__()) + must_have_filtration(immutable_df.__deepcopy__(), df.__deepcopy__()) def must_have_filtration(immutable_df, df): @@ -65,12 +69,12 @@ def count_filters_for_df(immutable_df, df_changed): print('Filters with invocation types classification') is_valid_ast = immutable_df[immutable_df['is_valid_ast'] == True] - filter_with_indices_exclusion( - df_changed, - immutable_df, - immutable_df['CROSSED_VAR_NAMES'] == True, - 'Samples where var names of extracted function is crossed with target method' - ) + # filter_with_indices_exclusion( + # df_changed, + # immutable_df, + # immutable_df['CROSSED_VAR_NAMES'] == True, + # 'Samples where var names of extracted function is crossed with target method' + # ) filter_with_indices_exclusion( df_changed, @@ -169,12 +173,12 @@ def count_filters_for_df(immutable_df, df_changed): 'Samples where invocation was in lambda' ) - filter_with_indices_exclusion( - df_changed, - immutable_df, - immutable_df['ALREADY_ASSIGNED_VALUE_IN_INVOCATION'] == True, - 'Samples where already assigned value was in invocation' - ) + # filter_with_indices_exclusion( + # df_changed, + # immutable_df, + # immutable_df['ALREADY_ASSIGNED_VALUE_IN_INVOCATION'] == True, + # 'Samples where already assigned value was in invocation' + # ) filter_with_indices_exclusion( df_changed, From c074c5737a5198728a6097618d6abb734fd03cc7 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Wed, 20 Jan 2021 17:29:50 +0300 Subject: [PATCH 47/52] Add test for crossed vars --- .../InlineExamples/InlineWithParams.java | 18 +++++ .../test_dataset_collection.py | 65 +++++++++++++++++-- veniq/dataset_collection/augmentation.py | 21 +++--- 3 files changed, 92 insertions(+), 12 deletions(-) diff --git a/test/dataset_collection/InlineExamples/InlineWithParams.java b/test/dataset_collection/InlineExamples/InlineWithParams.java index f41cef95..8c9b8499 100644 --- a/test/dataset_collection/InlineExamples/InlineWithParams.java +++ b/test/dataset_collection/InlineExamples/InlineWithParams.java @@ -16,6 +16,24 @@ private void extracted(int a, int b) { while (var2 == 2); } + public void target_var_not_crossed() { + int a = 0; + int b = 0; + extracted(a, b); + --a; + --b; + } + + public void target_var_crossed() { + int a = 0; + int b = 0; + extracted(a, b); + --a; + --b; + int i = 0; + --i; + } + private void extracted_not_intersected(int g, int c) { for (int i = 0; i < 10; i++) { int g = 5; int b = 6; } do diff --git a/test/dataset_collection/test_dataset_collection.py b/test/dataset_collection/test_dataset_collection.py index 9c54eeab..7444426d 100644 --- a/test/dataset_collection/test_dataset_collection.py +++ b/test/dataset_collection/test_dataset_collection.py @@ -6,7 +6,7 @@ determine_algorithm_insertion_type, method_body_lines, is_match_to_the_conditions, - find_lines_in_changed_file, are_functional_arguments_equal) + find_lines_in_changed_file, are_functional_arguments_equal, are_not_var_crossed) from veniq.ast_framework import AST, ASTNodeType from veniq.dataset_collection.types_identifier import ( InlineTypesAlgorithms, @@ -377,7 +377,7 @@ def test_inline_with_return_with_assigning(self): open(test_filepath, encoding='utf-8') as test_ex: self.assertMultiLineEqual(actual_file.read(), test_ex.read(), 'File are not matched') - def test_check(self): + def test_check_start_end_lines(self): old_filename = 
self.current_directory / 'InlineExamples/PainlessParser.java' new_filename = self.current_directory / 'InlineTestExamples/PainlessParser.java' ast = AST.build_from_javalang(build_ast(old_filename)) @@ -433,7 +433,7 @@ def testFunctionParamsNotEqualToArguments(self): res = are_functional_arguments_equal(extracted_invocation, extracted) self.assertEqual(res, False) - def testInlineWithParamsWithIntersectedVarsWithReturn(self): + def testFunctionParamsNotEqualToArgumentsWithReturn(self): filename = self.current_directory / 'InlineExamples/InlineWithParams.java' ast = AST.build_from_javalang(build_ast(filename)) class_decl = [ @@ -452,7 +452,7 @@ def testInlineWithParamsWithIntersectedVarsWithReturn(self): res = are_functional_arguments_equal(extracted_invocation, extracted) self.assertEqual(res, True) - def testInlineWithComplexParamsNotEqual(self): + def testFunctionParamsNotEqualWithComplexParams(self): filename = self.current_directory / 'InlineExamples/InlineWithParams.java' ast = AST.build_from_javalang(build_ast(filename)) class_decl = [ @@ -470,3 +470,60 @@ def testInlineWithComplexParamsNotEqual(self): res = are_functional_arguments_equal(extracted_invocation, extracted) self.assertEqual(res, False) + + def testCrossedVars(self): + filename = self.current_directory / 'InlineExamples/InlineWithParams.java' + ast = AST.build_from_javalang(build_ast(filename)) + class_decl = [ + x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) + if x.name == 'InlineWithParams'][0] + target = [ + x for x in class_decl.methods + if x.name == 'target_complex'][0] + extracted = [ + x for x in class_decl.methods + if x.name == 'extracted_return'][0] + extracted_invocation = [ + x for x in ast.get_subtree(target).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) + if x.member == 'extracted_return'][0] + + res = are_functional_arguments_equal(extracted_invocation, extracted) + self.assertEqual(res, False) + + def testNotCrossedVarsOnlyFuncArguments(self): + filename = self.current_directory / 'InlineExamples/InlineWithParams.java' + ast = AST.build_from_javalang(build_ast(filename)) + class_decl = [ + x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) + if x.name == 'InlineWithParams'][0] + target = [ + x for x in class_decl.methods + if x.name == 'target_var_not_crossed'][0] + extracted = [ + x for x in class_decl.methods + if x.name == 'extracted'][0] + extracted_invocation = [ + x for x in ast.get_subtree(target).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) + if x.member == 'extracted'][0] + + res = are_not_var_crossed(extracted_invocation, extracted, target, ast) + self.assertEqual(res, True) + + def testNotCrossedVarsWithManyVars(self): + filename = self.current_directory / 'InlineExamples/InlineWithParams.java' + ast = AST.build_from_javalang(build_ast(filename)) + class_decl = [ + x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) + if x.name == 'InlineWithParams'][0] + target = [ + x for x in class_decl.methods + if x.name == 'target_var_crossed'][0] + extracted = [ + x for x in class_decl.methods + if x.name == 'extracted'][0] + extracted_invocation = [ + x for x in ast.get_subtree(target).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) + if x.member == 'extracted'][0] + + res = are_not_var_crossed(extracted_invocation, extracted, target, ast) + self.assertEqual(res, False) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 5f65c37b..f05ec459 100644 --- a/veniq/dataset_collection/augmentation.py +++ 
b/veniq/dataset_collection/augmentation.py @@ -248,7 +248,7 @@ def is_match_to_the_conditions( if is_actual_parameter_simple: are_functional_arguments_eq = are_functional_arguments_equal(method_invoked, found_method_decl) if are_functional_arguments_eq: - if are_not_var_crossed(method_invoked, found_method_decl, target): + if are_not_var_crossed(method_invoked, found_method_decl, target, ast): are_var_crossed_inside_extracted = False ignored_cases = get_stats_for_pruned_cases( @@ -283,18 +283,23 @@ def is_match_to_the_conditions( def are_not_var_crossed( invocaton_node: ASTNode, method_declaration: ASTNode, - target: ASTNode) -> bool: + target: ASTNode, + ast: AST) -> bool: # m_decl_names = set([x.name for x in method_declaration.parameters]) m_inv_names = set([x.member for x in invocaton_node.arguments]) - var_names_in_extracted = set(get_variables_decl_in_node(method_declaration)) - var_names_in_target = set(get_variables_decl_in_node(target)) - + var_names_in_extracted = set(get_variables_decl_in_node(ast.get_subtree(method_declaration))) + var_names_in_target = set(get_variables_decl_in_node(ast.get_subtree(target))) + var_names_in_target = var_names_in_target.difference(m_inv_names) var_names_in_extracted = var_names_in_extracted.difference(m_inv_names) - intersected_names = var_names_in_target & var_names_in_extracted - if not intersected_names: + + if not var_names_in_target or not var_names_in_extracted: return True - return False + intersected_names = var_names_in_target.difference(var_names_in_extracted) + if not intersected_names: + return False + + return True def get_stats_for_pruned_cases( From 507ed01df500554abe4d6cf5657fdcf27a32954a Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Wed, 20 Jan 2021 18:11:57 +0300 Subject: [PATCH 48/52] Fix test due to functionality refactoring --- test/dataset_collection/Example_nested.java | 2 +- .../InlineTestExamples/ReturnTypeUseless.java | 11 ++- .../InlineWithParams.java | 0 .../test_dataset_collection.py | 79 +++++++++---------- 4 files changed, 46 insertions(+), 46 deletions(-) rename test/dataset_collection/{InlineExamples => }/InlineWithParams.java (100%) diff --git a/test/dataset_collection/Example_nested.java b/test/dataset_collection/Example_nested.java index 4bd9f42c..c8b0a268 100644 --- a/test/dataset_collection/Example_nested.java +++ b/test/dataset_collection/Example_nested.java @@ -3,7 +3,7 @@ private void doAction () { // proceed operation Restarter r; try { - if ((r = handleAction ()) != null) { + if (handleAction () != null) { presentActionNeedsRestart (r); } else { presentActionDone (); diff --git a/test/dataset_collection/InlineTestExamples/ReturnTypeUseless.java b/test/dataset_collection/InlineTestExamples/ReturnTypeUseless.java index 6764ff30..3dfe78c0 100644 --- a/test/dataset_collection/InlineTestExamples/ReturnTypeUseless.java +++ b/test/dataset_collection/InlineTestExamples/ReturnTypeUseless.java @@ -33,9 +33,16 @@ public int invocation() { public void method_decl() { int b = 0; System.out.println(b); - invocation(); + int i = 0; + System.out.println(0); + if (i == 0) System.out.println(0); + while(i < -1) { + System.out.println(0); + --i; + } + int c = 5; - +=c; + ++c; } } diff --git a/test/dataset_collection/InlineExamples/InlineWithParams.java b/test/dataset_collection/InlineWithParams.java similarity index 100% rename from test/dataset_collection/InlineExamples/InlineWithParams.java rename to test/dataset_collection/InlineWithParams.java diff --git a/test/dataset_collection/test_dataset_collection.py 
b/test/dataset_collection/test_dataset_collection.py index 7444426d..88bff3f8 100644 --- a/test/dataset_collection/test_dataset_collection.py +++ b/test/dataset_collection/test_dataset_collection.py @@ -6,7 +6,7 @@ determine_algorithm_insertion_type, method_body_lines, is_match_to_the_conditions, - find_lines_in_changed_file, are_functional_arguments_equal, are_not_var_crossed) + find_lines_in_changed_file, are_functional_arguments_equal, are_not_var_crossed, InvocationType) from veniq.ast_framework import AST, ASTNodeType from veniq.dataset_collection.types_identifier import ( InlineTypesAlgorithms, @@ -39,7 +39,7 @@ def test_determine_type_without_return_without_arguments(self): x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) if x.member == 'method_without_params'][0] d = {'method_without_params': [m_decl_original]} - type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d) + type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d, {}) self.assertEqual(type, InlineTypesAlgorithms.WITHOUT_RETURN_WITHOUT_ARGUMENTS) def test_determine_type_with_return_without_parameters(self): @@ -53,7 +53,7 @@ def test_determine_type_with_return_without_parameters(self): x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) if x.member == 'closeServer_return'][0] d = {'closeServer_return': [m_decl_original]} - type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d) + type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d, {}) self.assertEqual(type, InlineTypesAlgorithms.WITH_RETURN_WITHOUT_ARGUMENTS) def test_determine_type_with_parameters(self): @@ -67,8 +67,9 @@ def test_determine_type_with_parameters(self): x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) if x.member == 'method_with_parameters'][0] d = {'method_with_parameters': [m_decl_original]} - type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d) - self.assertEqual(type, InlineTypesAlgorithms.DO_NOTHING) + type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d, {}) + # We can insert such cases now + self.assertNotEqual(type, InlineTypesAlgorithms.DO_NOTHING) def test_determine_type_with_overridden_functions(self): m_decl = [ @@ -81,7 +82,7 @@ def test_determine_type_with_overridden_functions(self): x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) if x.member == 'overridden_func'][0] d = {'overridden_func': m_decl_original} - type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d) + type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d, {}) self.assertEqual(type, InlineTypesAlgorithms.DO_NOTHING) def test_determine_type_with_invalid_functions(self): @@ -98,7 +99,7 @@ def test_determine_type_with_invalid_functions(self): x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) if x.member == 'overridden_func'][0] d = {'SOME_RANDOM_NAME': m_decl_original} - type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d) + type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d, {}) self.assertEqual(type, InlineTypesAlgorithms.DO_NOTHING) def test_determine_type_without_variables_declaration(self): @@ -112,7 +113,7 @@ def test_determine_type_without_variables_declaration(self): x for x in 
self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) if x.member == 'method_without_params'][0] d = {'method_without_params': [m_decl_original]} - type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d) + type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d, {}) self.assertEqual(type, InlineTypesAlgorithms.WITHOUT_RETURN_WITHOUT_ARGUMENTS) # We consider all cases (with or without return) @@ -128,7 +129,7 @@ def test_determine_type_without_variables_declaration(self): x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) if x.member == 'closeServer_return'][0] d = {'closeServer_return': [m_decl_original]} - type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d) + type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d, {}) self.assertEqual(type, InlineTypesAlgorithms.WITH_RETURN_WITHOUT_ARGUMENTS) @@ -143,9 +144,10 @@ def test_is_invocation_in_if_with_single_statement_valid(self): x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) if x.member == 'intersected_var'][0] - self.assertFalse(is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original)) + list_of_exceptions = is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original, m_decl) + self.assertFalse(InvocationType.SINGLE_STATEMENT_IN_IF in list_of_exceptions, True) - def test_is_return_type_not_assigning_value_valid(self): + def test_is_return_type_assigning_value(self): m_decl = [ x for x in self.example_ast.get_proxy_nodes(ASTNodeType.METHOD_DECLARATION) if x.name == 'method_decl'][0] @@ -155,7 +157,9 @@ def test_is_return_type_not_assigning_value_valid(self): m_decl_original = [ x for x in self.example_ast.get_proxy_nodes(ASTNodeType.METHOD_DECLARATION) if x.name == 'invocation'][0] - self.assertFalse(is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original)) + + list_of_exceptions = is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original, m_decl) + self.assertTrue(not list_of_exceptions, True) def test_determine_type_with_non_intersected_variables_declaration(self): m_decl = [ @@ -168,7 +172,7 @@ def test_determine_type_with_non_intersected_variables_declaration(self): x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) if x.member == 'intersected_var'][0] d = {'intersected_var': [m_decl_original]} - type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d) + type = determine_algorithm_insertion_type(self.example_ast, m_decl, m_inv, d, {}) self.assertTrue(type in [ InlineTypesAlgorithms.WITH_RETURN_WITHOUT_ARGUMENTS, InlineTypesAlgorithms.WITHOUT_RETURN_WITHOUT_ARGUMENTS]) @@ -180,7 +184,7 @@ def _get_lines(self, file, function_name): if x.name == function_name][0] return method_body_lines(m_decl, file) - @unittest.skip("This functionality is not implemented") + # @unittest.skip("This functionality is not implemented") def test_inline_with_return_type_but_not_returning(self): """ Test check whether we can inline code function with return type, but actually @@ -231,23 +235,12 @@ def test_is_valid_function_with_several_returns(self): m_inv = [ x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) if x.member == 'severalReturns'][0] - is_matched = is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original) - self.assertEqual(is_matched, False) - - def 
test_is_valid_function_with_one_return(self):
-        m_decl = [
-            x for x in self.example_ast.get_proxy_nodes(ASTNodeType.METHOD_DECLARATION)
-            if x.name == 'runDelete'][0]
-        m_decl_original = [
-            x for x in self.example_ast.get_proxy_nodes(ASTNodeType.METHOD_DECLARATION)
-            if x.name == 'delete'][0]
-        m_inv = [
-            x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION)
-            if x.member == 'delete'][0]
-        is_matched = is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original)
-        self.assertEqual(is_matched, True)
+        exceptions_list = is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original, m_decl_original)
+        self.assertEqual(InvocationType.SEVERAL_RETURNS in exceptions_list, False)

     def test_invocation_inside_if_not_process(self):
+        # Test checks a complex invocation inside an if condition,
+        # but since its result is compared, the parent node is a binary operation
         filepath = self.current_directory / "Example_nested.java"
         ast = AST.build_from_javalang(build_ast(filepath))
         m_decl = [
@@ -259,8 +252,8 @@ def test_invocation_inside_if_not_process(self):
         m_inv = [
             x for x in ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION)
             if x.member == 'handleAction'][0]
-        is_matched = is_match_to_the_conditions(ast, m_inv, m_decl_original)
-        self.assertEqual(is_matched, False)
+        is_matched_list = is_match_to_the_conditions(ast, m_inv, m_decl_original, m_decl)
+        self.assertEqual(InvocationType.INSIDE_BINARY_OPERATION in is_matched_list, False)

     def test_is_valid_function_with_return_in_the_middle(self):
         m_decl_original = [
@@ -269,8 +262,8 @@
             x for x in self.example_ast.get_proxy_nodes(ASTNodeType.METHOD_INVOCATION)
             if x.member == 'severalReturns'][0]
-        is_matched = is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original)
-        self.assertEqual(is_matched, False)
+        exceptions_list = is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original, m_decl_original)
+        self.assertEqual(InvocationType.SEVERAL_RETURNS in exceptions_list, False)

     def test_inline_invocation_inside_var_declaration(self):
         filepath = self.current_directory / 'InlineExamples' / 'EntityResolver_cut.java'
@@ -284,7 +277,7 @@
             open(test_filepath, encoding='utf-8') as test_ex:
             self.assertEqual(actual_file.read(), test_ex.read())

-    def test_inline_inside_invokation_several_lines(self):
+    def test_inline_inside_invocation_several_lines(self):
         filepath = self.current_directory / 'InlineExamples' / 'AbstractMarshaller_cut.java'
         test_filepath = self.current_directory / 'InlineTestExamples' / 'AbstractMarshaller_cut.java'
         algorithm_type = InlineTypesAlgorithms.WITH_RETURN_WITHOUT_ARGUMENTS
@@ -392,11 +385,11 @@ def test_check_start_end_lines(self):
             original_func=inlined_function_declaration,
             class_name='PainlessParser')

-        self.assertEqual(result['invocation_method_start_line'], 1022)
-        self.assertEqual(result['invocation_method_end_line'], 1083)
+        self.assertEqual(result['extract_method_start_line'], 1022)
+        self.assertEqual(result['extract_method_end_line'], 1083)

     def testFunctionParamsEqualToArguments(self):
-        filename = self.current_directory / 'InlineExamples/InlineWithParams.java'
+        filename = self.current_directory / 'InlineWithParams.java'
         ast = AST.build_from_javalang(build_ast(filename))
         class_decl = [
             x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION)
             if x.name == 'InlineWithParams'][0]
         target = [
             x for x in class_decl.methods
             if x.name == 'target'][0]
         extracted = [
             x for x in class_decl.methods
             if x.name == 'extracted'][0]
         extracted_invocation = [
             x for x in ast.get_subtree(target).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION)
             if x.member == 'extracted'][0]

         res = are_functional_arguments_equal(extracted_invocation, extracted)
self.assertEqual(res, True) def testFunctionParamsNotEqualToArguments(self): - filename = self.current_directory / 'InlineExamples/InlineWithParams.java' + filename = self.current_directory / 'InlineWithParams.java' ast = AST.build_from_javalang(build_ast(filename)) class_decl = [ x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) @@ -434,7 +427,7 @@ def testFunctionParamsNotEqualToArguments(self): self.assertEqual(res, False) def testFunctionParamsNotEqualToArgumentsWithReturn(self): - filename = self.current_directory / 'InlineExamples/InlineWithParams.java' + filename = self.current_directory / 'InlineWithParams.java' ast = AST.build_from_javalang(build_ast(filename)) class_decl = [ x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) @@ -453,7 +446,7 @@ def testFunctionParamsNotEqualToArgumentsWithReturn(self): self.assertEqual(res, True) def testFunctionParamsNotEqualWithComplexParams(self): - filename = self.current_directory / 'InlineExamples/InlineWithParams.java' + filename = self.current_directory / 'InlineWithParams.java' ast = AST.build_from_javalang(build_ast(filename)) class_decl = [ x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) @@ -472,7 +465,7 @@ def testFunctionParamsNotEqualWithComplexParams(self): self.assertEqual(res, False) def testCrossedVars(self): - filename = self.current_directory / 'InlineExamples/InlineWithParams.java' + filename = self.current_directory / 'InlineWithParams.java' ast = AST.build_from_javalang(build_ast(filename)) class_decl = [ x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) @@ -491,7 +484,7 @@ def testCrossedVars(self): self.assertEqual(res, False) def testNotCrossedVarsOnlyFuncArguments(self): - filename = self.current_directory / 'InlineExamples/InlineWithParams.java' + filename = self.current_directory / 'InlineWithParams.java' ast = AST.build_from_javalang(build_ast(filename)) class_decl = [ x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) @@ -510,7 +503,7 @@ def testNotCrossedVarsOnlyFuncArguments(self): self.assertEqual(res, True) def testNotCrossedVarsWithManyVars(self): - filename = self.current_directory / 'InlineExamples/InlineWithParams.java' + filename = self.current_directory / 'InlineWithParams.java' ast = AST.build_from_javalang(build_ast(filename)) class_decl = [ x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) From ebe51e077a39a71c39becc5f668b82976461d38c Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Wed, 20 Jan 2021 18:43:30 +0300 Subject: [PATCH 49/52] Fix constant --- veniq/dataset_collection/augmentation.py | 10 +++---- veniq/dataset_collection/full_stats.py | 37 ++++++++++++++---------- 2 files changed, 27 insertions(+), 20 deletions(-) diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index f05ec459..27209999 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -140,7 +140,7 @@ class InvocationType(Enum): CROSSED_VAR_NAMES_INSIDE_FUNCTION = 20 CAST_IN_ACTUAL_PARAMS = 21 ABSTRACT_METHOD = 22 - NOT_CROSSED_FUNC_PARAMS = 23 + NOT_FUNC_PARAMS_EQUAL = 23 #METHOD_WITH_ARGUMENTS_VAR_CROSSED = 999 @@ -312,7 +312,7 @@ def get_stats_for_pruned_cases( is_not_inside_if, is_not_inside_while, is_not_lambda, is_not_method_inv_single_statement_in_if, is_not_parent_member_ref, is_not_several_returns, is_not_ternary, is_not_actual_param_cast, - is_not_is_extract_method_abstract, are_crossed_func_params, method_invoked) -> List[str]: + 
is_not_is_extract_method_abstract, are_functional_arguments_eq, method_invoked) -> List[str]:

     invocation_types_to_ignore: List[str] = []

     if not is_not_is_extract_method_abstract:
@@ -356,10 +356,10 @@ def get_stats_for_pruned_cases(
     if not is_not_at_the_same_line_as_prohibited_stats:
         invocation_types_to_ignore.append(InvocationType.IS_NOT_AT_THE_SAME_LINE_AS_PROHIBITED_STATS.name)

-    if not are_crossed_func_params:
-        invocation_types_to_ignore.append(InvocationType.NOT_CROSSED_FUNC_PARAMS)
+    if not are_functional_arguments_eq:
+        invocation_types_to_ignore.append(InvocationType.NOT_FUNC_PARAMS_EQUAL.name)
     if are_var_crossed_inside_extracted:
-        invocation_types_to_ignore.append(InvocationType.CROSSED_VAR_NAMES_INSIDE_FUNCTION)
+        invocation_types_to_ignore.append(InvocationType.CROSSED_VAR_NAMES_INSIDE_FUNCTION.name)

     return invocation_types_to_ignore

diff --git a/veniq/dataset_collection/full_stats.py b/veniq/dataset_collection/full_stats.py
index a3628704..9888c720 100644
--- a/veniq/dataset_collection/full_stats.py
+++ b/veniq/dataset_collection/full_stats.py
@@ -9,9 +9,9 @@ def remove_indices(df_to_filter: pd.DataFrame, src_df):


 def make_filtration():
-    # df = pd.read_csv(r'D:\temp\dataset_colelction_refactoring\01_15\out.csv')
+    df = pd.read_csv(r'D:\git\veniq\veniq\dataset_collection\new_dataset\full_dataset\out.csv')

-    df = pd.read_csv(r'D:\temp\dataset_colelction_refactoring\01_15\01_19\out.csv')
+    # df = pd.read_csv(r'D:\temp\dataset_colelction_refactoring\01_15\01_19\out.csv')
     immutable_df = df.copy()
     print(f'Total lines: {df.shape[0]}')
     duplicateRowsDF = immutable_df[immutable_df.duplicated()]
@@ -39,7 +39,7 @@ def make_filtration():

 def must_have_filtration(immutable_df, df):
     print('Must-have filters')
-    filter_with_indices_exclusion(df, immutable_df, immutable_df['ncss_target'] < 3, 'ncss_target < 3')
+    filter_with_indices_exclusion(df, immutable_df, immutable_df['ncss_extracted'] < 3, 'ncss_extracted < 3')
     # exclude abstract extract methods
     filter_with_indices_exclusion(df, immutable_df, immutable_df['ABSTRACT_METHOD'] == True, 'abstract methods')
     # REMOVE METHOD CHAINING SINCE IT IS not correct
@@ -69,12 +69,19 @@ def count_filters_for_df(immutable_df, df_changed):
     print('Filters with invocation types classification')
     is_valid_ast = immutable_df[immutable_df['is_valid_ast'] == True]

-    # filter_with_indices_exclusion(
-    #     df_changed,
-    #     immutable_df,
-    #     immutable_df['CROSSED_VAR_NAMES'] == True,
-    #     'Samples where var names of extracted function is crossed with target method'
-    # )
+    filter_with_indices_exclusion(
+        df_changed,
+        immutable_df,
+        immutable_df['CROSSED_VAR_NAMES_INSIDE_FUNCTION'] == True,
+        'Samples where var names of the extracted function are crossed with the target method, except for function arguments'
+    )
+
+    filter_with_indices_exclusion(
+        df_changed,
+        immutable_df,
+        immutable_df['NOT_CROSSED_FUNC_PARAMS'] == True,
+        'Samples where the parameters of the invoked function do not match the actual arguments'
+    )

     filter_with_indices_exclusion(
         df_changed,
@@ -104,12 +111,12 @@ def count_filters_for_df(immutable_df, df_changed):
         'Samples where invocation was inside while condition'
     )

-    filter_with_indices_exclusion(
-        df_changed,
-        immutable_df,
-        immutable_df['INSIDE_FOR'] == True,
-        'Samples where invocation was inside for condition'
-    )
+    # filter_with_indices_exclusion(
+    #     df_changed,
+    #     immutable_df,
+    #     immutable_df['INSIDE_FOR'] == True,
+    #     'Samples where invocation was inside for condition'
+    # )

     filter_with_indices_exclusion(
         df_changed,

From
c79dce77608f5b8c7bc0d39d8a48d424d7237599 Mon Sep 17 00:00:00 2001 From: Evgeny Maslov Date: Fri, 22 Jan 2021 11:08:41 +0300 Subject: [PATCH 50/52] Fix stats --- .../test_dataset_collection.py | 20 +++++++++++++++++++ veniq/dataset_collection/augmentation.py | 20 ++++++++++++------- 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/test/dataset_collection/test_dataset_collection.py b/test/dataset_collection/test_dataset_collection.py index 88bff3f8..d3c15607 100644 --- a/test/dataset_collection/test_dataset_collection.py +++ b/test/dataset_collection/test_dataset_collection.py @@ -520,3 +520,23 @@ def testNotCrossedVarsWithManyVars(self): res = are_not_var_crossed(extracted_invocation, extracted, target, ast) self.assertEqual(res, False) + + def testDoInlineWithThrow(self): + filename = self.current_directory / 'ThrowInline.java' + ast = AST.build_from_javalang(build_ast(filename)) + class_decl = [ + x for x in ast.get_proxy_nodes(ASTNodeType.CLASS_DECLARATION) + if x.name == 'ThrowInline'][0] + target = [ + x for x in class_decl.methods + if x.name == 'target'][0] + extracted = [ + x for x in class_decl.methods + if x.name == 'extracted'][0] + extracted_invocation = [ + x for x in ast.get_subtree(target).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION) + if x.member == 'extracted'][0] + + res = is_match_to_the_conditions(ast, extracted_invocation, extracted, target) + self.assertEqual(InvocationType.THROW_IN_EXTRACTED.name in res, True) + diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py index 27209999..4b0313a5 100644 --- a/veniq/dataset_collection/augmentation.py +++ b/veniq/dataset_collection/augmentation.py @@ -141,6 +141,7 @@ class InvocationType(Enum): CAST_IN_ACTUAL_PARAMS = 21 ABSTRACT_METHOD = 22 NOT_FUNC_PARAMS_EQUAL = 23 + THROW_IN_EXTRACTED = 24 #METHOD_WITH_ARGUMENTS_VAR_CROSSED = 999 @@ -205,18 +206,21 @@ def is_match_to_the_conditions( if maybe_if.then_statement.expression.node_type == ASTNodeType.METHOD_INVOCATION: is_not_method_inv_single_statement_in_if = False - is_not_assign_value_with_return_type = True + ast_subtree_method_decl = ast.get_subtree(found_method_decl) + # is_not_assign_value_with_return_type = True is_not_several_returns = True if hasattr(found_method_decl, 'return_type'): if found_method_decl.return_type: - if parent.node_type == ASTNodeType.VARIABLE_DECLARATOR: - is_not_assign_value_with_return_type = False + # if parent.node_type == ASTNodeType.VARIABLE_DECLARATOR: + # is_not_assign_value_with_return_type = False - ast_subtree = ast.get_subtree(found_method_decl) - stats = [x for x in ast_subtree.get_proxy_nodes(ASTNodeType.RETURN_STATEMENT)] + stats = [x for x in ast_subtree_method_decl.get_proxy_nodes(ASTNodeType.RETURN_STATEMENT)] if len(stats) > 1: is_not_several_returns = False + has_not_throw = len([ + x for x in found_method_decl.body + if x.node_type == ASTNodeType.THROW_STATEMENT]) < 1 is_not_parent_member_ref = not (method_invoked.parent.node_type == ASTNodeType.MEMBER_REFERENCE) is_not_chain_before = not (parent.node_type == ASTNodeType.METHOD_INVOCATION) and no_children chains_after = [x for x in method_invoked.children if x.node_type == ASTNodeType.METHOD_INVOCATION] @@ -254,7 +258,7 @@ def is_match_to_the_conditions( ignored_cases = get_stats_for_pruned_cases( is_actual_parameter_simple, is_not_array_creator, - is_not_assign_value_with_return_type, + has_not_throw, is_not_at_the_same_line_as_prohibited_stats, is_not_binary_operation, is_not_cast_of_return_type, @@ -303,7 +307,7 @@ 
def are_not_var_crossed(


 def get_stats_for_pruned_cases(
-    is_actual_parameter_simple, is_not_array_creator, is_not_assign_value_with_return_type,
+    is_actual_parameter_simple, is_not_array_creator, has_not_throw,
     is_not_at_the_same_line_as_prohibited_stats, is_not_binary_operation,
     is_not_cast_of_return_type, is_not_chain_after, is_not_chain_before,
     is_not_class_creator, is_not_enhanced_for_control,
@@ -360,6 +364,8 @@ def get_stats_for_pruned_cases(
         invocation_types_to_ignore.append(InvocationType.NOT_FUNC_PARAMS_EQUAL.name)
     if are_var_crossed_inside_extracted:
         invocation_types_to_ignore.append(InvocationType.CROSSED_VAR_NAMES_INSIDE_FUNCTION.name)
+    if not has_not_throw:
+        invocation_types_to_ignore.append(InvocationType.THROW_IN_EXTRACTED.name)

     return invocation_types_to_ignore

From 07765f4e262104400804786591d3722f53f93e8d Mon Sep 17 00:00:00 2001
From: Evgeny Maslov 
Date: Fri, 22 Jan 2021 11:09:00 +0300
Subject: [PATCH 51/52] Add case when throw in extracted

---
 veniq/dataset_collection/augmentation.py |  2 +-
 veniq/dataset_collection/full_stats.py   | 13 ++++++++---
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py
index 4b0313a5..c303a32b 100644
--- a/veniq/dataset_collection/augmentation.py
+++ b/veniq/dataset_collection/augmentation.py
@@ -365,7 +365,7 @@ def get_stats_for_pruned_cases(
     if are_var_crossed_inside_extracted:
         invocation_types_to_ignore.append(InvocationType.CROSSED_VAR_NAMES_INSIDE_FUNCTION.name)
     if not has_not_throw:
-        invocation_types_to_ignore.append(InvocationType.THROW_IN_EXTRACTED.name)
+        invocation_types_to_ignore.append(InvocationType.THROW_IN_EXTRACTED.name)

     return invocation_types_to_ignore

diff --git a/veniq/dataset_collection/full_stats.py b/veniq/dataset_collection/full_stats.py
index 9888c720..2c82b023 100644
--- a/veniq/dataset_collection/full_stats.py
+++ b/veniq/dataset_collection/full_stats.py
@@ -9,9 +9,9 @@ def remove_indices(df_to_filter: pd.DataFrame, src_df):


 def make_filtration():
-    df = pd.read_csv(r'D:\git\veniq\veniq\dataset_collection\new_dataset\full_dataset\out.csv')
+    # df = pd.read_csv(r'D:\git\veniq\veniq\dataset_collection\new_dataset\full_dataset\out.csv')

-    # df = pd.read_csv(r'D:\temp\dataset_colelction_refactoring\01_15\01_19\out.csv')
+    df = pd.read_csv(r'D:\temp\dataset_colelction_refactoring\1_21\out.csv')
     immutable_df = df.copy()
     print(f'Total lines: {df.shape[0]}')
     duplicateRowsDF = immutable_df[immutable_df.duplicated()]
@@ -79,7 +79,7 @@ def count_filters_for_df(immutable_df, df_changed):
     filter_with_indices_exclusion(
         df_changed,
         immutable_df,
-        immutable_df['NOT_CROSSED_FUNC_PARAMS'] == True,
+        immutable_df['NOT_FUNC_PARAMS_EQUAL'] == True,
         'Samples where the parameters of the invoked function do not match the actual arguments'
     )
@@ -194,6 +194,13 @@ def count_filters_for_df(immutable_df, df_changed):
         'Samples where there are several returns in extracted method'
     )

+    filter_with_indices_exclusion(
+        df_changed,
+        immutable_df,
+        immutable_df['THROW_IN_EXTRACTED'] == True,
+        'Samples where there is a throw statement in the extracted method'
+    )
+
     filter_with_indices_exclusion(
         df_changed,
         immutable_df,

From 1a9ef96dc731efc6dc7397f97f4c7b6bc92d472f Mon Sep 17 00:00:00 2001
From: Evgeny Maslov 
Date: Fri, 22 Jan 2021 12:35:34 +0300
Subject: [PATCH 52/52] Ignore cases when return is not inside main body

---
 test/dataset_collection/Example.java       | 26 +++++++
 test/dataset_collection/ThrowInline.java   | 71 +++++++++++++++++++
.../test_dataset_collection.py | 26 ++++++- veniq/dataset_collection/augmentation.py | 13 +++- 4 files changed, 132 insertions(+), 4 deletions(-) create mode 100644 test/dataset_collection/ThrowInline.java diff --git a/test/dataset_collection/Example.java b/test/dataset_collection/Example.java index 1bb88f4f..d9c6276d 100644 --- a/test/dataset_collection/Example.java +++ b/test/dataset_collection/Example.java @@ -171,4 +171,30 @@ public void runDelete() { delete(); } + public int severalReturnsWithoutMainReturn() { + int i = 0, j = 0; + if (i < 0) { + return 0; + } + } + + private Object returnInsideTry() { + try { + Object event = events.poll(10, TimeUnit.SECONDS); + if (event == null) { + throw new AssertionError("Timed out waiting for event."); + } + return event; + } catch (InterruptedException e) { + throw new AssertionError(e); + } + } + + public void runSeveralReturnsWithoutMainReturn() { + int a = severalReturnsWithoutMainReturn(); + } + public void runSeveralReturnsInTry(String payload) { + Object actual = returnInsideTry(); + assertThat(actual).isEqualTo(new Message(payload)); + } } diff --git a/test/dataset_collection/ThrowInline.java b/test/dataset_collection/ThrowInline.java new file mode 100644 index 00000000..ffefea8f --- /dev/null +++ b/test/dataset_collection/ThrowInline.java @@ -0,0 +1,71 @@ +public class ThrowInline +{ + private ListenableFuture extracted() + { + Iterator> iterator = dataRequests.iterator(); + while (iterator.hasNext()) { + ListenableFuture future = iterator.next(); + if (future.isDone()) { + iterator.remove(); + return future; + } + } + throw new IllegalStateException("No completed splits in the queue"); + } + + public Page target() + { + if (finished) { + return null; + } + if (!loadAllSplits()) { + return null; + } + if (dataSignalFuture == null) { + checkState(contexts.isEmpty() && dataRequests.isEmpty(), "some splits are already started"); + if (splits.isEmpty()) { + finished = true; + return null; + } + for (int i = 0; i < min(lookupRequestsConcurrency, splits.size()); i++) { + startDataFetchForNextSplit(); + } + updateSignalAndStatusFutures(); + } + if (!dataSignalFuture.isDone()) { + return null; + } + ListenableFuture resultFuture = extracted(); + RunningSplitContext resultContext = contexts.remove(resultFuture); + checkState(resultContext != null, "no associated context for the request"); + PrestoThriftPageResult pageResult = getFutureValue(resultFuture); + Page page = pageResult.toPage(outputColumnTypes); + if (page != null) { + long pageSize = page.getSizeInBytes(); + completedBytes += pageSize; + completedPositions += page.getPositionCount(); + stats.addIndexPageSize(pageSize); + } + else { + stats.addIndexPageSize(0); + } + if (pageResult.getNextToken() != null) { + sendDataRequest(resultContext, pageResult.getNextToken()); + updateSignalAndStatusFutures(); + return page; + } + if (splitIndex < splits.size()) { + startDataFetchForNextSplit(); + updateSignalAndStatusFutures(); + } + else if (!dataRequests.isEmpty()) { + updateSignalAndStatusFutures(); + } + else { + dataSignalFuture = null; + statusFuture = null; + finished = true; + } + return page; + } +} \ No newline at end of file diff --git a/test/dataset_collection/test_dataset_collection.py b/test/dataset_collection/test_dataset_collection.py index d3c15607..f030d186 100644 --- a/test/dataset_collection/test_dataset_collection.py +++ b/test/dataset_collection/test_dataset_collection.py @@ -236,7 +236,31 @@ def test_is_valid_function_with_several_returns(self): x for x in 
self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION)
             if x.member == 'severalReturns'][0]
         exceptions_list = is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original, m_decl_original)
-        self.assertEqual(InvocationType.SEVERAL_RETURNS in exceptions_list, False)
+        self.assertEqual(InvocationType.SEVERAL_RETURNS.name in exceptions_list, True)
+
+        m_decl = [
+            x for x in self.example_ast.get_proxy_nodes(ASTNodeType.METHOD_DECLARATION)
+            if x.name == 'runSeveralReturnsWithoutMainReturn'][0]
+        m_decl_original = [
+            x for x in self.example_ast.get_proxy_nodes(ASTNodeType.METHOD_DECLARATION)
+            if x.name == 'severalReturnsWithoutMainReturn'][0]
+        m_inv = [
+            x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION)
+            if x.member == 'severalReturnsWithoutMainReturn'][0]
+        exceptions_list = is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original, m_decl_original)
+        self.assertEqual(InvocationType.SEVERAL_RETURNS.name in exceptions_list, True)
+
+        m_decl = [
+            x for x in self.example_ast.get_proxy_nodes(ASTNodeType.METHOD_DECLARATION)
+            if x.name == 'runSeveralReturnsInTry'][0]
+        m_decl_original = [
+            x for x in self.example_ast.get_proxy_nodes(ASTNodeType.METHOD_DECLARATION)
+            if x.name == 'returnInsideTry'][0]
+        m_inv = [
+            x for x in self.example_ast.get_subtree(m_decl).get_proxy_nodes(ASTNodeType.METHOD_INVOCATION)
+            if x.member == 'returnInsideTry'][0]
+        exceptions_list = is_match_to_the_conditions(self.example_ast, m_inv, m_decl_original, m_decl_original)
+        self.assertEqual(InvocationType.SEVERAL_RETURNS.name in exceptions_list, True)

     def test_invocation_inside_if_not_process(self):
         # Test checks a complex invocation inside an if condition,
diff --git a/veniq/dataset_collection/augmentation.py b/veniq/dataset_collection/augmentation.py
index c303a32b..b831df50 100644
--- a/veniq/dataset_collection/augmentation.py
+++ b/veniq/dataset_collection/augmentation.py
@@ -142,6 +142,7 @@ class InvocationType(Enum):
     ABSTRACT_METHOD = 22
     NOT_FUNC_PARAMS_EQUAL = 23
     THROW_IN_EXTRACTED = 24
+    RETURN_IN_ANOTHER_SCOPE = 25

     #METHOD_WITH_ARGUMENTS_VAR_CROSSED = 999

@@ -211,11 +212,16 @@ def is_match_to_the_conditions(
     is_not_several_returns = True
     if hasattr(found_method_decl, 'return_type'):
         if found_method_decl.return_type:
-            # if parent.node_type == ASTNodeType.VARIABLE_DECLARATOR:
-            #     is_not_assign_value_with_return_type = False
+            return_stats = len([
+                x for x in found_method_decl.body
+                if x.node_type == ASTNodeType.RETURN_STATEMENT]
+            )
+            # If a return statement is not a direct child of the method body,
+            # it is nested deeper in the AST (another scope), so flag such cases
             stats = [x for x in ast_subtree_method_decl.get_proxy_nodes(ASTNodeType.RETURN_STATEMENT)]
-            if len(stats) > 1:
+            total_return_statements = len(stats) - return_stats
+            if total_return_statements > 0:
                 is_not_several_returns = False

     has_not_throw = len([
@@ -715,6 +721,7 @@ def make_insertion(
         # default init
         for case_name in InvocationType.list_types():
             log_of_inline[case_name] = False
+        log_of_inline['ONE_LINE_FUNCTION'] = False

         if not ignored_cases:
             insert_code_with_new_file_creation(
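
The argument-matching rule that the patches above converge on (are_functional_arguments_equal) reduces to a plain set check: an invocation survives pruning only if every argument name it passes is literally one of the extracted method's declared parameter names. A minimal standalone sketch of that rule, with plain name lists standing in for the veniq AST nodes (an illustration under those simplifying assumptions, not the library's API; args_match_params is a hypothetical helper):

    def args_match_params(invocation_args, declared_params):
        # True when every invocation argument name is also a declared parameter name
        return not set(invocation_args).difference(declared_params)

    # extracted(a, b) against 'void extracted(int a, int b)' -> matched
    assert args_match_params(['a', 'b'], ['a', 'b'])
    # extracted(a, e) against 'void extracted(int a, int b)' -> not matched
    assert not args_match_params(['a', 'e'], ['a', 'b'])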