From fd9bf9050f3fe282e1d2231a013150432e9cf339 Mon Sep 17 00:00:00 2001 From: Paula Camargo Date: Tue, 9 Apr 2024 12:49:18 -0700 Subject: [PATCH 01/11] Merge --- pythonFiles/create_venv.py | 250 +++ pythonFiles/install_debugpy.py | 66 + pythonFiles/installed_check.py | 129 ++ pythonFiles/normalizeSelection.py | 305 ++++ pythonFiles/pyproject.toml | 36 + pythonFiles/testing_tools/adapter/__init__.py | 2 + pythonFiles/testing_tools/adapter/__main__.py | 106 ++ pythonFiles/testing_tools/adapter/errors.py | 16 + pythonFiles/testing_tools/adapter/info.py | 120 ++ .../testing_tools/adapter/pytest/_cli.py | 17 + .../adapter/pytest/_discovery.py | 109 ++ .../adapter/pytest/_pytest_item.py | 630 +++++++ pythonFiles/testing_tools/adapter/util.py | 287 +++ .../.data/error_parametrize_discovery.py | 10 + .../.data/error_pytest_import.txt | 6 + .../.data/error_raise_exception.py | 14 + .../pytestadapter/.data/parametrize_tests.py | 22 + .../.data/test_multi_class_nest.py | 19 + .../pytestadapter/.data/text_docstring.txt | 4 + .../expected_execution_test_output.py | 686 ++++++++ .../tests/pytestadapter/test_execution.py | 278 +++ pythonFiles/tests/testing_tools/__init__.py | 2 + .../tests/testing_tools/adapter/__init__.py | 2 + .../testing_tools/adapter/pytest/__init__.py | 2 + .../adapter/pytest/test_discovery.py | 1553 +++++++++++++++++ .../testing_tools/adapter/test___main__.py | 199 +++ .../testing_tools/adapter/test_discovery.py | 675 +++++++ .../testing_tools/adapter/test_functional.py | 1535 ++++++++++++++++ .../unittestadapter/.data/discovery_empty.py | 15 + .../.data/discovery_error/file_one.py | 20 + .../.data/discovery_error/file_two.py | 18 + .../unittestadapter/.data/discovery_simple.py | 18 + .../.data/utils_decorated_tree.py | 29 + .../.data/utils_nested_cases/file_one.py | 17 + .../.data/utils_simple_cases.py | 17 + .../.data/utils_simple_tree.py | 17 + .../tests/unittestadapter/test_discovery.py | 233 +++ .../tests/unittestadapter/test_execution.py | 275 +++ src/client/api.ts | 5 +- .../extension/adapter/remoteLaunchers.ts | 6 +- src/client/debugger/pythonDebugger.ts | 30 + src/client/jupyter/jupyterIntegration.ts | 4 +- .../adapter/remoteLaunchers.unit.test.ts | 16 +- src/test/pythonFiles/dummy.py | 1 + 44 files changed, 7784 insertions(+), 17 deletions(-) create mode 100644 pythonFiles/create_venv.py create mode 100644 pythonFiles/install_debugpy.py create mode 100644 pythonFiles/installed_check.py create mode 100644 pythonFiles/normalizeSelection.py create mode 100644 pythonFiles/pyproject.toml create mode 100644 pythonFiles/testing_tools/adapter/__init__.py create mode 100644 pythonFiles/testing_tools/adapter/__main__.py create mode 100644 pythonFiles/testing_tools/adapter/errors.py create mode 100644 pythonFiles/testing_tools/adapter/info.py create mode 100644 pythonFiles/testing_tools/adapter/pytest/_cli.py create mode 100644 pythonFiles/testing_tools/adapter/pytest/_discovery.py create mode 100644 pythonFiles/testing_tools/adapter/pytest/_pytest_item.py create mode 100644 pythonFiles/testing_tools/adapter/util.py create mode 100644 pythonFiles/tests/pytestadapter/.data/error_parametrize_discovery.py create mode 100644 pythonFiles/tests/pytestadapter/.data/error_pytest_import.txt create mode 100644 pythonFiles/tests/pytestadapter/.data/error_raise_exception.py create mode 100644 pythonFiles/tests/pytestadapter/.data/parametrize_tests.py create mode 100644 pythonFiles/tests/pytestadapter/.data/test_multi_class_nest.py create mode 100644 
pythonFiles/tests/pytestadapter/.data/text_docstring.txt create mode 100644 pythonFiles/tests/pytestadapter/expected_execution_test_output.py create mode 100644 pythonFiles/tests/pytestadapter/test_execution.py create mode 100644 pythonFiles/tests/testing_tools/__init__.py create mode 100644 pythonFiles/tests/testing_tools/adapter/__init__.py create mode 100644 pythonFiles/tests/testing_tools/adapter/pytest/__init__.py create mode 100644 pythonFiles/tests/testing_tools/adapter/pytest/test_discovery.py create mode 100644 pythonFiles/tests/testing_tools/adapter/test___main__.py create mode 100644 pythonFiles/tests/testing_tools/adapter/test_discovery.py create mode 100644 pythonFiles/tests/testing_tools/adapter/test_functional.py create mode 100644 pythonFiles/tests/unittestadapter/.data/discovery_empty.py create mode 100644 pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py create mode 100644 pythonFiles/tests/unittestadapter/.data/discovery_error/file_two.py create mode 100644 pythonFiles/tests/unittestadapter/.data/discovery_simple.py create mode 100644 pythonFiles/tests/unittestadapter/.data/utils_decorated_tree.py create mode 100644 pythonFiles/tests/unittestadapter/.data/utils_nested_cases/file_one.py create mode 100644 pythonFiles/tests/unittestadapter/.data/utils_simple_cases.py create mode 100644 pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py create mode 100644 pythonFiles/tests/unittestadapter/test_discovery.py create mode 100644 pythonFiles/tests/unittestadapter/test_execution.py create mode 100644 src/client/debugger/pythonDebugger.ts create mode 100644 src/test/pythonFiles/dummy.py diff --git a/pythonFiles/create_venv.py b/pythonFiles/create_venv.py new file mode 100644 index 000000000000..092286f986cf --- /dev/null +++ b/pythonFiles/create_venv.py @@ -0,0 +1,250 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
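+
+# Illustrative usage (a sketch only; the flags are the ones defined in parse_args()
+# below and the JSON read on --stdin mirrors get_requirements_from_args(), but the
+# file names here are made up):
+#
+#   python create_venv.py --name .venv --git-ignore --requirements requirements.txt
+#   python create_venv.py --toml pyproject.toml --extras test --extras docs
+#   echo '{"requirements": ["requirements.txt"]}' | python create_venv.py --stdin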
+ +import argparse +import importlib.util as import_util +import json +import os +import pathlib +import subprocess +import sys +import urllib.request as url_lib +from typing import List, Optional, Sequence, Union + +VENV_NAME = ".venv" +CWD = pathlib.Path.cwd() +MICROVENV_SCRIPT_PATH = pathlib.Path(__file__).parent / "create_microvenv.py" + + +class VenvError(Exception): + pass + + +def parse_args(argv: Sequence[str]) -> argparse.Namespace: + parser = argparse.ArgumentParser() + + parser.add_argument( + "--requirements", + action="append", + default=[], + help="Install additional dependencies into the virtual environment.", + ) + + parser.add_argument( + "--toml", + action="store", + default=None, + help="Install additional dependencies from sources like `pyproject.toml` into the virtual environment.", + ) + parser.add_argument( + "--extras", + action="append", + default=[], + help="Install specific package groups from `pyproject.toml` into the virtual environment.", + ) + + parser.add_argument( + "--git-ignore", + action="store_true", + default=False, + help="Add .gitignore to the newly created virtual environment.", + ) + parser.add_argument( + "--name", + default=VENV_NAME, + type=str, + help="Name of the virtual environment.", + metavar="NAME", + action="store", + ) + parser.add_argument( + "--stdin", + action="store_true", + default=False, + help="Read arguments from stdin.", + ) + return parser.parse_args(argv) + + +def is_installed(module: str) -> bool: + return import_util.find_spec(module) is not None + + +def file_exists(path: Union[str, pathlib.PurePath]) -> bool: + return os.path.exists(path) + + +def venv_exists(name: str) -> bool: + return os.path.exists(CWD / name) and file_exists(get_venv_path(name)) + + +def run_process(args: Sequence[str], error_message: str) -> None: + try: + print("Running: " + " ".join(args)) + subprocess.run(args, cwd=os.getcwd(), check=True) + except subprocess.CalledProcessError: + raise VenvError(error_message) + + +def get_venv_path(name: str) -> str: + # See `venv` doc here for more details on binary location: + # https://docs.python.org/3/library/venv.html#creating-virtual-environments + if sys.platform == "win32": + return os.fspath(CWD / name / "Scripts" / "python.exe") + else: + return os.fspath(CWD / name / "bin" / "python") + + +def install_requirements(venv_path: str, requirements: List[str]) -> None: + if not requirements: + return + + for requirement in requirements: + print(f"VENV_INSTALLING_REQUIREMENTS: {requirement}") + run_process( + [venv_path, "-m", "pip", "install", "-r", requirement], + "CREATE_VENV.PIP_FAILED_INSTALL_REQUIREMENTS", + ) + print("CREATE_VENV.PIP_INSTALLED_REQUIREMENTS") + + +def install_toml(venv_path: str, extras: List[str]) -> None: + args = "." 
if len(extras) == 0 else f".[{','.join(extras)}]" + run_process( + [venv_path, "-m", "pip", "install", "-e", args], + "CREATE_VENV.PIP_FAILED_INSTALL_PYPROJECT", + ) + print("CREATE_VENV.PIP_INSTALLED_PYPROJECT") + + +def upgrade_pip(venv_path: str) -> None: + print("CREATE_VENV.UPGRADING_PIP") + run_process( + [venv_path, "-m", "pip", "install", "--upgrade", "pip"], + "CREATE_VENV.UPGRADE_PIP_FAILED", + ) + print("CREATE_VENV.UPGRADED_PIP") + + +def add_gitignore(name: str) -> None: + git_ignore = CWD / name / ".gitignore" + if not file_exists(git_ignore): + print("Creating: " + os.fspath(git_ignore)) + with open(git_ignore, "w") as f: + f.write("*") + + +def download_pip_pyz(name: str): + url = "https://bootstrap.pypa.io/pip/pip.pyz" + print("CREATE_VENV.DOWNLOADING_PIP") + + try: + with url_lib.urlopen(url) as response: + pip_pyz_path = os.fspath(CWD / name / "pip.pyz") + with open(pip_pyz_path, "wb") as out_file: + data = response.read() + out_file.write(data) + out_file.flush() + except Exception: + raise VenvError("CREATE_VENV.DOWNLOAD_PIP_FAILED") + + +def install_pip(name: str): + pip_pyz_path = os.fspath(CWD / name / "pip.pyz") + executable = get_venv_path(name) + print("CREATE_VENV.INSTALLING_PIP") + run_process( + [executable, pip_pyz_path, "install", "pip"], + "CREATE_VENV.INSTALL_PIP_FAILED", + ) + + +def get_requirements_from_args(args: argparse.Namespace) -> List[str]: + requirements = [] + if args.stdin: + data = json.loads(sys.stdin.read()) + requirements = data.get("requirements", []) + if args.requirements: + requirements.extend(args.requirements) + return requirements + + +def main(argv: Optional[Sequence[str]] = None) -> None: + if argv is None: + argv = [] + args = parse_args(argv) + + use_micro_venv = False + venv_installed = is_installed("venv") + pip_installed = is_installed("pip") + ensure_pip_installed = is_installed("ensurepip") + distutils_installed = is_installed("distutils") + + if not venv_installed: + if sys.platform == "win32": + raise VenvError("CREATE_VENV.VENV_NOT_FOUND") + else: + use_micro_venv = True + if not distutils_installed: + print("Install `python3-distutils` package or equivalent for your OS.") + print("On Debian/Ubuntu: `sudo apt install python3-distutils`") + raise VenvError("CREATE_VENV.DISTUTILS_NOT_INSTALLED") + + if venv_exists(args.name): + # A virtual environment with same name exists. + # We will use the existing virtual environment. + venv_path = get_venv_path(args.name) + print(f"EXISTING_VENV:{venv_path}") + else: + if use_micro_venv: + # `venv` was not found but on this platform we can use `microvenv` + run_process( + [ + sys.executable, + os.fspath(MICROVENV_SCRIPT_PATH), + "--name", + args.name, + ], + "CREATE_VENV.MICROVENV_FAILED_CREATION", + ) + elif not pip_installed or not ensure_pip_installed: + # `venv` was found but `pip` or `ensurepip` was not found. + # We create a venv without `pip` in it. We will later install `pip`. + run_process( + [sys.executable, "-m", "venv", "--without-pip", args.name], + "CREATE_VENV.VENV_FAILED_CREATION", + ) + else: + # Both `venv` and `pip` were found. So create a .venv normally + run_process( + [sys.executable, "-m", "venv", args.name], + "CREATE_VENV.VENV_FAILED_CREATION", + ) + + venv_path = get_venv_path(args.name) + print(f"CREATED_VENV:{venv_path}") + + if args.git_ignore: + add_gitignore(args.name) + + # At this point we have a .venv. Now we handle installing `pip`. + if pip_installed and ensure_pip_installed: + # We upgrade pip if it is already installed. 
+ upgrade_pip(venv_path) + else: + # `pip` was not found, so we download it and install it. + download_pip_pyz(args.name) + install_pip(args.name) + + requirements = get_requirements_from_args(args) + if requirements: + print(f"VENV_INSTALLING_REQUIREMENTS: {requirements}") + install_requirements(venv_path, requirements) + + if args.toml: + print(f"VENV_INSTALLING_PYPROJECT: {args.toml}") + install_toml(venv_path, args.extras) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/pythonFiles/install_debugpy.py b/pythonFiles/install_debugpy.py new file mode 100644 index 000000000000..cabb620ea1f2 --- /dev/null +++ b/pythonFiles/install_debugpy.py @@ -0,0 +1,66 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import io +import json +import os +import urllib.request as url_lib +import zipfile + +from packaging.version import parse as version_parser + +EXTENSION_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +DEBUGGER_DEST = os.path.join(EXTENSION_ROOT, "pythonFiles", "lib", "python") +DEBUGGER_PACKAGE = "debugpy" +DEBUGGER_PYTHON_ABI_VERSIONS = ("cp310",) +DEBUGGER_VERSION = "1.6.7" # can also be "latest" + + +def _contains(s, parts=()): + return any(p in s for p in parts) + + +def _get_package_data(): + json_uri = "https://pypi.org/pypi/{0}/json".format(DEBUGGER_PACKAGE) + # Response format: https://warehouse.readthedocs.io/api-reference/json/#project + # Release metadata format: https://github.com/pypa/interoperability-peps/blob/master/pep-0426-core-metadata.rst + with url_lib.urlopen(json_uri) as response: + return json.loads(response.read()) + + +def _get_debugger_wheel_urls(data, version): + return list( + r["url"] + for r in data["releases"][version] + if _contains(r["url"], DEBUGGER_PYTHON_ABI_VERSIONS) + ) + + +def _download_and_extract(root, url, version): + root = os.getcwd() if root is None or root == "." else root + print(url) + with url_lib.urlopen(url) as response: + data = response.read() + with zipfile.ZipFile(io.BytesIO(data), "r") as wheel: + for zip_info in wheel.infolist(): + # Ignore dist info since we are merging multiple wheels + if ".dist-info/" in zip_info.filename: + continue + print("\t" + zip_info.filename) + wheel.extract(zip_info.filename, root) + + +def main(root): + data = _get_package_data() + + if DEBUGGER_VERSION == "latest": + use_version = max(data["releases"].keys(), key=version_parser) + else: + use_version = DEBUGGER_VERSION + + for url in _get_debugger_wheel_urls(data, use_version): + _download_and_extract(root, url, use_version) + + +if __name__ == "__main__": + main(DEBUGGER_DEST) diff --git a/pythonFiles/installed_check.py b/pythonFiles/installed_check.py new file mode 100644 index 000000000000..f0e1c268d270 --- /dev/null +++ b/pythonFiles/installed_check.py @@ -0,0 +1,129 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
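+
+# Illustrative example (a sketch only; the diagnostic fields mirror those built in
+# process_requirements() and process_pyproject() below, and the file/package names
+# are made up). Running:
+#
+#   python installed_check.py requirements.txt
+#
+# prints a JSON list such as the following when the package named on the second
+# line of the file is not installed:
+#
+#   [{"line": 1, "character": 0, "endLine": 1, "endCharacter": 5,
+#     "package": "numpy", "code": "not-installed", "severity": 3}]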
+ +import argparse +import json +import os +import pathlib +import sys +from typing import Dict, List, Optional, Sequence, Tuple, Union + +LIB_ROOT = pathlib.Path(__file__).parent / "lib" / "python" +sys.path.insert(0, os.fspath(LIB_ROOT)) + +import tomli +from importlib_metadata import metadata +from packaging.requirements import Requirement + +DEFAULT_SEVERITY = 3 + + +def parse_args(argv: Optional[Sequence[str]] = None): + if argv is None: + argv = sys.argv[1:] + parser = argparse.ArgumentParser( + description="Check for installed packages against requirements" + ) + parser.add_argument("FILEPATH", type=str, help="Path to requirements.[txt, in]") + + return parser.parse_args(argv) + + +def parse_requirements(line: str) -> Optional[Requirement]: + try: + req = Requirement(line.strip("\\")) + if req.marker is None: + return req + elif req.marker.evaluate(): + return req + except: + return None + + +def process_requirements(req_file: pathlib.Path) -> List[Dict[str, Union[str, int]]]: + diagnostics = [] + for n, line in enumerate(req_file.read_text(encoding="utf-8").splitlines()): + if line.startswith(("#", "-", " ")) or line == "": + continue + + req = parse_requirements(line) + if req: + try: + # Check if package is installed + metadata(req.name) + except: + diagnostics.append( + { + "line": n, + "character": 0, + "endLine": n, + "endCharacter": len(req.name), + "package": req.name, + "code": "not-installed", + "severity": DEFAULT_SEVERITY, + } + ) + return diagnostics + + +def get_pos(lines: List[str], text: str) -> Tuple[int, int, int, int]: + for n, line in enumerate(lines): + index = line.find(text) + if index >= 0: + return n, index, n, index + len(text) + return (0, 0, 0, 0) + + +def process_pyproject(req_file: pathlib.Path) -> List[Dict[str, Union[str, int]]]: + diagnostics = [] + try: + raw_text = req_file.read_text(encoding="utf-8") + pyproject = tomli.loads(raw_text) + except: + return diagnostics + + lines = raw_text.splitlines() + reqs = pyproject.get("project", {}).get("dependencies", []) + for raw_req in reqs: + req = parse_requirements(raw_req) + n, start, _, end = get_pos(lines, raw_req) + if req: + try: + # Check if package is installed + metadata(req.name) + except: + diagnostics.append( + { + "line": n, + "character": start, + "endLine": n, + "endCharacter": end, + "package": req.name, + "code": "not-installed", + "severity": DEFAULT_SEVERITY, + } + ) + return diagnostics + + +def get_diagnostics(req_file: pathlib.Path) -> List[Dict[str, Union[str, int]]]: + diagnostics = [] + if not req_file.exists(): + return diagnostics + + if req_file.name == "pyproject.toml": + diagnostics = process_pyproject(req_file) + else: + diagnostics = process_requirements(req_file) + + return diagnostics + + +def main(): + args = parse_args() + diagnostics = get_diagnostics(pathlib.Path(args.FILEPATH)) + print(json.dumps(diagnostics, ensure_ascii=False)) + + +if __name__ == "__main__": + main() diff --git a/pythonFiles/normalizeSelection.py b/pythonFiles/normalizeSelection.py new file mode 100644 index 000000000000..7608ce8860f6 --- /dev/null +++ b/pythonFiles/normalizeSelection.py @@ -0,0 +1,305 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import ast +import json +import re +import sys +import textwrap +from typing import Iterable + + +def split_lines(source): + """ + Split selection lines in a version-agnostic way. + + Python grammar only treats \r, \n, and \r\n as newlines. 
+ But splitlines() in Python 3 has a much larger list: for example, it also includes \v, \f. + As such, this function will split lines across all Python versions. + """ + return re.split(r"[\n\r]+", source) + + +def _get_statements(selection): + """ + Process a multiline selection into a list of its top-level statements. + This will remove empty newlines around and within the selection, dedent it, + and split it using the result of `ast.parse()`. + """ + + # Remove blank lines within the selection to prevent the REPL from thinking the block is finished. + lines = (line for line in split_lines(selection) if line.strip() != "") + + # Dedent the selection and parse it using the ast module. + # Note that leading comments in the selection will be discarded during parsing. + source = textwrap.dedent("\n".join(lines)) + tree = ast.parse(source) + + # We'll need the dedented lines to rebuild the selection. + lines = split_lines(source) + + # Get the line ranges for top-level blocks returned from parsing the dedented text + # and split the selection accordingly. + # tree.body is a list of AST objects, which we rely on to extract top-level statements. + # If we supported Python 3.8+ only we could use the lineno and end_lineno attributes of each object + # to get the boundaries of each block. + # However, earlier Python versions only have the lineno attribute, which is the range start position (1-indexed). + # Therefore, to retrieve the end line of each block in a version-agnostic way we need to do + # `end = next_block.lineno - 1` + # for all blocks except the last one, which will just run until the last line. + ends = [] + for node in tree.body[1:]: + line_end = node.lineno - 1 + # Special handling of decorators: + # In Python 3.8 and higher, decorators are not taken into account in the value returned by lineno, + # and we have to use the length of the decorator_list array to compute the actual start line. + # Before that, lineno takes into account decorators, so this offset check is unnecessary. + # Also, not all AST objects can have decorators. + if hasattr(node, "decorator_list") and sys.version_info >= (3, 8): + # Using getattr instead of node.decorator_list, or pyright will complain about an unknown member. + line_end -= len(getattr(node, "decorator_list")) + ends.append(line_end) + ends.append(len(lines)) + + for node, end in zip(tree.body, ends): + # Given this selection: + # 1: if (m > 0 and + # 2: n < 3): + # 3: print('foo') + # 4: value = 'bar' + # + # The first block would have lineno = 1, and the second block lineno = 4 + start = node.lineno - 1 + + # Special handling of decorators similar to what's above. + if hasattr(node, "decorator_list") and sys.version_info >= (3, 8): + # Using getattr instead of node.decorator_list, or pyright will complain about an unknown member. + start -= len(getattr(node, "decorator_list")) + block = "\n".join(lines[start:end]) + + # If the block is multiline, add an extra newline character at its end. + # This way, when joining blocks back together, there will be a blank line between each multiline statement + # and no blank lines between single-line statements, or it would look like this: + # >>> x = 22 + # >>> + # >>> total = x + 30 + # >>> + # Note that for the multiline parentheses case this newline is redundant, + # since the closing parenthesis terminates the statement already. + # This means that for this pattern we'll end up with: + # >>> x = [ + # ... 1 + # ... ] + # >>> + # >>> y = [ + # ... 2 + # ...]
+ if end - start > 1: + block += "\n" + + yield block + + +def normalize_lines(selection): + """ + Normalize the text selection received from the extension. + + If it is a single line selection, dedent it and append a newline and + send it back to the extension. + Otherwise, sanitize the multiline selection before returning it: + split it in a list of top-level statements + and add newlines between each of them so the REPL knows where each block ends. + """ + try: + # Parse the selection into a list of top-level blocks. + # We don't differentiate between single and multiline statements + # because it's not a perf bottleneck, + # and the overhead from splitting and rejoining strings in the multiline case is one-off. + statements = _get_statements(selection) + + # Insert a newline between each top-level statement, and append a newline to the selection. + source = "\n".join(statements) + "\n" + if selection[-2] == "}" or selection[-2] == "]": + source = source[:-1] + except Exception: + # If there's a problem when parsing statements, + # append a blank line to end the block and send it as-is. + source = selection + "\n\n" + + return source + + +top_level_nodes = [] +min_key = None + + +def check_exact_exist(top_level_nodes, start_line, end_line): + exact_nodes = [] + for node in top_level_nodes: + if node.lineno == start_line and node.end_lineno == end_line: + exact_nodes.append(node) + + return exact_nodes + + +def traverse_file(wholeFileContent, start_line, end_line, was_highlighted): + """ + Intended to traverse through a user's given file content and find, collect all appropriate lines + that should be sent to the REPL in case of smart selection. + This could be exact statement such as just a single line print statement, + or a multiline dictionary, or differently styled multi-line list comprehension, etc. + Then call the normalize_lines function to normalize our smartly selected code block. + """ + parsed_file_content = None + + try: + parsed_file_content = ast.parse(wholeFileContent) + except Exception: + # Handle case where user is attempting to run code where file contains deprecated Python code. + # Let typescript side know and show warning message. + return { + "normalized_smart_result": "deprecated", + "which_line_next": 0, + } + + smart_code = "" + should_run_top_blocks = [] + + # Purpose of this loop is to fetch and collect all the + # AST top level nodes, and its node.body as child nodes. + # Individual nodes will contain information like + # the start line, end line and get source segment information + # that will be used to smartly select, and send normalized code. + for node in ast.iter_child_nodes(parsed_file_content): + top_level_nodes.append(node) + + ast_types_with_nodebody = ( + ast.Module, + ast.Interactive, + ast.Expression, + ast.FunctionDef, + ast.AsyncFunctionDef, + ast.ClassDef, + ast.For, + ast.AsyncFor, + ast.While, + ast.If, + ast.With, + ast.AsyncWith, + ast.Try, + ast.Lambda, + ast.IfExp, + ast.ExceptHandler, + ) + if isinstance(node, ast_types_with_nodebody) and isinstance( + node.body, Iterable + ): + for child_nodes in node.body: + top_level_nodes.append(child_nodes) + + exact_nodes = check_exact_exist(top_level_nodes, start_line, end_line) + + # Just return the exact top level line, if present. 
+ if len(exact_nodes) > 0: + which_line_next = 0 + for same_line_node in exact_nodes: + should_run_top_blocks.append(same_line_node) + smart_code += ( + f"{ast.get_source_segment(wholeFileContent, same_line_node)}\n" + ) + which_line_next = get_next_block_lineno(should_run_top_blocks) + return { + "normalized_smart_result": smart_code, + "which_line_next": which_line_next, + } + + # For each of the nodes in the parsed file content, + # add the appropriate source code line(s) to be sent to the REPL, depending on whether the + # user is trying to send and execute a single line/statement or multiple ones with smart selection. + for top_node in ast.iter_child_nodes(parsed_file_content): + if start_line == top_node.lineno and end_line == top_node.end_lineno: + should_run_top_blocks.append(top_node) + + smart_code += f"{ast.get_source_segment(wholeFileContent, top_node)}\n" + break # If we found an exact match, don't waste computation in parsing extra nodes. + elif start_line >= top_node.lineno and end_line <= top_node.end_lineno: + # Case to apply smart selection for multiple lines. + # This is the case for when we have to add multiple lines that should be included in the smart send. + # For example: + # 'my_dictionary': { + # 'Audi': 'Germany', + # 'BMW': 'Germany', + # 'Genesis': 'Korea', + # } + # with the mouse cursor at 'BMW': 'Germany', we should send all of the lines that pertain to my_dictionary. + + should_run_top_blocks.append(top_node) + + smart_code += str(ast.get_source_segment(wholeFileContent, top_node)) + smart_code += "\n" + + normalized_smart_result = normalize_lines(smart_code) + which_line_next = get_next_block_lineno(should_run_top_blocks) + return { + "normalized_smart_result": normalized_smart_result, + "which_line_next": which_line_next, + } + + +# Look at the last top block added, find lineno for the next upcoming block. +# This will be used in calculating lineOffset to move the cursor in VS Code. +def get_next_block_lineno(which_line_next): + last_ran_lineno = int(which_line_next[-1].end_lineno) + next_lineno = int(which_line_next[-1].end_lineno) + + for reverse_node in top_level_nodes: + if reverse_node.lineno > last_ran_lineno: + next_lineno = reverse_node.lineno + break + return next_lineno + + +if __name__ == "__main__": + # Content is being sent from the extension as a JSON object. + # Decode the data from the raw bytes. + stdin = sys.stdin if sys.version_info < (3,) else sys.stdin.buffer + raw = stdin.read() + contents = json.loads(raw.decode("utf-8")) + # Empty highlight means the user has not explicitly selected specific text. + empty_Highlight = contents.get("emptyHighlight", False) + + # We also get the activeEditor selection start line and end line from the TypeScript VS Code side. + # Remember to add 1 to each of the received values since VS Code starts line counting from 0. + vscode_start_line = contents["startLine"] + 1 + vscode_end_line = contents["endLine"] + 1 + + # Send the normalized code back to the extension in a JSON object.
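+ # For example (illustrative values only): an input payload such as
+ # {"code": "x = 22", "emptyHighlight": false, "startLine": 3, "endLine": 3, ...}
+ # produces {"normalized": "x = 22\n"}, while a smart-send run additionally reports
+ # the next block to move the cursor to, e.g. {"normalized": "...", "nextBlockLineno": 7}.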
+ data = None + which_line_next = 0 + + if ( + empty_Highlight + and contents.get("smartSendExperimentEnabled") + and contents.get("smartSendSettingsEnabled") + ): + result = traverse_file( + contents["wholeFileContent"], + vscode_start_line, + vscode_end_line, + not empty_Highlight, + ) + normalized = result["normalized_smart_result"] + which_line_next = result["which_line_next"] + if normalized == "deprecated": + data = json.dumps({"normalized": normalized}) + else: + data = json.dumps( + {"normalized": normalized, "nextBlockLineno": result["which_line_next"]} + ) + else: + normalized = normalize_lines(contents["code"]) + data = json.dumps({"normalized": normalized}) + + stdout = sys.stdout if sys.version_info < (3,) else sys.stdout.buffer + stdout.write(data.encode("utf-8")) + stdout.close() diff --git a/pythonFiles/pyproject.toml b/pythonFiles/pyproject.toml new file mode 100644 index 000000000000..56237999e603 --- /dev/null +++ b/pythonFiles/pyproject.toml @@ -0,0 +1,36 @@ +[tool.black] +exclude = ''' + +( + /( + .data + | .vscode + | lib + )/ +) +''' + +[tool.pyright] +exclude = ['lib'] +extraPaths = ['lib/python', 'lib/jedilsp'] +ignore = [ + # Ignore all pre-existing code with issues + 'get-pip.py', + 'install_debugpy.py', + 'tensorboard_launcher.py', + 'testlauncher.py', + 'visualstudio_py_testlauncher.py', + 'testing_tools/unittest_discovery.py', + 'testing_tools/adapter/util.py', + 'testing_tools/adapter/pytest/_discovery.py', + 'testing_tools/adapter/pytest/_pytest_item.py', + 'tests/debug_adapter/test_install_debugpy.py', + 'tests/testing_tools/adapter/.data', + 'tests/testing_tools/adapter/test___main__.py', + 'tests/testing_tools/adapter/test_discovery.py', + 'tests/testing_tools/adapter/test_functional.py', + 'tests/testing_tools/adapter/test_report.py', + 'tests/testing_tools/adapter/test_util.py', + 'tests/testing_tools/adapter/pytest/test_cli.py', + 'tests/testing_tools/adapter/pytest/test_discovery.py', +] diff --git a/pythonFiles/testing_tools/adapter/__init__.py b/pythonFiles/testing_tools/adapter/__init__.py new file mode 100644 index 000000000000..5b7f7a925cc0 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/pythonFiles/testing_tools/adapter/__main__.py b/pythonFiles/testing_tools/adapter/__main__.py new file mode 100644 index 000000000000..5857c63db049 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/__main__.py @@ -0,0 +1,106 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import + +import argparse +import sys + +from . import pytest, report +from .errors import UnsupportedToolError, UnsupportedCommandError + + +TOOLS = { + "pytest": { + "_add_subparser": pytest.add_cli_subparser, + "discover": pytest.discover, + }, +} +REPORTERS = { + "discover": report.report_discovered, +} + + +def parse_args( + # the args to parse + argv=sys.argv[1:], + # the program name + prog=sys.argv[0], +): + """ + Return the subcommand & tool to run, along with its args. + + This defines the standard CLI for the different testing frameworks. + """ + parser = argparse.ArgumentParser( + description="Run Python testing operations.", + prog=prog, + # ... + ) + cmdsubs = parser.add_subparsers(dest="cmd") + + # Add "run" and "debug" subcommands when ready. 
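+ # Example (illustrative paths/args): a pytest discovery run might be invoked as
+ # python -m testing_tools.adapter discover pytest --simple -- --rootdir=/path/to/project
+ # where everything after "--" is passed straight through to the tool (see below).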
+ for cmdname in ["discover"]: + sub = cmdsubs.add_parser(cmdname) + subsubs = sub.add_subparsers(dest="tool") + for toolname in sorted(TOOLS): + try: + add_subparser = TOOLS[toolname]["_add_subparser"] + except KeyError: + continue + subsub = add_subparser(cmdname, toolname, subsubs) + if cmdname == "discover": + subsub.add_argument("--simple", action="store_true") + subsub.add_argument( + "--no-hide-stdio", dest="hidestdio", action="store_false" + ) + subsub.add_argument("--pretty", action="store_true") + + # Parse the args! + if "--" in argv: + sep_index = argv.index("--") + toolargs = argv[sep_index + 1 :] + argv = argv[:sep_index] + else: + toolargs = [] + args = parser.parse_args(argv) + ns = vars(args) + + cmd = ns.pop("cmd") + if not cmd: + parser.error("missing command") + + tool = ns.pop("tool") + if not tool: + parser.error("missing tool") + + return tool, cmd, ns, toolargs + + +def main( + toolname, + cmdname, + subargs, + toolargs, + # internal args (for testing): + _tools=TOOLS, + _reporters=REPORTERS, +): + try: + tool = _tools[toolname] + except KeyError: + raise UnsupportedToolError(toolname) + + try: + run = tool[cmdname] + report_result = _reporters[cmdname] + except KeyError: + raise UnsupportedCommandError(cmdname) + + parents, result = run(toolargs, **subargs) + report_result(result, parents, **subargs) + + +if __name__ == "__main__": + tool, cmd, subargs, toolargs = parse_args() + main(tool, cmd, subargs, toolargs) diff --git a/pythonFiles/testing_tools/adapter/errors.py b/pythonFiles/testing_tools/adapter/errors.py new file mode 100644 index 000000000000..3e6ae5189cb8 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/errors.py @@ -0,0 +1,16 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +class UnsupportedToolError(ValueError): + def __init__(self, tool): + msg = "unsupported tool {!r}".format(tool) + super(UnsupportedToolError, self).__init__(msg) + self.tool = tool + + +class UnsupportedCommandError(ValueError): + def __init__(self, cmd): + msg = "unsupported cmd {!r}".format(cmd) + super(UnsupportedCommandError, self).__init__(msg) + self.cmd = cmd diff --git a/pythonFiles/testing_tools/adapter/info.py b/pythonFiles/testing_tools/adapter/info.py new file mode 100644 index 000000000000..f99ce0b6f9a2 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/info.py @@ -0,0 +1,120 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from collections import namedtuple + + +class SingleTestPath(namedtuple("TestPath", "root relfile func sub")): + """Where to find a single test.""" + + def __new__(cls, root, relfile, func, sub=None): + self = super(SingleTestPath, cls).__new__( + cls, + str(root) if root else None, + str(relfile) if relfile else None, + str(func) if func else None, + [str(s) for s in sub] if sub else None, + ) + return self + + def __init__(self, *args, **kwargs): + if self.root is None: + raise TypeError("missing id") + if self.relfile is None: + raise TypeError("missing kind") + # self.func may be None (e.g. for doctests). + # self.sub may be None. 
+ + +class ParentInfo(namedtuple("ParentInfo", "id kind name root relpath parentid")): + + KINDS = ("folder", "file", "suite", "function", "subtest") + + def __new__(cls, id, kind, name, root=None, relpath=None, parentid=None): + self = super(ParentInfo, cls).__new__( + cls, + id=str(id) if id else None, + kind=str(kind) if kind else None, + name=str(name) if name else None, + root=str(root) if root else None, + relpath=str(relpath) if relpath else None, + parentid=str(parentid) if parentid else None, + ) + return self + + def __init__(self, *args, **kwargs): + if self.id is None: + raise TypeError("missing id") + if self.kind is None: + raise TypeError("missing kind") + if self.kind not in self.KINDS: + raise ValueError("unsupported kind {!r}".format(self.kind)) + if self.name is None: + raise TypeError("missing name") + if self.root is None: + if self.parentid is not None or self.kind != "folder": + raise TypeError("missing root") + if self.relpath is not None: + raise TypeError("unexpected relpath {}".format(self.relpath)) + elif self.parentid is None: + raise TypeError("missing parentid") + elif self.relpath is None and self.kind in ("folder", "file"): + raise TypeError("missing relpath") + + +class SingleTestInfo( + namedtuple("TestInfo", "id name path source markers parentid kind") +): + """Info for a single test.""" + + MARKERS = ("skip", "skip-if", "expected-failure") + KINDS = ("function", "doctest") + + def __new__(cls, id, name, path, source, markers, parentid, kind="function"): + self = super(SingleTestInfo, cls).__new__( + cls, + str(id) if id else None, + str(name) if name else None, + path or None, + str(source) if source else None, + [str(marker) for marker in markers or ()], + str(parentid) if parentid else None, + str(kind) if kind else None, + ) + return self + + def __init__(self, *args, **kwargs): + if self.id is None: + raise TypeError("missing id") + if self.name is None: + raise TypeError("missing name") + if self.path is None: + raise TypeError("missing path") + if self.source is None: + raise TypeError("missing source") + else: + srcfile, _, lineno = self.source.rpartition(":") + if not srcfile or not lineno or int(lineno) < 0: + raise ValueError("bad source {!r}".format(self.source)) + if self.markers: + badmarkers = [m for m in self.markers if m not in self.MARKERS] + if badmarkers: + raise ValueError("unsupported markers {!r}".format(badmarkers)) + if self.parentid is None: + raise TypeError("missing parentid") + if self.kind is None: + raise TypeError("missing kind") + elif self.kind not in self.KINDS: + raise ValueError("unsupported kind {!r}".format(self.kind)) + + @property + def root(self): + return self.path.root + + @property + def srcfile(self): + return self.source.rpartition(":")[0] + + @property + def lineno(self): + return int(self.source.rpartition(":")[-1]) diff --git a/pythonFiles/testing_tools/adapter/pytest/_cli.py b/pythonFiles/testing_tools/adapter/pytest/_cli.py new file mode 100644 index 000000000000..3d3eec09a199 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/pytest/_cli.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import + +from ..errors import UnsupportedCommandError + + +def add_subparser(cmd, name, parent): + """Add a new subparser to the given parent and add args to it.""" + parser = parent.add_parser(name) + if cmd == "discover": + # For now we don't have any tool-specific CLI options to add. 
+ pass + else: + raise UnsupportedCommandError(cmd) + return parser diff --git a/pythonFiles/testing_tools/adapter/pytest/_discovery.py b/pythonFiles/testing_tools/adapter/pytest/_discovery.py new file mode 100644 index 000000000000..51c94527302d --- /dev/null +++ b/pythonFiles/testing_tools/adapter/pytest/_discovery.py @@ -0,0 +1,109 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import, print_function + +import sys + +import pytest + +from .. import util, discovery +from ._pytest_item import parse_item + + +def discover( + pytestargs=None, + hidestdio=False, + # *, + _pytest_main=pytest.main, + _plugin=None, + **_ignored +): + """Return the results of test discovery.""" + if _plugin is None: + _plugin = TestCollector() + + pytestargs = _adjust_pytest_args(pytestargs) + # We use this helper rather than "-pno:terminal" due to possible + # platform-dependent issues. + with (util.hide_stdio() if hidestdio else util.noop_cm()) as stdio: + ec = _pytest_main(pytestargs, [_plugin]) + # See: https://docs.pytest.org/en/latest/usage.html#possible-exit-codes + if ec == 5: + # No tests were discovered. + pass + elif ec != 0: + print( + "equivalent command: {} -m pytest {}".format( + sys.executable, util.shlex_unsplit(pytestargs) + ) + ) + if hidestdio: + print(stdio.getvalue(), file=sys.stderr) + sys.stdout.flush() + raise Exception("pytest discovery failed (exit code {})".format(ec)) + if not _plugin._started: + print( + "equivalent command: {} -m pytest {}".format( + sys.executable, util.shlex_unsplit(pytestargs) + ) + ) + if hidestdio: + print(stdio.getvalue(), file=sys.stderr) + sys.stdout.flush() + raise Exception("pytest discovery did not start") + return ( + _plugin._tests.parents, + list(_plugin._tests), + ) + + +def _adjust_pytest_args(pytestargs): + """Return a corrected copy of the given pytest CLI args.""" + pytestargs = list(pytestargs) if pytestargs else [] + # Duplicate entries should be okay. + pytestargs.insert(0, "--collect-only") + # TODO: pull in code from: + # src/client/testing/pytest/services/discoveryService.ts + # src/client/testing/pytest/services/argsService.ts + return pytestargs + + +class TestCollector(object): + """This is a pytest plugin that collects the discovered tests.""" + + @classmethod + def parse_item(cls, item): + return parse_item(item) + + def __init__(self, tests=None): + if tests is None: + tests = discovery.DiscoveredTests() + self._tests = tests + self._started = False + + # Relevant plugin hooks: + # https://docs.pytest.org/en/latest/reference.html#collection-hooks + + def pytest_collection_modifyitems(self, session, config, items): + self._started = True + self._tests.reset() + for item in items: + test, parents = self.parse_item(item) + if test is not None: + self._tests.add_test(test, parents) + + # This hook is not specified in the docs, so we also provide + # the "modifyitems" hook just in case. + def pytest_collection_finish(self, session): + self._started = True + try: + items = session.items + except AttributeError: + # TODO: Is there an alternative? 
+ return + self._tests.reset() + for item in items: + test, parents = self.parse_item(item) + if test is not None: + self._tests.add_test(test, parents) diff --git a/pythonFiles/testing_tools/adapter/pytest/_pytest_item.py b/pythonFiles/testing_tools/adapter/pytest/_pytest_item.py new file mode 100644 index 000000000000..2c22db21d4de --- /dev/null +++ b/pythonFiles/testing_tools/adapter/pytest/_pytest_item.py @@ -0,0 +1,630 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +""" +During "collection", pytest finds all the tests it supports. These are +called "items". The process is top-down, mostly tracing down through +the file system. Aside from its own machinery, pytest supports hooks +that find tests. Effectively, pytest starts with a set of "collectors"; +objects that can provide a list of tests and sub-collectors. All +collectors in the resulting tree are visited and the tests aggregated. +For the most part, each test's (and collector's) parent is identified +as the collector that collected it. + +Collectors and items are collectively identified as "nodes". The pytest +API relies on collector and item objects providing specific methods and +attributes. In addition to corresponding base classes, pytest provides +a number of concrete implementations. + +The following are the known pytest node types: + + Node + Collector + FSCollector + Session (the top-level collector) + File + Module + Package + DoctestTextfile + DoctestModule + PyCollector + (Module) + (...) + Class + UnitTestCase + Instance + Item + Function + TestCaseFunction + DoctestItem + +Here are the unique attrs for those classes: + + Node + name + nodeid (readonly) + config + session + (parent) - the parent node + (fspath) - the file from which the node was collected + ---- + own_marksers - explicit markers (e.g. with @pytest.mark()) + keywords + extra_keyword_matches + + Item + location - where the actual test source code is: (relfspath, lno, fullname) + user_properties + + PyCollector + module + class + instance + obj + + Function + module + class + instance + obj + function + (callspec) + (fixturenames) + funcargs + originalname - w/o decorations, e.g. [...] for parameterized + + DoctestItem + dtest + obj + +When parsing an item, we make use of the following attributes: + +* name +* nodeid +* __class__ + + __name__ +* fspath +* location +* function + + __name__ + + __code__ + + __closure__ +* own_markers +""" + +from __future__ import absolute_import, print_function + +import sys + +import pytest +import _pytest.doctest +import _pytest.unittest + +from ..info import SingleTestInfo, SingleTestPath +from ..util import fix_fileid, PATH_SEP, NORMCASE + + +def should_never_reach_here(item, **extra): + """Indicates a code path we should never reach.""" + print("The Python extension has run into an unexpected situation") + print("while processing a pytest node during test discovery. 
Please") + print("Please open an issue at:") + print(" https://github.com/microsoft/vscode-python/issues") + print("and paste the following output there.") + print() + for field, info in _summarize_item(item): + print("{}: {}".format(field, info)) + if extra: + print() + print("extra info:") + for name, info in extra.items(): + print("{:10}".format(name + ":"), end="") + if isinstance(info, str): + print(info) + else: + try: + print(*info) + except TypeError: + print(info) + print() + print("traceback:") + import traceback + + traceback.print_stack() + + msg = "Unexpected pytest node (see printed output)." + exc = NotImplementedError(msg) + exc.item = item + return exc + + +def parse_item( + item, + # *, + _get_item_kind=(lambda *a: _get_item_kind(*a)), + _parse_node_id=(lambda *a: _parse_node_id(*a)), + _split_fspath=(lambda *a: _split_fspath(*a)), + _get_location=(lambda *a: _get_location(*a)), +): + """Return (TestInfo, [suite ID]) for the given item. + + The suite IDs, if any, are in parent order with the item's direct + parent at the beginning. The parent of the last suite ID (or of + the test if there are no suites) is the file ID, which corresponds + to TestInfo.path. + + """ + # _debug_item(item, showsummary=True) + kind, _ = _get_item_kind(item) + # Skip plugin generated tests + if kind is None: + return None, None + (nodeid, parents, fileid, testfunc, parameterized) = _parse_node_id( + item.nodeid, kind + ) + # Note: testfunc does not necessarily match item.function.__name__. + # This can result from importing a test function from another module. + + # Figure out the file. + testroot, relfile = _split_fspath(str(item.fspath), fileid, item) + location, fullname = _get_location(item, testroot, relfile) + if kind == "function": + if testfunc and fullname != testfunc + parameterized: + raise should_never_reach_here( + item, + fullname=fullname, + testfunc=testfunc, + parameterized=parameterized, + # ... + ) + elif kind == "doctest": + if testfunc and fullname != testfunc and fullname != "[doctest] " + testfunc: + raise should_never_reach_here( + item, + fullname=fullname, + testfunc=testfunc, + # ... + ) + testfunc = None + + # Sort out the parent. + if parents: + parentid, _, _ = parents[0] + else: + parentid = None + + # Sort out markers. + # See: https://docs.pytest.org/en/latest/reference.html#marks + markers = set() + for marker in getattr(item, "own_markers", []): + if marker.name == "parameterize": + # We've already covered these. + continue + elif marker.name == "skip": + markers.add("skip") + elif marker.name == "skipif": + markers.add("skip-if") + elif marker.name == "xfail": + markers.add("expected-failure") + # We can add support for other markers as we need them? + + test = SingleTestInfo( + id=nodeid, + name=item.name, + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=testfunc, + sub=[parameterized] if parameterized else None, + ), + source=location, + markers=sorted(markers) if markers else None, + parentid=parentid, + ) + if parents and parents[-1] == (".", None, "folder"): # This should always be true? + parents[-1] = (".", testroot, "folder") + return test, parents + + +def _split_fspath( + fspath, + fileid, + item, + # *, + _normcase=NORMCASE, +): + """Return (testroot, relfile) for the given fspath. + + "relfile" will match "fileid". + """ + # "fileid" comes from nodeid and is always relative to the testroot + # (with a "./" prefix). There are no guarantees about casing, so we + # normcase just be to sure. 
+ relsuffix = fileid[1:] # Drop (only) the "." prefix. + if not _normcase(fspath).endswith(_normcase(relsuffix)): + raise should_never_reach_here( + item, + fspath=fspath, + fileid=fileid, + # ... + ) + testroot = fspath[: -len(fileid) + 1] # Ignore the "./" prefix. + relfile = "." + fspath[-len(fileid) + 1 :] # Keep the pathsep. + return testroot, relfile + + +def _get_location( + item, + testroot, + relfile, + # *, + _matches_relfile=(lambda *a: _matches_relfile(*a)), + _is_legacy_wrapper=(lambda *a: _is_legacy_wrapper(*a)), + _unwrap_decorator=(lambda *a: _unwrap_decorator(*a)), + _pathsep=PATH_SEP, +): + """Return (loc str, fullname) for the given item.""" + # When it comes to normcase, we favor relfile (from item.fspath) + # over item.location in this function. + + srcfile, lineno, fullname = item.location + if _matches_relfile(srcfile, testroot, relfile): + srcfile = relfile + else: + # pytest supports discovery of tests imported from other + # modules. This is reflected by a different filename + # in item.location. + + if _is_legacy_wrapper(srcfile): + srcfile = relfile + unwrapped = _unwrap_decorator(item.function) + if unwrapped is None: + # It was an invalid legacy wrapper so we just say + # "somewhere in relfile". + lineno = None + else: + _srcfile, lineno = unwrapped + if not _matches_relfile(_srcfile, testroot, relfile): + # For legacy wrappers we really expect the wrapped + # function to be in relfile. So here we ignore any + # other file and just say "somewhere in relfile". + lineno = None + elif _matches_relfile(srcfile, testroot, relfile): + srcfile = relfile + # Otherwise we just return the info from item.location as-is. + + if not srcfile.startswith("." + _pathsep): + srcfile = "." + _pathsep + srcfile + + if lineno is None: + lineno = -1 # i.e. "unknown" + + # from pytest, line numbers are 0-based + location = "{}:{}".format(srcfile, int(lineno) + 1) + return location, fullname + + +def _matches_relfile( + srcfile, + testroot, + relfile, + # *, + _normcase=NORMCASE, + _pathsep=PATH_SEP, +): + """Return True if "srcfile" matches the given relfile.""" + testroot = _normcase(testroot) + srcfile = _normcase(srcfile) + relfile = _normcase(relfile) + if srcfile == relfile: + return True + elif srcfile == relfile[len(_pathsep) + 1 :]: + return True + elif srcfile == testroot + relfile[1:]: + return True + else: + return False + + +def _is_legacy_wrapper( + srcfile, + # *, + _pathsep=PATH_SEP, + _pyversion=sys.version_info, +): + """Return True if the test might be wrapped. + + In Python 2 unittest's decorators (e.g. unittest.skip) do not wrap + properly, so we must manually unwrap them. + """ + if _pyversion > (3,): + return False + if (_pathsep + "unittest" + _pathsep + "case.py") not in srcfile: + return False + return True + + +def _unwrap_decorator(func): + """Return (filename, lineno) for the func the given func wraps. + + If the wrapped func cannot be identified then return None. Likewise + for the wrapped filename. "lineno" is None if it cannot be found + but the filename could. 
+ """ + try: + func = func.__closure__[0].cell_contents + except (IndexError, AttributeError): + return None + else: + if not callable(func): + return None + try: + filename = func.__code__.co_filename + except AttributeError: + return None + else: + try: + lineno = func.__code__.co_firstlineno - 1 + except AttributeError: + return (filename, None) + else: + return filename, lineno + + +def _parse_node_id( + testid, + kind, + # *, + _iter_nodes=(lambda *a: _iter_nodes(*a)), +): + """Return the components of the given node ID, in heirarchical order.""" + nodes = iter(_iter_nodes(testid, kind)) + + testid, name, kind = next(nodes) + parents = [] + parameterized = None + if kind == "doctest": + parents = list(nodes) + fileid, _, _ = parents[0] + return testid, parents, fileid, name, parameterized + elif kind is None: + fullname = None + else: + if kind == "subtest": + node = next(nodes) + parents.append(node) + funcid, funcname, _ = node + parameterized = testid[len(funcid) :] + elif kind == "function": + funcname = name + else: + raise should_never_reach_here( + testid, + kind=kind, + # ... + ) + fullname = funcname + + for node in nodes: + parents.append(node) + parentid, name, kind = node + if kind == "file": + fileid = parentid + break + elif fullname is None: + # We don't guess how to interpret the node ID for these tests. + continue + elif kind == "suite": + fullname = name + "." + fullname + else: + raise should_never_reach_here( + testid, + node=node, + # ... + ) + else: + fileid = None + parents.extend(nodes) # Add the rest in as-is. + + return ( + testid, + parents, + fileid, + fullname, + parameterized or "", + ) + + +def _find_left_bracket(nodeid): + """Return tuple of part before final bracket open, separator [, and the remainder. + Notes: + Testcase names in case of parametrized tests are wrapped in []. + Examples: + dirname[sometext]/dirname/testfile.py::testset::testname[testcase] + => ('dirname[sometext]/dirname/testfile.py::testset::testname', '[', 'testcase]') + dirname/dirname/testfile.py::testset::testname[testcase] + => ('dirname/dirname/testfile.py::testset::testname', '[', 'testcase]') + dirname/dirname/testfile.py::testset::testname[testcase[x]] + => ('dirname/dirname/testfile.py::testset::testname', '[', 'testcase[x]]') + """ + if not nodeid.endswith("]"): + return nodeid, "", "" + bracketcount = 0 + for index, char in enumerate(nodeid[::-1]): + if char == "]": + bracketcount += 1 + elif char == "[": + bracketcount -= 1 + if bracketcount == 0: + n = len(nodeid) - 1 - index + return nodeid[:n], nodeid[n], nodeid[n + 1 :] + return nodeid, "", "" + + +def _iter_nodes( + testid, + kind, + # *, + _normalize_test_id=(lambda *a: _normalize_test_id(*a)), + _normcase=NORMCASE, + _pathsep=PATH_SEP, +): + """Yield (nodeid, name, kind) for the given node ID and its parents.""" + nodeid, testid = _normalize_test_id(testid, kind) + if len(nodeid) > len(testid): + testid = "." + _pathsep + testid + + if kind == "function" and nodeid.endswith("]"): + funcid, sep, parameterized = _find_left_bracket(nodeid) + if not sep: + raise should_never_reach_here( + nodeid, + # ... + ) + yield (nodeid, sep + parameterized, "subtest") + nodeid = funcid + + parentid, _, name = nodeid.rpartition("::") + if not parentid: + if kind is None: + # This assumes that plugins can generate nodes that do not + # have a parent. All the builtin nodes have one. + yield (nodeid, name, kind) + return + # We expect at least a filename and a name. + raise should_never_reach_here( + nodeid, + # ... 
+ ) + yield (nodeid, name, kind) + + # Extract the suites. + while "::" in parentid: + suiteid = parentid + parentid, _, name = parentid.rpartition("::") + yield (suiteid, name, "suite") + + # Extract the file and folders. + fileid = parentid + raw = testid[: len(fileid)] + _parentid, _, filename = _normcase(fileid).rpartition(_pathsep) + parentid = fileid[: len(_parentid)] + raw, name = raw[: len(_parentid)], raw[-len(filename) :] + yield (fileid, name, "file") + # We're guaranteed at least one (the test root). + while _pathsep in _normcase(parentid): + folderid = parentid + _parentid, _, foldername = _normcase(folderid).rpartition(_pathsep) + parentid = folderid[: len(_parentid)] + raw, name = raw[: len(parentid)], raw[-len(foldername) :] + yield (folderid, name, "folder") + # We set the actual test root later at the bottom of parse_item(). + testroot = None + yield (parentid, testroot, "folder") + + +def _normalize_test_id( + testid, + kind, + # *, + _fix_fileid=fix_fileid, + _pathsep=PATH_SEP, +): + """Return the canonical form for the given node ID.""" + while "::()::" in testid: + testid = testid.replace("::()::", "::") + if kind is None: + return testid, testid + orig = testid + + # We need to keep the testid as-is, or else pytest won't recognize + # it when we try to use it later (e.g. to run a test). The only + # exception is that we add a "./" prefix for relative paths. + # Note that pytest always uses "/" as the path separator in IDs. + fileid, sep, remainder = testid.partition("::") + fileid = _fix_fileid(fileid) + if not fileid.startswith("./"): # Absolute "paths" not expected. + raise should_never_reach_here( + testid, + fileid=fileid, + # ... + ) + testid = fileid + sep + remainder + + return testid, orig + + +def _get_item_kind(item): + """Return (kind, isunittest) for the given item.""" + if isinstance(item, _pytest.doctest.DoctestItem): + return "doctest", False + elif isinstance(item, _pytest.unittest.TestCaseFunction): + return "function", True + elif isinstance(item, pytest.Function): + # We *could* be more specific, e.g. "method", "subtest". + return "function", False + else: + return None, False + + +############################# +# useful for debugging + +_FIELDS = [ + "nodeid", + "kind", + "class", + "name", + "fspath", + "location", + "function", + "markers", + "user_properties", + "attrnames", +] + + +def _summarize_item(item): + if not hasattr(item, "nodeid"): + yield "nodeid", item + return + + for field in _FIELDS: + try: + if field == "kind": + yield field, _get_item_kind(item) + elif field == "class": + yield field, item.__class__.__name__ + elif field == "markers": + yield field, item.own_markers + # yield field, list(item.iter_markers()) + elif field == "attrnames": + yield field, dir(item) + else: + yield field, getattr(item, field, "") + except Exception as exc: + yield field, "".format(exc) + + +def _debug_item(item, showsummary=False): + item._debugging = True + try: + summary = dict(_summarize_item(item)) + finally: + item._debugging = False + + if showsummary: + print(item.nodeid) + for key in ( + "kind", + "class", + "name", + "fspath", + "location", + "func", + "markers", + "props", + ): + print(" {:12} {}".format(key, summary[key])) + print() + + return summary diff --git a/pythonFiles/testing_tools/adapter/util.py b/pythonFiles/testing_tools/adapter/util.py new file mode 100644 index 000000000000..77778c5b6126 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/util.py @@ -0,0 +1,287 @@ +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. + +import contextlib +import io + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO # 2.7 +import os +import os.path +import sys +import tempfile + + +@contextlib.contextmanager +def noop_cm(): + yield + + +def group_attr_names(attrnames): + grouped = { + "dunder": [], + "private": [], + "constants": [], + "classes": [], + "vars": [], + "other": [], + } + for name in attrnames: + if name.startswith("__") and name.endswith("__"): + group = "dunder" + elif name.startswith("_"): + group = "private" + elif name.isupper(): + group = "constants" + elif name.islower(): + group = "vars" + elif name == name.capitalize(): + group = "classes" + else: + group = "other" + grouped[group].append(name) + return grouped + + +if sys.version_info < (3,): + _str_to_lower = lambda val: val.decode().lower() +else: + _str_to_lower = str.lower + + +############################# +# file paths + +_os_path = os.path +# Uncomment to test Windows behavior on non-windows OS: +# import ntpath as _os_path +PATH_SEP = _os_path.sep +NORMCASE = _os_path.normcase +DIRNAME = _os_path.dirname +BASENAME = _os_path.basename +IS_ABS_PATH = _os_path.isabs +PATH_JOIN = _os_path.join + + +def fix_path( + path, + # *, + _pathsep=PATH_SEP, +): + """Return a platform-appropriate path for the given path.""" + if not path: + return "." + return path.replace("/", _pathsep) + + +def fix_relpath( + path, + # *, + _fix_path=fix_path, + _path_isabs=IS_ABS_PATH, + _pathsep=PATH_SEP, +): + """Return a ./-prefixed, platform-appropriate path for the given path.""" + path = _fix_path(path) + if path in (".", ".."): + return path + if not _path_isabs(path): + if not path.startswith("." + _pathsep): + path = "." + _pathsep + path + return path + + +def _resolve_relpath( + path, + rootdir=None, + # *, + _path_isabs=IS_ABS_PATH, + _normcase=NORMCASE, + _pathsep=PATH_SEP, +): + # "path" is expected to use "/" for its path separator, regardless + # of the provided "_pathsep". + + if path.startswith("./"): + return path[2:] + if not _path_isabs(path): + return path + + # Deal with root-dir-as-fileid. + _, sep, relpath = path.partition("/") + if sep and not relpath.replace("/", ""): + return "" + + if rootdir is None: + return None + rootdir = _normcase(rootdir) + if not rootdir.endswith(_pathsep): + rootdir += _pathsep + + if not _normcase(path).startswith(rootdir): + return None + return path[len(rootdir) :] + + +def fix_fileid( + fileid, + rootdir=None, + # *, + normalize=False, + strictpathsep=None, + _pathsep=PATH_SEP, + **kwargs +): + """Return a pathsep-separated file ID ("./"-prefixed) for the given value. + + The file ID may be absolute. If so and "rootdir" is + provided then make the file ID relative. If absolute but "rootdir" + is not provided then leave it absolute. + """ + if not fileid or fileid == ".": + return fileid + + # We default to "/" (forward slash) as the final path sep, since + # that gives us a consistent, cross-platform result. (Windows does + # actually support "/" as a path separator.) Most notably, node IDs + # from pytest use "/" as the path separator by default. + _fileid = fileid.replace(_pathsep, "/") + + relpath = _resolve_relpath( + _fileid, + rootdir, + _pathsep=_pathsep, + # ... + **kwargs + ) + if relpath: # Note that we treat "" here as an absolute path. 
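+        # The "./" prefix marks the id as rootdir-relative, matching the
+        # "./"-prefixed form described in the docstring above.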
+ _fileid = "./" + relpath + + if normalize: + if strictpathsep: + raise ValueError("cannot normalize *and* keep strict path separator") + _fileid = _str_to_lower(_fileid) + elif strictpathsep: + # We do not use _normcase since we want to preserve capitalization. + _fileid = _fileid.replace("/", _pathsep) + return _fileid + + +############################# +# stdio + + +@contextlib.contextmanager +def _replace_fd(file, target): + """ + Temporarily replace the file descriptor for `file`, + for which sys.stdout or sys.stderr is passed. + """ + try: + fd = file.fileno() + except (AttributeError, io.UnsupportedOperation): + # `file` does not have fileno() so it's been replaced from the + # default sys.stdout, etc. Return with noop. + yield + return + target_fd = target.fileno() + + # Keep the original FD to be restored in the finally clause. + dup_fd = os.dup(fd) + try: + # Point the FD at the target. + os.dup2(target_fd, fd) + try: + yield + finally: + # Point the FD back at the original. + os.dup2(dup_fd, fd) + finally: + os.close(dup_fd) + + +@contextlib.contextmanager +def _replace_stdout(target): + orig = sys.stdout + sys.stdout = target + try: + yield orig + finally: + sys.stdout = orig + + +@contextlib.contextmanager +def _replace_stderr(target): + orig = sys.stderr + sys.stderr = target + try: + yield orig + finally: + sys.stderr = orig + + +if sys.version_info < (3,): + _coerce_unicode = lambda s: unicode(s) +else: + _coerce_unicode = lambda s: s + + +@contextlib.contextmanager +def _temp_io(): + sio = StringIO() + with tempfile.TemporaryFile("r+") as tmp: + try: + yield sio, tmp + finally: + tmp.seek(0) + buff = tmp.read() + sio.write(_coerce_unicode(buff)) + + +@contextlib.contextmanager +def hide_stdio(): + """Swallow stdout and stderr.""" + with _temp_io() as (sio, fileobj): + with _replace_fd(sys.stdout, fileobj): + with _replace_stdout(fileobj): + with _replace_fd(sys.stderr, fileobj): + with _replace_stderr(fileobj): + yield sio + + +############################# +# shell + + +def shlex_unsplit(argv): + """Return the shell-safe string for the given arguments. + + This effectively the equivalent of reversing shlex.split(). + """ + argv = [_quote_arg(a) for a in argv] + return " ".join(argv) + + +try: + from shlex import quote as _quote_arg +except ImportError: + + def _quote_arg(arg): + parts = None + for i, c in enumerate(arg): + if c.isspace(): + pass + elif c == '"': + pass + elif c == "'": + c = "'\"'\"'" + else: + continue + if parts is None: + parts = list(arg) + parts[i] = c + if parts is not None: + arg = "'" + "".join(parts) + "'" + return arg diff --git a/pythonFiles/tests/pytestadapter/.data/error_parametrize_discovery.py b/pythonFiles/tests/pytestadapter/.data/error_parametrize_discovery.py new file mode 100644 index 000000000000..8e48224edf3b --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/error_parametrize_discovery.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import pytest + + +# This test has an error which will appear on pytest discovery. +# This error is intentional and is meant to test pytest discovery error handling. 
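+# The decorator below declares the parameters "actual" and "expected", but
+# test_function accepts no arguments, so pytest reports a collection error.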
+@pytest.mark.parametrize("actual,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)]) +def test_function(): + assert True diff --git a/pythonFiles/tests/pytestadapter/.data/error_pytest_import.txt b/pythonFiles/tests/pytestadapter/.data/error_pytest_import.txt new file mode 100644 index 000000000000..7d65dee2ccc6 --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/error_pytest_import.txt @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +@pytest.mark.parametrize("num", range(1, 89)) +def test_odd_even(num): + assert True diff --git a/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py b/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py new file mode 100644 index 000000000000..2506089abe07 --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import pytest + + +@pytest.fixture +def raise_fixture(): + raise Exception("Dummy exception") + + +class TestSomething: + def test_a(self, raise_fixture): + assert True diff --git a/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py b/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py new file mode 100644 index 000000000000..c4dbadc32d6e --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import pytest + + +# Testing pytest with parametrized tests. The first two pass, the third fails. +# The tests ids are parametrize_tests.py::test_adding[3+5-8] and so on. +@pytest.mark.parametrize( # test_marker--test_adding + "actual, expected", [("3+5", 8), ("2+4", 6), ("6+9", 16)] +) +def test_adding(actual, expected): + assert eval(actual) == expected + + +# Testing pytest with parametrized tests. All three pass. +# The tests ids are parametrize_tests.py::test_under_ten[1] and so on. +@pytest.mark.parametrize( # test_marker--test_string + "string", ["hello", "complicated split [] ()"] +) +def test_string(string): + assert string == "hello" diff --git a/pythonFiles/tests/pytestadapter/.data/test_multi_class_nest.py b/pythonFiles/tests/pytestadapter/.data/test_multi_class_nest.py new file mode 100644 index 000000000000..209f9d51915b --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/test_multi_class_nest.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
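+# Test classes nested two levels deep; test_first and test_second fail by
+# design, while test_second2 and test_independent pass.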
+ + +class TestFirstClass: + class TestSecondClass: + def test_second(self): # test_marker--test_second + assert 1 == 2 + + def test_first(self): # test_marker--test_first + assert 1 == 2 + + class TestSecondClass2: + def test_second2(self): # test_marker--test_second2 + assert 1 == 1 + + +def test_independent(): # test_marker--test_independent + assert 1 == 1 diff --git a/pythonFiles/tests/pytestadapter/.data/text_docstring.txt b/pythonFiles/tests/pytestadapter/.data/text_docstring.txt new file mode 100644 index 000000000000..b29132c10b57 --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/text_docstring.txt @@ -0,0 +1,4 @@ +This is a doctest test which passes #test_marker--text_docstring.txt +>>> x = 3 +>>> x +3 diff --git a/pythonFiles/tests/pytestadapter/expected_execution_test_output.py b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py new file mode 100644 index 000000000000..44f3d3d0abce --- /dev/null +++ b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py @@ -0,0 +1,686 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +from .helpers import TEST_DATA_PATH, get_absolute_test_id + +TEST_SUBTRACT_FUNCTION = "unittest_folder/test_subtract.py::TestSubtractFunction::" +TEST_ADD_FUNCTION = "unittest_folder/test_add.py::TestAddFunction::" +SUCCESS = "success" +FAILURE = "failure" + +# This is the expected output for the unittest_folder execute tests +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ ├── test_add_negative_numbers: success +# │ └── test_add_positive_numbers: success +# └── test_subtract.py +# └── TestSubtractFunction +# ├── test_subtract_negative_numbers: failure +# └── test_subtract_positive_numbers: success +test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" +test_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py" +uf_execution_expected_output = { + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_negative_numbers", + test_subtract_path, + ): { + "test": get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_negative_numbers", + test_subtract_path, + ), + "outcome": FAILURE, + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", + test_subtract_path, + ): { + "test": get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", + test_subtract_path, + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + + +# This is the expected output for the unittest_folder only execute add.py tests +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ ├── test_add_negative_numbers: success +# │ └── test_add_positive_numbers: success +test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" + +uf_single_file_expected_output = { + get_absolute_test_id( + 
f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + + +# This is the expected output for the unittest_folder execute only signle method +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ └── test_add_positive_numbers: success +uf_single_method_execution_expected_output = { + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the unittest_folder tests run where two tests +# run are in different files. +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ └── test_add_positive_numbers: success +# └── test_subtract.py +# └── TestSubtractFunction +# └── test_subtract_positive_numbers: success +test_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py" +test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" + +uf_non_adjacent_tests_execution_expected_output = { + get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", test_subtract_path + ): { + "test": get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", + test_subtract_path, + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + + +# This is the expected output for the simple_pytest.py file. +# └── simple_pytest.py +# └── test_function: success +simple_pytest_path = TEST_DATA_PATH / "unittest_folder" / "simple_pytest.py" + +simple_execution_pytest_expected_output = { + get_absolute_test_id("test_function", simple_pytest_path): { + "test": get_absolute_test_id("test_function", simple_pytest_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} + + +# This is the expected output for the unittest_pytest_same_file.py file. 
+# ├── unittest_pytest_same_file.py +# ├── TestExample +# │ └── test_true_unittest: success +# └── test_true_pytest: success +unit_pytest_same_file_path = TEST_DATA_PATH / "unittest_pytest_same_file.py" +unit_pytest_same_file_execution_expected_output = { + get_absolute_test_id( + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + unit_pytest_same_file_path, + ): { + "test": get_absolute_test_id( + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + unit_pytest_same_file_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "unittest_pytest_same_file.py::test_true_pytest", unit_pytest_same_file_path + ): { + "test": get_absolute_test_id( + "unittest_pytest_same_file.py::test_true_pytest", + unit_pytest_same_file_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the error_raised_exception.py file. +# └── error_raise_exception.py +# ├── TestSomething +# │ └── test_a: failure +error_raised_exception_path = TEST_DATA_PATH / "error_raise_exception.py" +error_raised_exception_execution_expected_output = { + get_absolute_test_id( + "error_raise_exception.py::TestSomething::test_a", error_raised_exception_path + ): { + "test": get_absolute_test_id( + "error_raise_exception.py::TestSomething::test_a", + error_raised_exception_path, + ), + "outcome": "error", + "message": "ERROR MESSAGE", + "traceback": "TRACEBACK", + "subtest": None, + } +} + +# This is the expected output for the skip_tests.py file. +# └── test_something: success +# └── test_another_thing: skipped +# └── test_decorator_thing: skipped +# └── test_decorator_thing_2: skipped +# ├── TestClass +# │ └── test_class_function_a: skipped +# │ └── test_class_function_b: skipped + +skip_tests_path = TEST_DATA_PATH / "skip_tests.py" +skip_tests_execution_expected_output = { + get_absolute_test_id("skip_tests.py::test_something", skip_tests_path): { + "test": get_absolute_test_id("skip_tests.py::test_something", skip_tests_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("skip_tests.py::test_another_thing", skip_tests_path): { + "test": get_absolute_test_id( + "skip_tests.py::test_another_thing", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("skip_tests.py::test_decorator_thing", skip_tests_path): { + "test": get_absolute_test_id( + "skip_tests.py::test_decorator_thing", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("skip_tests.py::test_decorator_thing_2", skip_tests_path): { + "test": get_absolute_test_id( + "skip_tests.py::test_decorator_thing_2", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_a", skip_tests_path + ): { + "test": get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_a", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_b", skip_tests_path + ): { + "test": get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_b", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + 
"subtest": None, + }, +} + + +# This is the expected output for the dual_level_nested_folder.py tests +# └── dual_level_nested_folder +# └── test_top_folder.py +# └── test_top_function_t: success +# └── test_top_function_f: failure +# └── nested_folder_one +# └── test_bottom_folder.py +# └── test_bottom_function_t: success +# └── test_bottom_function_f: failure +dual_level_nested_folder_top_path = ( + TEST_DATA_PATH / "dual_level_nested_folder" / "test_top_folder.py" +) +dual_level_nested_folder_bottom_path = ( + TEST_DATA_PATH + / "dual_level_nested_folder" + / "nested_folder_one" + / "test_bottom_folder.py" +) +dual_level_nested_folder_execution_expected_output = { + get_absolute_test_id( + "test_top_folder.py::test_top_function_t", dual_level_nested_folder_top_path + ): { + "test": get_absolute_test_id( + "test_top_folder.py::test_top_function_t", dual_level_nested_folder_top_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_top_folder.py::test_top_function_f", dual_level_nested_folder_top_path + ): { + "test": get_absolute_test_id( + "test_top_folder.py::test_top_function_f", dual_level_nested_folder_top_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + dual_level_nested_folder_bottom_path, + ): { + "test": get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + dual_level_nested_folder_bottom_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + dual_level_nested_folder_bottom_path, + ): { + "test": get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + dual_level_nested_folder_bottom_path, + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the nested_folder tests. +# └── folder_a +# └── folder_b +# └── folder_a +# └── test_nest.py +# └── test_function: success + +nested_folder_path = ( + TEST_DATA_PATH / "folder_a" / "folder_b" / "folder_a" / "test_nest.py" +) +double_nested_folder_expected_execution_output = { + get_absolute_test_id( + "folder_a/folder_b/folder_a/test_nest.py::test_function", nested_folder_path + ): { + "test": get_absolute_test_id( + "folder_a/folder_b/folder_a/test_nest.py::test_function", nested_folder_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} +# This is the expected output for the nested_folder tests. 
+# └── parametrize_tests.py +# └── test_adding[3+5-8]: success +# └── test_adding[2+4-6]: success +# └── test_adding[6+9-16]: failure +parametrize_tests_path = TEST_DATA_PATH / "parametrize_tests.py" + +parametrize_tests_expected_execution_output = { + get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "parametrize_tests.py::test_adding[2+4-6]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[2+4-6]", parametrize_tests_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "parametrize_tests.py::test_adding[6+9-16]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[6+9-16]", parametrize_tests_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the single parameterized tests. +# └── parametrize_tests.py +# └── test_adding[3+5-8]: success +single_parametrize_tests_expected_execution_output = { + get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the single parameterized tests. +# └── text_docstring.txt +# └── text_docstring: success +doc_test_path = TEST_DATA_PATH / "text_docstring.txt" +doctest_pytest_expected_execution_output = { + get_absolute_test_id("text_docstring.txt::text_docstring.txt", doc_test_path): { + "test": get_absolute_test_id( + "text_docstring.txt::text_docstring.txt", doc_test_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} + +# Will run all tests in the cwd that fit the test file naming pattern. 
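+# (Expected output when no test ids are passed to the adapter; see
+# no_test_ids_pytest_execution_expected_output below.)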
+folder_a_path = TEST_DATA_PATH / "folder_a" / "folder_b" / "folder_a" / "test_nest.py" +dual_level_nested_folder_top_path = ( + TEST_DATA_PATH / "dual_level_nested_folder" / "test_top_folder.py" +) +dual_level_nested_folder_bottom_path = ( + TEST_DATA_PATH + / "dual_level_nested_folder" + / "nested_folder_one" + / "test_bottom_folder.py" +) +unittest_folder_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" +unittest_folder_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py" + +no_test_ids_pytest_execution_expected_output = { + get_absolute_test_id("test_function", folder_a_path): { + "test": get_absolute_test_id("test_function", folder_a_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("test_top_function_t", dual_level_nested_folder_top_path): { + "test": get_absolute_test_id( + "test_top_function_t", dual_level_nested_folder_top_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("test_top_function_f", dual_level_nested_folder_top_path): { + "test": get_absolute_test_id( + "test_top_function_f", dual_level_nested_folder_top_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_bottom_function_t", dual_level_nested_folder_bottom_path + ): { + "test": get_absolute_test_id( + "test_bottom_function_t", dual_level_nested_folder_bottom_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_bottom_function_f", dual_level_nested_folder_bottom_path + ): { + "test": get_absolute_test_id( + "test_bottom_function_f", dual_level_nested_folder_bottom_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestAddFunction::test_add_negative_numbers", unittest_folder_add_path + ): { + "test": get_absolute_test_id( + "TestAddFunction::test_add_negative_numbers", unittest_folder_add_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestAddFunction::test_add_positive_numbers", unittest_folder_add_path + ): { + "test": get_absolute_test_id( + "TestAddFunction::test_add_positive_numbers", unittest_folder_add_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestSubtractFunction::test_subtract_negative_numbers", + unittest_folder_subtract_path, + ): { + "test": get_absolute_test_id( + "TestSubtractFunction::test_subtract_negative_numbers", + unittest_folder_subtract_path, + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestSubtractFunction::test_subtract_positive_numbers", + unittest_folder_subtract_path, + ): { + "test": get_absolute_test_id( + "TestSubtractFunction::test_subtract_positive_numbers", + unittest_folder_subtract_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the root folder with the config file referenced. 
+# └── test_a.py +# └── test_a_function: success +test_add_path = TEST_DATA_PATH / "root" / "tests" / "test_a.py" +config_file_pytest_expected_execution_output = { + get_absolute_test_id("tests/test_a.py::test_a_function", test_add_path): { + "test": get_absolute_test_id("tests/test_a.py::test_a_function", test_add_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} + + +# This is the expected output for the test logging file. +# └── test_logging.py +# └── test_logging2: failure +# └── test_logging: success +test_logging_path = TEST_DATA_PATH / "test_logging.py" + +logging_test_expected_execution_output = { + get_absolute_test_id("test_logging.py::test_logging2", test_logging_path): { + "test": get_absolute_test_id( + "test_logging.py::test_logging2", test_logging_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("test_logging.py::test_logging", test_logging_path): { + "test": get_absolute_test_id( + "test_logging.py::test_logging", test_logging_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the test safe clear env vars file. +# └── test_env_vars.py +# └── test_clear_env: success +# └── test_check_env: success + +test_safe_clear_env_vars_path = TEST_DATA_PATH / "test_env_vars.py" +safe_clear_env_vars_expected_execution_output = { + get_absolute_test_id( + "test_env_vars.py::test_clear_env", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_clear_env", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_env_vars.py::test_check_env", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_check_env", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the test unsafe clear env vars file. +# └── test_env_vars.py +# └── test_clear_env_unsafe: success +# └── test_check_env_unsafe: success +unsafe_clear_env_vars_expected_execution_output = { + get_absolute_test_id( + "test_env_vars.py::test_clear_env_unsafe", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_clear_env_unsafe", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_env_vars.py::test_check_env_unsafe", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_check_env_unsafe", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py new file mode 100644 index 000000000000..dd32b61fa262 --- /dev/null +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -0,0 +1,278 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
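+# These tests drive the pytest execution adapter through the runner helpers
+# and compare the returned payloads against expected_execution_test_output.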
+import os +import shutil +from typing import Any, Dict, List + +import pytest + +from tests.pytestadapter import expected_execution_test_output + +from .helpers import TEST_DATA_PATH, runner, runner_with_cwd + + +def test_config_file(): + """Test pytest execution when a config file is specified.""" + args = [ + "-c", + "tests/pytest.ini", + str(TEST_DATA_PATH / "root" / "tests" / "test_a.py::test_a_function"), + ] + new_cwd = TEST_DATA_PATH / "root" + actual = runner_with_cwd(args, new_cwd) + expected_const = ( + expected_execution_test_output.config_file_pytest_expected_execution_output + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(new_cwd) + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const + + +def test_rootdir_specified(): + """Test pytest execution when a --rootdir is specified.""" + rd = f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" + args = [rd, "tests/test_a.py::test_a_function"] + new_cwd = TEST_DATA_PATH / "root" + actual = runner_with_cwd(args, new_cwd) + expected_const = ( + expected_execution_test_output.config_file_pytest_expected_execution_output + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(new_cwd) + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const + + +def test_syntax_error_execution(tmp_path): + """Test pytest execution on a file that has a syntax error. + + Copies the contents of a .txt file to a .py file in the temporary directory + to then run pytest execution on. + + The json should still be returned but the errors list should be present. + + Keyword arguments: + tmp_path -- pytest fixture that creates a temporary directory. + """ + # Saving some files as .txt to avoid that file displaying a syntax error for + # the extension as a whole. Instead, rename it before running this test + # in order to test the error handling. 
+ file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" + temp_dir = tmp_path / "temp_data" + temp_dir.mkdir() + p = temp_dir / "error_syntax_discovery.py" + shutil.copyfile(file_path, p) + actual = runner(["error_syntax_discover.py::test_function"]) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 1 + else: + assert False + + +def test_bad_id_error_execution(): + """Test pytest discovery with a non-existent test_id. + + The json should still be returned but the errors list should be present. + """ + actual = runner(["not/a/real::test_id"]) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 1 + else: + assert False + + +@pytest.mark.parametrize( + "test_ids, expected_const", + [ + ( + [ + "test_env_vars.py::test_clear_env", + "test_env_vars.py::test_check_env", + ], + expected_execution_test_output.safe_clear_env_vars_expected_execution_output, + ), + ( + [ + "skip_tests.py::test_something", + "skip_tests.py::test_another_thing", + "skip_tests.py::test_decorator_thing", + "skip_tests.py::test_decorator_thing_2", + "skip_tests.py::TestClass::test_class_function_a", + "skip_tests.py::TestClass::test_class_function_b", + ], + expected_execution_test_output.skip_tests_execution_expected_output, + ), + ( + ["error_raise_exception.py::TestSomething::test_a"], + expected_execution_test_output.error_raised_exception_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", + ], + expected_execution_test_output.uf_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + ], + expected_execution_test_output.uf_single_file_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + ], + expected_execution_test_output.uf_single_method_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + ], + expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output, + ), + ( + [ + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + 
"unittest_pytest_same_file.py::test_true_pytest", + ], + expected_execution_test_output.unit_pytest_same_file_execution_expected_output, + ), + ( + [ + "dual_level_nested_folder/test_top_folder.py::test_top_function_t", + "dual_level_nested_folder/test_top_folder.py::test_top_function_f", + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + ], + expected_execution_test_output.dual_level_nested_folder_execution_expected_output, + ), + ( + ["folder_a/folder_b/folder_a/test_nest.py::test_function"], + expected_execution_test_output.double_nested_folder_expected_execution_output, + ), + ( + [ + "parametrize_tests.py::test_adding[3+5-8]", + "parametrize_tests.py::test_adding[2+4-6]", + "parametrize_tests.py::test_adding[6+9-16]", + ], + expected_execution_test_output.parametrize_tests_expected_execution_output, + ), + ( + [ + "parametrize_tests.py::test_adding[3+5-8]", + ], + expected_execution_test_output.single_parametrize_tests_expected_execution_output, + ), + ( + [ + "text_docstring.txt::text_docstring.txt", + ], + expected_execution_test_output.doctest_pytest_expected_execution_output, + ), + ( + ["test_logging.py::test_logging2", "test_logging.py::test_logging"], + expected_execution_test_output.logging_test_expected_execution_output, + ), + ], +) +def test_pytest_execution(test_ids, expected_const): + """ + Test that pytest discovery works as expected where run pytest is always successful + but the actual test results are both successes and failures.: + 1: skip_tests_execution_expected_output: test run on a file with skipped tests. + 2. error_raised_exception_execution_expected_output: test run on a file that raises an exception. + 3. uf_execution_expected_output: unittest tests run on multiple files. + 4. uf_single_file_expected_output: test run on a single file. + 5. uf_single_method_execution_expected_output: test run on a single method in a file. + 6. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer. + 7. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests. + 8. dual_level_nested_folder_execution_expected_output: test run on a file with one test file + at the top level and one test file in a nested folder. + 9. double_nested_folder_expected_execution_output: test run on a double nested folder. + 10. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs. + 11. single_parametrize_tests_expected_execution_output: test run on single parametrize test. + 12. doctest_pytest_expected_execution_output: test run on doctest file. + 13. logging_test_expected_execution_output: test run on a file with logging. + + + Keyword arguments: + test_ids -- an array of test_ids to run. + expected_const -- a dictionary of the expected output from running pytest discovery on the files. 
+ """ + args = test_ids + actual = runner(args) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + actual_result_dict.update(actual_item["result"]) + for key in actual_result_dict: + if ( + actual_result_dict[key]["outcome"] == "failure" + or actual_result_dict[key]["outcome"] == "error" + ): + actual_result_dict[key]["message"] = "ERROR MESSAGE" + if actual_result_dict[key]["traceback"] is not None: + actual_result_dict[key]["traceback"] = "TRACEBACK" + assert actual_result_dict == expected_const diff --git a/pythonFiles/tests/testing_tools/__init__.py b/pythonFiles/tests/testing_tools/__init__.py new file mode 100644 index 000000000000..5b7f7a925cc0 --- /dev/null +++ b/pythonFiles/tests/testing_tools/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/pythonFiles/tests/testing_tools/adapter/__init__.py b/pythonFiles/tests/testing_tools/adapter/__init__.py new file mode 100644 index 000000000000..5b7f7a925cc0 --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/pythonFiles/tests/testing_tools/adapter/pytest/__init__.py b/pythonFiles/tests/testing_tools/adapter/pytest/__init__.py new file mode 100644 index 000000000000..5b7f7a925cc0 --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/pytest/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/pythonFiles/tests/testing_tools/adapter/pytest/test_discovery.py b/pythonFiles/tests/testing_tools/adapter/pytest/test_discovery.py new file mode 100644 index 000000000000..8ef4305f40b9 --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/pytest/test_discovery.py @@ -0,0 +1,1553 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import print_function, unicode_literals + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO # type: ignore (for Pylance) +import os +import sys +import tempfile +import unittest + +import pytest +import _pytest.doctest + +from .... import util +from testing_tools.adapter import util as adapter_util +from testing_tools.adapter.pytest import _pytest_item as pytest_item +from testing_tools.adapter import info +from testing_tools.adapter.pytest import _discovery + +# In Python 3.8 __len__ is called twice, which impacts some of the test assertions we do below. 
+PYTHON_38_OR_LATER = sys.version_info[0] >= 3 and sys.version_info[1] >= 8 + + +class StubPyTest(util.StubProxy): + def __init__(self, stub=None): + super(StubPyTest, self).__init__(stub, "pytest") + self.return_main = 0 + + def main(self, args, plugins): + self.add_call("main", None, {"args": args, "plugins": plugins}) + return self.return_main + + +class StubPlugin(util.StubProxy): + + _started = True + + def __init__(self, stub=None, tests=None): + super(StubPlugin, self).__init__(stub, "plugin") + if tests is None: + tests = StubDiscoveredTests(self.stub) + self._tests = tests + + def __getattr__(self, name): + if not name.startswith("pytest_"): + raise AttributeError(name) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or None) + + return func + + +class StubDiscoveredTests(util.StubProxy): + + NOT_FOUND = object() + + def __init__(self, stub=None): + super(StubDiscoveredTests, self).__init__(stub, "discovered") + self.return_items = [] + self.return_parents = [] + + def __len__(self): + self.add_call("__len__", None, None) + return len(self.return_items) + + def __getitem__(self, index): + self.add_call("__getitem__", (index,), None) + return self.return_items[index] + + @property + def parents(self): + self.add_call("parents", None, None) + return self.return_parents + + def reset(self): + self.add_call("reset", None, None) + + def add_test(self, test, parents): + self.add_call("add_test", None, {"test": test, "parents": parents}) + + +class FakeFunc(object): + def __init__(self, name): + self.__name__ = name + + +class FakeMarker(object): + def __init__(self, name): + self.name = name + + +class StubPytestItem(util.StubProxy): + + _debugging = False + _hasfunc = True + + def __init__(self, stub=None, **attrs): + super(StubPytestItem, self).__init__(stub, "pytest.Item") + if attrs.get("function") is None: + attrs.pop("function", None) + self._hasfunc = False + + attrs.setdefault("user_properties", []) + + slots = getattr(type(self), "__slots__", None) + if slots: + for name, value in attrs.items(): + if name in self.__slots__: + setattr(self, name, value) + else: + self.__dict__[name] = value + else: + self.__dict__.update(attrs) + + if "own_markers" not in attrs: + self.own_markers = () + + def __repr__(self): + return object.__repr__(self) + + def __getattr__(self, name): + if not self._debugging: + self.add_call(name + " (attr)", None, None) + if name == "function": + if not self._hasfunc: + raise AttributeError(name) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or None) + + return func + + +class StubSubtypedItem(StubPytestItem): + @classmethod + def from_args(cls, *args, **kwargs): + if not hasattr(cls, "from_parent"): + return cls(*args, **kwargs) + self = cls.from_parent(None, name=kwargs["name"], runner=None, dtest=None) + self.__init__(*args, **kwargs) + return self + + def __init__(self, *args, **kwargs): + super(StubSubtypedItem, self).__init__(*args, **kwargs) + if "nodeid" in self.__dict__: + self._nodeid = self.__dict__.pop("nodeid") + + @property + def location(self): + return self.__dict__.get("location") + + +class StubFunctionItem(StubSubtypedItem, pytest.Function): + @property + def function(self): + return self.__dict__.get("function") + + +def create_stub_function_item(*args, **kwargs): + return StubFunctionItem.from_args(*args, **kwargs) + + +class StubDoctestItem(StubSubtypedItem, _pytest.doctest.DoctestItem): + pass + + +def create_stub_doctest_item(*args, **kwargs): + return 
StubDoctestItem.from_args(*args, **kwargs) + + +class StubPytestSession(util.StubProxy): + def __init__(self, stub=None): + super(StubPytestSession, self).__init__(stub, "pytest.Session") + + def __getattr__(self, name): + self.add_call(name + " (attr)", None, None) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or None) + + return func + + +class StubPytestConfig(util.StubProxy): + def __init__(self, stub=None): + super(StubPytestConfig, self).__init__(stub, "pytest.Config") + + def __getattr__(self, name): + self.add_call(name + " (attr)", None, None) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or None) + + return func + + +def generate_parse_item(pathsep): + if pathsep == "\\": + + def normcase(path): + path = path.lower() + return path.replace("/", "\\") + + else: + raise NotImplementedError + ########## + def _fix_fileid(*args): + return adapter_util.fix_fileid( + *args, + **dict( + # dependency injection + _normcase=normcase, + _pathsep=pathsep, + ) + ) + + def _normalize_test_id(*args): + return pytest_item._normalize_test_id( + *args, + **dict( + # dependency injection + _fix_fileid=_fix_fileid, + _pathsep=pathsep, + ) + ) + + def _iter_nodes(*args): + return pytest_item._iter_nodes( + *args, + **dict( + # dependency injection + _normalize_test_id=_normalize_test_id, + _normcase=normcase, + _pathsep=pathsep, + ) + ) + + def _parse_node_id(*args): + return pytest_item._parse_node_id( + *args, + **dict( + # dependency injection + _iter_nodes=_iter_nodes, + ) + ) + + ########## + def _split_fspath(*args): + return pytest_item._split_fspath( + *args, + **dict( + # dependency injection + _normcase=normcase, + ) + ) + + ########## + def _matches_relfile(*args): + return pytest_item._matches_relfile( + *args, + **dict( + # dependency injection + _normcase=normcase, + _pathsep=pathsep, + ) + ) + + def _is_legacy_wrapper(*args): + return pytest_item._is_legacy_wrapper( + *args, + **dict( + # dependency injection + _pathsep=pathsep, + ) + ) + + def _get_location(*args): + return pytest_item._get_location( + *args, + **dict( + # dependency injection + _matches_relfile=_matches_relfile, + _is_legacy_wrapper=_is_legacy_wrapper, + _pathsep=pathsep, + ) + ) + + ########## + def _parse_item(item): + return pytest_item.parse_item( + item, + **dict( + # dependency injection + _parse_node_id=_parse_node_id, + _split_fspath=_split_fspath, + _get_location=_get_location, + ) + ) + + return _parse_item + + +################################## +# tests + + +def fake_pytest_main(stub, use_fd, pytest_stdout): + def ret(args, plugins): + stub.add_call("pytest.main", None, {"args": args, "plugins": plugins}) + if use_fd: + os.write(sys.stdout.fileno(), pytest_stdout.encode()) + else: + print(pytest_stdout, end="") + return 0 + + return ret + + +class DiscoverTests(unittest.TestCase): + + DEFAULT_ARGS = [ + "--collect-only", + ] + + def test_basic(self): + stub = util.Stub() + stubpytest = StubPyTest(stub) + plugin = StubPlugin(stub) + expected = [] + plugin.discovered = expected + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + + # In Python 3.8 __len__ is called twice. 
+ if PYTHON_38_OR_LATER: + calls.insert(3, ("discovered.__len__", None, None)) + + parents, tests = _discovery.discover( + [], _pytest_main=stubpytest.main, _plugin=plugin + ) + + self.assertEqual(parents, []) + self.assertEqual(tests, expected) + self.assertEqual(stub.calls, calls) + + def test_failure(self): + stub = util.Stub() + pytest = StubPyTest(stub) + pytest.return_main = 2 + plugin = StubPlugin(stub) + + with self.assertRaises(Exception): + _discovery.discover([], _pytest_main=pytest.main, _plugin=plugin) + + self.assertEqual( + stub.calls, + [ + # There's only one call. + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ], + ) + + def test_no_tests_found(self): + stub = util.Stub() + pytest = StubPyTest(stub) + pytest.return_main = 5 + plugin = StubPlugin(stub) + expected = [] + plugin.discovered = expected + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + + # In Python 3.8 __len__ is called twice. + if PYTHON_38_OR_LATER: + calls.insert(3, ("discovered.__len__", None, None)) + + parents, tests = _discovery.discover( + [], _pytest_main=pytest.main, _plugin=plugin + ) + + self.assertEqual(parents, []) + self.assertEqual(tests, expected) + self.assertEqual(stub.calls, calls) + + def test_stdio_hidden_file(self): + stub = util.Stub() + + plugin = StubPlugin(stub) + plugin.discovered = [] + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + pytest_stdout = "spamspamspamspamspamspamspammityspam" + + # In Python 3.8 __len__ is called twice. + if PYTHON_38_OR_LATER: + calls.insert(3, ("discovered.__len__", None, None)) + + # to simulate stdio behavior in methods like os.dup, + # use actual files (rather than StringIO) + with tempfile.TemporaryFile("r+") as mock: + sys.stdout = mock + try: + _discovery.discover( + [], + hidestdio=True, + _pytest_main=fake_pytest_main(stub, False, pytest_stdout), + _plugin=plugin, + ) + finally: + sys.stdout = sys.__stdout__ + + mock.seek(0) + captured = mock.read() + + self.assertEqual(captured, "") + self.assertEqual(stub.calls, calls) + + def test_stdio_hidden_fd(self): + # simulate cases where stdout comes from the lower layer than sys.stdout + # via file descriptors (e.g., from cython) + stub = util.Stub() + plugin = StubPlugin(stub) + pytest_stdout = "spamspamspamspamspamspamspammityspam" + + # Replace with contextlib.redirect_stdout() once Python 2.7 support is dropped. + sys.stdout = StringIO() + try: + _discovery.discover( + [], + hidestdio=True, + _pytest_main=fake_pytest_main(stub, True, pytest_stdout), + _plugin=plugin, + ) + captured = sys.stdout.read() + self.assertEqual(captured, "") + finally: + sys.stdout = sys.__stdout__ + + def test_stdio_not_hidden_file(self): + stub = util.Stub() + + plugin = StubPlugin(stub) + plugin.discovered = [] + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + pytest_stdout = "spamspamspamspamspamspamspammityspam" + + # In Python 3.8 __len__ is called twice. 
+ if PYTHON_38_OR_LATER: + calls.insert(3, ("discovered.__len__", None, None)) + + buf = StringIO() + + sys.stdout = buf + try: + _discovery.discover( + [], + hidestdio=False, + _pytest_main=fake_pytest_main(stub, False, pytest_stdout), + _plugin=plugin, + ) + finally: + sys.stdout = sys.__stdout__ + captured = buf.getvalue() + + self.assertEqual(captured, pytest_stdout) + self.assertEqual(stub.calls, calls) + + def test_stdio_not_hidden_fd(self): + # simulate cases where stdout comes from the lower layer than sys.stdout + # via file descriptors (e.g., from cython) + stub = util.Stub() + plugin = StubPlugin(stub) + pytest_stdout = "spamspamspamspamspamspamspammityspam" + stub.calls = [] + with tempfile.TemporaryFile("r+") as mock: + sys.stdout = mock + try: + _discovery.discover( + [], + hidestdio=False, + _pytest_main=fake_pytest_main(stub, True, pytest_stdout), + _plugin=plugin, + ) + finally: + mock.seek(0) + captured = sys.stdout.read() + sys.stdout = sys.__stdout__ + self.assertEqual(captured, pytest_stdout) + + +class CollectorTests(unittest.TestCase): + def test_modifyitems(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + config = StubPytestConfig(stub) + collector = _discovery.TestCollector(tests=discovered) + + testroot = adapter_util.fix_path("/a/b/c") + relfile1 = adapter_util.fix_path("./test_spam.py") + relfile2 = adapter_util.fix_path("x/y/z/test_eggs.py") + + collector.pytest_collection_modifyitems( + session, + config, + [ + create_stub_function_item( + stub, + nodeid="test_spam.py::SpamTests::test_one", + name="test_one", + location=("test_spam.py", 12, "SpamTests.test_one"), + fspath=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_one"), + ), + create_stub_function_item( + stub, + nodeid="test_spam.py::SpamTests::test_other", + name="test_other", + location=("test_spam.py", 19, "SpamTests.test_other"), + fspath=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_other"), + ), + create_stub_function_item( + stub, + nodeid="test_spam.py::test_all", + name="test_all", + location=("test_spam.py", 144, "test_all"), + fspath=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_all"), + ), + create_stub_function_item( + stub, + nodeid="test_spam.py::test_each[10-10]", + name="test_each[10-10]", + location=("test_spam.py", 273, "test_each[10-10]"), + fspath=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_each"), + ), + create_stub_function_item( + stub, + nodeid=relfile2 + "::All::BasicTests::test_first", + name="test_first", + location=(relfile2, 31, "All.BasicTests.test_first"), + fspath=adapter_util.PATH_JOIN(testroot, relfile2), + function=FakeFunc("test_first"), + ), + create_stub_function_item( + stub, + nodeid=relfile2 + "::All::BasicTests::test_each[1+2-3]", + name="test_each[1+2-3]", + location=(relfile2, 62, "All.BasicTests.test_each[1+2-3]"), + fspath=adapter_util.PATH_JOIN(testroot, relfile2), + function=FakeFunc("test_each"), + own_markers=[ + FakeMarker(v) + for v in [ + # supported + "skip", + "skipif", + "xfail", + # duplicate + "skip", + # ignored (pytest-supported) + "parameterize", + "usefixtures", + "filterwarnings", + # ignored (custom) + "timeout", + ] + ], + ), + ], + ) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py::SpamTests", "SpamTests", "suite"), + ("./test_spam.py", 
"test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::SpamTests::test_one", + name="test_one", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="SpamTests.test_one", + sub=None, + ), + source="{}:{}".format(relfile1, 13), + markers=None, + parentid="./test_spam.py::SpamTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py::SpamTests", "SpamTests", "suite"), + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::SpamTests::test_other", + name="test_other", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="SpamTests.test_other", + sub=None, + ), + source="{}:{}".format(relfile1, 20), + markers=None, + parentid="./test_spam.py::SpamTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::test_all", + name="test_all", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="test_all", + sub=None, + ), + source="{}:{}".format(relfile1, 145), + markers=None, + parentid="./test_spam.py", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py::test_each", "test_each", "function"), + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::test_each[10-10]", + name="test_each[10-10]", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="test_each", + sub=["[10-10]"], + ), + source="{}:{}".format(relfile1, 274), + markers=None, + parentid="./test_spam.py::test_each", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::All::BasicTests", + "BasicTests", + "suite", + ), + ("./x/y/z/test_eggs.py::All", "All", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::All::BasicTests::test_first", + name="test_first", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile2), + func="All.BasicTests.test_first", + sub=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile2), 32 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::All::BasicTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::All::BasicTests::test_each", + "test_each", + "function", + ), + ( + "./x/y/z/test_eggs.py::All::BasicTests", + "BasicTests", + "suite", + ), + ("./x/y/z/test_eggs.py::All", "All", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::All::BasicTests::test_each[1+2-3]", + name="test_each[1+2-3]", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile2), + func="All.BasicTests.test_each", + sub=["[1+2-3]"], + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile2), 63 + ), + markers=["expected-failure", "skip", "skip-if"], + parentid="./x/y/z/test_eggs.py::All::BasicTests::test_each", + ), + ), + ), + ], + ) + + def test_finish(self): + stub = util.Stub() + 
discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::test_spam", + name="test_spam", + location=(relfile, 12, "SpamTests.test_spam"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests", + ), + ), + ), + ], + ) + + def test_doctest(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + doctestfile = adapter_util.fix_path("x/test_doctest.txt") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_doctest_item( + stub, + nodeid=doctestfile + "::test_doctest.txt", + name="test_doctest.txt", + location=(doctestfile, 0, "[doctest] test_doctest.txt"), + fspath=adapter_util.PATH_JOIN(testroot, doctestfile), + ), + # With --doctest-modules + create_stub_doctest_item( + stub, + nodeid=relfile + "::test_eggs", + name="test_eggs", + location=(relfile, 0, "[doctest] test_eggs"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + ), + create_stub_doctest_item( + stub, + nodeid=relfile + "::test_eggs.TestSpam", + name="test_eggs.TestSpam", + location=(relfile, 12, "[doctest] test_eggs.TestSpam"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + ), + create_stub_doctest_item( + stub, + nodeid=relfile + "::test_eggs.TestSpam.TestEggs", + name="test_eggs.TestSpam.TestEggs", + location=(relfile, 27, "[doctest] test_eggs.TestSpam.TestEggs"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/test_doctest.txt", "test_doctest.txt", "file"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/test_doctest.txt::test_doctest.txt", + name="test_doctest.txt", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(doctestfile), + func=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(doctestfile), 1 + ), + markers=[], + parentid="./x/test_doctest.txt", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + 
id="./x/y/z/test_eggs.py::test_eggs", + name="test_eggs", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func=None, + ), + source="{}:{}".format(adapter_util.fix_relpath(relfile), 1), + markers=[], + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::test_eggs.TestSpam", + name="test_eggs.TestSpam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=[], + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::test_eggs.TestSpam.TestEggs", + name="test_eggs.TestSpam.TestEggs", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 28 + ), + markers=[], + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ], + ) + + def test_nested_brackets(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::test_spam[a-[b]-c]", + name="test_spam[a-[b]-c]", + location=(relfile, 12, "SpamTests.test_spam[a-[b]-c]"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::SpamTests::test_spam", + "test_spam", + "function", + ), + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam[a-[b]-c]", + name="test_spam[a-[b]-c]", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=["[a-[b]-c]"], + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests::test_spam", + ), + ), + ), + ], + ) + + def test_nested_suite(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::Ham::Eggs::test_spam", + name="test_spam", + location=(relfile, 12, "SpamTests.Ham.Eggs.test_spam"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = 
_discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::SpamTests::Ham::Eggs", + "Eggs", + "suite", + ), + ("./x/y/z/test_eggs.py::SpamTests::Ham", "Ham", "suite"), + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::Ham::Eggs::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.Ham.Eggs.test_spam", + sub=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests::Ham::Eggs", + ), + ), + ), + ], + ) + + def test_windows(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = r"C:\A\B\C" + altroot = testroot.replace("\\", "/") + relfile = r"X\Y\Z\test_Eggs.py" + session.items = [ + # typical: + create_stub_function_item( + stub, + # pytest always uses "/" as the path separator in node IDs: + nodeid="X/Y/Z/test_Eggs.py::SpamTests::test_spam", + name="test_spam", + # normal path separator (contrast with nodeid): + location=(relfile, 12, "SpamTests.test_spam"), + # path separator matches location: + fspath=testroot + "\\" + relfile, + function=FakeFunc("test_spam"), + ), + ] + tests = [ + # permutations of path separators + (r"X/test_a.py", "\\", "\\"), # typical + (r"X/test_b.py", "\\", "/"), + (r"X/test_c.py", "/", "\\"), + (r"X/test_d.py", "/", "/"), + (r"X\test_e.py", "\\", "\\"), + (r"X\test_f.py", "\\", "/"), + (r"X\test_g.py", "/", "\\"), + (r"X\test_h.py", "/", "/"), + ] + for fileid, locfile, fspath in tests: + if locfile == "/": + locfile = fileid.replace("\\", "/") + elif locfile == "\\": + locfile = fileid.replace("/", "\\") + if fspath == "/": + fspath = (testroot + "/" + fileid).replace("\\", "/") + elif fspath == "\\": + fspath = (testroot + "/" + fileid).replace("/", "\\") + session.items.append( + create_stub_function_item( + stub, + nodeid=fileid + "::test_spam", + name="test_spam", + location=(locfile, 12, "test_spam"), + fspath=fspath, + function=FakeFunc("test_spam"), + ) + ) + collector = _discovery.TestCollector(tests=discovered) + if os.name != "nt": + collector.parse_item = generate_parse_item("\\") + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/Y/Z/test_Eggs.py::SpamTests", "SpamTests", "suite"), + (r"./X/Y/Z/test_Eggs.py", "test_Eggs.py", "file"), + (r"./X/Y/Z", "Z", "folder"), + (r"./X/Y", "Y", "folder"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/Y/Z/test_Eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, # not normalized + relfile=r".\X\Y\Z\test_Eggs.py", # not normalized + func="SpamTests.test_spam", + sub=None, + ), + source=r".\X\Y\Z\test_Eggs.py:13", # not normalized + markers=None, + parentid=r"./X/Y/Z/test_Eggs.py::SpamTests", + ), + ), + ), + # permutations + # (*all* the IDs use "/") + # (source path separator 
should match relfile, not location) + # /, \, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_a.py", "test_a.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_a.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_a.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_a.py:13", + markers=None, + parentid=r"./X/test_a.py", + ), + ), + ), + # /, \, / + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_b.py", "test_b.py", "file"), + (r"./X", "X", "folder"), + (".", altroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_b.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=altroot, + relfile=r"./X/test_b.py", + func="test_spam", + sub=None, + ), + source=r"./X/test_b.py:13", + markers=None, + parentid=r"./X/test_b.py", + ), + ), + ), + # /, /, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_c.py", "test_c.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_c.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_c.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_c.py:13", + markers=None, + parentid=r"./X/test_c.py", + ), + ), + ), + # /, /, / + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_d.py", "test_d.py", "file"), + (r"./X", "X", "folder"), + (".", altroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_d.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=altroot, + relfile=r"./X/test_d.py", + func="test_spam", + sub=None, + ), + source=r"./X/test_d.py:13", + markers=None, + parentid=r"./X/test_d.py", + ), + ), + ), + # \, \, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_e.py", "test_e.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_e.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_e.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_e.py:13", + markers=None, + parentid=r"./X/test_e.py", + ), + ), + ), + # \, \, / + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_f.py", "test_f.py", "file"), + (r"./X", "X", "folder"), + (".", altroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_f.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=altroot, + relfile=r"./X/test_f.py", + func="test_spam", + sub=None, + ), + source=r"./X/test_f.py:13", + markers=None, + parentid=r"./X/test_f.py", + ), + ), + ), + # \, /, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_g.py", "test_g.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_g.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_g.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_g.py:13", + markers=None, + parentid=r"./X/test_g.py", + ), + ), + ), + # \, /, / + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_h.py", "test_h.py", "file"), + (r"./X", "X", "folder"), + (".", altroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_h.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=altroot, + relfile=r"./X/test_h.py", + 
func="test_spam", + sub=None, + ), + source=r"./X/test_h.py:13", + markers=None, + parentid=r"./X/test_h.py", + ), + ), + ), + ], + ) + + def test_mysterious_parens(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::()::()::test_spam", + name="test_spam", + location=(relfile, 12, "SpamTests.test_spam"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=[], + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests", + ), + ), + ), + ], + ) + + def test_imported_test(self): + # pytest will even discover tests that were imported from + # another module! + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + srcfile = adapter_util.fix_path("x/y/z/_extern.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::test_spam", + name="test_spam", + location=(srcfile, 12, "SpamTests.test_spam"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + create_stub_function_item( + stub, + nodeid=relfile + "::test_ham", + name="test_ham", + location=(srcfile, 3, "test_ham"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(srcfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::test_ham", + 
name="test_ham", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="test_ham", + sub=None, + ), + source="{}:{}".format(adapter_util.fix_relpath(srcfile), 4), + markers=None, + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ], + ) diff --git a/pythonFiles/tests/testing_tools/adapter/test___main__.py b/pythonFiles/tests/testing_tools/adapter/test___main__.py new file mode 100644 index 000000000000..d0a778c1d024 --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/test___main__.py @@ -0,0 +1,199 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + +from ...util import Stub, StubProxy +from testing_tools.adapter.__main__ import ( + parse_args, + main, + UnsupportedToolError, + UnsupportedCommandError, +) + + +class StubTool(StubProxy): + def __init__(self, name, stub=None): + super(StubTool, self).__init__(stub, name) + self.return_discover = None + + def discover(self, args, **kwargs): + self.add_call("discover", (args,), kwargs) + if self.return_discover is None: + raise NotImplementedError + return self.return_discover + + +class StubReporter(StubProxy): + def __init__(self, stub=None): + super(StubReporter, self).__init__(stub, "reporter") + + def report(self, tests, parents, **kwargs): + self.add_call("report", (tests, parents), kwargs or None) + + +################################## +# tests + + +class ParseGeneralTests(unittest.TestCase): + def test_unsupported_command(self): + with self.assertRaises(SystemExit): + parse_args(["run", "pytest"]) + with self.assertRaises(SystemExit): + parse_args(["debug", "pytest"]) + with self.assertRaises(SystemExit): + parse_args(["???", "pytest"]) + + +class ParseDiscoverTests(unittest.TestCase): + def test_pytest_default(self): + tool, cmd, args, toolargs = parse_args( + [ + "discover", + "pytest", + ] + ) + + self.assertEqual(tool, "pytest") + self.assertEqual(cmd, "discover") + self.assertEqual(args, {"pretty": False, "hidestdio": True, "simple": False}) + self.assertEqual(toolargs, []) + + def test_pytest_full(self): + tool, cmd, args, toolargs = parse_args( + [ + "discover", + "pytest", + # no adapter-specific options yet + "--", + "--strict", + "--ignore", + "spam,ham,eggs", + "--pastebin=xyz", + "--no-cov", + "-d", + ] + ) + + self.assertEqual(tool, "pytest") + self.assertEqual(cmd, "discover") + self.assertEqual(args, {"pretty": False, "hidestdio": True, "simple": False}) + self.assertEqual( + toolargs, + [ + "--strict", + "--ignore", + "spam,ham,eggs", + "--pastebin=xyz", + "--no-cov", + "-d", + ], + ) + + def test_pytest_opts(self): + tool, cmd, args, toolargs = parse_args( + [ + "discover", + "pytest", + "--simple", + "--no-hide-stdio", + "--pretty", + ] + ) + + self.assertEqual(tool, "pytest") + self.assertEqual(cmd, "discover") + self.assertEqual(args, {"pretty": True, "hidestdio": False, "simple": True}) + self.assertEqual(toolargs, []) + + def test_unsupported_tool(self): + with self.assertRaises(SystemExit): + parse_args(["discover", "unittest"]) + with self.assertRaises(SystemExit): + parse_args(["discover", "???"]) + + +class MainTests(unittest.TestCase): + + # TODO: We could use an integration test for pytest.discover(). 
+ + def test_discover(self): + stub = Stub() + tool = StubTool("spamspamspam", stub) + tests, parents = object(), object() + tool.return_discover = (parents, tests) + reporter = StubReporter(stub) + main( + tool.name, + "discover", + {"spam": "eggs"}, + [], + _tools={ + tool.name: { + "discover": tool.discover, + } + }, + _reporters={ + "discover": reporter.report, + }, + ) + + self.assertEqual( + tool.calls, + [ + ("spamspamspam.discover", ([],), {"spam": "eggs"}), + ("reporter.report", (tests, parents), {"spam": "eggs"}), + ], + ) + + def test_unsupported_tool(self): + with self.assertRaises(UnsupportedToolError): + main( + "unittest", + "discover", + {"spam": "eggs"}, + [], + _tools={"pytest": None}, + _reporters=None, + ) + with self.assertRaises(UnsupportedToolError): + main( + "???", + "discover", + {"spam": "eggs"}, + [], + _tools={"pytest": None}, + _reporters=None, + ) + + def test_unsupported_command(self): + tool = StubTool("pytest") + with self.assertRaises(UnsupportedCommandError): + main( + "pytest", + "run", + {"spam": "eggs"}, + [], + _tools={"pytest": {"discover": tool.discover}}, + _reporters=None, + ) + with self.assertRaises(UnsupportedCommandError): + main( + "pytest", + "debug", + {"spam": "eggs"}, + [], + _tools={"pytest": {"discover": tool.discover}}, + _reporters=None, + ) + with self.assertRaises(UnsupportedCommandError): + main( + "pytest", + "???", + {"spam": "eggs"}, + [], + _tools={"pytest": {"discover": tool.discover}}, + _reporters=None, + ) + self.assertEqual(tool.calls, []) diff --git a/pythonFiles/tests/testing_tools/adapter/test_discovery.py b/pythonFiles/tests/testing_tools/adapter/test_discovery.py new file mode 100644 index 000000000000..ec3d198b0108 --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/test_discovery.py @@ -0,0 +1,675 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +from __future__ import absolute_import, print_function + +import unittest + +from testing_tools.adapter.util import fix_path, fix_relpath +from testing_tools.adapter.info import SingleTestInfo, SingleTestPath, ParentInfo +from testing_tools.adapter.discovery import fix_nodeid, DiscoveredTests + + +def _fix_nodeid(nodeid): + + nodeid = nodeid.replace("\\", "/") + if not nodeid.startswith("./"): + nodeid = "./" + nodeid + return nodeid + + +class DiscoveredTestsTests(unittest.TestCase): + def test_list(self): + testroot = fix_path("/a/b/c") + relfile = fix_path("./test_spam.py") + tests = [ + SingleTestInfo( + # missing "./": + id="test_spam.py::test_each[10-10]", + name="test_each[10-10]", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="test_each", + sub=["[10-10]"], + ), + source="{}:{}".format(relfile, 10), + markers=None, + # missing "./": + parentid="test_spam.py::test_each", + ), + SingleTestInfo( + id="test_spam.py::All::BasicTests::test_first", + name="test_first", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="All.BasicTests.test_first", + sub=None, + ), + source="{}:{}".format(relfile, 62), + markers=None, + parentid="test_spam.py::All::BasicTests", + ), + ] + allparents = [ + [ + (fix_path("./test_spam.py::test_each"), "test_each", "function"), + (fix_path("./test_spam.py"), "test_spam.py", "file"), + (".", testroot, "folder"), + ], + [ + (fix_path("./test_spam.py::All::BasicTests"), "BasicTests", "suite"), + (fix_path("./test_spam.py::All"), "All", "suite"), + (fix_path("./test_spam.py"), "test_spam.py", "file"), + (".", testroot, "folder"), + ], + ] + expected = [ + test._replace(id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid)) + for test in tests + ] + discovered = DiscoveredTests() + for test, parents in zip(tests, allparents): + discovered.add_test(test, parents) + size = len(discovered) + items = [discovered[0], discovered[1]] + snapshot = list(discovered) + + self.maxDiff = None + self.assertEqual(size, 2) + self.assertEqual(items, expected) + self.assertEqual(snapshot, expected) + + def test_reset(self): + testroot = fix_path("/a/b/c") + discovered = DiscoveredTests() + discovered.add_test( + SingleTestInfo( + id="./test_spam.py::test_each", + name="test_each", + path=SingleTestPath( + root=testroot, + relfile="test_spam.py", + func="test_each", + ), + source="test_spam.py:11", + markers=[], + parentid="./test_spam.py", + ), + [ + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + ) + + before = len(discovered), len(discovered.parents) + discovered.reset() + after = len(discovered), len(discovered.parents) + + self.assertEqual(before, (1, 2)) + self.assertEqual(after, (0, 0)) + + def test_parents(self): + testroot = fix_path("/a/b/c") + relfile = fix_path("x/y/z/test_spam.py") + tests = [ + SingleTestInfo( + # missing "./", using pathsep: + id=relfile + "::test_each[10-10]", + name="test_each[10-10]", + path=SingleTestPath( + root=testroot, + relfile=fix_relpath(relfile), + func="test_each", + sub=["[10-10]"], + ), + source="{}:{}".format(relfile, 10), + markers=None, + # missing "./", using pathsep: + parentid=relfile + "::test_each", + ), + SingleTestInfo( + # missing "./", using pathsep: + id=relfile + "::All::BasicTests::test_first", + name="test_first", + path=SingleTestPath( + root=testroot, + relfile=fix_relpath(relfile), + func="All.BasicTests.test_first", + sub=None, + ), + source="{}:{}".format(relfile, 61), + markers=None, + # missing "./", using pathsep: + parentid=relfile + 
"::All::BasicTests", + ), + ] + allparents = [ + # missing "./", using pathsep: + [ + (relfile + "::test_each", "test_each", "function"), + (relfile, relfile, "file"), + (".", testroot, "folder"), + ], + # missing "./", using pathsep: + [ + (relfile + "::All::BasicTests", "BasicTests", "suite"), + (relfile + "::All", "All", "suite"), + (relfile, "test_spam.py", "file"), + (fix_path("x/y/z"), "z", "folder"), + (fix_path("x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + ] + discovered = DiscoveredTests() + for test, parents in zip(tests, allparents): + discovered.add_test(test, parents) + + parents = discovered.parents + + self.maxDiff = None + self.assertEqual( + parents, + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./x", + kind="folder", + name="x", + root=testroot, + relpath=fix_path("./x"), + parentid=".", + ), + ParentInfo( + id="./x/y", + kind="folder", + name="y", + root=testroot, + relpath=fix_path("./x/y"), + parentid="./x", + ), + ParentInfo( + id="./x/y/z", + kind="folder", + name="z", + root=testroot, + relpath=fix_path("./x/y/z"), + parentid="./x/y", + ), + ParentInfo( + id="./x/y/z/test_spam.py", + kind="file", + name="test_spam.py", + root=testroot, + relpath=fix_relpath(relfile), + parentid="./x/y/z", + ), + ParentInfo( + id="./x/y/z/test_spam.py::All", + kind="suite", + name="All", + root=testroot, + parentid="./x/y/z/test_spam.py", + ), + ParentInfo( + id="./x/y/z/test_spam.py::All::BasicTests", + kind="suite", + name="BasicTests", + root=testroot, + parentid="./x/y/z/test_spam.py::All", + ), + ParentInfo( + id="./x/y/z/test_spam.py::test_each", + kind="function", + name="test_each", + root=testroot, + parentid="./x/y/z/test_spam.py", + ), + ], + ) + + def test_add_test_simple(self): + testroot = fix_path("/a/b/c") + relfile = "test_spam.py" + test = SingleTestInfo( + # missing "./": + id=relfile + "::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot, + # missing "./": + relfile=relfile, + func="test_spam", + ), + # missing "./": + source="{}:{}".format(relfile, 11), + markers=[], + # missing "./": + parentid=relfile, + ) + expected = test._replace( + id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid) + ) + discovered = DiscoveredTests() + + before = list(discovered), discovered.parents + discovered.add_test( + test, + [ + (relfile, relfile, "file"), + (".", testroot, "folder"), + ], + ) + after = list(discovered), discovered.parents + + self.maxDiff = None + self.assertEqual(before, ([], [])) + self.assertEqual( + after, + ( + [expected], + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./test_spam.py", + kind="file", + name=relfile, + root=testroot, + relpath=relfile, + parentid=".", + ), + ], + ), + ) + + def test_multiroot(self): + # the first root + testroot1 = fix_path("/a/b/c") + relfile1 = "test_spam.py" + alltests = [ + SingleTestInfo( + # missing "./": + id=relfile1 + "::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot1, + relfile=fix_relpath(relfile1), + func="test_spam", + ), + source="{}:{}".format(relfile1, 10), + markers=[], + # missing "./": + parentid=relfile1, + ), + ] + allparents = [ + # missing "./": + [ + (relfile1, "test_spam.py", "file"), + (".", testroot1, "folder"), + ], + ] + # the second root + testroot2 = fix_path("/x/y/z") + relfile2 = fix_path("w/test_eggs.py") + alltests.extend( + [ + SingleTestInfo( + id=relfile2 + "::BasicTests::test_first", + name="test_first", + 
path=SingleTestPath( + root=testroot2, + relfile=fix_relpath(relfile2), + func="BasicTests.test_first", + ), + source="{}:{}".format(relfile2, 61), + markers=[], + parentid=relfile2 + "::BasicTests", + ), + ] + ) + allparents.extend( + [ + # missing "./", using pathsep: + [ + (relfile2 + "::BasicTests", "BasicTests", "suite"), + (relfile2, "test_eggs.py", "file"), + (fix_path("./w"), "w", "folder"), + (".", testroot2, "folder"), + ], + ] + ) + + discovered = DiscoveredTests() + for test, parents in zip(alltests, allparents): + discovered.add_test(test, parents) + tests = list(discovered) + parents = discovered.parents + + self.maxDiff = None + self.assertEqual( + tests, + [ + # the first root + SingleTestInfo( + id="./test_spam.py::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot1, + relfile=fix_relpath(relfile1), + func="test_spam", + ), + source="{}:{}".format(relfile1, 10), + markers=[], + parentid="./test_spam.py", + ), + # the secondroot + SingleTestInfo( + id="./w/test_eggs.py::BasicTests::test_first", + name="test_first", + path=SingleTestPath( + root=testroot2, + relfile=fix_relpath(relfile2), + func="BasicTests.test_first", + ), + source="{}:{}".format(relfile2, 61), + markers=[], + parentid="./w/test_eggs.py::BasicTests", + ), + ], + ) + self.assertEqual( + parents, + [ + # the first root + ParentInfo( + id=".", + kind="folder", + name=testroot1, + ), + ParentInfo( + id="./test_spam.py", + kind="file", + name="test_spam.py", + root=testroot1, + relpath=fix_relpath(relfile1), + parentid=".", + ), + # the secondroot + ParentInfo( + id=".", + kind="folder", + name=testroot2, + ), + ParentInfo( + id="./w", + kind="folder", + name="w", + root=testroot2, + relpath=fix_path("./w"), + parentid=".", + ), + ParentInfo( + id="./w/test_eggs.py", + kind="file", + name="test_eggs.py", + root=testroot2, + relpath=fix_relpath(relfile2), + parentid="./w", + ), + ParentInfo( + id="./w/test_eggs.py::BasicTests", + kind="suite", + name="BasicTests", + root=testroot2, + parentid="./w/test_eggs.py", + ), + ], + ) + + def test_doctest(self): + testroot = fix_path("/a/b/c") + doctestfile = fix_path("./x/test_doctest.txt") + relfile = fix_path("./x/y/z/test_eggs.py") + alltests = [ + SingleTestInfo( + id=doctestfile + "::test_doctest.txt", + name="test_doctest.txt", + path=SingleTestPath( + root=testroot, + relfile=doctestfile, + func=None, + ), + source="{}:{}".format(doctestfile, 0), + markers=[], + parentid=doctestfile, + ), + # With --doctest-modules + SingleTestInfo( + id=relfile + "::test_eggs", + name="test_eggs", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=None, + ), + source="{}:{}".format(relfile, 0), + markers=[], + parentid=relfile, + ), + SingleTestInfo( + id=relfile + "::test_eggs.TestSpam", + name="test_eggs.TestSpam", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=None, + ), + source="{}:{}".format(relfile, 12), + markers=[], + parentid=relfile, + ), + SingleTestInfo( + id=relfile + "::test_eggs.TestSpam.TestEggs", + name="test_eggs.TestSpam.TestEggs", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=None, + ), + source="{}:{}".format(relfile, 27), + markers=[], + parentid=relfile, + ), + ] + allparents = [ + [ + (doctestfile, "test_doctest.txt", "file"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + [ + (relfile, "test_eggs.py", "file"), + (fix_path("./x/y/z"), "z", "folder"), + (fix_path("./x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], 
+ [ + (relfile, "test_eggs.py", "file"), + (fix_path("./x/y/z"), "z", "folder"), + (fix_path("./x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + [ + (relfile, "test_eggs.py", "file"), + (fix_path("./x/y/z"), "z", "folder"), + (fix_path("./x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + ] + expected = [ + test._replace(id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid)) + for test in alltests + ] + + discovered = DiscoveredTests() + + for test, parents in zip(alltests, allparents): + discovered.add_test(test, parents) + tests = list(discovered) + parents = discovered.parents + + self.maxDiff = None + self.assertEqual(tests, expected) + self.assertEqual( + parents, + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./x", + kind="folder", + name="x", + root=testroot, + relpath=fix_path("./x"), + parentid=".", + ), + ParentInfo( + id="./x/test_doctest.txt", + kind="file", + name="test_doctest.txt", + root=testroot, + relpath=fix_path(doctestfile), + parentid="./x", + ), + ParentInfo( + id="./x/y", + kind="folder", + name="y", + root=testroot, + relpath=fix_path("./x/y"), + parentid="./x", + ), + ParentInfo( + id="./x/y/z", + kind="folder", + name="z", + root=testroot, + relpath=fix_path("./x/y/z"), + parentid="./x/y", + ), + ParentInfo( + id="./x/y/z/test_eggs.py", + kind="file", + name="test_eggs.py", + root=testroot, + relpath=fix_relpath(relfile), + parentid="./x/y/z", + ), + ], + ) + + def test_nested_suite_simple(self): + testroot = fix_path("/a/b/c") + relfile = fix_path("./test_eggs.py") + alltests = [ + SingleTestInfo( + id=relfile + "::TestOuter::TestInner::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="TestOuter.TestInner.test_spam", + ), + source="{}:{}".format(relfile, 10), + markers=None, + parentid=relfile + "::TestOuter::TestInner", + ), + SingleTestInfo( + id=relfile + "::TestOuter::TestInner::test_eggs", + name="test_eggs", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="TestOuter.TestInner.test_eggs", + ), + source="{}:{}".format(relfile, 21), + markers=None, + parentid=relfile + "::TestOuter::TestInner", + ), + ] + allparents = [ + [ + (relfile + "::TestOuter::TestInner", "TestInner", "suite"), + (relfile + "::TestOuter", "TestOuter", "suite"), + (relfile, "test_eggs.py", "file"), + (".", testroot, "folder"), + ], + [ + (relfile + "::TestOuter::TestInner", "TestInner", "suite"), + (relfile + "::TestOuter", "TestOuter", "suite"), + (relfile, "test_eggs.py", "file"), + (".", testroot, "folder"), + ], + ] + expected = [ + test._replace(id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid)) + for test in alltests + ] + + discovered = DiscoveredTests() + for test, parents in zip(alltests, allparents): + discovered.add_test(test, parents) + tests = list(discovered) + parents = discovered.parents + + self.maxDiff = None + self.assertEqual(tests, expected) + self.assertEqual( + parents, + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./test_eggs.py", + kind="file", + name="test_eggs.py", + root=testroot, + relpath=fix_relpath(relfile), + parentid=".", + ), + ParentInfo( + id="./test_eggs.py::TestOuter", + kind="suite", + name="TestOuter", + root=testroot, + parentid="./test_eggs.py", + ), + ParentInfo( + id="./test_eggs.py::TestOuter::TestInner", + kind="suite", + name="TestInner", + root=testroot, + 
parentid="./test_eggs.py::TestOuter", + ), + ], + ) diff --git a/pythonFiles/tests/testing_tools/adapter/test_functional.py b/pythonFiles/tests/testing_tools/adapter/test_functional.py new file mode 100644 index 000000000000..153ad5508d9b --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/test_functional.py @@ -0,0 +1,1535 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import, unicode_literals + +import json +import os +import os.path +import subprocess +import sys +import unittest + +from ...__main__ import TESTING_TOOLS_ROOT +from testing_tools.adapter.util import fix_path, PATH_SEP + +# Pytest 3.7 and later uses pathlib/pathlib2 for path resolution. +try: + from pathlib import Path +except ImportError: + from pathlib2 import Path # type: ignore (for Pylance) + + +CWD = os.getcwd() +DATA_DIR = os.path.join(os.path.dirname(__file__), ".data") +SCRIPT = os.path.join(TESTING_TOOLS_ROOT, "run_adapter.py") + + +def resolve_testroot(name): + projroot = os.path.join(DATA_DIR, name) + testroot = os.path.join(projroot, "tests") + return str(Path(projroot).resolve()), str(Path(testroot).resolve()) + + +def run_adapter(cmd, tool, *cliargs): + try: + return _run_adapter(cmd, tool, *cliargs) + except subprocess.CalledProcessError as exc: + print(exc.output) + + +def _run_adapter(cmd, tool, *cliargs, **kwargs): + hidestdio = kwargs.pop("hidestdio", True) + assert not kwargs or tuple(kwargs) == ("stderr",) + kwds = kwargs + argv = [sys.executable, SCRIPT, cmd, tool, "--"] + list(cliargs) + if not hidestdio: + argv.insert(4, "--no-hide-stdio") + kwds["stderr"] = subprocess.STDOUT + argv.append("--cache-clear") + print( + "running {!r}".format(" ".join(arg.rpartition(CWD + "/")[-1] for arg in argv)) + ) + output = subprocess.check_output(argv, universal_newlines=True, **kwds) + return output + + +def fix_test_order(tests): + if sys.version_info >= (3, 6): + return tests + fixed = [] + curfile = None + group = [] + for test in tests: + if (curfile or "???") not in test["id"]: + fixed.extend(sorted(group, key=lambda t: t["id"])) + group = [] + curfile = test["id"].partition(".py::")[0] + ".py" + group.append(test) + fixed.extend(sorted(group, key=lambda t: t["id"])) + return fixed + + +def fix_source(tests, testid, srcfile, lineno): + for test in tests: + if test["id"] == testid: + break + else: + raise KeyError("test {!r} not found".format(testid)) + if not srcfile: + srcfile = test["source"].rpartition(":")[0] + test["source"] = fix_path("{}:{}".format(srcfile, lineno)) + + +def sorted_object(obj): + if isinstance(obj, dict): + return sorted((key, sorted_object(obj[key])) for key in obj.keys()) + if isinstance(obj, list): + return sorted((sorted_object(x) for x in obj)) + else: + return obj + + +# Note that these tests are skipped if util.PATH_SEP is not os.path.sep. +# This is because the functional tests should reflect the actual +# operating environment. 
+ + +class PytestTests(unittest.TestCase): + def setUp(self): + if PATH_SEP is not os.path.sep: + raise unittest.SkipTest("functional tests require unmodified env") + super(PytestTests, self).setUp() + + def complex(self, testroot): + results = COMPLEX.copy() + results["root"] = testroot + return [results] + + def test_discover_simple(self): + projroot, testroot = resolve_testroot("simple") + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + + self.maxDiff = None + self.assertEqual( + result, + [ + { + "root": projroot, + "rootid": ".", + "parents": [ + { + "id": "./tests", + "kind": "folder", + "name": "tests", + "relpath": fix_path("./tests"), + "parentid": ".", + }, + { + "id": "./tests/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/test_spam.py"), + "parentid": "./tests", + }, + ], + "tests": [ + { + "id": "./tests/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_spam.py:2"), + "markers": [], + "parentid": "./tests/test_spam.py", + }, + ], + } + ], + ) + + def test_discover_complex_default(self): + projroot, testroot = resolve_testroot("complex") + expected = self.complex(projroot) + expected[0]["tests"] = fix_test_order(expected[0]["tests"]) + if sys.version_info < (3,): + decorated = [ + "./tests/test_unittest.py::MyTests::test_skipped", + "./tests/test_unittest.py::MyTests::test_maybe_skipped", + "./tests/test_unittest.py::MyTests::test_maybe_not_skipped", + ] + for testid in decorated: + fix_source(expected[0]["tests"], testid, None, 0) + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + result[0]["tests"] = fix_test_order(result[0]["tests"]) + + self.maxDiff = None + self.assertEqual(sorted_object(result), sorted_object(expected)) + + def test_discover_complex_doctest(self): + projroot, _ = resolve_testroot("complex") + expected = self.complex(projroot) + # add in doctests from test suite + expected[0]["parents"].insert( + 3, + { + "id": "./tests/test_doctest.py", + "kind": "file", + "name": "test_doctest.py", + "relpath": fix_path("./tests/test_doctest.py"), + "parentid": "./tests", + }, + ) + expected[0]["tests"].insert( + 2, + { + "id": "./tests/test_doctest.py::tests.test_doctest", + "name": "tests.test_doctest", + "source": fix_path("./tests/test_doctest.py:1"), + "markers": [], + "parentid": "./tests/test_doctest.py", + }, + ) + # add in doctests from non-test module + expected[0]["parents"].insert( + 0, + { + "id": "./mod.py", + "kind": "file", + "name": "mod.py", + "relpath": fix_path("./mod.py"), + "parentid": ".", + }, + ) + expected[0]["tests"] = [ + { + "id": "./mod.py::mod", + "name": "mod", + "source": fix_path("./mod.py:1"), + "markers": [], + "parentid": "./mod.py", + }, + { + "id": "./mod.py::mod.Spam", + "name": "mod.Spam", + "source": fix_path("./mod.py:33"), + "markers": [], + "parentid": "./mod.py", + }, + { + "id": "./mod.py::mod.Spam.eggs", + "name": "mod.Spam.eggs", + "source": fix_path("./mod.py:43"), + "markers": [], + "parentid": "./mod.py", + }, + { + "id": "./mod.py::mod.square", + "name": "mod.square", + "source": fix_path("./mod.py:18"), + "markers": [], + "parentid": "./mod.py", + }, + ] + expected[0]["tests"] + expected[0]["tests"] = fix_test_order(expected[0]["tests"]) + if sys.version_info < (3,): + decorated = [ + "./tests/test_unittest.py::MyTests::test_skipped", + "./tests/test_unittest.py::MyTests::test_maybe_skipped", + 
"./tests/test_unittest.py::MyTests::test_maybe_not_skipped", + ] + for testid in decorated: + fix_source(expected[0]["tests"], testid, None, 0) + + out = run_adapter( + "discover", "pytest", "--rootdir", projroot, "--doctest-modules", projroot + ) + result = json.loads(out) + result[0]["tests"] = fix_test_order(result[0]["tests"]) + + self.maxDiff = None + self.assertEqual(sorted_object(result), sorted_object(expected)) + + def test_discover_not_found(self): + projroot, testroot = resolve_testroot("notests") + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + + self.maxDiff = None + self.assertEqual(result, []) + # TODO: Expect the following instead? + # self.assertEqual(result, [{ + # 'root': projroot, + # 'rootid': '.', + # 'parents': [], + # 'tests': [], + # }]) + + @unittest.skip("broken in CI") + def test_discover_bad_args(self): + projroot, testroot = resolve_testroot("simple") + + with self.assertRaises(subprocess.CalledProcessError) as cm: + _run_adapter( + "discover", + "pytest", + "--spam", + "--rootdir", + projroot, + testroot, + stderr=subprocess.STDOUT, + ) + self.assertIn("(exit code 4)", cm.exception.output) + + def test_discover_syntax_error(self): + projroot, testroot = resolve_testroot("syntax-error") + + with self.assertRaises(subprocess.CalledProcessError) as cm: + _run_adapter( + "discover", + "pytest", + "--rootdir", + projroot, + testroot, + stderr=subprocess.STDOUT, + ) + self.assertIn("(exit code 2)", cm.exception.output) + + def test_discover_normcase(self): + projroot, testroot = resolve_testroot("NormCase") + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + + self.maxDiff = None + self.assertTrue(projroot.endswith("NormCase")) + self.assertEqual( + result, + [ + { + "root": projroot, + "rootid": ".", + "parents": [ + { + "id": "./tests", + "kind": "folder", + "name": "tests", + "relpath": fix_path("./tests"), + "parentid": ".", + }, + { + "id": "./tests/A", + "kind": "folder", + "name": "A", + "relpath": fix_path("./tests/A"), + "parentid": "./tests", + }, + { + "id": "./tests/A/b", + "kind": "folder", + "name": "b", + "relpath": fix_path("./tests/A/b"), + "parentid": "./tests/A", + }, + { + "id": "./tests/A/b/C", + "kind": "folder", + "name": "C", + "relpath": fix_path("./tests/A/b/C"), + "parentid": "./tests/A/b", + }, + { + "id": "./tests/A/b/C/test_Spam.py", + "kind": "file", + "name": "test_Spam.py", + "relpath": fix_path("./tests/A/b/C/test_Spam.py"), + "parentid": "./tests/A/b/C", + }, + ], + "tests": [ + { + "id": "./tests/A/b/C/test_Spam.py::test_okay", + "name": "test_okay", + "source": fix_path("./tests/A/b/C/test_Spam.py:2"), + "markers": [], + "parentid": "./tests/A/b/C/test_Spam.py", + }, + ], + } + ], + ) + + +COMPLEX = { + "root": None, + "rootid": ".", + "parents": [ + # + { + "id": "./tests", + "kind": "folder", + "name": "tests", + "relpath": fix_path("./tests"), + "parentid": ".", + }, + # +++ + { + "id": "./tests/test_42-43.py", + "kind": "file", + "name": "test_42-43.py", + "relpath": fix_path("./tests/test_42-43.py"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_42.py", + "kind": "file", + "name": "test_42.py", + "relpath": fix_path("./tests/test_42.py"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_doctest.txt", + "kind": "file", + "name": "test_doctest.txt", + "relpath": fix_path("./tests/test_doctest.txt"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_foo.py", + "kind": 
"file", + "name": "test_foo.py", + "relpath": fix_path("./tests/test_foo.py"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_mixed.py", + "kind": "file", + "name": "test_mixed.py", + "relpath": fix_path("./tests/test_mixed.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_mixed.py::MyTests", + "kind": "suite", + "name": "MyTests", + "parentid": "./tests/test_mixed.py", + }, + { + "id": "./tests/test_mixed.py::TestMySuite", + "kind": "suite", + "name": "TestMySuite", + "parentid": "./tests/test_mixed.py", + }, + # +++ + { + "id": "./tests/test_pytest.py", + "kind": "file", + "name": "test_pytest.py", + "relpath": fix_path("./tests/test_pytest.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_pytest.py::TestEggs", + "kind": "suite", + "name": "TestEggs", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestParam", + "kind": "suite", + "name": "TestParam", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest.py::TestParam", + }, + { + "id": "./tests/test_pytest.py::TestParamAll", + "kind": "suite", + "name": "TestParamAll", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest.py::TestParamAll", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_spam_13", + "kind": "function", + "name": "test_spam_13", + "parentid": "./tests/test_pytest.py::TestParamAll", + }, + { + "id": "./tests/test_pytest.py::TestSpam", + "kind": "suite", + "name": "TestSpam", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestSpam::TestHam", + "kind": "suite", + "name": "TestHam", + "parentid": "./tests/test_pytest.py::TestSpam", + }, + { + "id": "./tests/test_pytest.py::TestSpam::TestHam::TestEggs", + "kind": "suite", + "name": "TestEggs", + "parentid": "./tests/test_pytest.py::TestSpam::TestHam", + }, + { + "id": "./tests/test_pytest.py::test_fixture_param", + "kind": "function", + "name": "test_fixture_param", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_01", + "kind": "function", + "name": "test_param_01", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_11", + "kind": "function", + "name": "test_param_11", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers", + "kind": "function", + "name": "test_param_13_markers", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13_repeat", + "kind": "function", + "name": "test_param_13_repeat", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped", + "kind": "function", + "name": "test_param_13_skipped", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13", + "kind": "function", + "name": "test_param_23_13", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises", + "kind": "function", + "name": "test_param_23_raises", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_33", 
+ "kind": "function", + "name": "test_param_33", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids", + "kind": "function", + "name": "test_param_33_ids", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture", + "kind": "function", + "name": "test_param_fixture", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_mark_fixture", + "kind": "function", + "name": "test_param_mark_fixture", + "parentid": "./tests/test_pytest.py", + }, + # +++ + { + "id": "./tests/test_pytest_param.py", + "kind": "file", + "name": "test_pytest_param.py", + "relpath": fix_path("./tests/test_pytest_param.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll", + "kind": "suite", + "name": "TestParamAll", + "parentid": "./tests/test_pytest_param.py", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest_param.py::TestParamAll", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13", + "kind": "function", + "name": "test_spam_13", + "parentid": "./tests/test_pytest_param.py::TestParamAll", + }, + { + "id": "./tests/test_pytest_param.py::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest_param.py", + }, + # +++ + { + "id": "./tests/test_unittest.py", + "kind": "file", + "name": "test_unittest.py", + "relpath": fix_path("./tests/test_unittest.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_unittest.py::MyTests", + "kind": "suite", + "name": "MyTests", + "parentid": "./tests/test_unittest.py", + }, + { + "id": "./tests/test_unittest.py::OtherTests", + "kind": "suite", + "name": "OtherTests", + "parentid": "./tests/test_unittest.py", + }, + ## + { + "id": "./tests/v", + "kind": "folder", + "name": "v", + "relpath": fix_path("./tests/v"), + "parentid": "./tests", + }, + ## +++ + { + "id": "./tests/v/test_eggs.py", + "kind": "file", + "name": "test_eggs.py", + "relpath": fix_path("./tests/v/test_eggs.py"), + "parentid": "./tests/v", + }, + { + "id": "./tests/v/test_eggs.py::TestSimple", + "kind": "suite", + "name": "TestSimple", + "parentid": "./tests/v/test_eggs.py", + }, + ## +++ + { + "id": "./tests/v/test_ham.py", + "kind": "file", + "name": "test_ham.py", + "relpath": fix_path("./tests/v/test_ham.py"), + "parentid": "./tests/v", + }, + ## +++ + { + "id": "./tests/v/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/v/test_spam.py"), + "parentid": "./tests/v", + }, + ## + { + "id": "./tests/w", + "kind": "folder", + "name": "w", + "relpath": fix_path("./tests/w"), + "parentid": "./tests", + }, + ## +++ + { + "id": "./tests/w/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/w/test_spam.py"), + "parentid": "./tests/w", + }, + ## +++ + { + "id": "./tests/w/test_spam_ex.py", + "kind": "file", + "name": "test_spam_ex.py", + "relpath": fix_path("./tests/w/test_spam_ex.py"), + "parentid": "./tests/w", + }, + ## + { + "id": "./tests/x", + "kind": "folder", + "name": "x", + "relpath": fix_path("./tests/x"), + "parentid": "./tests", + }, + ### + { + "id": "./tests/x/y", + "kind": "folder", + "name": "y", + "relpath": fix_path("./tests/x/y"), + "parentid": "./tests/x", + }, + #### + { + "id": "./tests/x/y/z", + "kind": "folder", + "name": "z", + "relpath": fix_path("./tests/x/y/z"), + 
"parentid": "./tests/x/y", + }, + ##### + { + "id": "./tests/x/y/z/a", + "kind": "folder", + "name": "a", + "relpath": fix_path("./tests/x/y/z/a"), + "parentid": "./tests/x/y/z", + }, + ##### +++ + { + "id": "./tests/x/y/z/a/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/x/y/z/a/test_spam.py"), + "parentid": "./tests/x/y/z/a", + }, + ##### + { + "id": "./tests/x/y/z/b", + "kind": "folder", + "name": "b", + "relpath": fix_path("./tests/x/y/z/b"), + "parentid": "./tests/x/y/z", + }, + ##### +++ + { + "id": "./tests/x/y/z/b/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/x/y/z/b/test_spam.py"), + "parentid": "./tests/x/y/z/b", + }, + #### +++ + { + "id": "./tests/x/y/z/test_ham.py", + "kind": "file", + "name": "test_ham.py", + "relpath": fix_path("./tests/x/y/z/test_ham.py"), + "parentid": "./tests/x/y/z", + }, + ], + "tests": [ + ########## + { + "id": "./tests/test_42-43.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_42-43.py:2"), + "markers": [], + "parentid": "./tests/test_42-43.py", + }, + ##### + { + "id": "./tests/test_42.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_42.py:2"), + "markers": [], + "parentid": "./tests/test_42.py", + }, + ##### + { + "id": "./tests/test_doctest.txt::test_doctest.txt", + "name": "test_doctest.txt", + "source": fix_path("./tests/test_doctest.txt:1"), + "markers": [], + "parentid": "./tests/test_doctest.txt", + }, + ##### + { + "id": "./tests/test_foo.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_foo.py:3"), + "markers": [], + "parentid": "./tests/test_foo.py", + }, + ##### + { + "id": "./tests/test_mixed.py::test_top_level", + "name": "test_top_level", + "source": fix_path("./tests/test_mixed.py:5"), + "markers": [], + "parentid": "./tests/test_mixed.py", + }, + { + "id": "./tests/test_mixed.py::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_mixed.py:9"), + "markers": ["skip"], + "parentid": "./tests/test_mixed.py", + }, + { + "id": "./tests/test_mixed.py::TestMySuite::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_mixed.py:16"), + "markers": [], + "parentid": "./tests/test_mixed.py::TestMySuite", + }, + { + "id": "./tests/test_mixed.py::MyTests::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_mixed.py:22"), + "markers": [], + "parentid": "./tests/test_mixed.py::MyTests", + }, + { + "id": "./tests/test_mixed.py::MyTests::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_mixed.py:25"), + "markers": ["skip"], + "parentid": "./tests/test_mixed.py::MyTests", + }, + ##### + { + "id": "./tests/test_pytest.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:6"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_failure", + "name": "test_failure", + "source": fix_path("./tests/test_pytest.py:10"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_runtime_skipped", + "name": "test_runtime_skipped", + "source": fix_path("./tests/test_pytest.py:14"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_runtime_failed", + "name": "test_runtime_failed", + "source": fix_path("./tests/test_pytest.py:18"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": 
"./tests/test_pytest.py::test_raises", + "name": "test_raises", + "source": fix_path("./tests/test_pytest.py:22"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_pytest.py:26"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_maybe_skipped", + "name": "test_maybe_skipped", + "source": fix_path("./tests/test_pytest.py:31"), + "markers": ["skip-if"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_known_failure", + "name": "test_known_failure", + "source": fix_path("./tests/test_pytest.py:36"), + "markers": ["expected-failure"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_warned", + "name": "test_warned", + "source": fix_path("./tests/test_pytest.py:41"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_custom_marker", + "name": "test_custom_marker", + "source": fix_path("./tests/test_pytest.py:46"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_multiple_markers", + "name": "test_multiple_markers", + "source": fix_path("./tests/test_pytest.py:51"), + "markers": ["expected-failure", "skip", "skip-if"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_dynamic_1", + "name": "test_dynamic_1", + "source": fix_path("./tests/test_pytest.py:62"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_dynamic_2", + "name": "test_dynamic_2", + "source": fix_path("./tests/test_pytest.py:62"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_dynamic_3", + "name": "test_dynamic_3", + "source": fix_path("./tests/test_pytest.py:62"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestSpam::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:70"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestSpam", + }, + { + "id": "./tests/test_pytest.py::TestSpam::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_pytest.py:73"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::TestSpam", + }, + { + "id": "./tests/test_pytest.py::TestSpam::TestHam::TestEggs::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:81"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestSpam::TestHam::TestEggs", + }, + { + "id": "./tests/test_pytest.py::TestEggs::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:93"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestEggs", + }, + { + "id": "./tests/test_pytest.py::test_param_01[]", + "name": "test_param_01[]", + "source": fix_path("./tests/test_pytest.py:103"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_01", + }, + { + "id": "./tests/test_pytest.py::test_param_11[x0]", + "name": "test_param_11[x0]", + "source": fix_path("./tests/test_pytest.py:108"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_11", + }, + { + "id": "./tests/test_pytest.py::test_param_13[x0]", + "name": "test_param_13[x0]", + "source": fix_path("./tests/test_pytest.py:113"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13", 
+ }, + { + "id": "./tests/test_pytest.py::test_param_13[x1]", + "name": "test_param_13[x1]", + "source": fix_path("./tests/test_pytest.py:113"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13", + }, + { + "id": "./tests/test_pytest.py::test_param_13[x2]", + "name": "test_param_13[x2]", + "source": fix_path("./tests/test_pytest.py:113"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13", + }, + { + "id": "./tests/test_pytest.py::test_param_13_repeat[x0]", + "name": "test_param_13_repeat[x0]", + "source": fix_path("./tests/test_pytest.py:118"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13_repeat", + }, + { + "id": "./tests/test_pytest.py::test_param_13_repeat[x1]", + "name": "test_param_13_repeat[x1]", + "source": fix_path("./tests/test_pytest.py:118"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13_repeat", + }, + { + "id": "./tests/test_pytest.py::test_param_13_repeat[x2]", + "name": "test_param_13_repeat[x2]", + "source": fix_path("./tests/test_pytest.py:118"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13_repeat", + }, + { + "id": "./tests/test_pytest.py::test_param_33[1-1-1]", + "name": "test_param_33[1-1-1]", + "source": fix_path("./tests/test_pytest.py:123"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33", + }, + { + "id": "./tests/test_pytest.py::test_param_33[3-4-5]", + "name": "test_param_33[3-4-5]", + "source": fix_path("./tests/test_pytest.py:123"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33", + }, + { + "id": "./tests/test_pytest.py::test_param_33[0-0-0]", + "name": "test_param_33[0-0-0]", + "source": fix_path("./tests/test_pytest.py:123"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids[v1]", + "name": "test_param_33_ids[v1]", + "source": fix_path("./tests/test_pytest.py:128"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33_ids", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids[v2]", + "name": "test_param_33_ids[v2]", + "source": fix_path("./tests/test_pytest.py:128"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33_ids", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids[v3]", + "name": "test_param_33_ids[v3]", + "source": fix_path("./tests/test_pytest.py:128"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33_ids", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[1-1-z0]", + "name": "test_param_23_13[1-1-z0]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[1-1-z1]", + "name": "test_param_23_13[1-1-z1]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[1-1-z2]", + "name": "test_param_23_13[1-1-z2]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[3-4-z0]", + "name": "test_param_23_13[3-4-z0]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[3-4-z1]", + "name": "test_param_23_13[3-4-z1]", + 
"source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[3-4-z2]", + "name": "test_param_23_13[3-4-z2]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[0-0-z0]", + "name": "test_param_23_13[0-0-z0]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[0-0-z1]", + "name": "test_param_23_13[0-0-z1]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[0-0-z2]", + "name": "test_param_23_13[0-0-z2]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers[x0]", + "name": "test_param_13_markers[x0]", + "source": fix_path("./tests/test_pytest.py:140"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13_markers", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers[???]", + "name": "test_param_13_markers[???]", + "source": fix_path("./tests/test_pytest.py:140"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_markers", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers[2]", + "name": "test_param_13_markers[2]", + "source": fix_path("./tests/test_pytest.py:140"), + "markers": ["expected-failure"], + "parentid": "./tests/test_pytest.py::test_param_13_markers", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped[x0]", + "name": "test_param_13_skipped[x0]", + "source": fix_path("./tests/test_pytest.py:149"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_skipped", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped[x1]", + "name": "test_param_13_skipped[x1]", + "source": fix_path("./tests/test_pytest.py:149"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_skipped", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped[x2]", + "name": "test_param_13_skipped[x2]", + "source": fix_path("./tests/test_pytest.py:149"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_skipped", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises[1-None]", + "name": "test_param_23_raises[1-None]", + "source": fix_path("./tests/test_pytest.py:155"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_raises", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises[1.0-None]", + "name": "test_param_23_raises[1.0-None]", + "source": fix_path("./tests/test_pytest.py:155"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_raises", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises[2-catch2]", + "name": "test_param_23_raises[2-catch2]", + "source": fix_path("./tests/test_pytest.py:155"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_raises", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:164"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam", + }, + { + "id": 
"./tests/test_pytest.py::TestParam::test_param_13[x0]", + "name": "test_param_13[x0]", + "source": fix_path("./tests/test_pytest.py:167"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_param_13[x1]", + "name": "test_param_13[x1]", + "source": fix_path("./tests/test_pytest.py:167"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_param_13[x2]", + "name": "test_param_13[x2]", + "source": fix_path("./tests/test_pytest.py:167"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13[x0]", + "name": "test_param_13[x0]", + "source": fix_path("./tests/test_pytest.py:175"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13[x1]", + "name": "test_param_13[x1]", + "source": fix_path("./tests/test_pytest.py:175"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13[x2]", + "name": "test_param_13[x2]", + "source": fix_path("./tests/test_pytest.py:175"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_spam_13[x0]", + "name": "test_spam_13[x0]", + "source": fix_path("./tests/test_pytest.py:178"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_spam_13[x1]", + "name": "test_spam_13[x1]", + "source": fix_path("./tests/test_pytest.py:178"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_spam_13[x2]", + "name": "test_spam_13[x2]", + "source": fix_path("./tests/test_pytest.py:178"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest.py::test_fixture", + "name": "test_fixture", + "source": fix_path("./tests/test_pytest.py:192"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_mark_fixture", + "name": "test_mark_fixture", + "source": fix_path("./tests/test_pytest.py:196"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture[x0]", + "name": "test_param_fixture[x0]", + "source": fix_path("./tests/test_pytest.py:201"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture[x1]", + "name": "test_param_fixture[x1]", + "source": fix_path("./tests/test_pytest.py:201"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture[x2]", + "name": "test_param_fixture[x2]", + "source": fix_path("./tests/test_pytest.py:201"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_mark_fixture[x0]", + "name": "test_param_mark_fixture[x0]", + "source": fix_path("./tests/test_pytest.py:207"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_mark_fixture", + }, + { + "id": 
"./tests/test_pytest.py::test_param_mark_fixture[x1]", + "name": "test_param_mark_fixture[x1]", + "source": fix_path("./tests/test_pytest.py:207"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_mark_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_mark_fixture[x2]", + "name": "test_param_mark_fixture[x2]", + "source": fix_path("./tests/test_pytest.py:207"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_mark_fixture", + }, + { + "id": "./tests/test_pytest.py::test_fixture_param[spam]", + "name": "test_fixture_param[spam]", + "source": fix_path("./tests/test_pytest.py:216"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_fixture_param", + }, + { + "id": "./tests/test_pytest.py::test_fixture_param[eggs]", + "name": "test_fixture_param[eggs]", + "source": fix_path("./tests/test_pytest.py:216"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_fixture_param", + }, + ###### + { + "id": "./tests/test_pytest_param.py::test_param_13[x0]", + "name": "test_param_13[x0]", + "source": fix_path("./tests/test_pytest_param.py:8"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::test_param_13[x1]", + "name": "test_param_13[x1]", + "source": fix_path("./tests/test_pytest_param.py:8"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::test_param_13[x2]", + "name": "test_param_13[x2]", + "source": fix_path("./tests/test_pytest_param.py:8"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13[x0]", + "name": "test_param_13[x0]", + "source": fix_path("./tests/test_pytest_param.py:14"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13[x1]", + "name": "test_param_13[x1]", + "source": fix_path("./tests/test_pytest_param.py:14"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13[x2]", + "name": "test_param_13[x2]", + "source": fix_path("./tests/test_pytest_param.py:14"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13[x0]", + "name": "test_spam_13[x0]", + "source": fix_path("./tests/test_pytest_param.py:17"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13[x1]", + "name": "test_spam_13[x1]", + "source": fix_path("./tests/test_pytest_param.py:17"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13[x2]", + "name": "test_spam_13[x2]", + "source": fix_path("./tests/test_pytest_param.py:17"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_spam_13", + }, + ###### + { + "id": "./tests/test_unittest.py::MyTests::test_dynamic_", + "name": "test_dynamic_", + "source": fix_path("./tests/test_unittest.py:54"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_failure", + "name": "test_failure", + "source": 
fix_path("./tests/test_unittest.py:34"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_known_failure", + "name": "test_known_failure", + "source": fix_path("./tests/test_unittest.py:37"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_maybe_not_skipped", + "name": "test_maybe_not_skipped", + "source": fix_path("./tests/test_unittest.py:17"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_maybe_skipped", + "name": "test_maybe_skipped", + "source": fix_path("./tests/test_unittest.py:13"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_unittest.py:6"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_unittest.py:9"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_skipped_inside", + "name": "test_skipped_inside", + "source": fix_path("./tests/test_unittest.py:21"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_with_nested_subtests", + "name": "test_with_nested_subtests", + "source": fix_path("./tests/test_unittest.py:46"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_with_subtests", + "name": "test_with_subtests", + "source": fix_path("./tests/test_unittest.py:41"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::OtherTests::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_unittest.py:61"), + "markers": [], + "parentid": "./tests/test_unittest.py::OtherTests", + }, + ########### + { + "id": "./tests/v/test_eggs.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_eggs.py", + }, + { + "id": "./tests/v/test_eggs.py::TestSimple::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:8"), + "markers": [], + "parentid": "./tests/v/test_eggs.py::TestSimple", + }, + ###### + { + "id": "./tests/v/test_ham.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_ham.py", + }, + { + "id": "./tests/v/test_ham.py::test_not_hard", + "name": "test_not_hard", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_ham.py", + }, + ###### + { + "id": "./tests/v/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_spam.py", + }, + { + "id": "./tests/v/test_spam.py::test_simpler", + "name": "test_simpler", + "source": fix_path("./tests/v/test_spam.py:4"), + "markers": [], + "parentid": "./tests/v/test_spam.py", + }, + ########### + { + "id": "./tests/w/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/w/test_spam.py:4"), + "markers": [], + "parentid": "./tests/w/test_spam.py", + }, + { + "id": 
"./tests/w/test_spam_ex.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/w/test_spam_ex.py:4"), + "markers": [], + "parentid": "./tests/w/test_spam_ex.py", + }, + ########### + { + "id": "./tests/x/y/z/test_ham.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/x/y/z/test_ham.py:2"), + "markers": [], + "parentid": "./tests/x/y/z/test_ham.py", + }, + ###### + { + "id": "./tests/x/y/z/a/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/x/y/z/a/test_spam.py:11"), + "markers": [], + "parentid": "./tests/x/y/z/a/test_spam.py", + }, + { + "id": "./tests/x/y/z/b/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/x/y/z/b/test_spam.py:7"), + "markers": [], + "parentid": "./tests/x/y/z/b/test_spam.py", + }, + ], +} diff --git a/pythonFiles/tests/unittestadapter/.data/discovery_empty.py b/pythonFiles/tests/unittestadapter/.data/discovery_empty.py new file mode 100644 index 000000000000..9af5071303ce --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/discovery_empty.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class DiscoveryEmpty(unittest.TestCase): + """Test class for the test_empty_discovery test. + + The discover_tests function should return a dictionary with a "success" status, no errors, and no test tree + if unittest discovery was performed successfully but no tests were found. + """ + + def something(self) -> bool: + return True diff --git a/pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py b/pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py new file mode 100644 index 000000000000..42f84f046760 --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + +import something_else # type: ignore + + +class DiscoveryErrorOne(unittest.TestCase): + """Test class for the test_error_discovery test. + + The discover_tests function should return a dictionary with an "error" status, the discovered tests, and a list of errors + if unittest discovery failed at some point. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/discovery_error/file_two.py b/pythonFiles/tests/unittestadapter/.data/discovery_error/file_two.py new file mode 100644 index 000000000000..5d6d54f886a1 --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/discovery_error/file_two.py @@ -0,0 +1,18 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class DiscoveryErrorTwo(unittest.TestCase): + """Test class for the test_error_discovery test. + + The discover_tests function should return a dictionary with an "error" status, the discovered tests, and a list of errors + if unittest discovery failed at some point. 
+ """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/discovery_simple.py b/pythonFiles/tests/unittestadapter/.data/discovery_simple.py new file mode 100644 index 000000000000..1859436d5b5b --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/discovery_simple.py @@ -0,0 +1,18 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class DiscoverySimple(unittest.TestCase): + """Test class for the test_simple_discovery test. + + The discover_tests function should return a dictionary with a "success" status, no errors, and a test tree + if unittest discovery was performed successfully. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/utils_decorated_tree.py b/pythonFiles/tests/unittestadapter/.data/utils_decorated_tree.py new file mode 100644 index 000000000000..90fdfc89a27b --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/utils_decorated_tree.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest +from functools import wraps + + +def my_decorator(f): + @wraps(f) + def wrapper(*args, **kwds): + print("Calling decorated function") + return f(*args, **kwds) + + return wrapper + + +class TreeOne(unittest.TestCase): + """Test class for the test_build_decorated_tree test. + + build_test_tree should build a test tree with these test cases. + """ + + @my_decorator + def test_one(self) -> None: + self.assertGreater(2, 1) + + @my_decorator + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/file_one.py b/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/file_one.py new file mode 100644 index 000000000000..84f7fefc4ebd --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/file_one.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class CaseTwoFileOne(unittest.TestCase): + """Test class for the test_nested_test_cases test. + + get_test_case should return tests from the test suites in this folder. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/utils_simple_cases.py b/pythonFiles/tests/unittestadapter/.data/utils_simple_cases.py new file mode 100644 index 000000000000..fb3ae7eb7909 --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/utils_simple_cases.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class CaseOne(unittest.TestCase): + """Test class for the test_simple_test_cases test. + + get_test_case should return tests from the test suite. 
+ """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py b/pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py new file mode 100644 index 000000000000..6db51a4fd80b --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class TreeOne(unittest.TestCase): + """Test class for the test_build_simple_tree test. + + build_test_tree should build a test tree with these test cases. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/test_discovery.py b/pythonFiles/tests/unittestadapter/test_discovery.py new file mode 100644 index 000000000000..7d7db772a4a4 --- /dev/null +++ b/pythonFiles/tests/unittestadapter/test_discovery.py @@ -0,0 +1,233 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os +import pathlib +from typing import List + +import pytest +from unittestadapter.discovery import discover_tests +from unittestadapter.utils import TestNodeTypeEnum, parse_unittest_args + +from . import expected_discovery_test_output +from .helpers import TEST_DATA_PATH, is_same_tree + + +@pytest.mark.parametrize( + "args, expected", + [ + ( + ["-s", "something", "-p", "other*", "-t", "else"], + ("something", "other*", "else", 1, None, None), + ), + ( + [ + "--start-directory", + "foo", + "--pattern", + "bar*", + "--top-level-directory", + "baz", + ], + ("foo", "bar*", "baz", 1, None, None), + ), + ( + ["--foo", "something"], + (".", "test*.py", None, 1, None, None), + ), + ( + ["--foo", "something", "-v"], + (".", "test*.py", None, 2, None, None), + ), + ( + ["--foo", "something", "-f"], + (".", "test*.py", None, 1, True, None), + ), + ( + ["--foo", "something", "--verbose", "-f"], + (".", "test*.py", None, 2, True, None), + ), + ( + ["--foo", "something", "-q", "--failfast"], + (".", "test*.py", None, 0, True, None), + ), + ( + ["--foo", "something", "--quiet"], + (".", "test*.py", None, 0, None, None), + ), + ( + ["--foo", "something", "--quiet", "--locals"], + (".", "test*.py", None, 0, None, True), + ), + ], +) +def test_parse_unittest_args(args: List[str], expected: List[str]) -> None: + """The parse_unittest_args function should return values for the start_dir, pattern, and top_level_dir arguments + when passed as command-line options, and ignore unrecognized arguments. + """ + actual = parse_unittest_args(args) + + assert actual == expected + + +def test_simple_discovery() -> None: + """The discover_tests function should return a dictionary with a "success" status, a uuid, no errors, and a test tree + if unittest discovery was performed successfully. 
+ """ + start_dir = os.fsdecode(TEST_DATA_PATH) + pattern = "discovery_simple*" + file_path = os.fsdecode(pathlib.PurePath(TEST_DATA_PATH / "discovery_simple.py")) + + expected = { + "path": start_dir, + "type_": TestNodeTypeEnum.folder, + "name": ".data", + "children": [ + { + "name": "discovery_simple.py", + "type_": TestNodeTypeEnum.file, + "path": file_path, + "children": [ + { + "name": "DiscoverySimple", + "path": file_path, + "type_": TestNodeTypeEnum.class_, + "children": [ + { + "name": "test_one", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "14", + "id_": file_path + + "\\" + + "DiscoverySimple" + + "\\" + + "test_one", + }, + { + "name": "test_two", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "17", + "id_": file_path + + "\\" + + "DiscoverySimple" + + "\\" + + "test_two", + }, + ], + "id_": file_path + "\\" + "DiscoverySimple", + } + ], + "id_": file_path, + } + ], + "id_": start_dir, + } + + uuid = "some-uuid" + actual = discover_tests(start_dir, pattern, None, uuid) + + assert actual["status"] == "success" + assert is_same_tree(actual.get("tests"), expected) + assert "error" not in actual + + +def test_empty_discovery() -> None: + """The discover_tests function should return a dictionary with a "success" status, a uuid, no errors, and no test tree + if unittest discovery was performed successfully but no tests were found. + """ + start_dir = os.fsdecode(TEST_DATA_PATH) + pattern = "discovery_empty*" + + uuid = "some-uuid" + actual = discover_tests(start_dir, pattern, None, uuid) + + assert actual["status"] == "success" + assert "tests" in actual + assert "error" not in actual + + +def test_error_discovery() -> None: + """The discover_tests function should return a dictionary with an "error" status, a uuid, the discovered tests, and a list of errors + if unittest discovery failed at some point. + """ + # Discover tests in .data/discovery_error/. + start_path = pathlib.PurePath(TEST_DATA_PATH / "discovery_error") + start_dir = os.fsdecode(start_path) + pattern = "file*" + + file_path = os.fsdecode(start_path / "file_two.py") + + expected = { + "path": start_dir, + "type_": TestNodeTypeEnum.folder, + "name": "discovery_error", + "children": [ + { + "name": "file_two.py", + "type_": TestNodeTypeEnum.file, + "path": file_path, + "children": [ + { + "name": "DiscoveryErrorTwo", + "path": file_path, + "type_": TestNodeTypeEnum.class_, + "children": [ + { + "name": "test_one", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "14", + "id_": file_path + + "\\" + + "DiscoveryErrorTwo" + + "\\" + + "test_one", + }, + { + "name": "test_two", + "path": file_path, + "type_": TestNodeTypeEnum.test, + "lineno": "17", + "id_": file_path + + "\\" + + "DiscoveryErrorTwo" + + "\\" + + "test_two", + }, + ], + "id_": file_path + "\\" + "DiscoveryErrorTwo", + } + ], + "id_": file_path, + } + ], + "id_": start_dir, + } + + uuid = "some-uuid" + actual = discover_tests(start_dir, pattern, None, uuid) + + assert actual["status"] == "error" + assert is_same_tree(expected, actual.get("tests")) + assert len(actual.get("error", [])) == 1 + + +def test_unit_skip() -> None: + """The discover_tests function should return a dictionary with a "success" status, a uuid, no errors, and test tree. + if unittest discovery was performed and found a test in one file marked as skipped and another file marked as skipped. 
+ """ + start_dir = os.fsdecode(TEST_DATA_PATH / "unittest_skip") + pattern = "unittest_*" + + uuid = "some-uuid" + actual = discover_tests(start_dir, pattern, None, uuid) + + assert actual["status"] == "success" + assert "tests" in actual + assert is_same_tree( + actual.get("tests"), + expected_discovery_test_output.skip_unittest_folder_discovery_output, + ) + assert "error" not in actual diff --git a/pythonFiles/tests/unittestadapter/test_execution.py b/pythonFiles/tests/unittestadapter/test_execution.py new file mode 100644 index 000000000000..7d11c656b57b --- /dev/null +++ b/pythonFiles/tests/unittestadapter/test_execution.py @@ -0,0 +1,275 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os +import pathlib +import sys + +import pytest + +script_dir = pathlib.Path(__file__).parent.parent +sys.path.insert(0, os.fspath(script_dir / "lib" / "python")) + +from unittestadapter.execution import run_tests + +TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data" + + +def test_no_ids_run() -> None: + """This test runs on an empty array of test_ids, therefore it should return + an empty dict for the result. + """ + start_dir: str = os.fspath(TEST_DATA_PATH) + testids = [] + pattern = "discovery_simple*" + actual = run_tests(start_dir, testids, pattern, None, "fake-uuid", 1, None) + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH) + if actual["result"] is not None: + assert len(actual["result"]) == 0 + else: + raise AssertionError("actual['result'] is None") + + +def test_single_ids_run() -> None: + """This test runs on a single test_id, therefore it should return + a dict with a single key-value pair for the result. + + This single test passes so the outcome should be 'success'. + """ + id = "discovery_simple.DiscoverySimple.test_one" + actual = run_tests( + os.fspath(TEST_DATA_PATH), + [id], + "discovery_simple*", + None, + "fake-uuid", + 1, + None, + ) + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH) + assert actual["result"] is not None + result = actual["result"] + assert len(result) == 1 + assert id in result + id_result = result[id] + assert id_result is not None + assert "outcome" in id_result + assert id_result["outcome"] == "success" + + +def test_subtest_run() -> None: + """This test runs on a the test_subtest which has a single method, test_even, + that uses unittest subtest. + + The actual result of run should return a dict payload with 6 entry for the 6 subtests. 
+ """ + id = "test_subtest.NumbersTest.test_even" + actual = run_tests( + os.fspath(TEST_DATA_PATH), + [id], + "test_subtest.py", + None, + "fake-uuid", + 1, + None, + ) + subtests_ids = [ + "test_subtest.NumbersTest.test_even (i=0)", + "test_subtest.NumbersTest.test_even (i=1)", + "test_subtest.NumbersTest.test_even (i=2)", + "test_subtest.NumbersTest.test_even (i=3)", + "test_subtest.NumbersTest.test_even (i=4)", + "test_subtest.NumbersTest.test_even (i=5)", + ] + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH) + assert actual["result"] is not None + result = actual["result"] + assert len(result) == 6 + for id in subtests_ids: + assert id in result + + +@pytest.mark.parametrize( + "test_ids, pattern, cwd, expected_outcome", + [ + ( + [ + "test_add.TestAddFunction.test_add_negative_numbers", + "test_add.TestAddFunction.test_add_positive_numbers", + ], + "test_add.py", + os.fspath(TEST_DATA_PATH / "unittest_folder"), + "success", + ), + ( + [ + "test_add.TestAddFunction.test_add_negative_numbers", + "test_add.TestAddFunction.test_add_positive_numbers", + "test_subtract.TestSubtractFunction.test_subtract_negative_numbers", + "test_subtract.TestSubtractFunction.test_subtract_positive_numbers", + ], + "test*", + os.fspath(TEST_DATA_PATH / "unittest_folder"), + "success", + ), + ( + [ + "pattern_a_test.DiscoveryA.test_one_a", + "pattern_a_test.DiscoveryA.test_two_a", + ], + "*test", + os.fspath(TEST_DATA_PATH / "two_patterns"), + "success", + ), + ( + [ + "test_pattern_b.DiscoveryB.test_one_b", + "test_pattern_b.DiscoveryB.test_two_b", + ], + "test_*", + os.fspath(TEST_DATA_PATH / "two_patterns"), + "success", + ), + ( + [ + "file_one.CaseTwoFileOne.test_one", + "file_one.CaseTwoFileOne.test_two", + "folder.file_two.CaseTwoFileTwo.test_one", + "folder.file_two.CaseTwoFileTwo.test_two", + ], + "*", + os.fspath(TEST_DATA_PATH / "utils_nested_cases"), + "success", + ), + ( + [ + "test_two_classes.ClassOne.test_one", + "test_two_classes.ClassTwo.test_two", + ], + "test_two_classes.py", + os.fspath(TEST_DATA_PATH), + "success", + ), + ], +) +def test_multiple_ids_run(test_ids, pattern, cwd, expected_outcome) -> None: + """ + The following are all successful tests of different formats. + + # 1. Two tests with the `pattern` specified as a file + # 2. Two test files in the same folder called `unittest_folder` + # 3. A folder with two different test file patterns, this test gathers pattern `*test` + # 4. A folder with two different test file patterns, this test gathers pattern `test_*` + # 5. A nested structure where a test file is on the same level as a folder containing a test file + # 6. Test file with two test classes + + All tests should have the outcome of `success`. 
+ """ + actual = run_tests(cwd, test_ids, pattern, None, "fake-uuid", 1, None) + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == cwd + assert actual["result"] is not None + result = actual["result"] + assert len(result) == len(test_ids) + for test_id in test_ids: + assert test_id in result + id_result = result[test_id] + assert id_result is not None + assert "outcome" in id_result + assert id_result["outcome"] == expected_outcome + assert True + + +def test_failed_tests(): + """This test runs on a single file `test_fail` with two tests that fail.""" + test_ids = [ + "test_fail_simple.RunFailSimple.test_one_fail", + "test_fail_simple.RunFailSimple.test_two_fail", + ] + actual = run_tests( + os.fspath(TEST_DATA_PATH), + test_ids, + "test_fail_simple*", + None, + "fake-uuid", + 1, + None, + ) + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH) + assert actual["result"] is not None + result = actual["result"] + assert len(result) == len(test_ids) + for test_id in test_ids: + assert test_id in result + id_result = result[test_id] + assert id_result is not None + assert "outcome" in id_result + assert id_result["outcome"] == "failure" + assert "message" and "traceback" in id_result + assert "2 not greater than 3" in str(id_result["message"]) or "1 == 1" in str( + id_result["traceback"] + ) + assert True + + +def test_unknown_id(): + """This test runs on a unknown test_id, therefore it should return + an error as the outcome as it attempts to find the given test. + """ + test_ids = ["unknown_id"] + actual = run_tests( + os.fspath(TEST_DATA_PATH), + test_ids, + "test_fail_simple*", + None, + "fake-uuid", + 1, + None, + ) + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH) + assert actual["result"] is not None + result = actual["result"] + assert len(result) == len(test_ids) + assert "unittest.loader._FailedTest.unknown_id" in result + id_result = result["unittest.loader._FailedTest.unknown_id"] + assert id_result is not None + assert "outcome" in id_result + assert id_result["outcome"] == "error" + assert "message" and "traceback" in id_result + + +def test_incorrect_path(): + """This test runs on a non existent path, therefore it should return + an error as the outcome as it attempts to find the given folder. 
+ """ + test_ids = ["unknown_id"] + actual = run_tests( + os.fspath(TEST_DATA_PATH / "unknown_folder"), + test_ids, + "test_fail_simple*", + None, + "fake-uuid", + 1, + None, + ) + assert actual + assert all(item in actual for item in ("cwd", "status", "error")) + assert actual["status"] == "error" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH / "unknown_folder") diff --git a/src/client/api.ts b/src/client/api.ts index 81a5f676cc22..aaaba540af23 100644 --- a/src/client/api.ts +++ b/src/client/api.ts @@ -12,7 +12,7 @@ import { ILanguageServerOutputChannel } from './activation/types'; import { PythonExtension } from './api/types'; import { isTestExecution, PYTHON_LANGUAGE } from './common/constants'; import { IConfigurationService, Resource } from './common/types'; -import { getDebugpyLauncherArgs, getDebugpyPackagePath } from './debugger/extension/adapter/remoteLaunchers'; +import { getDebugpyLauncherArgs } from './debugger/extension/adapter/remoteLaunchers'; import { IInterpreterService } from './interpreter/contracts'; import { IServiceContainer, IServiceManager } from './ioc/types'; import { JupyterExtensionIntegration } from './jupyter/jupyterIntegration'; @@ -22,6 +22,7 @@ import { buildEnvironmentApi } from './environmentApi'; import { ApiForPylance } from './pylanceApi'; import { getTelemetryReporter } from './telemetry'; import { TensorboardExtensionIntegration } from './tensorBoard/tensorboardIntegration'; +import { getDebugpyPath } from './debugger/pythonDebugger'; export function buildApi( ready: Promise, @@ -122,7 +123,7 @@ export function buildApi( }); }, async getDebuggerPackagePath(): Promise { - return getDebugpyPackagePath(); + return getDebugpyPath(); }, }, settings: { diff --git a/src/client/debugger/extension/adapter/remoteLaunchers.ts b/src/client/debugger/extension/adapter/remoteLaunchers.ts index 80e0289e3ad8..0746cdc2328c 100644 --- a/src/client/debugger/extension/adapter/remoteLaunchers.ts +++ b/src/client/debugger/extension/adapter/remoteLaunchers.ts @@ -8,7 +8,7 @@ import { EXTENSION_ROOT_DIR } from '../../../common/constants'; import '../../../common/extensions'; const pathToPythonLibDir = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python'); -const pathToDebugger = path.join(pathToPythonLibDir, 'debugpy'); +// const pathToDebugger = path.join(pathToPythonLibDir, 'debugpy'); type RemoteDebugOptions = { host: string; @@ -25,7 +25,3 @@ export function getDebugpyLauncherArgs(options: RemoteDebugOptions, debuggerPath ...waitArgs, ]; } - -export function getDebugpyPackagePath(): string { - return pathToDebugger; -} diff --git a/src/client/debugger/pythonDebugger.ts b/src/client/debugger/pythonDebugger.ts new file mode 100644 index 000000000000..3450e95f3cee --- /dev/null +++ b/src/client/debugger/pythonDebugger.ts @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +import { extensions } from 'vscode'; + +interface IPythonDebuggerExtensionApi { + debug: { + getDebuggerPackagePath(): Promise; + }; +} + +async function activateExtension() { + const extension = extensions.getExtension('ms-python.debugpy'); + if (extension) { + if (!extension.isActive) { + await extension.activate(); + } + } + return extension; +} + +async function getPythonDebuggerExtensionAPI(): Promise { + const extension = await activateExtension(); + return extension?.exports as IPythonDebuggerExtensionApi; +} + +export async function getDebugpyPath(): Promise { + const api = await getPythonDebuggerExtensionAPI(); + return api?.debug.getDebuggerPackagePath() ?? ''; +} diff --git a/src/client/jupyter/jupyterIntegration.ts b/src/client/jupyter/jupyterIntegration.ts index a385248bff06..dbc44ed7b92a 100644 --- a/src/client/jupyter/jupyterIntegration.ts +++ b/src/client/jupyter/jupyterIntegration.ts @@ -11,13 +11,13 @@ import type { SemVer } from 'semver'; import { IContextKeyManager, IWorkspaceService } from '../common/application/types'; import { JUPYTER_EXTENSION_ID, PYLANCE_EXTENSION_ID } from '../common/constants'; import { GLOBAL_MEMENTO, IExtensions, IMemento, Resource } from '../common/types'; -import { getDebugpyPackagePath } from '../debugger/extension/adapter/remoteLaunchers'; import { IEnvironmentActivationService } from '../interpreter/activation/types'; import { IInterpreterQuickPickItem, IInterpreterSelector } from '../interpreter/configuration/types'; import { ICondaService, IInterpreterDisplay, IInterpreterStatusbarVisibilityFilter } from '../interpreter/contracts'; import { PythonEnvironment } from '../pythonEnvironments/info'; import { PylanceApi } from '../activation/node/pylanceApi'; import { ExtensionContextKey } from '../common/application/contextKeys'; +import { getDebugpyPath } from '../debugger/pythonDebugger'; type PythonApiForJupyterExtension = { /** @@ -101,7 +101,7 @@ export class JupyterExtensionIntegration { this.interpreterSelector.getAllSuggestions(resource), getKnownSuggestions: (resource: Resource): IInterpreterQuickPickItem[] => this.interpreterSelector.getSuggestions(resource), - getDebuggerPath: async () => dirname(getDebugpyPackagePath()), + getDebuggerPath: async () => dirname(await getDebugpyPath()), getInterpreterPathSelectedForJupyterServer: () => this.globalState.get('INTERPRETER_PATH_SELECTED_FOR_JUPYTER_SERVER'), registerInterpreterStatusFilter: this.interpreterDisplay.registerVisibilityFilter.bind( diff --git a/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts b/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts index 2a75f6316a09..2531ce83191e 100644 --- a/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts +++ b/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts @@ -51,11 +51,11 @@ suite('External debugpy Debugger Launcher', () => { }); }); -suite('Path To Debugger Package', () => { - const pathToPythonLibDir = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python'); - test('Path to debugpy debugger package', () => { - const actual = launchers.getDebugpyPackagePath(); - const expected = path.join(pathToPythonLibDir, 'debugpy'); - expect(actual).to.be.deep.equal(expected); - }); -}); +// suite('Path To Debugger Package', () => { +// const pathToPythonLibDir = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python'); +// test('Path to debugpy debugger package', () => { +// const actual = launchers.getDebugpyPackagePath(); +// const expected = 
path.join(pathToPythonLibDir, 'debugpy'); +// expect(actual).to.be.deep.equal(expected); +// }); +// }); diff --git a/src/test/pythonFiles/dummy.py b/src/test/pythonFiles/dummy.py new file mode 100644 index 000000000000..10f13768abe0 --- /dev/null +++ b/src/test/pythonFiles/dummy.py @@ -0,0 +1 @@ +#dummy file to be opened by Test VS Code instance, so that Python Configuration (workspace configuration will be initialized) \ No newline at end of file From f1be1c105c6ef1086f3891b5ea57485ad6e5b8e7 Mon Sep 17 00:00:00 2001 From: Paula Camargo Date: Tue, 9 Apr 2024 13:46:11 -0700 Subject: [PATCH 02/11] fix tests --- src/client/debugger/extension/adapter/factory.ts | 9 +++------ .../debugger/extension/adapter/remoteLaunchers.ts | 12 ++++++------ src/test/debugger/extension/adapter/adapter.test.ts | 4 ++-- .../extension/adapter/remoteLaunchers.unit.test.ts | 2 +- src/test/debugger/utils.ts | 10 +++++----- 5 files changed, 17 insertions(+), 20 deletions(-) diff --git a/src/client/debugger/extension/adapter/factory.ts b/src/client/debugger/extension/adapter/factory.ts index cfc8cf91aba3..e02810f7d3a1 100644 --- a/src/client/debugger/extension/adapter/factory.ts +++ b/src/client/debugger/extension/adapter/factory.ts @@ -26,6 +26,7 @@ import { Common, Interpreters } from '../../../common/utils/localize'; import { IPersistentStateFactory } from '../../../common/types'; import { Commands } from '../../../common/constants'; import { ICommandManager } from '../../../common/application/types'; +import { getDebugpyPath } from '../../pythonDebugger'; // persistent state names, exported to make use of in testing export enum debugStateKeys { @@ -90,13 +91,9 @@ export class DebugAdapterDescriptorFactory implements IDebugAdapterDescriptorFac traceLog(`DAP Server launched with command: ${executable} ${args.join(' ')}`); return new DebugAdapterExecutable(executable, args); } - + const debugpyPath = await getDebugpyPath() const debuggerAdapterPathToUse = path.join( - EXTENSION_ROOT_DIR, - 'python_files', - 'lib', - 'python', - 'debugpy', + debugpyPath, 'adapter', ); diff --git a/src/client/debugger/extension/adapter/remoteLaunchers.ts b/src/client/debugger/extension/adapter/remoteLaunchers.ts index 0746cdc2328c..af79f2c64578 100644 --- a/src/client/debugger/extension/adapter/remoteLaunchers.ts +++ b/src/client/debugger/extension/adapter/remoteLaunchers.ts @@ -3,12 +3,8 @@ 'use strict'; -import * as path from 'path'; -import { EXTENSION_ROOT_DIR } from '../../../common/constants'; import '../../../common/extensions'; - -const pathToPythonLibDir = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python'); -// const pathToDebugger = path.join(pathToPythonLibDir, 'debugpy'); +import { getDebugpyPath } from '../../pythonDebugger'; type RemoteDebugOptions = { host: string; @@ -16,7 +12,11 @@ type RemoteDebugOptions = { waitUntilDebuggerAttaches: boolean; }; -export function getDebugpyLauncherArgs(options: RemoteDebugOptions, debuggerPath: string = pathToDebugger) { +export async function getDebugpyLauncherArgs(options: RemoteDebugOptions, debuggerPath?: string) { + if (!debuggerPath){ + debuggerPath = await getDebugpyPath(); + } + const waitArgs = options.waitUntilDebuggerAttaches ? 
['--wait-for-client'] : []; return [ debuggerPath.fileToCommandArgumentForPythonExt(), diff --git a/src/test/debugger/extension/adapter/adapter.test.ts b/src/test/debugger/extension/adapter/adapter.test.ts index 2f60290897af..dd0e9d560bca 100644 --- a/src/test/debugger/extension/adapter/adapter.test.ts +++ b/src/test/debugger/extension/adapter/adapter.test.ts @@ -70,7 +70,7 @@ suite('Debugger Integration', () => { } const [configName, scriptArgs] = tests[kind]; test(kind, async () => { - const session = fix.resolveDebugger(configName, file, scriptArgs, workspaceRoot); + const session = await fix.resolveDebugger(configName, file, scriptArgs, workspaceRoot); await session.start(); // Any debugger ops would go here. await new Promise((r) => setTimeout(r, 300)); // 0.3 seconds @@ -93,7 +93,7 @@ suite('Debugger Integration', () => { } const [configName, scriptArgs] = tests[kind]; test(kind, async () => { - const session = fix.resolveDebugger(configName, file, scriptArgs, workspaceRoot); + const session = await fix.resolveDebugger(configName, file, scriptArgs, workspaceRoot); const bp = session.addBreakpoint(file, 21); // line: "time.sleep()" await session.start(); await session.waitForBreakpoint(bp); diff --git a/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts b/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts index 2531ce83191e..dfce1aafc63a 100644 --- a/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts +++ b/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts @@ -5,7 +5,7 @@ import { expect } from 'chai'; import * as path from 'path'; -import { EXTENSION_ROOT_DIR } from '../../../../client/common/constants'; +// import { EXTENSION_ROOT_DIR } from '../../../../client/common/constants'; import '../../../../client/common/extensions'; import * as launchers from '../../../../client/debugger/extension/adapter/remoteLaunchers'; diff --git a/src/test/debugger/utils.ts b/src/test/debugger/utils.ts index 4a41489940b8..749adb359597 100644 --- a/src/test/debugger/utils.ts +++ b/src/test/debugger/utils.ts @@ -277,12 +277,12 @@ class DebuggerSession { } export class DebuggerFixture extends PythonFixture { - public resolveDebugger( + public async resolveDebugger( configName: string, file: string, scriptArgs: string[], wsRoot?: vscode.WorkspaceFolder, - ): DebuggerSession { + ): Promise { const config = getConfig(configName); let proc: Proc | undefined; if (config.request === 'launch') { @@ -292,7 +292,7 @@ export class DebuggerFixture extends PythonFixture { // XXX set the file in the current vscode editor? } else if (config.request === 'attach') { if (config.port) { - proc = this.runDebugger(config.port, file, ...scriptArgs); + proc = await this.runDebugger(config.port, file, ...scriptArgs); if (wsRoot && config.name === 'attach to a local port') { config.pathMappings.localRoot = wsRoot.uri.fsPath; } @@ -352,8 +352,8 @@ export class DebuggerFixture extends PythonFixture { } } - public runDebugger(port: number, filename: string, ...scriptArgs: string[]) { - const args = getDebugpyLauncherArgs({ + public async runDebugger(port: number, filename: string, ...scriptArgs: string[]) { + const args = await getDebugpyLauncherArgs({ host: 'localhost', port: port, // This causes problems if we set it to true. 
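With this change getDebugpyLauncherArgs() no longer bakes in a default debugger path: when debuggerPath is omitted it is resolved asynchronously through getDebugpyPath(), so callers now have to await the helper. A minimal sketch of the updated call pattern (the wrapper function, port value, and import path below are illustrative only and are not part of the patch):

    // Hypothetical caller showing the awaited usage of the now-async helper;
    // adjust the relative import path to the caller's location.
    import { getDebugpyLauncherArgs } from '../../client/debugger/extension/adapter/remoteLaunchers';

    async function buildRemoteDebugArgs(): Promise<string[]> {
        // Omitting debuggerPath lets the helper fall back to getDebugpyPath(),
        // which asks the ms-python.debugpy extension for its package location.
        return getDebugpyLauncherArgs({
            host: 'localhost',
            port: 5678,
            waitUntilDebuggerAttaches: false,
        });
    }
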
From 154f5e0cb5f0be96c19c0d44ac7253d37883d4a2 Mon Sep 17 00:00:00 2001 From: Paula Camargo Date: Tue, 9 Apr 2024 12:49:18 -0700 Subject: [PATCH 03/11] Merge --- pythonFiles/create_venv.py | 250 +++ pythonFiles/install_debugpy.py | 66 + pythonFiles/installed_check.py | 129 ++ pythonFiles/normalizeSelection.py | 305 ++++ pythonFiles/pyproject.toml | 36 + pythonFiles/testing_tools/adapter/__init__.py | 2 + pythonFiles/testing_tools/adapter/__main__.py | 106 ++ pythonFiles/testing_tools/adapter/errors.py | 16 + pythonFiles/testing_tools/adapter/info.py | 120 ++ .../testing_tools/adapter/pytest/_cli.py | 17 + .../adapter/pytest/_discovery.py | 109 ++ .../adapter/pytest/_pytest_item.py | 630 +++++++ pythonFiles/testing_tools/adapter/util.py | 287 +++ .../.data/error_parametrize_discovery.py | 10 + .../.data/error_pytest_import.txt | 6 + .../.data/error_raise_exception.py | 14 + .../pytestadapter/.data/parametrize_tests.py | 22 + .../.data/test_multi_class_nest.py | 19 + .../pytestadapter/.data/text_docstring.txt | 4 + .../expected_execution_test_output.py | 686 ++++++++ .../tests/pytestadapter/test_execution.py | 278 +++ pythonFiles/tests/testing_tools/__init__.py | 2 + .../tests/testing_tools/adapter/__init__.py | 2 + .../testing_tools/adapter/pytest/__init__.py | 2 + .../adapter/pytest/test_discovery.py | 1553 +++++++++++++++++ .../testing_tools/adapter/test___main__.py | 199 +++ .../testing_tools/adapter/test_discovery.py | 675 +++++++ .../testing_tools/adapter/test_functional.py | 1535 ++++++++++++++++ .../unittestadapter/.data/discovery_empty.py | 15 + .../.data/discovery_error/file_one.py | 20 + .../.data/discovery_error/file_two.py | 18 + .../unittestadapter/.data/discovery_simple.py | 18 + .../.data/utils_decorated_tree.py | 29 + .../.data/utils_nested_cases/file_one.py | 17 + .../.data/utils_simple_cases.py | 17 + .../.data/utils_simple_tree.py | 17 + .../tests/unittestadapter/test_discovery.py | 233 +++ .../tests/unittestadapter/test_execution.py | 275 +++ src/client/api.ts | 5 +- .../extension/adapter/remoteLaunchers.ts | 6 +- src/client/debugger/pythonDebugger.ts | 30 + src/client/jupyter/jupyterIntegration.ts | 4 +- .../adapter/remoteLaunchers.unit.test.ts | 16 +- src/test/pythonFiles/dummy.py | 1 + 44 files changed, 7784 insertions(+), 17 deletions(-) create mode 100644 pythonFiles/create_venv.py create mode 100644 pythonFiles/install_debugpy.py create mode 100644 pythonFiles/installed_check.py create mode 100644 pythonFiles/normalizeSelection.py create mode 100644 pythonFiles/pyproject.toml create mode 100644 pythonFiles/testing_tools/adapter/__init__.py create mode 100644 pythonFiles/testing_tools/adapter/__main__.py create mode 100644 pythonFiles/testing_tools/adapter/errors.py create mode 100644 pythonFiles/testing_tools/adapter/info.py create mode 100644 pythonFiles/testing_tools/adapter/pytest/_cli.py create mode 100644 pythonFiles/testing_tools/adapter/pytest/_discovery.py create mode 100644 pythonFiles/testing_tools/adapter/pytest/_pytest_item.py create mode 100644 pythonFiles/testing_tools/adapter/util.py create mode 100644 pythonFiles/tests/pytestadapter/.data/error_parametrize_discovery.py create mode 100644 pythonFiles/tests/pytestadapter/.data/error_pytest_import.txt create mode 100644 pythonFiles/tests/pytestadapter/.data/error_raise_exception.py create mode 100644 pythonFiles/tests/pytestadapter/.data/parametrize_tests.py create mode 100644 pythonFiles/tests/pytestadapter/.data/test_multi_class_nest.py create mode 100644 
pythonFiles/tests/pytestadapter/.data/text_docstring.txt create mode 100644 pythonFiles/tests/pytestadapter/expected_execution_test_output.py create mode 100644 pythonFiles/tests/pytestadapter/test_execution.py create mode 100644 pythonFiles/tests/testing_tools/__init__.py create mode 100644 pythonFiles/tests/testing_tools/adapter/__init__.py create mode 100644 pythonFiles/tests/testing_tools/adapter/pytest/__init__.py create mode 100644 pythonFiles/tests/testing_tools/adapter/pytest/test_discovery.py create mode 100644 pythonFiles/tests/testing_tools/adapter/test___main__.py create mode 100644 pythonFiles/tests/testing_tools/adapter/test_discovery.py create mode 100644 pythonFiles/tests/testing_tools/adapter/test_functional.py create mode 100644 pythonFiles/tests/unittestadapter/.data/discovery_empty.py create mode 100644 pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py create mode 100644 pythonFiles/tests/unittestadapter/.data/discovery_error/file_two.py create mode 100644 pythonFiles/tests/unittestadapter/.data/discovery_simple.py create mode 100644 pythonFiles/tests/unittestadapter/.data/utils_decorated_tree.py create mode 100644 pythonFiles/tests/unittestadapter/.data/utils_nested_cases/file_one.py create mode 100644 pythonFiles/tests/unittestadapter/.data/utils_simple_cases.py create mode 100644 pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py create mode 100644 pythonFiles/tests/unittestadapter/test_discovery.py create mode 100644 pythonFiles/tests/unittestadapter/test_execution.py create mode 100644 src/client/debugger/pythonDebugger.ts create mode 100644 src/test/pythonFiles/dummy.py diff --git a/pythonFiles/create_venv.py b/pythonFiles/create_venv.py new file mode 100644 index 000000000000..092286f986cf --- /dev/null +++ b/pythonFiles/create_venv.py @@ -0,0 +1,250 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import argparse +import importlib.util as import_util +import json +import os +import pathlib +import subprocess +import sys +import urllib.request as url_lib +from typing import List, Optional, Sequence, Union + +VENV_NAME = ".venv" +CWD = pathlib.Path.cwd() +MICROVENV_SCRIPT_PATH = pathlib.Path(__file__).parent / "create_microvenv.py" + + +class VenvError(Exception): + pass + + +def parse_args(argv: Sequence[str]) -> argparse.Namespace: + parser = argparse.ArgumentParser() + + parser.add_argument( + "--requirements", + action="append", + default=[], + help="Install additional dependencies into the virtual environment.", + ) + + parser.add_argument( + "--toml", + action="store", + default=None, + help="Install additional dependencies from sources like `pyproject.toml` into the virtual environment.", + ) + parser.add_argument( + "--extras", + action="append", + default=[], + help="Install specific package groups from `pyproject.toml` into the virtual environment.", + ) + + parser.add_argument( + "--git-ignore", + action="store_true", + default=False, + help="Add .gitignore to the newly created virtual environment.", + ) + parser.add_argument( + "--name", + default=VENV_NAME, + type=str, + help="Name of the virtual environment.", + metavar="NAME", + action="store", + ) + parser.add_argument( + "--stdin", + action="store_true", + default=False, + help="Read arguments from stdin.", + ) + return parser.parse_args(argv) + + +def is_installed(module: str) -> bool: + return import_util.find_spec(module) is not None + + +def file_exists(path: Union[str, pathlib.PurePath]) -> bool: + return os.path.exists(path) + + +def venv_exists(name: str) -> bool: + return os.path.exists(CWD / name) and file_exists(get_venv_path(name)) + + +def run_process(args: Sequence[str], error_message: str) -> None: + try: + print("Running: " + " ".join(args)) + subprocess.run(args, cwd=os.getcwd(), check=True) + except subprocess.CalledProcessError: + raise VenvError(error_message) + + +def get_venv_path(name: str) -> str: + # See `venv` doc here for more details on binary location: + # https://docs.python.org/3/library/venv.html#creating-virtual-environments + if sys.platform == "win32": + return os.fspath(CWD / name / "Scripts" / "python.exe") + else: + return os.fspath(CWD / name / "bin" / "python") + + +def install_requirements(venv_path: str, requirements: List[str]) -> None: + if not requirements: + return + + for requirement in requirements: + print(f"VENV_INSTALLING_REQUIREMENTS: {requirement}") + run_process( + [venv_path, "-m", "pip", "install", "-r", requirement], + "CREATE_VENV.PIP_FAILED_INSTALL_REQUIREMENTS", + ) + print("CREATE_VENV.PIP_INSTALLED_REQUIREMENTS") + + +def install_toml(venv_path: str, extras: List[str]) -> None: + args = "." 
if len(extras) == 0 else f".[{','.join(extras)}]" + run_process( + [venv_path, "-m", "pip", "install", "-e", args], + "CREATE_VENV.PIP_FAILED_INSTALL_PYPROJECT", + ) + print("CREATE_VENV.PIP_INSTALLED_PYPROJECT") + + +def upgrade_pip(venv_path: str) -> None: + print("CREATE_VENV.UPGRADING_PIP") + run_process( + [venv_path, "-m", "pip", "install", "--upgrade", "pip"], + "CREATE_VENV.UPGRADE_PIP_FAILED", + ) + print("CREATE_VENV.UPGRADED_PIP") + + +def add_gitignore(name: str) -> None: + git_ignore = CWD / name / ".gitignore" + if not file_exists(git_ignore): + print("Creating: " + os.fspath(git_ignore)) + with open(git_ignore, "w") as f: + f.write("*") + + +def download_pip_pyz(name: str): + url = "https://bootstrap.pypa.io/pip/pip.pyz" + print("CREATE_VENV.DOWNLOADING_PIP") + + try: + with url_lib.urlopen(url) as response: + pip_pyz_path = os.fspath(CWD / name / "pip.pyz") + with open(pip_pyz_path, "wb") as out_file: + data = response.read() + out_file.write(data) + out_file.flush() + except Exception: + raise VenvError("CREATE_VENV.DOWNLOAD_PIP_FAILED") + + +def install_pip(name: str): + pip_pyz_path = os.fspath(CWD / name / "pip.pyz") + executable = get_venv_path(name) + print("CREATE_VENV.INSTALLING_PIP") + run_process( + [executable, pip_pyz_path, "install", "pip"], + "CREATE_VENV.INSTALL_PIP_FAILED", + ) + + +def get_requirements_from_args(args: argparse.Namespace) -> List[str]: + requirements = [] + if args.stdin: + data = json.loads(sys.stdin.read()) + requirements = data.get("requirements", []) + if args.requirements: + requirements.extend(args.requirements) + return requirements + + +def main(argv: Optional[Sequence[str]] = None) -> None: + if argv is None: + argv = [] + args = parse_args(argv) + + use_micro_venv = False + venv_installed = is_installed("venv") + pip_installed = is_installed("pip") + ensure_pip_installed = is_installed("ensurepip") + distutils_installed = is_installed("distutils") + + if not venv_installed: + if sys.platform == "win32": + raise VenvError("CREATE_VENV.VENV_NOT_FOUND") + else: + use_micro_venv = True + if not distutils_installed: + print("Install `python3-distutils` package or equivalent for your OS.") + print("On Debian/Ubuntu: `sudo apt install python3-distutils`") + raise VenvError("CREATE_VENV.DISTUTILS_NOT_INSTALLED") + + if venv_exists(args.name): + # A virtual environment with same name exists. + # We will use the existing virtual environment. + venv_path = get_venv_path(args.name) + print(f"EXISTING_VENV:{venv_path}") + else: + if use_micro_venv: + # `venv` was not found but on this platform we can use `microvenv` + run_process( + [ + sys.executable, + os.fspath(MICROVENV_SCRIPT_PATH), + "--name", + args.name, + ], + "CREATE_VENV.MICROVENV_FAILED_CREATION", + ) + elif not pip_installed or not ensure_pip_installed: + # `venv` was found but `pip` or `ensurepip` was not found. + # We create a venv without `pip` in it. We will later install `pip`. + run_process( + [sys.executable, "-m", "venv", "--without-pip", args.name], + "CREATE_VENV.VENV_FAILED_CREATION", + ) + else: + # Both `venv` and `pip` were found. So create a .venv normally + run_process( + [sys.executable, "-m", "venv", args.name], + "CREATE_VENV.VENV_FAILED_CREATION", + ) + + venv_path = get_venv_path(args.name) + print(f"CREATED_VENV:{venv_path}") + + if args.git_ignore: + add_gitignore(args.name) + + # At this point we have a .venv. Now we handle installing `pip`. + if pip_installed and ensure_pip_installed: + # We upgrade pip if it is already installed. 
+ upgrade_pip(venv_path) + else: + # `pip` was not found, so we download it and install it. + download_pip_pyz(args.name) + install_pip(args.name) + + requirements = get_requirements_from_args(args) + if requirements: + print(f"VENV_INSTALLING_REQUIREMENTS: {requirements}") + install_requirements(venv_path, requirements) + + if args.toml: + print(f"VENV_INSTALLING_PYPROJECT: {args.toml}") + install_toml(venv_path, args.extras) + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/pythonFiles/install_debugpy.py b/pythonFiles/install_debugpy.py new file mode 100644 index 000000000000..cabb620ea1f2 --- /dev/null +++ b/pythonFiles/install_debugpy.py @@ -0,0 +1,66 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import io +import json +import os +import urllib.request as url_lib +import zipfile + +from packaging.version import parse as version_parser + +EXTENSION_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +DEBUGGER_DEST = os.path.join(EXTENSION_ROOT, "pythonFiles", "lib", "python") +DEBUGGER_PACKAGE = "debugpy" +DEBUGGER_PYTHON_ABI_VERSIONS = ("cp310",) +DEBUGGER_VERSION = "1.6.7" # can also be "latest" + + +def _contains(s, parts=()): + return any(p in s for p in parts) + + +def _get_package_data(): + json_uri = "https://pypi.org/pypi/{0}/json".format(DEBUGGER_PACKAGE) + # Response format: https://warehouse.readthedocs.io/api-reference/json/#project + # Release metadata format: https://github.com/pypa/interoperability-peps/blob/master/pep-0426-core-metadata.rst + with url_lib.urlopen(json_uri) as response: + return json.loads(response.read()) + + +def _get_debugger_wheel_urls(data, version): + return list( + r["url"] + for r in data["releases"][version] + if _contains(r["url"], DEBUGGER_PYTHON_ABI_VERSIONS) + ) + + +def _download_and_extract(root, url, version): + root = os.getcwd() if root is None or root == "." else root + print(url) + with url_lib.urlopen(url) as response: + data = response.read() + with zipfile.ZipFile(io.BytesIO(data), "r") as wheel: + for zip_info in wheel.infolist(): + # Ignore dist info since we are merging multiple wheels + if ".dist-info/" in zip_info.filename: + continue + print("\t" + zip_info.filename) + wheel.extract(zip_info.filename, root) + + +def main(root): + data = _get_package_data() + + if DEBUGGER_VERSION == "latest": + use_version = max(data["releases"].keys(), key=version_parser) + else: + use_version = DEBUGGER_VERSION + + for url in _get_debugger_wheel_urls(data, use_version): + _download_and_extract(root, url, use_version) + + +if __name__ == "__main__": + main(DEBUGGER_DEST) diff --git a/pythonFiles/installed_check.py b/pythonFiles/installed_check.py new file mode 100644 index 000000000000..f0e1c268d270 --- /dev/null +++ b/pythonFiles/installed_check.py @@ -0,0 +1,129 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import argparse +import json +import os +import pathlib +import sys +from typing import Dict, List, Optional, Sequence, Tuple, Union + +LIB_ROOT = pathlib.Path(__file__).parent / "lib" / "python" +sys.path.insert(0, os.fspath(LIB_ROOT)) + +import tomli +from importlib_metadata import metadata +from packaging.requirements import Requirement + +DEFAULT_SEVERITY = 3 + + +def parse_args(argv: Optional[Sequence[str]] = None): + if argv is None: + argv = sys.argv[1:] + parser = argparse.ArgumentParser( + description="Check for installed packages against requirements" + ) + parser.add_argument("FILEPATH", type=str, help="Path to requirements.[txt, in]") + + return parser.parse_args(argv) + + +def parse_requirements(line: str) -> Optional[Requirement]: + try: + req = Requirement(line.strip("\\")) + if req.marker is None: + return req + elif req.marker.evaluate(): + return req + except: + return None + + +def process_requirements(req_file: pathlib.Path) -> List[Dict[str, Union[str, int]]]: + diagnostics = [] + for n, line in enumerate(req_file.read_text(encoding="utf-8").splitlines()): + if line.startswith(("#", "-", " ")) or line == "": + continue + + req = parse_requirements(line) + if req: + try: + # Check if package is installed + metadata(req.name) + except: + diagnostics.append( + { + "line": n, + "character": 0, + "endLine": n, + "endCharacter": len(req.name), + "package": req.name, + "code": "not-installed", + "severity": DEFAULT_SEVERITY, + } + ) + return diagnostics + + +def get_pos(lines: List[str], text: str) -> Tuple[int, int, int, int]: + for n, line in enumerate(lines): + index = line.find(text) + if index >= 0: + return n, index, n, index + len(text) + return (0, 0, 0, 0) + + +def process_pyproject(req_file: pathlib.Path) -> List[Dict[str, Union[str, int]]]: + diagnostics = [] + try: + raw_text = req_file.read_text(encoding="utf-8") + pyproject = tomli.loads(raw_text) + except: + return diagnostics + + lines = raw_text.splitlines() + reqs = pyproject.get("project", {}).get("dependencies", []) + for raw_req in reqs: + req = parse_requirements(raw_req) + n, start, _, end = get_pos(lines, raw_req) + if req: + try: + # Check if package is installed + metadata(req.name) + except: + diagnostics.append( + { + "line": n, + "character": start, + "endLine": n, + "endCharacter": end, + "package": req.name, + "code": "not-installed", + "severity": DEFAULT_SEVERITY, + } + ) + return diagnostics + + +def get_diagnostics(req_file: pathlib.Path) -> List[Dict[str, Union[str, int]]]: + diagnostics = [] + if not req_file.exists(): + return diagnostics + + if req_file.name == "pyproject.toml": + diagnostics = process_pyproject(req_file) + else: + diagnostics = process_requirements(req_file) + + return diagnostics + + +def main(): + args = parse_args() + diagnostics = get_diagnostics(pathlib.Path(args.FILEPATH)) + print(json.dumps(diagnostics, ensure_ascii=False)) + + +if __name__ == "__main__": + main() diff --git a/pythonFiles/normalizeSelection.py b/pythonFiles/normalizeSelection.py new file mode 100644 index 000000000000..7608ce8860f6 --- /dev/null +++ b/pythonFiles/normalizeSelection.py @@ -0,0 +1,305 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import ast +import json +import re +import sys +import textwrap +from typing import Iterable + + +def split_lines(source): + """ + Split selection lines in a version-agnostic way. + + Python grammar only treats \r, \n, and \r\n as newlines. 
+ But splitlines() in Python 3 has a much larger list: for example, it also includes \v, \f. + As such, this function will split lines across all Python versions. + """ + return re.split(r"[\n\r]+", source) + + +def _get_statements(selection): + """ + Process a multiline selection into a list of its top-level statements. + This will remove empty newlines around and within the selection, dedent it, + and split it using the result of `ast.parse()`. + """ + + # Remove blank lines within the selection to prevent the REPL from thinking the block is finished. + lines = (line for line in split_lines(selection) if line.strip() != "") + + # Dedent the selection and parse it using the ast module. + # Note that leading comments in the selection will be discarded during parsing. + source = textwrap.dedent("\n".join(lines)) + tree = ast.parse(source) + + # We'll need the dedented lines to rebuild the selection. + lines = split_lines(source) + + # Get the line ranges for top-level blocks returned from parsing the dedented text + # and split the selection accordingly. + # tree.body is a list of AST objects, which we rely on to extract top-level statements. + # If we supported Python 3.8+ only we could use the lineno and end_lineno attributes of each object + # to get the boundaries of each block. + # However, earlier Python versions only have the lineno attribute, which is the range start position (1-indexed). + # Therefore, to retrieve the end line of each block in a version-agnostic way we need to do + # `end = next_block.lineno - 1` + # for all blocks except the last one, which will just run until the last line. + ends = [] + for node in tree.body[1:]: + line_end = node.lineno - 1 + # Special handling of decorators: + # In Python 3.8 and higher, decorators are not taken into account in the value returned by lineno, + # and we have to use the length of the decorator_list array to compute the actual start line. + # Before that, lineno takes into account decorators, so this offset check is unnecessary. + # Also, not all AST objects can have decorators. + if hasattr(node, "decorator_list") and sys.version_info >= (3, 8): + # Using getattr instead of node.decorator_list or pyright will complain about an unknown member. + line_end -= len(getattr(node, "decorator_list")) + ends.append(line_end) + ends.append(len(lines)) + + for node, end in zip(tree.body, ends): + # Given this selection: + # 1: if (m > 0 and + # 2: n < 3): + # 3: print('foo') + # 4: value = 'bar' + # + # The first block would have lineno = 1, and the second block lineno = 4 + start = node.lineno - 1 + + # Special handling of decorators similar to what's above. + if hasattr(node, "decorator_list") and sys.version_info >= (3, 8): + # Using getattr instead of node.decorator_list or pyright will complain about an unknown member. + start -= len(getattr(node, "decorator_list")) + block = "\n".join(lines[start:end]) + + # If the block is multiline, add an extra newline character at its end. + # This way, when joining blocks back together, there will be a blank line between each multiline statement + # and no blank lines between single-line statements, or it would look like this: + # >>> x = 22 + # >>> + # >>> total = x + 30 + # >>> + # Note that for the multiline parentheses case this newline is redundant, + # since the closing parenthesis terminates the statement already. + # This means that for this pattern we'll end up with: + # >>> x = [ + # ... 1 + # ... ] + # >>> + # >>> y = [ + # ... 2 + # ...] 
+ if end - start > 1: + block += "\n" + + yield block + + +def normalize_lines(selection): + """ + Normalize the text selection received from the extension. + + If it is a single line selection, dedent it and append a newline and + send it back to the extension. + Otherwise, sanitize the multiline selection before returning it: + split it in a list of top-level statements + and add newlines between each of them so the REPL knows where each block ends. + """ + try: + # Parse the selection into a list of top-level blocks. + # We don't differentiate between single and multiline statements + # because it's not a perf bottleneck, + # and the overhead from splitting and rejoining strings in the multiline case is one-off. + statements = _get_statements(selection) + + # Insert a newline between each top-level statement, and append a newline to the selection. + source = "\n".join(statements) + "\n" + if selection[-2] == "}" or selection[-2] == "]": + source = source[:-1] + except Exception: + # If there's a problem when parsing statements, + # append a blank line to end the block and send it as-is. + source = selection + "\n\n" + + return source + + +top_level_nodes = [] +min_key = None + + +def check_exact_exist(top_level_nodes, start_line, end_line): + exact_nodes = [] + for node in top_level_nodes: + if node.lineno == start_line and node.end_lineno == end_line: + exact_nodes.append(node) + + return exact_nodes + + +def traverse_file(wholeFileContent, start_line, end_line, was_highlighted): + """ + Intended to traverse through a user's given file content and find, collect all appropriate lines + that should be sent to the REPL in case of smart selection. + This could be exact statement such as just a single line print statement, + or a multiline dictionary, or differently styled multi-line list comprehension, etc. + Then call the normalize_lines function to normalize our smartly selected code block. + """ + parsed_file_content = None + + try: + parsed_file_content = ast.parse(wholeFileContent) + except Exception: + # Handle case where user is attempting to run code where file contains deprecated Python code. + # Let typescript side know and show warning message. + return { + "normalized_smart_result": "deprecated", + "which_line_next": 0, + } + + smart_code = "" + should_run_top_blocks = [] + + # Purpose of this loop is to fetch and collect all the + # AST top level nodes, and its node.body as child nodes. + # Individual nodes will contain information like + # the start line, end line and get source segment information + # that will be used to smartly select, and send normalized code. + for node in ast.iter_child_nodes(parsed_file_content): + top_level_nodes.append(node) + + ast_types_with_nodebody = ( + ast.Module, + ast.Interactive, + ast.Expression, + ast.FunctionDef, + ast.AsyncFunctionDef, + ast.ClassDef, + ast.For, + ast.AsyncFor, + ast.While, + ast.If, + ast.With, + ast.AsyncWith, + ast.Try, + ast.Lambda, + ast.IfExp, + ast.ExceptHandler, + ) + if isinstance(node, ast_types_with_nodebody) and isinstance( + node.body, Iterable + ): + for child_nodes in node.body: + top_level_nodes.append(child_nodes) + + exact_nodes = check_exact_exist(top_level_nodes, start_line, end_line) + + # Just return the exact top level line, if present. 
+ if len(exact_nodes) > 0: + which_line_next = 0 + for same_line_node in exact_nodes: + should_run_top_blocks.append(same_line_node) + smart_code += ( + f"{ast.get_source_segment(wholeFileContent, same_line_node)}\n" + ) + which_line_next = get_next_block_lineno(should_run_top_blocks) + return { + "normalized_smart_result": smart_code, + "which_line_next": which_line_next, + } + + # For each of the nodes in the parsed file content, + # add the appropriate source code line(s) to be sent to the REPL, dependent on + # user is trying to send and execute single line/statement or multiple with smart selection. + for top_node in ast.iter_child_nodes(parsed_file_content): + if start_line == top_node.lineno and end_line == top_node.end_lineno: + should_run_top_blocks.append(top_node) + + smart_code += f"{ast.get_source_segment(wholeFileContent, top_node)}\n" + break # If we found exact match, don't waste computation in parsing extra nodes. + elif start_line >= top_node.lineno and end_line <= top_node.end_lineno: + # Case to apply smart selection for multiple line. + # This is the case for when we have to add multiple lines that should be included in the smart send. + # For example: + # 'my_dictionary': { + # 'Audi': 'Germany', + # 'BMW': 'Germany', + # 'Genesis': 'Korea', + # } + # with the mouse cursor at 'BMW': 'Germany', should send all of the lines that pertains to my_dictionary. + + should_run_top_blocks.append(top_node) + + smart_code += str(ast.get_source_segment(wholeFileContent, top_node)) + smart_code += "\n" + + normalized_smart_result = normalize_lines(smart_code) + which_line_next = get_next_block_lineno(should_run_top_blocks) + return { + "normalized_smart_result": normalized_smart_result, + "which_line_next": which_line_next, + } + + +# Look at the last top block added, find lineno for the next upcoming block, +# This will be used in calculating lineOffset to move cursor in VS Code. +def get_next_block_lineno(which_line_next): + last_ran_lineno = int(which_line_next[-1].end_lineno) + next_lineno = int(which_line_next[-1].end_lineno) + + for reverse_node in top_level_nodes: + if reverse_node.lineno > last_ran_lineno: + next_lineno = reverse_node.lineno + break + return next_lineno + + +if __name__ == "__main__": + # Content is being sent from the extension as a JSON object. + # Decode the data from the raw bytes. + stdin = sys.stdin if sys.version_info < (3,) else sys.stdin.buffer + raw = stdin.read() + contents = json.loads(raw.decode("utf-8")) + # Empty highlight means user has not explicitly selected specific text. + empty_Highlight = contents.get("emptyHighlight", False) + + # We also get the activeEditor selection start line and end line from the typescript VS Code side. + # Remember to add 1 to each of the received since vscode starts line counting from 0 . + vscode_start_line = contents["startLine"] + 1 + vscode_end_line = contents["endLine"] + 1 + + # Send the normalized code back to the extension in a JSON object. 
+ data = None + which_line_next = 0 + + if ( + empty_Highlight + and contents.get("smartSendExperimentEnabled") + and contents.get("smartSendSettingsEnabled") + ): + result = traverse_file( + contents["wholeFileContent"], + vscode_start_line, + vscode_end_line, + not empty_Highlight, + ) + normalized = result["normalized_smart_result"] + which_line_next = result["which_line_next"] + if normalized == "deprecated": + data = json.dumps({"normalized": normalized}) + else: + data = json.dumps( + {"normalized": normalized, "nextBlockLineno": result["which_line_next"]} + ) + else: + normalized = normalize_lines(contents["code"]) + data = json.dumps({"normalized": normalized}) + + stdout = sys.stdout if sys.version_info < (3,) else sys.stdout.buffer + stdout.write(data.encode("utf-8")) + stdout.close() diff --git a/pythonFiles/pyproject.toml b/pythonFiles/pyproject.toml new file mode 100644 index 000000000000..56237999e603 --- /dev/null +++ b/pythonFiles/pyproject.toml @@ -0,0 +1,36 @@ +[tool.black] +exclude = ''' + +( + /( + .data + | .vscode + | lib + )/ +) +''' + +[tool.pyright] +exclude = ['lib'] +extraPaths = ['lib/python', 'lib/jedilsp'] +ignore = [ + # Ignore all pre-existing code with issues + 'get-pip.py', + 'install_debugpy.py', + 'tensorboard_launcher.py', + 'testlauncher.py', + 'visualstudio_py_testlauncher.py', + 'testing_tools/unittest_discovery.py', + 'testing_tools/adapter/util.py', + 'testing_tools/adapter/pytest/_discovery.py', + 'testing_tools/adapter/pytest/_pytest_item.py', + 'tests/debug_adapter/test_install_debugpy.py', + 'tests/testing_tools/adapter/.data', + 'tests/testing_tools/adapter/test___main__.py', + 'tests/testing_tools/adapter/test_discovery.py', + 'tests/testing_tools/adapter/test_functional.py', + 'tests/testing_tools/adapter/test_report.py', + 'tests/testing_tools/adapter/test_util.py', + 'tests/testing_tools/adapter/pytest/test_cli.py', + 'tests/testing_tools/adapter/pytest/test_discovery.py', +] diff --git a/pythonFiles/testing_tools/adapter/__init__.py b/pythonFiles/testing_tools/adapter/__init__.py new file mode 100644 index 000000000000..5b7f7a925cc0 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/pythonFiles/testing_tools/adapter/__main__.py b/pythonFiles/testing_tools/adapter/__main__.py new file mode 100644 index 000000000000..5857c63db049 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/__main__.py @@ -0,0 +1,106 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import + +import argparse +import sys + +from . import pytest, report +from .errors import UnsupportedToolError, UnsupportedCommandError + + +TOOLS = { + "pytest": { + "_add_subparser": pytest.add_cli_subparser, + "discover": pytest.discover, + }, +} +REPORTERS = { + "discover": report.report_discovered, +} + + +def parse_args( + # the args to parse + argv=sys.argv[1:], + # the program name + prog=sys.argv[0], +): + """ + Return the subcommand & tool to run, along with its args. + + This defines the standard CLI for the different testing frameworks. + """ + parser = argparse.ArgumentParser( + description="Run Python testing operations.", + prog=prog, + # ... + ) + cmdsubs = parser.add_subparsers(dest="cmd") + + # Add "run" and "debug" subcommands when ready. 
+ for cmdname in ["discover"]: + sub = cmdsubs.add_parser(cmdname) + subsubs = sub.add_subparsers(dest="tool") + for toolname in sorted(TOOLS): + try: + add_subparser = TOOLS[toolname]["_add_subparser"] + except KeyError: + continue + subsub = add_subparser(cmdname, toolname, subsubs) + if cmdname == "discover": + subsub.add_argument("--simple", action="store_true") + subsub.add_argument( + "--no-hide-stdio", dest="hidestdio", action="store_false" + ) + subsub.add_argument("--pretty", action="store_true") + + # Parse the args! + if "--" in argv: + sep_index = argv.index("--") + toolargs = argv[sep_index + 1 :] + argv = argv[:sep_index] + else: + toolargs = [] + args = parser.parse_args(argv) + ns = vars(args) + + cmd = ns.pop("cmd") + if not cmd: + parser.error("missing command") + + tool = ns.pop("tool") + if not tool: + parser.error("missing tool") + + return tool, cmd, ns, toolargs + + +def main( + toolname, + cmdname, + subargs, + toolargs, + # internal args (for testing): + _tools=TOOLS, + _reporters=REPORTERS, +): + try: + tool = _tools[toolname] + except KeyError: + raise UnsupportedToolError(toolname) + + try: + run = tool[cmdname] + report_result = _reporters[cmdname] + except KeyError: + raise UnsupportedCommandError(cmdname) + + parents, result = run(toolargs, **subargs) + report_result(result, parents, **subargs) + + +if __name__ == "__main__": + tool, cmd, subargs, toolargs = parse_args() + main(tool, cmd, subargs, toolargs) diff --git a/pythonFiles/testing_tools/adapter/errors.py b/pythonFiles/testing_tools/adapter/errors.py new file mode 100644 index 000000000000..3e6ae5189cb8 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/errors.py @@ -0,0 +1,16 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + + +class UnsupportedToolError(ValueError): + def __init__(self, tool): + msg = "unsupported tool {!r}".format(tool) + super(UnsupportedToolError, self).__init__(msg) + self.tool = tool + + +class UnsupportedCommandError(ValueError): + def __init__(self, cmd): + msg = "unsupported cmd {!r}".format(cmd) + super(UnsupportedCommandError, self).__init__(msg) + self.cmd = cmd diff --git a/pythonFiles/testing_tools/adapter/info.py b/pythonFiles/testing_tools/adapter/info.py new file mode 100644 index 000000000000..f99ce0b6f9a2 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/info.py @@ -0,0 +1,120 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from collections import namedtuple + + +class SingleTestPath(namedtuple("TestPath", "root relfile func sub")): + """Where to find a single test.""" + + def __new__(cls, root, relfile, func, sub=None): + self = super(SingleTestPath, cls).__new__( + cls, + str(root) if root else None, + str(relfile) if relfile else None, + str(func) if func else None, + [str(s) for s in sub] if sub else None, + ) + return self + + def __init__(self, *args, **kwargs): + if self.root is None: + raise TypeError("missing id") + if self.relfile is None: + raise TypeError("missing kind") + # self.func may be None (e.g. for doctests). + # self.sub may be None. 
+ + +class ParentInfo(namedtuple("ParentInfo", "id kind name root relpath parentid")): + + KINDS = ("folder", "file", "suite", "function", "subtest") + + def __new__(cls, id, kind, name, root=None, relpath=None, parentid=None): + self = super(ParentInfo, cls).__new__( + cls, + id=str(id) if id else None, + kind=str(kind) if kind else None, + name=str(name) if name else None, + root=str(root) if root else None, + relpath=str(relpath) if relpath else None, + parentid=str(parentid) if parentid else None, + ) + return self + + def __init__(self, *args, **kwargs): + if self.id is None: + raise TypeError("missing id") + if self.kind is None: + raise TypeError("missing kind") + if self.kind not in self.KINDS: + raise ValueError("unsupported kind {!r}".format(self.kind)) + if self.name is None: + raise TypeError("missing name") + if self.root is None: + if self.parentid is not None or self.kind != "folder": + raise TypeError("missing root") + if self.relpath is not None: + raise TypeError("unexpected relpath {}".format(self.relpath)) + elif self.parentid is None: + raise TypeError("missing parentid") + elif self.relpath is None and self.kind in ("folder", "file"): + raise TypeError("missing relpath") + + +class SingleTestInfo( + namedtuple("TestInfo", "id name path source markers parentid kind") +): + """Info for a single test.""" + + MARKERS = ("skip", "skip-if", "expected-failure") + KINDS = ("function", "doctest") + + def __new__(cls, id, name, path, source, markers, parentid, kind="function"): + self = super(SingleTestInfo, cls).__new__( + cls, + str(id) if id else None, + str(name) if name else None, + path or None, + str(source) if source else None, + [str(marker) for marker in markers or ()], + str(parentid) if parentid else None, + str(kind) if kind else None, + ) + return self + + def __init__(self, *args, **kwargs): + if self.id is None: + raise TypeError("missing id") + if self.name is None: + raise TypeError("missing name") + if self.path is None: + raise TypeError("missing path") + if self.source is None: + raise TypeError("missing source") + else: + srcfile, _, lineno = self.source.rpartition(":") + if not srcfile or not lineno or int(lineno) < 0: + raise ValueError("bad source {!r}".format(self.source)) + if self.markers: + badmarkers = [m for m in self.markers if m not in self.MARKERS] + if badmarkers: + raise ValueError("unsupported markers {!r}".format(badmarkers)) + if self.parentid is None: + raise TypeError("missing parentid") + if self.kind is None: + raise TypeError("missing kind") + elif self.kind not in self.KINDS: + raise ValueError("unsupported kind {!r}".format(self.kind)) + + @property + def root(self): + return self.path.root + + @property + def srcfile(self): + return self.source.rpartition(":")[0] + + @property + def lineno(self): + return int(self.source.rpartition(":")[-1]) diff --git a/pythonFiles/testing_tools/adapter/pytest/_cli.py b/pythonFiles/testing_tools/adapter/pytest/_cli.py new file mode 100644 index 000000000000..3d3eec09a199 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/pytest/_cli.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import + +from ..errors import UnsupportedCommandError + + +def add_subparser(cmd, name, parent): + """Add a new subparser to the given parent and add args to it.""" + parser = parent.add_parser(name) + if cmd == "discover": + # For now we don't have any tool-specific CLI options to add. 
+ pass + else: + raise UnsupportedCommandError(cmd) + return parser diff --git a/pythonFiles/testing_tools/adapter/pytest/_discovery.py b/pythonFiles/testing_tools/adapter/pytest/_discovery.py new file mode 100644 index 000000000000..51c94527302d --- /dev/null +++ b/pythonFiles/testing_tools/adapter/pytest/_discovery.py @@ -0,0 +1,109 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import, print_function + +import sys + +import pytest + +from .. import util, discovery +from ._pytest_item import parse_item + + +def discover( + pytestargs=None, + hidestdio=False, + # *, + _pytest_main=pytest.main, + _plugin=None, + **_ignored +): + """Return the results of test discovery.""" + if _plugin is None: + _plugin = TestCollector() + + pytestargs = _adjust_pytest_args(pytestargs) + # We use this helper rather than "-pno:terminal" due to possible + # platform-dependent issues. + with (util.hide_stdio() if hidestdio else util.noop_cm()) as stdio: + ec = _pytest_main(pytestargs, [_plugin]) + # See: https://docs.pytest.org/en/latest/usage.html#possible-exit-codes + if ec == 5: + # No tests were discovered. + pass + elif ec != 0: + print( + "equivalent command: {} -m pytest {}".format( + sys.executable, util.shlex_unsplit(pytestargs) + ) + ) + if hidestdio: + print(stdio.getvalue(), file=sys.stderr) + sys.stdout.flush() + raise Exception("pytest discovery failed (exit code {})".format(ec)) + if not _plugin._started: + print( + "equivalent command: {} -m pytest {}".format( + sys.executable, util.shlex_unsplit(pytestargs) + ) + ) + if hidestdio: + print(stdio.getvalue(), file=sys.stderr) + sys.stdout.flush() + raise Exception("pytest discovery did not start") + return ( + _plugin._tests.parents, + list(_plugin._tests), + ) + + +def _adjust_pytest_args(pytestargs): + """Return a corrected copy of the given pytest CLI args.""" + pytestargs = list(pytestargs) if pytestargs else [] + # Duplicate entries should be okay. + pytestargs.insert(0, "--collect-only") + # TODO: pull in code from: + # src/client/testing/pytest/services/discoveryService.ts + # src/client/testing/pytest/services/argsService.ts + return pytestargs + + +class TestCollector(object): + """This is a pytest plugin that collects the discovered tests.""" + + @classmethod + def parse_item(cls, item): + return parse_item(item) + + def __init__(self, tests=None): + if tests is None: + tests = discovery.DiscoveredTests() + self._tests = tests + self._started = False + + # Relevant plugin hooks: + # https://docs.pytest.org/en/latest/reference.html#collection-hooks + + def pytest_collection_modifyitems(self, session, config, items): + self._started = True + self._tests.reset() + for item in items: + test, parents = self.parse_item(item) + if test is not None: + self._tests.add_test(test, parents) + + # This hook is not specified in the docs, so we also provide + # the "modifyitems" hook just in case. + def pytest_collection_finish(self, session): + self._started = True + try: + items = session.items + except AttributeError: + # TODO: Is there an alternative? 
+ return + self._tests.reset() + for item in items: + test, parents = self.parse_item(item) + if test is not None: + self._tests.add_test(test, parents) diff --git a/pythonFiles/testing_tools/adapter/pytest/_pytest_item.py b/pythonFiles/testing_tools/adapter/pytest/_pytest_item.py new file mode 100644 index 000000000000..2c22db21d4de --- /dev/null +++ b/pythonFiles/testing_tools/adapter/pytest/_pytest_item.py @@ -0,0 +1,630 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +""" +During "collection", pytest finds all the tests it supports. These are +called "items". The process is top-down, mostly tracing down through +the file system. Aside from its own machinery, pytest supports hooks +that find tests. Effectively, pytest starts with a set of "collectors"; +objects that can provide a list of tests and sub-collectors. All +collectors in the resulting tree are visited and the tests aggregated. +For the most part, each test's (and collector's) parent is identified +as the collector that collected it. + +Collectors and items are collectively identified as "nodes". The pytest +API relies on collector and item objects providing specific methods and +attributes. In addition to corresponding base classes, pytest provides +a number of concrete implementations. + +The following are the known pytest node types: + + Node + Collector + FSCollector + Session (the top-level collector) + File + Module + Package + DoctestTextfile + DoctestModule + PyCollector + (Module) + (...) + Class + UnitTestCase + Instance + Item + Function + TestCaseFunction + DoctestItem + +Here are the unique attrs for those classes: + + Node + name + nodeid (readonly) + config + session + (parent) - the parent node + (fspath) - the file from which the node was collected + ---- + own_marksers - explicit markers (e.g. with @pytest.mark()) + keywords + extra_keyword_matches + + Item + location - where the actual test source code is: (relfspath, lno, fullname) + user_properties + + PyCollector + module + class + instance + obj + + Function + module + class + instance + obj + function + (callspec) + (fixturenames) + funcargs + originalname - w/o decorations, e.g. [...] for parameterized + + DoctestItem + dtest + obj + +When parsing an item, we make use of the following attributes: + +* name +* nodeid +* __class__ + + __name__ +* fspath +* location +* function + + __name__ + + __code__ + + __closure__ +* own_markers +""" + +from __future__ import absolute_import, print_function + +import sys + +import pytest +import _pytest.doctest +import _pytest.unittest + +from ..info import SingleTestInfo, SingleTestPath +from ..util import fix_fileid, PATH_SEP, NORMCASE + + +def should_never_reach_here(item, **extra): + """Indicates a code path we should never reach.""" + print("The Python extension has run into an unexpected situation") + print("while processing a pytest node during test discovery. 
Please") + print("Please open an issue at:") + print(" https://github.com/microsoft/vscode-python/issues") + print("and paste the following output there.") + print() + for field, info in _summarize_item(item): + print("{}: {}".format(field, info)) + if extra: + print() + print("extra info:") + for name, info in extra.items(): + print("{:10}".format(name + ":"), end="") + if isinstance(info, str): + print(info) + else: + try: + print(*info) + except TypeError: + print(info) + print() + print("traceback:") + import traceback + + traceback.print_stack() + + msg = "Unexpected pytest node (see printed output)." + exc = NotImplementedError(msg) + exc.item = item + return exc + + +def parse_item( + item, + # *, + _get_item_kind=(lambda *a: _get_item_kind(*a)), + _parse_node_id=(lambda *a: _parse_node_id(*a)), + _split_fspath=(lambda *a: _split_fspath(*a)), + _get_location=(lambda *a: _get_location(*a)), +): + """Return (TestInfo, [suite ID]) for the given item. + + The suite IDs, if any, are in parent order with the item's direct + parent at the beginning. The parent of the last suite ID (or of + the test if there are no suites) is the file ID, which corresponds + to TestInfo.path. + + """ + # _debug_item(item, showsummary=True) + kind, _ = _get_item_kind(item) + # Skip plugin generated tests + if kind is None: + return None, None + (nodeid, parents, fileid, testfunc, parameterized) = _parse_node_id( + item.nodeid, kind + ) + # Note: testfunc does not necessarily match item.function.__name__. + # This can result from importing a test function from another module. + + # Figure out the file. + testroot, relfile = _split_fspath(str(item.fspath), fileid, item) + location, fullname = _get_location(item, testroot, relfile) + if kind == "function": + if testfunc and fullname != testfunc + parameterized: + raise should_never_reach_here( + item, + fullname=fullname, + testfunc=testfunc, + parameterized=parameterized, + # ... + ) + elif kind == "doctest": + if testfunc and fullname != testfunc and fullname != "[doctest] " + testfunc: + raise should_never_reach_here( + item, + fullname=fullname, + testfunc=testfunc, + # ... + ) + testfunc = None + + # Sort out the parent. + if parents: + parentid, _, _ = parents[0] + else: + parentid = None + + # Sort out markers. + # See: https://docs.pytest.org/en/latest/reference.html#marks + markers = set() + for marker in getattr(item, "own_markers", []): + if marker.name == "parameterize": + # We've already covered these. + continue + elif marker.name == "skip": + markers.add("skip") + elif marker.name == "skipif": + markers.add("skip-if") + elif marker.name == "xfail": + markers.add("expected-failure") + # We can add support for other markers as we need them? + + test = SingleTestInfo( + id=nodeid, + name=item.name, + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=testfunc, + sub=[parameterized] if parameterized else None, + ), + source=location, + markers=sorted(markers) if markers else None, + parentid=parentid, + ) + if parents and parents[-1] == (".", None, "folder"): # This should always be true? + parents[-1] = (".", testroot, "folder") + return test, parents + + +def _split_fspath( + fspath, + fileid, + item, + # *, + _normcase=NORMCASE, +): + """Return (testroot, relfile) for the given fspath. + + "relfile" will match "fileid". + """ + # "fileid" comes from nodeid and is always relative to the testroot + # (with a "./" prefix). There are no guarantees about casing, so we + # normcase just be to sure. 
+ relsuffix = fileid[1:] # Drop (only) the "." prefix. + if not _normcase(fspath).endswith(_normcase(relsuffix)): + raise should_never_reach_here( + item, + fspath=fspath, + fileid=fileid, + # ... + ) + testroot = fspath[: -len(fileid) + 1] # Ignore the "./" prefix. + relfile = "." + fspath[-len(fileid) + 1 :] # Keep the pathsep. + return testroot, relfile + + +def _get_location( + item, + testroot, + relfile, + # *, + _matches_relfile=(lambda *a: _matches_relfile(*a)), + _is_legacy_wrapper=(lambda *a: _is_legacy_wrapper(*a)), + _unwrap_decorator=(lambda *a: _unwrap_decorator(*a)), + _pathsep=PATH_SEP, +): + """Return (loc str, fullname) for the given item.""" + # When it comes to normcase, we favor relfile (from item.fspath) + # over item.location in this function. + + srcfile, lineno, fullname = item.location + if _matches_relfile(srcfile, testroot, relfile): + srcfile = relfile + else: + # pytest supports discovery of tests imported from other + # modules. This is reflected by a different filename + # in item.location. + + if _is_legacy_wrapper(srcfile): + srcfile = relfile + unwrapped = _unwrap_decorator(item.function) + if unwrapped is None: + # It was an invalid legacy wrapper so we just say + # "somewhere in relfile". + lineno = None + else: + _srcfile, lineno = unwrapped + if not _matches_relfile(_srcfile, testroot, relfile): + # For legacy wrappers we really expect the wrapped + # function to be in relfile. So here we ignore any + # other file and just say "somewhere in relfile". + lineno = None + elif _matches_relfile(srcfile, testroot, relfile): + srcfile = relfile + # Otherwise we just return the info from item.location as-is. + + if not srcfile.startswith("." + _pathsep): + srcfile = "." + _pathsep + srcfile + + if lineno is None: + lineno = -1 # i.e. "unknown" + + # from pytest, line numbers are 0-based + location = "{}:{}".format(srcfile, int(lineno) + 1) + return location, fullname + + +def _matches_relfile( + srcfile, + testroot, + relfile, + # *, + _normcase=NORMCASE, + _pathsep=PATH_SEP, +): + """Return True if "srcfile" matches the given relfile.""" + testroot = _normcase(testroot) + srcfile = _normcase(srcfile) + relfile = _normcase(relfile) + if srcfile == relfile: + return True + elif srcfile == relfile[len(_pathsep) + 1 :]: + return True + elif srcfile == testroot + relfile[1:]: + return True + else: + return False + + +def _is_legacy_wrapper( + srcfile, + # *, + _pathsep=PATH_SEP, + _pyversion=sys.version_info, +): + """Return True if the test might be wrapped. + + In Python 2 unittest's decorators (e.g. unittest.skip) do not wrap + properly, so we must manually unwrap them. + """ + if _pyversion > (3,): + return False + if (_pathsep + "unittest" + _pathsep + "case.py") not in srcfile: + return False + return True + + +def _unwrap_decorator(func): + """Return (filename, lineno) for the func the given func wraps. + + If the wrapped func cannot be identified then return None. Likewise + for the wrapped filename. "lineno" is None if it cannot be found + but the filename could. 
+ """ + try: + func = func.__closure__[0].cell_contents + except (IndexError, AttributeError): + return None + else: + if not callable(func): + return None + try: + filename = func.__code__.co_filename + except AttributeError: + return None + else: + try: + lineno = func.__code__.co_firstlineno - 1 + except AttributeError: + return (filename, None) + else: + return filename, lineno + + +def _parse_node_id( + testid, + kind, + # *, + _iter_nodes=(lambda *a: _iter_nodes(*a)), +): + """Return the components of the given node ID, in heirarchical order.""" + nodes = iter(_iter_nodes(testid, kind)) + + testid, name, kind = next(nodes) + parents = [] + parameterized = None + if kind == "doctest": + parents = list(nodes) + fileid, _, _ = parents[0] + return testid, parents, fileid, name, parameterized + elif kind is None: + fullname = None + else: + if kind == "subtest": + node = next(nodes) + parents.append(node) + funcid, funcname, _ = node + parameterized = testid[len(funcid) :] + elif kind == "function": + funcname = name + else: + raise should_never_reach_here( + testid, + kind=kind, + # ... + ) + fullname = funcname + + for node in nodes: + parents.append(node) + parentid, name, kind = node + if kind == "file": + fileid = parentid + break + elif fullname is None: + # We don't guess how to interpret the node ID for these tests. + continue + elif kind == "suite": + fullname = name + "." + fullname + else: + raise should_never_reach_here( + testid, + node=node, + # ... + ) + else: + fileid = None + parents.extend(nodes) # Add the rest in as-is. + + return ( + testid, + parents, + fileid, + fullname, + parameterized or "", + ) + + +def _find_left_bracket(nodeid): + """Return tuple of part before final bracket open, separator [, and the remainder. + Notes: + Testcase names in case of parametrized tests are wrapped in []. + Examples: + dirname[sometext]/dirname/testfile.py::testset::testname[testcase] + => ('dirname[sometext]/dirname/testfile.py::testset::testname', '[', 'testcase]') + dirname/dirname/testfile.py::testset::testname[testcase] + => ('dirname/dirname/testfile.py::testset::testname', '[', 'testcase]') + dirname/dirname/testfile.py::testset::testname[testcase[x]] + => ('dirname/dirname/testfile.py::testset::testname', '[', 'testcase[x]]') + """ + if not nodeid.endswith("]"): + return nodeid, "", "" + bracketcount = 0 + for index, char in enumerate(nodeid[::-1]): + if char == "]": + bracketcount += 1 + elif char == "[": + bracketcount -= 1 + if bracketcount == 0: + n = len(nodeid) - 1 - index + return nodeid[:n], nodeid[n], nodeid[n + 1 :] + return nodeid, "", "" + + +def _iter_nodes( + testid, + kind, + # *, + _normalize_test_id=(lambda *a: _normalize_test_id(*a)), + _normcase=NORMCASE, + _pathsep=PATH_SEP, +): + """Yield (nodeid, name, kind) for the given node ID and its parents.""" + nodeid, testid = _normalize_test_id(testid, kind) + if len(nodeid) > len(testid): + testid = "." + _pathsep + testid + + if kind == "function" and nodeid.endswith("]"): + funcid, sep, parameterized = _find_left_bracket(nodeid) + if not sep: + raise should_never_reach_here( + nodeid, + # ... + ) + yield (nodeid, sep + parameterized, "subtest") + nodeid = funcid + + parentid, _, name = nodeid.rpartition("::") + if not parentid: + if kind is None: + # This assumes that plugins can generate nodes that do not + # have a parent. All the builtin nodes have one. + yield (nodeid, name, kind) + return + # We expect at least a filename and a name. + raise should_never_reach_here( + nodeid, + # ... 
+ ) + yield (nodeid, name, kind) + + # Extract the suites. + while "::" in parentid: + suiteid = parentid + parentid, _, name = parentid.rpartition("::") + yield (suiteid, name, "suite") + + # Extract the file and folders. + fileid = parentid + raw = testid[: len(fileid)] + _parentid, _, filename = _normcase(fileid).rpartition(_pathsep) + parentid = fileid[: len(_parentid)] + raw, name = raw[: len(_parentid)], raw[-len(filename) :] + yield (fileid, name, "file") + # We're guaranteed at least one (the test root). + while _pathsep in _normcase(parentid): + folderid = parentid + _parentid, _, foldername = _normcase(folderid).rpartition(_pathsep) + parentid = folderid[: len(_parentid)] + raw, name = raw[: len(parentid)], raw[-len(foldername) :] + yield (folderid, name, "folder") + # We set the actual test root later at the bottom of parse_item(). + testroot = None + yield (parentid, testroot, "folder") + + +def _normalize_test_id( + testid, + kind, + # *, + _fix_fileid=fix_fileid, + _pathsep=PATH_SEP, +): + """Return the canonical form for the given node ID.""" + while "::()::" in testid: + testid = testid.replace("::()::", "::") + if kind is None: + return testid, testid + orig = testid + + # We need to keep the testid as-is, or else pytest won't recognize + # it when we try to use it later (e.g. to run a test). The only + # exception is that we add a "./" prefix for relative paths. + # Note that pytest always uses "/" as the path separator in IDs. + fileid, sep, remainder = testid.partition("::") + fileid = _fix_fileid(fileid) + if not fileid.startswith("./"): # Absolute "paths" not expected. + raise should_never_reach_here( + testid, + fileid=fileid, + # ... + ) + testid = fileid + sep + remainder + + return testid, orig + + +def _get_item_kind(item): + """Return (kind, isunittest) for the given item.""" + if isinstance(item, _pytest.doctest.DoctestItem): + return "doctest", False + elif isinstance(item, _pytest.unittest.TestCaseFunction): + return "function", True + elif isinstance(item, pytest.Function): + # We *could* be more specific, e.g. "method", "subtest". + return "function", False + else: + return None, False + + +############################# +# useful for debugging + +_FIELDS = [ + "nodeid", + "kind", + "class", + "name", + "fspath", + "location", + "function", + "markers", + "user_properties", + "attrnames", +] + + +def _summarize_item(item): + if not hasattr(item, "nodeid"): + yield "nodeid", item + return + + for field in _FIELDS: + try: + if field == "kind": + yield field, _get_item_kind(item) + elif field == "class": + yield field, item.__class__.__name__ + elif field == "markers": + yield field, item.own_markers + # yield field, list(item.iter_markers()) + elif field == "attrnames": + yield field, dir(item) + else: + yield field, getattr(item, field, "") + except Exception as exc: + yield field, "".format(exc) + + +def _debug_item(item, showsummary=False): + item._debugging = True + try: + summary = dict(_summarize_item(item)) + finally: + item._debugging = False + + if showsummary: + print(item.nodeid) + for key in ( + "kind", + "class", + "name", + "fspath", + "location", + "func", + "markers", + "props", + ): + print(" {:12} {}".format(key, summary[key])) + print() + + return summary diff --git a/pythonFiles/testing_tools/adapter/util.py b/pythonFiles/testing_tools/adapter/util.py new file mode 100644 index 000000000000..77778c5b6126 --- /dev/null +++ b/pythonFiles/testing_tools/adapter/util.py @@ -0,0 +1,287 @@ +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. + +import contextlib +import io + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO # 2.7 +import os +import os.path +import sys +import tempfile + + +@contextlib.contextmanager +def noop_cm(): + yield + + +def group_attr_names(attrnames): + grouped = { + "dunder": [], + "private": [], + "constants": [], + "classes": [], + "vars": [], + "other": [], + } + for name in attrnames: + if name.startswith("__") and name.endswith("__"): + group = "dunder" + elif name.startswith("_"): + group = "private" + elif name.isupper(): + group = "constants" + elif name.islower(): + group = "vars" + elif name == name.capitalize(): + group = "classes" + else: + group = "other" + grouped[group].append(name) + return grouped + + +if sys.version_info < (3,): + _str_to_lower = lambda val: val.decode().lower() +else: + _str_to_lower = str.lower + + +############################# +# file paths + +_os_path = os.path +# Uncomment to test Windows behavior on non-windows OS: +# import ntpath as _os_path +PATH_SEP = _os_path.sep +NORMCASE = _os_path.normcase +DIRNAME = _os_path.dirname +BASENAME = _os_path.basename +IS_ABS_PATH = _os_path.isabs +PATH_JOIN = _os_path.join + + +def fix_path( + path, + # *, + _pathsep=PATH_SEP, +): + """Return a platform-appropriate path for the given path.""" + if not path: + return "." + return path.replace("/", _pathsep) + + +def fix_relpath( + path, + # *, + _fix_path=fix_path, + _path_isabs=IS_ABS_PATH, + _pathsep=PATH_SEP, +): + """Return a ./-prefixed, platform-appropriate path for the given path.""" + path = _fix_path(path) + if path in (".", ".."): + return path + if not _path_isabs(path): + if not path.startswith("." + _pathsep): + path = "." + _pathsep + path + return path + + +def _resolve_relpath( + path, + rootdir=None, + # *, + _path_isabs=IS_ABS_PATH, + _normcase=NORMCASE, + _pathsep=PATH_SEP, +): + # "path" is expected to use "/" for its path separator, regardless + # of the provided "_pathsep". + + if path.startswith("./"): + return path[2:] + if not _path_isabs(path): + return path + + # Deal with root-dir-as-fileid. + _, sep, relpath = path.partition("/") + if sep and not relpath.replace("/", ""): + return "" + + if rootdir is None: + return None + rootdir = _normcase(rootdir) + if not rootdir.endswith(_pathsep): + rootdir += _pathsep + + if not _normcase(path).startswith(rootdir): + return None + return path[len(rootdir) :] + + +def fix_fileid( + fileid, + rootdir=None, + # *, + normalize=False, + strictpathsep=None, + _pathsep=PATH_SEP, + **kwargs +): + """Return a pathsep-separated file ID ("./"-prefixed) for the given value. + + The file ID may be absolute. If so and "rootdir" is + provided then make the file ID relative. If absolute but "rootdir" + is not provided then leave it absolute. + """ + if not fileid or fileid == ".": + return fileid + + # We default to "/" (forward slash) as the final path sep, since + # that gives us a consistent, cross-platform result. (Windows does + # actually support "/" as a path separator.) Most notably, node IDs + # from pytest use "/" as the path separator by default. + _fileid = fileid.replace(_pathsep, "/") + + relpath = _resolve_relpath( + _fileid, + rootdir, + _pathsep=_pathsep, + # ... + **kwargs + ) + if relpath: # Note that we treat "" here as an absolute path. 
+ _fileid = "./" + relpath + + if normalize: + if strictpathsep: + raise ValueError("cannot normalize *and* keep strict path separator") + _fileid = _str_to_lower(_fileid) + elif strictpathsep: + # We do not use _normcase since we want to preserve capitalization. + _fileid = _fileid.replace("/", _pathsep) + return _fileid + + +############################# +# stdio + + +@contextlib.contextmanager +def _replace_fd(file, target): + """ + Temporarily replace the file descriptor for `file`, + for which sys.stdout or sys.stderr is passed. + """ + try: + fd = file.fileno() + except (AttributeError, io.UnsupportedOperation): + # `file` does not have fileno() so it's been replaced from the + # default sys.stdout, etc. Return with noop. + yield + return + target_fd = target.fileno() + + # Keep the original FD to be restored in the finally clause. + dup_fd = os.dup(fd) + try: + # Point the FD at the target. + os.dup2(target_fd, fd) + try: + yield + finally: + # Point the FD back at the original. + os.dup2(dup_fd, fd) + finally: + os.close(dup_fd) + + +@contextlib.contextmanager +def _replace_stdout(target): + orig = sys.stdout + sys.stdout = target + try: + yield orig + finally: + sys.stdout = orig + + +@contextlib.contextmanager +def _replace_stderr(target): + orig = sys.stderr + sys.stderr = target + try: + yield orig + finally: + sys.stderr = orig + + +if sys.version_info < (3,): + _coerce_unicode = lambda s: unicode(s) +else: + _coerce_unicode = lambda s: s + + +@contextlib.contextmanager +def _temp_io(): + sio = StringIO() + with tempfile.TemporaryFile("r+") as tmp: + try: + yield sio, tmp + finally: + tmp.seek(0) + buff = tmp.read() + sio.write(_coerce_unicode(buff)) + + +@contextlib.contextmanager +def hide_stdio(): + """Swallow stdout and stderr.""" + with _temp_io() as (sio, fileobj): + with _replace_fd(sys.stdout, fileobj): + with _replace_stdout(fileobj): + with _replace_fd(sys.stderr, fileobj): + with _replace_stderr(fileobj): + yield sio + + +############################# +# shell + + +def shlex_unsplit(argv): + """Return the shell-safe string for the given arguments. + + This effectively the equivalent of reversing shlex.split(). + """ + argv = [_quote_arg(a) for a in argv] + return " ".join(argv) + + +try: + from shlex import quote as _quote_arg +except ImportError: + + def _quote_arg(arg): + parts = None + for i, c in enumerate(arg): + if c.isspace(): + pass + elif c == '"': + pass + elif c == "'": + c = "'\"'\"'" + else: + continue + if parts is None: + parts = list(arg) + parts[i] = c + if parts is not None: + arg = "'" + "".join(parts) + "'" + return arg diff --git a/pythonFiles/tests/pytestadapter/.data/error_parametrize_discovery.py b/pythonFiles/tests/pytestadapter/.data/error_parametrize_discovery.py new file mode 100644 index 000000000000..8e48224edf3b --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/error_parametrize_discovery.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import pytest + + +# This test has an error which will appear on pytest discovery. +# This error is intentional and is meant to test pytest discovery error handling. 
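+# The decorator below declares "actual,expected" parameters, but test_function()
+# accepts no arguments, so pytest reports a collection error for this file.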
+@pytest.mark.parametrize("actual,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)]) +def test_function(): + assert True diff --git a/pythonFiles/tests/pytestadapter/.data/error_pytest_import.txt b/pythonFiles/tests/pytestadapter/.data/error_pytest_import.txt new file mode 100644 index 000000000000..7d65dee2ccc6 --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/error_pytest_import.txt @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +@pytest.mark.parametrize("num", range(1, 89)) +def test_odd_even(num): + assert True diff --git a/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py b/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py new file mode 100644 index 000000000000..2506089abe07 --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/error_raise_exception.py @@ -0,0 +1,14 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import pytest + + +@pytest.fixture +def raise_fixture(): + raise Exception("Dummy exception") + + +class TestSomething: + def test_a(self, raise_fixture): + assert True diff --git a/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py b/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py new file mode 100644 index 000000000000..c4dbadc32d6e --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/parametrize_tests.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import pytest + + +# Testing pytest with parametrized tests. The first two pass, the third fails. +# The tests ids are parametrize_tests.py::test_adding[3+5-8] and so on. +@pytest.mark.parametrize( # test_marker--test_adding + "actual, expected", [("3+5", 8), ("2+4", 6), ("6+9", 16)] +) +def test_adding(actual, expected): + assert eval(actual) == expected + + +# Testing pytest with parametrized tests. All three pass. +# The tests ids are parametrize_tests.py::test_under_ten[1] and so on. +@pytest.mark.parametrize( # test_marker--test_string + "string", ["hello", "complicated split [] ()"] +) +def test_string(string): + assert string == "hello" diff --git a/pythonFiles/tests/pytestadapter/.data/test_multi_class_nest.py b/pythonFiles/tests/pytestadapter/.data/test_multi_class_nest.py new file mode 100644 index 000000000000..209f9d51915b --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/test_multi_class_nest.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
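+# Nested test classes used to exercise discovery and execution across multiple
+# levels of class nesting; test_first and test_second fail intentionally.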
+ + +class TestFirstClass: + class TestSecondClass: + def test_second(self): # test_marker--test_second + assert 1 == 2 + + def test_first(self): # test_marker--test_first + assert 1 == 2 + + class TestSecondClass2: + def test_second2(self): # test_marker--test_second2 + assert 1 == 1 + + +def test_independent(): # test_marker--test_independent + assert 1 == 1 diff --git a/pythonFiles/tests/pytestadapter/.data/text_docstring.txt b/pythonFiles/tests/pytestadapter/.data/text_docstring.txt new file mode 100644 index 000000000000..b29132c10b57 --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/text_docstring.txt @@ -0,0 +1,4 @@ +This is a doctest test which passes #test_marker--text_docstring.txt +>>> x = 3 +>>> x +3 diff --git a/pythonFiles/tests/pytestadapter/expected_execution_test_output.py b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py new file mode 100644 index 000000000000..44f3d3d0abce --- /dev/null +++ b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py @@ -0,0 +1,686 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +from .helpers import TEST_DATA_PATH, get_absolute_test_id + +TEST_SUBTRACT_FUNCTION = "unittest_folder/test_subtract.py::TestSubtractFunction::" +TEST_ADD_FUNCTION = "unittest_folder/test_add.py::TestAddFunction::" +SUCCESS = "success" +FAILURE = "failure" + +# This is the expected output for the unittest_folder execute tests +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ ├── test_add_negative_numbers: success +# │ └── test_add_positive_numbers: success +# └── test_subtract.py +# └── TestSubtractFunction +# ├── test_subtract_negative_numbers: failure +# └── test_subtract_positive_numbers: success +test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" +test_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py" +uf_execution_expected_output = { + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_negative_numbers", + test_subtract_path, + ): { + "test": get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_negative_numbers", + test_subtract_path, + ), + "outcome": FAILURE, + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", + test_subtract_path, + ): { + "test": get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", + test_subtract_path, + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + + +# This is the expected output for the unittest_folder only execute add.py tests +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ ├── test_add_negative_numbers: success +# │ └── test_add_positive_numbers: success +test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" + +uf_single_file_expected_output = { + get_absolute_test_id( + 
f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_negative_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + + +# This is the expected output for the unittest_folder execute only signle method +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ └── test_add_positive_numbers: success +uf_single_method_execution_expected_output = { + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the unittest_folder tests run where two tests +# run are in different files. +# └── unittest_folder +# ├── test_add.py +# │ └── TestAddFunction +# │ └── test_add_positive_numbers: success +# └── test_subtract.py +# └── TestSubtractFunction +# └── test_subtract_positive_numbers: success +test_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py" +test_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" + +uf_non_adjacent_tests_execution_expected_output = { + get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", test_subtract_path + ): { + "test": get_absolute_test_id( + f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers", + test_subtract_path, + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ): { + "test": get_absolute_test_id( + f"{TEST_ADD_FUNCTION}test_add_positive_numbers", test_add_path + ), + "outcome": SUCCESS, + "message": None, + "traceback": None, + "subtest": None, + }, +} + + +# This is the expected output for the simple_pytest.py file. +# └── simple_pytest.py +# └── test_function: success +simple_pytest_path = TEST_DATA_PATH / "unittest_folder" / "simple_pytest.py" + +simple_execution_pytest_expected_output = { + get_absolute_test_id("test_function", simple_pytest_path): { + "test": get_absolute_test_id("test_function", simple_pytest_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} + + +# This is the expected output for the unittest_pytest_same_file.py file. 
+# ├── unittest_pytest_same_file.py +# ├── TestExample +# │ └── test_true_unittest: success +# └── test_true_pytest: success +unit_pytest_same_file_path = TEST_DATA_PATH / "unittest_pytest_same_file.py" +unit_pytest_same_file_execution_expected_output = { + get_absolute_test_id( + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + unit_pytest_same_file_path, + ): { + "test": get_absolute_test_id( + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + unit_pytest_same_file_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "unittest_pytest_same_file.py::test_true_pytest", unit_pytest_same_file_path + ): { + "test": get_absolute_test_id( + "unittest_pytest_same_file.py::test_true_pytest", + unit_pytest_same_file_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the error_raised_exception.py file. +# └── error_raise_exception.py +# ├── TestSomething +# │ └── test_a: failure +error_raised_exception_path = TEST_DATA_PATH / "error_raise_exception.py" +error_raised_exception_execution_expected_output = { + get_absolute_test_id( + "error_raise_exception.py::TestSomething::test_a", error_raised_exception_path + ): { + "test": get_absolute_test_id( + "error_raise_exception.py::TestSomething::test_a", + error_raised_exception_path, + ), + "outcome": "error", + "message": "ERROR MESSAGE", + "traceback": "TRACEBACK", + "subtest": None, + } +} + +# This is the expected output for the skip_tests.py file. +# └── test_something: success +# └── test_another_thing: skipped +# └── test_decorator_thing: skipped +# └── test_decorator_thing_2: skipped +# ├── TestClass +# │ └── test_class_function_a: skipped +# │ └── test_class_function_b: skipped + +skip_tests_path = TEST_DATA_PATH / "skip_tests.py" +skip_tests_execution_expected_output = { + get_absolute_test_id("skip_tests.py::test_something", skip_tests_path): { + "test": get_absolute_test_id("skip_tests.py::test_something", skip_tests_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("skip_tests.py::test_another_thing", skip_tests_path): { + "test": get_absolute_test_id( + "skip_tests.py::test_another_thing", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("skip_tests.py::test_decorator_thing", skip_tests_path): { + "test": get_absolute_test_id( + "skip_tests.py::test_decorator_thing", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("skip_tests.py::test_decorator_thing_2", skip_tests_path): { + "test": get_absolute_test_id( + "skip_tests.py::test_decorator_thing_2", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_a", skip_tests_path + ): { + "test": get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_a", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_b", skip_tests_path + ): { + "test": get_absolute_test_id( + "skip_tests.py::TestClass::test_class_function_b", skip_tests_path + ), + "outcome": "skipped", + "message": None, + "traceback": None, + 
"subtest": None, + }, +} + + +# This is the expected output for the dual_level_nested_folder.py tests +# └── dual_level_nested_folder +# └── test_top_folder.py +# └── test_top_function_t: success +# └── test_top_function_f: failure +# └── nested_folder_one +# └── test_bottom_folder.py +# └── test_bottom_function_t: success +# └── test_bottom_function_f: failure +dual_level_nested_folder_top_path = ( + TEST_DATA_PATH / "dual_level_nested_folder" / "test_top_folder.py" +) +dual_level_nested_folder_bottom_path = ( + TEST_DATA_PATH + / "dual_level_nested_folder" + / "nested_folder_one" + / "test_bottom_folder.py" +) +dual_level_nested_folder_execution_expected_output = { + get_absolute_test_id( + "test_top_folder.py::test_top_function_t", dual_level_nested_folder_top_path + ): { + "test": get_absolute_test_id( + "test_top_folder.py::test_top_function_t", dual_level_nested_folder_top_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_top_folder.py::test_top_function_f", dual_level_nested_folder_top_path + ): { + "test": get_absolute_test_id( + "test_top_folder.py::test_top_function_f", dual_level_nested_folder_top_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + dual_level_nested_folder_bottom_path, + ): { + "test": get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + dual_level_nested_folder_bottom_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + dual_level_nested_folder_bottom_path, + ): { + "test": get_absolute_test_id( + "nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + dual_level_nested_folder_bottom_path, + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the nested_folder tests. +# └── folder_a +# └── folder_b +# └── folder_a +# └── test_nest.py +# └── test_function: success + +nested_folder_path = ( + TEST_DATA_PATH / "folder_a" / "folder_b" / "folder_a" / "test_nest.py" +) +double_nested_folder_expected_execution_output = { + get_absolute_test_id( + "folder_a/folder_b/folder_a/test_nest.py::test_function", nested_folder_path + ): { + "test": get_absolute_test_id( + "folder_a/folder_b/folder_a/test_nest.py::test_function", nested_folder_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} +# This is the expected output for the nested_folder tests. 
+# └── parametrize_tests.py +# └── test_adding[3+5-8]: success +# └── test_adding[2+4-6]: success +# └── test_adding[6+9-16]: failure +parametrize_tests_path = TEST_DATA_PATH / "parametrize_tests.py" + +parametrize_tests_expected_execution_output = { + get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "parametrize_tests.py::test_adding[2+4-6]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[2+4-6]", parametrize_tests_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "parametrize_tests.py::test_adding[6+9-16]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[6+9-16]", parametrize_tests_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the single parameterized tests. +# └── parametrize_tests.py +# └── test_adding[3+5-8]: success +single_parametrize_tests_expected_execution_output = { + get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ): { + "test": get_absolute_test_id( + "parametrize_tests.py::test_adding[3+5-8]", parametrize_tests_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the single parameterized tests. +# └── text_docstring.txt +# └── text_docstring: success +doc_test_path = TEST_DATA_PATH / "text_docstring.txt" +doctest_pytest_expected_execution_output = { + get_absolute_test_id("text_docstring.txt::text_docstring.txt", doc_test_path): { + "test": get_absolute_test_id( + "text_docstring.txt::text_docstring.txt", doc_test_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} + +# Will run all tests in the cwd that fit the test file naming pattern. 
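+# The expected results below therefore span folder_a, dual_level_nested_folder,
+# and unittest_folder -- everything collected when no test IDs are passed.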
+folder_a_path = TEST_DATA_PATH / "folder_a" / "folder_b" / "folder_a" / "test_nest.py" +dual_level_nested_folder_top_path = ( + TEST_DATA_PATH / "dual_level_nested_folder" / "test_top_folder.py" +) +dual_level_nested_folder_bottom_path = ( + TEST_DATA_PATH + / "dual_level_nested_folder" + / "nested_folder_one" + / "test_bottom_folder.py" +) +unittest_folder_add_path = TEST_DATA_PATH / "unittest_folder" / "test_add.py" +unittest_folder_subtract_path = TEST_DATA_PATH / "unittest_folder" / "test_subtract.py" + +no_test_ids_pytest_execution_expected_output = { + get_absolute_test_id("test_function", folder_a_path): { + "test": get_absolute_test_id("test_function", folder_a_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("test_top_function_t", dual_level_nested_folder_top_path): { + "test": get_absolute_test_id( + "test_top_function_t", dual_level_nested_folder_top_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("test_top_function_f", dual_level_nested_folder_top_path): { + "test": get_absolute_test_id( + "test_top_function_f", dual_level_nested_folder_top_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_bottom_function_t", dual_level_nested_folder_bottom_path + ): { + "test": get_absolute_test_id( + "test_bottom_function_t", dual_level_nested_folder_bottom_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_bottom_function_f", dual_level_nested_folder_bottom_path + ): { + "test": get_absolute_test_id( + "test_bottom_function_f", dual_level_nested_folder_bottom_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestAddFunction::test_add_negative_numbers", unittest_folder_add_path + ): { + "test": get_absolute_test_id( + "TestAddFunction::test_add_negative_numbers", unittest_folder_add_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestAddFunction::test_add_positive_numbers", unittest_folder_add_path + ): { + "test": get_absolute_test_id( + "TestAddFunction::test_add_positive_numbers", unittest_folder_add_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestSubtractFunction::test_subtract_negative_numbers", + unittest_folder_subtract_path, + ): { + "test": get_absolute_test_id( + "TestSubtractFunction::test_subtract_negative_numbers", + unittest_folder_subtract_path, + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "TestSubtractFunction::test_subtract_positive_numbers", + unittest_folder_subtract_path, + ): { + "test": get_absolute_test_id( + "TestSubtractFunction::test_subtract_positive_numbers", + unittest_folder_subtract_path, + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the root folder with the config file referenced. 
+# └── test_a.py +# └── test_a_function: success +test_add_path = TEST_DATA_PATH / "root" / "tests" / "test_a.py" +config_file_pytest_expected_execution_output = { + get_absolute_test_id("tests/test_a.py::test_a_function", test_add_path): { + "test": get_absolute_test_id("tests/test_a.py::test_a_function", test_add_path), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + } +} + + +# This is the expected output for the test logging file. +# └── test_logging.py +# └── test_logging2: failure +# └── test_logging: success +test_logging_path = TEST_DATA_PATH / "test_logging.py" + +logging_test_expected_execution_output = { + get_absolute_test_id("test_logging.py::test_logging2", test_logging_path): { + "test": get_absolute_test_id( + "test_logging.py::test_logging2", test_logging_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("test_logging.py::test_logging", test_logging_path): { + "test": get_absolute_test_id( + "test_logging.py::test_logging", test_logging_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the test safe clear env vars file. +# └── test_env_vars.py +# └── test_clear_env: success +# └── test_check_env: success + +test_safe_clear_env_vars_path = TEST_DATA_PATH / "test_env_vars.py" +safe_clear_env_vars_expected_execution_output = { + get_absolute_test_id( + "test_env_vars.py::test_clear_env", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_clear_env", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_env_vars.py::test_check_env", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_check_env", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} + +# This is the expected output for the test unsafe clear env vars file. +# └── test_env_vars.py +# └── test_clear_env_unsafe: success +# └── test_check_env_unsafe: success +unsafe_clear_env_vars_expected_execution_output = { + get_absolute_test_id( + "test_env_vars.py::test_clear_env_unsafe", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_clear_env_unsafe", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, + get_absolute_test_id( + "test_env_vars.py::test_check_env_unsafe", test_safe_clear_env_vars_path + ): { + "test": get_absolute_test_id( + "test_env_vars.py::test_check_env_unsafe", test_safe_clear_env_vars_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py new file mode 100644 index 000000000000..dd32b61fa262 --- /dev/null +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -0,0 +1,278 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
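+# End-to-end tests for the pytest execution adapter: each test invokes the
+# runner helpers against the .data files and compares the returned JSON
+# payloads with the dictionaries in expected_execution_test_output.
+# A successful run is expected to yield a list of payloads shaped roughly like
+#     {"status": "success", "cwd": "<test data dir>", "result": {...}}
+# followed by a final {"eot": True} marker (see the pop(-1) assertions below).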
+import os +import shutil +from typing import Any, Dict, List + +import pytest + +from tests.pytestadapter import expected_execution_test_output + +from .helpers import TEST_DATA_PATH, runner, runner_with_cwd + + +def test_config_file(): + """Test pytest execution when a config file is specified.""" + args = [ + "-c", + "tests/pytest.ini", + str(TEST_DATA_PATH / "root" / "tests" / "test_a.py::test_a_function"), + ] + new_cwd = TEST_DATA_PATH / "root" + actual = runner_with_cwd(args, new_cwd) + expected_const = ( + expected_execution_test_output.config_file_pytest_expected_execution_output + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(new_cwd) + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const + + +def test_rootdir_specified(): + """Test pytest execution when a --rootdir is specified.""" + rd = f"--rootdir={TEST_DATA_PATH / 'root' / 'tests'}" + args = [rd, "tests/test_a.py::test_a_function"] + new_cwd = TEST_DATA_PATH / "root" + actual = runner_with_cwd(args, new_cwd) + expected_const = ( + expected_execution_test_output.config_file_pytest_expected_execution_output + ) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(new_cwd) + actual_result_dict.update(actual_item["result"]) + assert actual_result_dict == expected_const + + +def test_syntax_error_execution(tmp_path): + """Test pytest execution on a file that has a syntax error. + + Copies the contents of a .txt file to a .py file in the temporary directory + to then run pytest execution on. + + The json should still be returned but the errors list should be present. + + Keyword arguments: + tmp_path -- pytest fixture that creates a temporary directory. + """ + # Saving some files as .txt to avoid that file displaying a syntax error for + # the extension as a whole. Instead, rename it before running this test + # in order to test the error handling. 
+ file_path = TEST_DATA_PATH / "error_syntax_discovery.txt" + temp_dir = tmp_path / "temp_data" + temp_dir.mkdir() + p = temp_dir / "error_syntax_discovery.py" + shutil.copyfile(file_path, p) + actual = runner(["error_syntax_discover.py::test_function"]) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 1 + else: + assert False + + +def test_bad_id_error_execution(): + """Test pytest discovery with a non-existent test_id. + + The json should still be returned but the errors list should be present. + """ + actual = runner(["not/a/real::test_id"]) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "error") + ) + assert actual_item.get("status") == "error" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + error_content = actual_item.get("error") + if error_content is not None and isinstance( + error_content, (list, tuple, str) + ): # You can add other types if needed + assert len(error_content) == 1 + else: + assert False + + +@pytest.mark.parametrize( + "test_ids, expected_const", + [ + ( + [ + "test_env_vars.py::test_clear_env", + "test_env_vars.py::test_check_env", + ], + expected_execution_test_output.safe_clear_env_vars_expected_execution_output, + ), + ( + [ + "skip_tests.py::test_something", + "skip_tests.py::test_another_thing", + "skip_tests.py::test_decorator_thing", + "skip_tests.py::test_decorator_thing_2", + "skip_tests.py::TestClass::test_class_function_a", + "skip_tests.py::TestClass::test_class_function_b", + ], + expected_execution_test_output.skip_tests_execution_expected_output, + ), + ( + ["error_raise_exception.py::TestSomething::test_a"], + expected_execution_test_output.error_raised_exception_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers", + ], + expected_execution_test_output.uf_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers", + ], + expected_execution_test_output.uf_single_file_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + ], + expected_execution_test_output.uf_single_method_execution_expected_output, + ), + ( + [ + "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers", + "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers", + ], + expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output, + ), + ( + [ + "unittest_pytest_same_file.py::TestExample::test_true_unittest", + 
"unittest_pytest_same_file.py::test_true_pytest", + ], + expected_execution_test_output.unit_pytest_same_file_execution_expected_output, + ), + ( + [ + "dual_level_nested_folder/test_top_folder.py::test_top_function_t", + "dual_level_nested_folder/test_top_folder.py::test_top_function_f", + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t", + "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f", + ], + expected_execution_test_output.dual_level_nested_folder_execution_expected_output, + ), + ( + ["folder_a/folder_b/folder_a/test_nest.py::test_function"], + expected_execution_test_output.double_nested_folder_expected_execution_output, + ), + ( + [ + "parametrize_tests.py::test_adding[3+5-8]", + "parametrize_tests.py::test_adding[2+4-6]", + "parametrize_tests.py::test_adding[6+9-16]", + ], + expected_execution_test_output.parametrize_tests_expected_execution_output, + ), + ( + [ + "parametrize_tests.py::test_adding[3+5-8]", + ], + expected_execution_test_output.single_parametrize_tests_expected_execution_output, + ), + ( + [ + "text_docstring.txt::text_docstring.txt", + ], + expected_execution_test_output.doctest_pytest_expected_execution_output, + ), + ( + ["test_logging.py::test_logging2", "test_logging.py::test_logging"], + expected_execution_test_output.logging_test_expected_execution_output, + ), + ], +) +def test_pytest_execution(test_ids, expected_const): + """ + Test that pytest discovery works as expected where run pytest is always successful + but the actual test results are both successes and failures.: + 1: skip_tests_execution_expected_output: test run on a file with skipped tests. + 2. error_raised_exception_execution_expected_output: test run on a file that raises an exception. + 3. uf_execution_expected_output: unittest tests run on multiple files. + 4. uf_single_file_expected_output: test run on a single file. + 5. uf_single_method_execution_expected_output: test run on a single method in a file. + 6. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer. + 7. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests. + 8. dual_level_nested_folder_execution_expected_output: test run on a file with one test file + at the top level and one test file in a nested folder. + 9. double_nested_folder_expected_execution_output: test run on a double nested folder. + 10. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs. + 11. single_parametrize_tests_expected_execution_output: test run on single parametrize test. + 12. doctest_pytest_expected_execution_output: test run on doctest file. + 13. logging_test_expected_execution_output: test run on a file with logging. + + + Keyword arguments: + test_ids -- an array of test_ids to run. + expected_const -- a dictionary of the expected output from running pytest discovery on the files. 
+ """ + args = test_ids + actual = runner(args) + assert actual + actual_list: List[Dict[str, Any]] = actual + assert actual_list.pop(-1).get("eot") + assert len(actual_list) == len(expected_const) + actual_result_dict = dict() + if actual_list is not None: + for actual_item in actual_list: + assert all( + item in actual_item.keys() for item in ("status", "cwd", "result") + ) + assert actual_item.get("status") == "success" + assert actual_item.get("cwd") == os.fspath(TEST_DATA_PATH) + actual_result_dict.update(actual_item["result"]) + for key in actual_result_dict: + if ( + actual_result_dict[key]["outcome"] == "failure" + or actual_result_dict[key]["outcome"] == "error" + ): + actual_result_dict[key]["message"] = "ERROR MESSAGE" + if actual_result_dict[key]["traceback"] is not None: + actual_result_dict[key]["traceback"] = "TRACEBACK" + assert actual_result_dict == expected_const diff --git a/pythonFiles/tests/testing_tools/__init__.py b/pythonFiles/tests/testing_tools/__init__.py new file mode 100644 index 000000000000..5b7f7a925cc0 --- /dev/null +++ b/pythonFiles/tests/testing_tools/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/pythonFiles/tests/testing_tools/adapter/__init__.py b/pythonFiles/tests/testing_tools/adapter/__init__.py new file mode 100644 index 000000000000..5b7f7a925cc0 --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/pythonFiles/tests/testing_tools/adapter/pytest/__init__.py b/pythonFiles/tests/testing_tools/adapter/pytest/__init__.py new file mode 100644 index 000000000000..5b7f7a925cc0 --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/pytest/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. diff --git a/pythonFiles/tests/testing_tools/adapter/pytest/test_discovery.py b/pythonFiles/tests/testing_tools/adapter/pytest/test_discovery.py new file mode 100644 index 000000000000..8ef4305f40b9 --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/pytest/test_discovery.py @@ -0,0 +1,1553 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import print_function, unicode_literals + +try: + from io import StringIO +except ImportError: + from StringIO import StringIO # type: ignore (for Pylance) +import os +import sys +import tempfile +import unittest + +import pytest +import _pytest.doctest + +from .... import util +from testing_tools.adapter import util as adapter_util +from testing_tools.adapter.pytest import _pytest_item as pytest_item +from testing_tools.adapter import info +from testing_tools.adapter.pytest import _discovery + +# In Python 3.8 __len__ is called twice, which impacts some of the test assertions we do below. 
+PYTHON_38_OR_LATER = sys.version_info[0] >= 3 and sys.version_info[1] >= 8 + + +class StubPyTest(util.StubProxy): + def __init__(self, stub=None): + super(StubPyTest, self).__init__(stub, "pytest") + self.return_main = 0 + + def main(self, args, plugins): + self.add_call("main", None, {"args": args, "plugins": plugins}) + return self.return_main + + +class StubPlugin(util.StubProxy): + + _started = True + + def __init__(self, stub=None, tests=None): + super(StubPlugin, self).__init__(stub, "plugin") + if tests is None: + tests = StubDiscoveredTests(self.stub) + self._tests = tests + + def __getattr__(self, name): + if not name.startswith("pytest_"): + raise AttributeError(name) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or None) + + return func + + +class StubDiscoveredTests(util.StubProxy): + + NOT_FOUND = object() + + def __init__(self, stub=None): + super(StubDiscoveredTests, self).__init__(stub, "discovered") + self.return_items = [] + self.return_parents = [] + + def __len__(self): + self.add_call("__len__", None, None) + return len(self.return_items) + + def __getitem__(self, index): + self.add_call("__getitem__", (index,), None) + return self.return_items[index] + + @property + def parents(self): + self.add_call("parents", None, None) + return self.return_parents + + def reset(self): + self.add_call("reset", None, None) + + def add_test(self, test, parents): + self.add_call("add_test", None, {"test": test, "parents": parents}) + + +class FakeFunc(object): + def __init__(self, name): + self.__name__ = name + + +class FakeMarker(object): + def __init__(self, name): + self.name = name + + +class StubPytestItem(util.StubProxy): + + _debugging = False + _hasfunc = True + + def __init__(self, stub=None, **attrs): + super(StubPytestItem, self).__init__(stub, "pytest.Item") + if attrs.get("function") is None: + attrs.pop("function", None) + self._hasfunc = False + + attrs.setdefault("user_properties", []) + + slots = getattr(type(self), "__slots__", None) + if slots: + for name, value in attrs.items(): + if name in self.__slots__: + setattr(self, name, value) + else: + self.__dict__[name] = value + else: + self.__dict__.update(attrs) + + if "own_markers" not in attrs: + self.own_markers = () + + def __repr__(self): + return object.__repr__(self) + + def __getattr__(self, name): + if not self._debugging: + self.add_call(name + " (attr)", None, None) + if name == "function": + if not self._hasfunc: + raise AttributeError(name) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or None) + + return func + + +class StubSubtypedItem(StubPytestItem): + @classmethod + def from_args(cls, *args, **kwargs): + if not hasattr(cls, "from_parent"): + return cls(*args, **kwargs) + self = cls.from_parent(None, name=kwargs["name"], runner=None, dtest=None) + self.__init__(*args, **kwargs) + return self + + def __init__(self, *args, **kwargs): + super(StubSubtypedItem, self).__init__(*args, **kwargs) + if "nodeid" in self.__dict__: + self._nodeid = self.__dict__.pop("nodeid") + + @property + def location(self): + return self.__dict__.get("location") + + +class StubFunctionItem(StubSubtypedItem, pytest.Function): + @property + def function(self): + return self.__dict__.get("function") + + +def create_stub_function_item(*args, **kwargs): + return StubFunctionItem.from_args(*args, **kwargs) + + +class StubDoctestItem(StubSubtypedItem, _pytest.doctest.DoctestItem): + pass + + +def create_stub_doctest_item(*args, **kwargs): + return 
StubDoctestItem.from_args(*args, **kwargs) + + +class StubPytestSession(util.StubProxy): + def __init__(self, stub=None): + super(StubPytestSession, self).__init__(stub, "pytest.Session") + + def __getattr__(self, name): + self.add_call(name + " (attr)", None, None) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or None) + + return func + + +class StubPytestConfig(util.StubProxy): + def __init__(self, stub=None): + super(StubPytestConfig, self).__init__(stub, "pytest.Config") + + def __getattr__(self, name): + self.add_call(name + " (attr)", None, None) + + def func(*args, **kwargs): + self.add_call(name, args or None, kwargs or None) + + return func + + +def generate_parse_item(pathsep): + if pathsep == "\\": + + def normcase(path): + path = path.lower() + return path.replace("/", "\\") + + else: + raise NotImplementedError + ########## + def _fix_fileid(*args): + return adapter_util.fix_fileid( + *args, + **dict( + # dependency injection + _normcase=normcase, + _pathsep=pathsep, + ) + ) + + def _normalize_test_id(*args): + return pytest_item._normalize_test_id( + *args, + **dict( + # dependency injection + _fix_fileid=_fix_fileid, + _pathsep=pathsep, + ) + ) + + def _iter_nodes(*args): + return pytest_item._iter_nodes( + *args, + **dict( + # dependency injection + _normalize_test_id=_normalize_test_id, + _normcase=normcase, + _pathsep=pathsep, + ) + ) + + def _parse_node_id(*args): + return pytest_item._parse_node_id( + *args, + **dict( + # dependency injection + _iter_nodes=_iter_nodes, + ) + ) + + ########## + def _split_fspath(*args): + return pytest_item._split_fspath( + *args, + **dict( + # dependency injection + _normcase=normcase, + ) + ) + + ########## + def _matches_relfile(*args): + return pytest_item._matches_relfile( + *args, + **dict( + # dependency injection + _normcase=normcase, + _pathsep=pathsep, + ) + ) + + def _is_legacy_wrapper(*args): + return pytest_item._is_legacy_wrapper( + *args, + **dict( + # dependency injection + _pathsep=pathsep, + ) + ) + + def _get_location(*args): + return pytest_item._get_location( + *args, + **dict( + # dependency injection + _matches_relfile=_matches_relfile, + _is_legacy_wrapper=_is_legacy_wrapper, + _pathsep=pathsep, + ) + ) + + ########## + def _parse_item(item): + return pytest_item.parse_item( + item, + **dict( + # dependency injection + _parse_node_id=_parse_node_id, + _split_fspath=_split_fspath, + _get_location=_get_location, + ) + ) + + return _parse_item + + +################################## +# tests + + +def fake_pytest_main(stub, use_fd, pytest_stdout): + def ret(args, plugins): + stub.add_call("pytest.main", None, {"args": args, "plugins": plugins}) + if use_fd: + os.write(sys.stdout.fileno(), pytest_stdout.encode()) + else: + print(pytest_stdout, end="") + return 0 + + return ret + + +class DiscoverTests(unittest.TestCase): + + DEFAULT_ARGS = [ + "--collect-only", + ] + + def test_basic(self): + stub = util.Stub() + stubpytest = StubPyTest(stub) + plugin = StubPlugin(stub) + expected = [] + plugin.discovered = expected + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + + # In Python 3.8 __len__ is called twice. 
+ if PYTHON_38_OR_LATER: + calls.insert(3, ("discovered.__len__", None, None)) + + parents, tests = _discovery.discover( + [], _pytest_main=stubpytest.main, _plugin=plugin + ) + + self.assertEqual(parents, []) + self.assertEqual(tests, expected) + self.assertEqual(stub.calls, calls) + + def test_failure(self): + stub = util.Stub() + pytest = StubPyTest(stub) + pytest.return_main = 2 + plugin = StubPlugin(stub) + + with self.assertRaises(Exception): + _discovery.discover([], _pytest_main=pytest.main, _plugin=plugin) + + self.assertEqual( + stub.calls, + [ + # There's only one call. + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ], + ) + + def test_no_tests_found(self): + stub = util.Stub() + pytest = StubPyTest(stub) + pytest.return_main = 5 + plugin = StubPlugin(stub) + expected = [] + plugin.discovered = expected + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + + # In Python 3.8 __len__ is called twice. + if PYTHON_38_OR_LATER: + calls.insert(3, ("discovered.__len__", None, None)) + + parents, tests = _discovery.discover( + [], _pytest_main=pytest.main, _plugin=plugin + ) + + self.assertEqual(parents, []) + self.assertEqual(tests, expected) + self.assertEqual(stub.calls, calls) + + def test_stdio_hidden_file(self): + stub = util.Stub() + + plugin = StubPlugin(stub) + plugin.discovered = [] + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + pytest_stdout = "spamspamspamspamspamspamspammityspam" + + # In Python 3.8 __len__ is called twice. + if PYTHON_38_OR_LATER: + calls.insert(3, ("discovered.__len__", None, None)) + + # to simulate stdio behavior in methods like os.dup, + # use actual files (rather than StringIO) + with tempfile.TemporaryFile("r+") as mock: + sys.stdout = mock + try: + _discovery.discover( + [], + hidestdio=True, + _pytest_main=fake_pytest_main(stub, False, pytest_stdout), + _plugin=plugin, + ) + finally: + sys.stdout = sys.__stdout__ + + mock.seek(0) + captured = mock.read() + + self.assertEqual(captured, "") + self.assertEqual(stub.calls, calls) + + def test_stdio_hidden_fd(self): + # simulate cases where stdout comes from the lower layer than sys.stdout + # via file descriptors (e.g., from cython) + stub = util.Stub() + plugin = StubPlugin(stub) + pytest_stdout = "spamspamspamspamspamspamspammityspam" + + # Replace with contextlib.redirect_stdout() once Python 2.7 support is dropped. + sys.stdout = StringIO() + try: + _discovery.discover( + [], + hidestdio=True, + _pytest_main=fake_pytest_main(stub, True, pytest_stdout), + _plugin=plugin, + ) + captured = sys.stdout.read() + self.assertEqual(captured, "") + finally: + sys.stdout = sys.__stdout__ + + def test_stdio_not_hidden_file(self): + stub = util.Stub() + + plugin = StubPlugin(stub) + plugin.discovered = [] + calls = [ + ("pytest.main", None, {"args": self.DEFAULT_ARGS, "plugins": [plugin]}), + ("discovered.parents", None, None), + ("discovered.__len__", None, None), + ("discovered.__getitem__", (0,), None), + ] + pytest_stdout = "spamspamspamspamspamspamspammityspam" + + # In Python 3.8 __len__ is called twice. 
+ if PYTHON_38_OR_LATER: + calls.insert(3, ("discovered.__len__", None, None)) + + buf = StringIO() + + sys.stdout = buf + try: + _discovery.discover( + [], + hidestdio=False, + _pytest_main=fake_pytest_main(stub, False, pytest_stdout), + _plugin=plugin, + ) + finally: + sys.stdout = sys.__stdout__ + captured = buf.getvalue() + + self.assertEqual(captured, pytest_stdout) + self.assertEqual(stub.calls, calls) + + def test_stdio_not_hidden_fd(self): + # simulate cases where stdout comes from the lower layer than sys.stdout + # via file descriptors (e.g., from cython) + stub = util.Stub() + plugin = StubPlugin(stub) + pytest_stdout = "spamspamspamspamspamspamspammityspam" + stub.calls = [] + with tempfile.TemporaryFile("r+") as mock: + sys.stdout = mock + try: + _discovery.discover( + [], + hidestdio=False, + _pytest_main=fake_pytest_main(stub, True, pytest_stdout), + _plugin=plugin, + ) + finally: + mock.seek(0) + captured = sys.stdout.read() + sys.stdout = sys.__stdout__ + self.assertEqual(captured, pytest_stdout) + + +class CollectorTests(unittest.TestCase): + def test_modifyitems(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + config = StubPytestConfig(stub) + collector = _discovery.TestCollector(tests=discovered) + + testroot = adapter_util.fix_path("/a/b/c") + relfile1 = adapter_util.fix_path("./test_spam.py") + relfile2 = adapter_util.fix_path("x/y/z/test_eggs.py") + + collector.pytest_collection_modifyitems( + session, + config, + [ + create_stub_function_item( + stub, + nodeid="test_spam.py::SpamTests::test_one", + name="test_one", + location=("test_spam.py", 12, "SpamTests.test_one"), + fspath=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_one"), + ), + create_stub_function_item( + stub, + nodeid="test_spam.py::SpamTests::test_other", + name="test_other", + location=("test_spam.py", 19, "SpamTests.test_other"), + fspath=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_other"), + ), + create_stub_function_item( + stub, + nodeid="test_spam.py::test_all", + name="test_all", + location=("test_spam.py", 144, "test_all"), + fspath=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_all"), + ), + create_stub_function_item( + stub, + nodeid="test_spam.py::test_each[10-10]", + name="test_each[10-10]", + location=("test_spam.py", 273, "test_each[10-10]"), + fspath=adapter_util.PATH_JOIN(testroot, "test_spam.py"), + function=FakeFunc("test_each"), + ), + create_stub_function_item( + stub, + nodeid=relfile2 + "::All::BasicTests::test_first", + name="test_first", + location=(relfile2, 31, "All.BasicTests.test_first"), + fspath=adapter_util.PATH_JOIN(testroot, relfile2), + function=FakeFunc("test_first"), + ), + create_stub_function_item( + stub, + nodeid=relfile2 + "::All::BasicTests::test_each[1+2-3]", + name="test_each[1+2-3]", + location=(relfile2, 62, "All.BasicTests.test_each[1+2-3]"), + fspath=adapter_util.PATH_JOIN(testroot, relfile2), + function=FakeFunc("test_each"), + own_markers=[ + FakeMarker(v) + for v in [ + # supported + "skip", + "skipif", + "xfail", + # duplicate + "skip", + # ignored (pytest-supported) + "parameterize", + "usefixtures", + "filterwarnings", + # ignored (custom) + "timeout", + ] + ], + ), + ], + ) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py::SpamTests", "SpamTests", "suite"), + ("./test_spam.py", 
"test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::SpamTests::test_one", + name="test_one", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="SpamTests.test_one", + sub=None, + ), + source="{}:{}".format(relfile1, 13), + markers=None, + parentid="./test_spam.py::SpamTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py::SpamTests", "SpamTests", "suite"), + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::SpamTests::test_other", + name="test_other", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="SpamTests.test_other", + sub=None, + ), + source="{}:{}".format(relfile1, 20), + markers=None, + parentid="./test_spam.py::SpamTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::test_all", + name="test_all", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="test_all", + sub=None, + ), + source="{}:{}".format(relfile1, 145), + markers=None, + parentid="./test_spam.py", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./test_spam.py::test_each", "test_each", "function"), + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./test_spam.py::test_each[10-10]", + name="test_each[10-10]", + path=info.SingleTestPath( + root=testroot, + relfile=relfile1, + func="test_each", + sub=["[10-10]"], + ), + source="{}:{}".format(relfile1, 274), + markers=None, + parentid="./test_spam.py::test_each", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::All::BasicTests", + "BasicTests", + "suite", + ), + ("./x/y/z/test_eggs.py::All", "All", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::All::BasicTests::test_first", + name="test_first", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile2), + func="All.BasicTests.test_first", + sub=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile2), 32 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::All::BasicTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::All::BasicTests::test_each", + "test_each", + "function", + ), + ( + "./x/y/z/test_eggs.py::All::BasicTests", + "BasicTests", + "suite", + ), + ("./x/y/z/test_eggs.py::All", "All", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::All::BasicTests::test_each[1+2-3]", + name="test_each[1+2-3]", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile2), + func="All.BasicTests.test_each", + sub=["[1+2-3]"], + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile2), 63 + ), + markers=["expected-failure", "skip", "skip-if"], + parentid="./x/y/z/test_eggs.py::All::BasicTests::test_each", + ), + ), + ), + ], + ) + + def test_finish(self): + stub = util.Stub() + 
discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::test_spam", + name="test_spam", + location=(relfile, 12, "SpamTests.test_spam"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests", + ), + ), + ), + ], + ) + + def test_doctest(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + doctestfile = adapter_util.fix_path("x/test_doctest.txt") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_doctest_item( + stub, + nodeid=doctestfile + "::test_doctest.txt", + name="test_doctest.txt", + location=(doctestfile, 0, "[doctest] test_doctest.txt"), + fspath=adapter_util.PATH_JOIN(testroot, doctestfile), + ), + # With --doctest-modules + create_stub_doctest_item( + stub, + nodeid=relfile + "::test_eggs", + name="test_eggs", + location=(relfile, 0, "[doctest] test_eggs"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + ), + create_stub_doctest_item( + stub, + nodeid=relfile + "::test_eggs.TestSpam", + name="test_eggs.TestSpam", + location=(relfile, 12, "[doctest] test_eggs.TestSpam"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + ), + create_stub_doctest_item( + stub, + nodeid=relfile + "::test_eggs.TestSpam.TestEggs", + name="test_eggs.TestSpam.TestEggs", + location=(relfile, 27, "[doctest] test_eggs.TestSpam.TestEggs"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/test_doctest.txt", "test_doctest.txt", "file"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/test_doctest.txt::test_doctest.txt", + name="test_doctest.txt", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(doctestfile), + func=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(doctestfile), 1 + ), + markers=[], + parentid="./x/test_doctest.txt", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + 
id="./x/y/z/test_eggs.py::test_eggs", + name="test_eggs", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func=None, + ), + source="{}:{}".format(adapter_util.fix_relpath(relfile), 1), + markers=[], + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::test_eggs.TestSpam", + name="test_eggs.TestSpam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=[], + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::test_eggs.TestSpam.TestEggs", + name="test_eggs.TestSpam.TestEggs", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 28 + ), + markers=[], + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ], + ) + + def test_nested_brackets(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::test_spam[a-[b]-c]", + name="test_spam[a-[b]-c]", + location=(relfile, 12, "SpamTests.test_spam[a-[b]-c]"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::SpamTests::test_spam", + "test_spam", + "function", + ), + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam[a-[b]-c]", + name="test_spam[a-[b]-c]", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=["[a-[b]-c]"], + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests::test_spam", + ), + ), + ), + ], + ) + + def test_nested_suite(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::Ham::Eggs::test_spam", + name="test_spam", + location=(relfile, 12, "SpamTests.Ham.Eggs.test_spam"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = 
_discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ( + "./x/y/z/test_eggs.py::SpamTests::Ham::Eggs", + "Eggs", + "suite", + ), + ("./x/y/z/test_eggs.py::SpamTests::Ham", "Ham", "suite"), + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::Ham::Eggs::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.Ham.Eggs.test_spam", + sub=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests::Ham::Eggs", + ), + ), + ), + ], + ) + + def test_windows(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = r"C:\A\B\C" + altroot = testroot.replace("\\", "/") + relfile = r"X\Y\Z\test_Eggs.py" + session.items = [ + # typical: + create_stub_function_item( + stub, + # pytest always uses "/" as the path separator in node IDs: + nodeid="X/Y/Z/test_Eggs.py::SpamTests::test_spam", + name="test_spam", + # normal path separator (contrast with nodeid): + location=(relfile, 12, "SpamTests.test_spam"), + # path separator matches location: + fspath=testroot + "\\" + relfile, + function=FakeFunc("test_spam"), + ), + ] + tests = [ + # permutations of path separators + (r"X/test_a.py", "\\", "\\"), # typical + (r"X/test_b.py", "\\", "/"), + (r"X/test_c.py", "/", "\\"), + (r"X/test_d.py", "/", "/"), + (r"X\test_e.py", "\\", "\\"), + (r"X\test_f.py", "\\", "/"), + (r"X\test_g.py", "/", "\\"), + (r"X\test_h.py", "/", "/"), + ] + for fileid, locfile, fspath in tests: + if locfile == "/": + locfile = fileid.replace("\\", "/") + elif locfile == "\\": + locfile = fileid.replace("/", "\\") + if fspath == "/": + fspath = (testroot + "/" + fileid).replace("\\", "/") + elif fspath == "\\": + fspath = (testroot + "/" + fileid).replace("/", "\\") + session.items.append( + create_stub_function_item( + stub, + nodeid=fileid + "::test_spam", + name="test_spam", + location=(locfile, 12, "test_spam"), + fspath=fspath, + function=FakeFunc("test_spam"), + ) + ) + collector = _discovery.TestCollector(tests=discovered) + if os.name != "nt": + collector.parse_item = generate_parse_item("\\") + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/Y/Z/test_Eggs.py::SpamTests", "SpamTests", "suite"), + (r"./X/Y/Z/test_Eggs.py", "test_Eggs.py", "file"), + (r"./X/Y/Z", "Z", "folder"), + (r"./X/Y", "Y", "folder"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/Y/Z/test_Eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, # not normalized + relfile=r".\X\Y\Z\test_Eggs.py", # not normalized + func="SpamTests.test_spam", + sub=None, + ), + source=r".\X\Y\Z\test_Eggs.py:13", # not normalized + markers=None, + parentid=r"./X/Y/Z/test_Eggs.py::SpamTests", + ), + ), + ), + # permutations + # (*all* the IDs use "/") + # (source path separator 
should match relfile, not location) + # /, \, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_a.py", "test_a.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_a.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_a.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_a.py:13", + markers=None, + parentid=r"./X/test_a.py", + ), + ), + ), + # /, \, / + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_b.py", "test_b.py", "file"), + (r"./X", "X", "folder"), + (".", altroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_b.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=altroot, + relfile=r"./X/test_b.py", + func="test_spam", + sub=None, + ), + source=r"./X/test_b.py:13", + markers=None, + parentid=r"./X/test_b.py", + ), + ), + ), + # /, /, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_c.py", "test_c.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_c.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_c.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_c.py:13", + markers=None, + parentid=r"./X/test_c.py", + ), + ), + ), + # /, /, / + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_d.py", "test_d.py", "file"), + (r"./X", "X", "folder"), + (".", altroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_d.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=altroot, + relfile=r"./X/test_d.py", + func="test_spam", + sub=None, + ), + source=r"./X/test_d.py:13", + markers=None, + parentid=r"./X/test_d.py", + ), + ), + ), + # \, \, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_e.py", "test_e.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_e.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_e.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_e.py:13", + markers=None, + parentid=r"./X/test_e.py", + ), + ), + ), + # \, \, / + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_f.py", "test_f.py", "file"), + (r"./X", "X", "folder"), + (".", altroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_f.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=altroot, + relfile=r"./X/test_f.py", + func="test_spam", + sub=None, + ), + source=r"./X/test_f.py:13", + markers=None, + parentid=r"./X/test_f.py", + ), + ), + ), + # \, /, \ + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_g.py", "test_g.py", "file"), + (r"./X", "X", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_g.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=r".\X\test_g.py", + func="test_spam", + sub=None, + ), + source=r".\X\test_g.py:13", + markers=None, + parentid=r"./X/test_g.py", + ), + ), + ), + # \, /, / + ( + "discovered.add_test", + None, + dict( + parents=[ + (r"./X/test_h.py", "test_h.py", "file"), + (r"./X", "X", "folder"), + (".", altroot, "folder"), + ], + test=info.SingleTestInfo( + id=r"./X/test_h.py::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=altroot, + relfile=r"./X/test_h.py", + 
func="test_spam", + sub=None, + ), + source=r"./X/test_h.py:13", + markers=None, + parentid=r"./X/test_h.py", + ), + ), + ), + ], + ) + + def test_mysterious_parens(self): + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::()::()::test_spam", + name="test_spam", + location=(relfile, 12, "SpamTests.test_spam"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=[], + ), + source="{}:{}".format( + adapter_util.fix_relpath(relfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests", + ), + ), + ), + ], + ) + + def test_imported_test(self): + # pytest will even discover tests that were imported from + # another module! + stub = util.Stub() + discovered = StubDiscoveredTests(stub) + session = StubPytestSession(stub) + testroot = adapter_util.fix_path("/a/b/c") + relfile = adapter_util.fix_path("x/y/z/test_eggs.py") + srcfile = adapter_util.fix_path("x/y/z/_extern.py") + session.items = [ + create_stub_function_item( + stub, + nodeid=relfile + "::SpamTests::test_spam", + name="test_spam", + location=(srcfile, 12, "SpamTests.test_spam"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + create_stub_function_item( + stub, + nodeid=relfile + "::test_ham", + name="test_ham", + location=(srcfile, 3, "test_ham"), + fspath=adapter_util.PATH_JOIN(testroot, relfile), + function=FakeFunc("test_spam"), + ), + ] + collector = _discovery.TestCollector(tests=discovered) + + collector.pytest_collection_finish(session) + + self.maxDiff = None + self.assertEqual( + stub.calls, + [ + ("discovered.reset", None, None), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py::SpamTests", "SpamTests", "suite"), + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::SpamTests::test_spam", + name="test_spam", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="SpamTests.test_spam", + sub=None, + ), + source="{}:{}".format( + adapter_util.fix_relpath(srcfile), 13 + ), + markers=None, + parentid="./x/y/z/test_eggs.py::SpamTests", + ), + ), + ), + ( + "discovered.add_test", + None, + dict( + parents=[ + ("./x/y/z/test_eggs.py", "test_eggs.py", "file"), + ("./x/y/z", "z", "folder"), + ("./x/y", "y", "folder"), + ("./x", "x", "folder"), + (".", testroot, "folder"), + ], + test=info.SingleTestInfo( + id="./x/y/z/test_eggs.py::test_ham", + 
name="test_ham", + path=info.SingleTestPath( + root=testroot, + relfile=adapter_util.fix_relpath(relfile), + func="test_ham", + sub=None, + ), + source="{}:{}".format(adapter_util.fix_relpath(srcfile), 4), + markers=None, + parentid="./x/y/z/test_eggs.py", + ), + ), + ), + ], + ) diff --git a/pythonFiles/tests/testing_tools/adapter/test___main__.py b/pythonFiles/tests/testing_tools/adapter/test___main__.py new file mode 100644 index 000000000000..d0a778c1d024 --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/test___main__.py @@ -0,0 +1,199 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + +from ...util import Stub, StubProxy +from testing_tools.adapter.__main__ import ( + parse_args, + main, + UnsupportedToolError, + UnsupportedCommandError, +) + + +class StubTool(StubProxy): + def __init__(self, name, stub=None): + super(StubTool, self).__init__(stub, name) + self.return_discover = None + + def discover(self, args, **kwargs): + self.add_call("discover", (args,), kwargs) + if self.return_discover is None: + raise NotImplementedError + return self.return_discover + + +class StubReporter(StubProxy): + def __init__(self, stub=None): + super(StubReporter, self).__init__(stub, "reporter") + + def report(self, tests, parents, **kwargs): + self.add_call("report", (tests, parents), kwargs or None) + + +################################## +# tests + + +class ParseGeneralTests(unittest.TestCase): + def test_unsupported_command(self): + with self.assertRaises(SystemExit): + parse_args(["run", "pytest"]) + with self.assertRaises(SystemExit): + parse_args(["debug", "pytest"]) + with self.assertRaises(SystemExit): + parse_args(["???", "pytest"]) + + +class ParseDiscoverTests(unittest.TestCase): + def test_pytest_default(self): + tool, cmd, args, toolargs = parse_args( + [ + "discover", + "pytest", + ] + ) + + self.assertEqual(tool, "pytest") + self.assertEqual(cmd, "discover") + self.assertEqual(args, {"pretty": False, "hidestdio": True, "simple": False}) + self.assertEqual(toolargs, []) + + def test_pytest_full(self): + tool, cmd, args, toolargs = parse_args( + [ + "discover", + "pytest", + # no adapter-specific options yet + "--", + "--strict", + "--ignore", + "spam,ham,eggs", + "--pastebin=xyz", + "--no-cov", + "-d", + ] + ) + + self.assertEqual(tool, "pytest") + self.assertEqual(cmd, "discover") + self.assertEqual(args, {"pretty": False, "hidestdio": True, "simple": False}) + self.assertEqual( + toolargs, + [ + "--strict", + "--ignore", + "spam,ham,eggs", + "--pastebin=xyz", + "--no-cov", + "-d", + ], + ) + + def test_pytest_opts(self): + tool, cmd, args, toolargs = parse_args( + [ + "discover", + "pytest", + "--simple", + "--no-hide-stdio", + "--pretty", + ] + ) + + self.assertEqual(tool, "pytest") + self.assertEqual(cmd, "discover") + self.assertEqual(args, {"pretty": True, "hidestdio": False, "simple": True}) + self.assertEqual(toolargs, []) + + def test_unsupported_tool(self): + with self.assertRaises(SystemExit): + parse_args(["discover", "unittest"]) + with self.assertRaises(SystemExit): + parse_args(["discover", "???"]) + + +class MainTests(unittest.TestCase): + + # TODO: We could use an integration test for pytest.discover(). 
+ + def test_discover(self): + stub = Stub() + tool = StubTool("spamspamspam", stub) + tests, parents = object(), object() + tool.return_discover = (parents, tests) + reporter = StubReporter(stub) + main( + tool.name, + "discover", + {"spam": "eggs"}, + [], + _tools={ + tool.name: { + "discover": tool.discover, + } + }, + _reporters={ + "discover": reporter.report, + }, + ) + + self.assertEqual( + tool.calls, + [ + ("spamspamspam.discover", ([],), {"spam": "eggs"}), + ("reporter.report", (tests, parents), {"spam": "eggs"}), + ], + ) + + def test_unsupported_tool(self): + with self.assertRaises(UnsupportedToolError): + main( + "unittest", + "discover", + {"spam": "eggs"}, + [], + _tools={"pytest": None}, + _reporters=None, + ) + with self.assertRaises(UnsupportedToolError): + main( + "???", + "discover", + {"spam": "eggs"}, + [], + _tools={"pytest": None}, + _reporters=None, + ) + + def test_unsupported_command(self): + tool = StubTool("pytest") + with self.assertRaises(UnsupportedCommandError): + main( + "pytest", + "run", + {"spam": "eggs"}, + [], + _tools={"pytest": {"discover": tool.discover}}, + _reporters=None, + ) + with self.assertRaises(UnsupportedCommandError): + main( + "pytest", + "debug", + {"spam": "eggs"}, + [], + _tools={"pytest": {"discover": tool.discover}}, + _reporters=None, + ) + with self.assertRaises(UnsupportedCommandError): + main( + "pytest", + "???", + {"spam": "eggs"}, + [], + _tools={"pytest": {"discover": tool.discover}}, + _reporters=None, + ) + self.assertEqual(tool.calls, []) diff --git a/pythonFiles/tests/testing_tools/adapter/test_discovery.py b/pythonFiles/tests/testing_tools/adapter/test_discovery.py new file mode 100644 index 000000000000..ec3d198b0108 --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/test_discovery.py @@ -0,0 +1,675 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
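# These unit tests exercise DiscoveredTests, which normalizes the node IDs that
# pytest reports before exposing them.  The _fix_nodeid() helper defined below
# mirrors that normalization when building expected values; informally (a sketch
# of the behaviour, not the production implementation):
#
#     "x\\y\\test_spam.py::test_each[10-10]"  ->  "./x/y/test_spam.py::test_each[10-10]"
#     "test_spam.py::test_spam"               ->  "./test_spam.py::test_spam"
#
# i.e. backslashes become "/" and a leading "./" is prepended when missing.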
+ +from __future__ import absolute_import, print_function + +import unittest + +from testing_tools.adapter.util import fix_path, fix_relpath +from testing_tools.adapter.info import SingleTestInfo, SingleTestPath, ParentInfo +from testing_tools.adapter.discovery import fix_nodeid, DiscoveredTests + + +def _fix_nodeid(nodeid): + + nodeid = nodeid.replace("\\", "/") + if not nodeid.startswith("./"): + nodeid = "./" + nodeid + return nodeid + + +class DiscoveredTestsTests(unittest.TestCase): + def test_list(self): + testroot = fix_path("/a/b/c") + relfile = fix_path("./test_spam.py") + tests = [ + SingleTestInfo( + # missing "./": + id="test_spam.py::test_each[10-10]", + name="test_each[10-10]", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="test_each", + sub=["[10-10]"], + ), + source="{}:{}".format(relfile, 10), + markers=None, + # missing "./": + parentid="test_spam.py::test_each", + ), + SingleTestInfo( + id="test_spam.py::All::BasicTests::test_first", + name="test_first", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="All.BasicTests.test_first", + sub=None, + ), + source="{}:{}".format(relfile, 62), + markers=None, + parentid="test_spam.py::All::BasicTests", + ), + ] + allparents = [ + [ + (fix_path("./test_spam.py::test_each"), "test_each", "function"), + (fix_path("./test_spam.py"), "test_spam.py", "file"), + (".", testroot, "folder"), + ], + [ + (fix_path("./test_spam.py::All::BasicTests"), "BasicTests", "suite"), + (fix_path("./test_spam.py::All"), "All", "suite"), + (fix_path("./test_spam.py"), "test_spam.py", "file"), + (".", testroot, "folder"), + ], + ] + expected = [ + test._replace(id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid)) + for test in tests + ] + discovered = DiscoveredTests() + for test, parents in zip(tests, allparents): + discovered.add_test(test, parents) + size = len(discovered) + items = [discovered[0], discovered[1]] + snapshot = list(discovered) + + self.maxDiff = None + self.assertEqual(size, 2) + self.assertEqual(items, expected) + self.assertEqual(snapshot, expected) + + def test_reset(self): + testroot = fix_path("/a/b/c") + discovered = DiscoveredTests() + discovered.add_test( + SingleTestInfo( + id="./test_spam.py::test_each", + name="test_each", + path=SingleTestPath( + root=testroot, + relfile="test_spam.py", + func="test_each", + ), + source="test_spam.py:11", + markers=[], + parentid="./test_spam.py", + ), + [ + ("./test_spam.py", "test_spam.py", "file"), + (".", testroot, "folder"), + ], + ) + + before = len(discovered), len(discovered.parents) + discovered.reset() + after = len(discovered), len(discovered.parents) + + self.assertEqual(before, (1, 2)) + self.assertEqual(after, (0, 0)) + + def test_parents(self): + testroot = fix_path("/a/b/c") + relfile = fix_path("x/y/z/test_spam.py") + tests = [ + SingleTestInfo( + # missing "./", using pathsep: + id=relfile + "::test_each[10-10]", + name="test_each[10-10]", + path=SingleTestPath( + root=testroot, + relfile=fix_relpath(relfile), + func="test_each", + sub=["[10-10]"], + ), + source="{}:{}".format(relfile, 10), + markers=None, + # missing "./", using pathsep: + parentid=relfile + "::test_each", + ), + SingleTestInfo( + # missing "./", using pathsep: + id=relfile + "::All::BasicTests::test_first", + name="test_first", + path=SingleTestPath( + root=testroot, + relfile=fix_relpath(relfile), + func="All.BasicTests.test_first", + sub=None, + ), + source="{}:{}".format(relfile, 61), + markers=None, + # missing "./", using pathsep: + parentid=relfile + 
"::All::BasicTests", + ), + ] + allparents = [ + # missing "./", using pathsep: + [ + (relfile + "::test_each", "test_each", "function"), + (relfile, relfile, "file"), + (".", testroot, "folder"), + ], + # missing "./", using pathsep: + [ + (relfile + "::All::BasicTests", "BasicTests", "suite"), + (relfile + "::All", "All", "suite"), + (relfile, "test_spam.py", "file"), + (fix_path("x/y/z"), "z", "folder"), + (fix_path("x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + ] + discovered = DiscoveredTests() + for test, parents in zip(tests, allparents): + discovered.add_test(test, parents) + + parents = discovered.parents + + self.maxDiff = None + self.assertEqual( + parents, + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./x", + kind="folder", + name="x", + root=testroot, + relpath=fix_path("./x"), + parentid=".", + ), + ParentInfo( + id="./x/y", + kind="folder", + name="y", + root=testroot, + relpath=fix_path("./x/y"), + parentid="./x", + ), + ParentInfo( + id="./x/y/z", + kind="folder", + name="z", + root=testroot, + relpath=fix_path("./x/y/z"), + parentid="./x/y", + ), + ParentInfo( + id="./x/y/z/test_spam.py", + kind="file", + name="test_spam.py", + root=testroot, + relpath=fix_relpath(relfile), + parentid="./x/y/z", + ), + ParentInfo( + id="./x/y/z/test_spam.py::All", + kind="suite", + name="All", + root=testroot, + parentid="./x/y/z/test_spam.py", + ), + ParentInfo( + id="./x/y/z/test_spam.py::All::BasicTests", + kind="suite", + name="BasicTests", + root=testroot, + parentid="./x/y/z/test_spam.py::All", + ), + ParentInfo( + id="./x/y/z/test_spam.py::test_each", + kind="function", + name="test_each", + root=testroot, + parentid="./x/y/z/test_spam.py", + ), + ], + ) + + def test_add_test_simple(self): + testroot = fix_path("/a/b/c") + relfile = "test_spam.py" + test = SingleTestInfo( + # missing "./": + id=relfile + "::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot, + # missing "./": + relfile=relfile, + func="test_spam", + ), + # missing "./": + source="{}:{}".format(relfile, 11), + markers=[], + # missing "./": + parentid=relfile, + ) + expected = test._replace( + id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid) + ) + discovered = DiscoveredTests() + + before = list(discovered), discovered.parents + discovered.add_test( + test, + [ + (relfile, relfile, "file"), + (".", testroot, "folder"), + ], + ) + after = list(discovered), discovered.parents + + self.maxDiff = None + self.assertEqual(before, ([], [])) + self.assertEqual( + after, + ( + [expected], + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./test_spam.py", + kind="file", + name=relfile, + root=testroot, + relpath=relfile, + parentid=".", + ), + ], + ), + ) + + def test_multiroot(self): + # the first root + testroot1 = fix_path("/a/b/c") + relfile1 = "test_spam.py" + alltests = [ + SingleTestInfo( + # missing "./": + id=relfile1 + "::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot1, + relfile=fix_relpath(relfile1), + func="test_spam", + ), + source="{}:{}".format(relfile1, 10), + markers=[], + # missing "./": + parentid=relfile1, + ), + ] + allparents = [ + # missing "./": + [ + (relfile1, "test_spam.py", "file"), + (".", testroot1, "folder"), + ], + ] + # the second root + testroot2 = fix_path("/x/y/z") + relfile2 = fix_path("w/test_eggs.py") + alltests.extend( + [ + SingleTestInfo( + id=relfile2 + "::BasicTests::test_first", + name="test_first", + 
path=SingleTestPath( + root=testroot2, + relfile=fix_relpath(relfile2), + func="BasicTests.test_first", + ), + source="{}:{}".format(relfile2, 61), + markers=[], + parentid=relfile2 + "::BasicTests", + ), + ] + ) + allparents.extend( + [ + # missing "./", using pathsep: + [ + (relfile2 + "::BasicTests", "BasicTests", "suite"), + (relfile2, "test_eggs.py", "file"), + (fix_path("./w"), "w", "folder"), + (".", testroot2, "folder"), + ], + ] + ) + + discovered = DiscoveredTests() + for test, parents in zip(alltests, allparents): + discovered.add_test(test, parents) + tests = list(discovered) + parents = discovered.parents + + self.maxDiff = None + self.assertEqual( + tests, + [ + # the first root + SingleTestInfo( + id="./test_spam.py::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot1, + relfile=fix_relpath(relfile1), + func="test_spam", + ), + source="{}:{}".format(relfile1, 10), + markers=[], + parentid="./test_spam.py", + ), + # the secondroot + SingleTestInfo( + id="./w/test_eggs.py::BasicTests::test_first", + name="test_first", + path=SingleTestPath( + root=testroot2, + relfile=fix_relpath(relfile2), + func="BasicTests.test_first", + ), + source="{}:{}".format(relfile2, 61), + markers=[], + parentid="./w/test_eggs.py::BasicTests", + ), + ], + ) + self.assertEqual( + parents, + [ + # the first root + ParentInfo( + id=".", + kind="folder", + name=testroot1, + ), + ParentInfo( + id="./test_spam.py", + kind="file", + name="test_spam.py", + root=testroot1, + relpath=fix_relpath(relfile1), + parentid=".", + ), + # the secondroot + ParentInfo( + id=".", + kind="folder", + name=testroot2, + ), + ParentInfo( + id="./w", + kind="folder", + name="w", + root=testroot2, + relpath=fix_path("./w"), + parentid=".", + ), + ParentInfo( + id="./w/test_eggs.py", + kind="file", + name="test_eggs.py", + root=testroot2, + relpath=fix_relpath(relfile2), + parentid="./w", + ), + ParentInfo( + id="./w/test_eggs.py::BasicTests", + kind="suite", + name="BasicTests", + root=testroot2, + parentid="./w/test_eggs.py", + ), + ], + ) + + def test_doctest(self): + testroot = fix_path("/a/b/c") + doctestfile = fix_path("./x/test_doctest.txt") + relfile = fix_path("./x/y/z/test_eggs.py") + alltests = [ + SingleTestInfo( + id=doctestfile + "::test_doctest.txt", + name="test_doctest.txt", + path=SingleTestPath( + root=testroot, + relfile=doctestfile, + func=None, + ), + source="{}:{}".format(doctestfile, 0), + markers=[], + parentid=doctestfile, + ), + # With --doctest-modules + SingleTestInfo( + id=relfile + "::test_eggs", + name="test_eggs", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=None, + ), + source="{}:{}".format(relfile, 0), + markers=[], + parentid=relfile, + ), + SingleTestInfo( + id=relfile + "::test_eggs.TestSpam", + name="test_eggs.TestSpam", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=None, + ), + source="{}:{}".format(relfile, 12), + markers=[], + parentid=relfile, + ), + SingleTestInfo( + id=relfile + "::test_eggs.TestSpam.TestEggs", + name="test_eggs.TestSpam.TestEggs", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func=None, + ), + source="{}:{}".format(relfile, 27), + markers=[], + parentid=relfile, + ), + ] + allparents = [ + [ + (doctestfile, "test_doctest.txt", "file"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + [ + (relfile, "test_eggs.py", "file"), + (fix_path("./x/y/z"), "z", "folder"), + (fix_path("./x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], 
+ [ + (relfile, "test_eggs.py", "file"), + (fix_path("./x/y/z"), "z", "folder"), + (fix_path("./x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + [ + (relfile, "test_eggs.py", "file"), + (fix_path("./x/y/z"), "z", "folder"), + (fix_path("./x/y"), "y", "folder"), + (fix_path("./x"), "x", "folder"), + (".", testroot, "folder"), + ], + ] + expected = [ + test._replace(id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid)) + for test in alltests + ] + + discovered = DiscoveredTests() + + for test, parents in zip(alltests, allparents): + discovered.add_test(test, parents) + tests = list(discovered) + parents = discovered.parents + + self.maxDiff = None + self.assertEqual(tests, expected) + self.assertEqual( + parents, + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./x", + kind="folder", + name="x", + root=testroot, + relpath=fix_path("./x"), + parentid=".", + ), + ParentInfo( + id="./x/test_doctest.txt", + kind="file", + name="test_doctest.txt", + root=testroot, + relpath=fix_path(doctestfile), + parentid="./x", + ), + ParentInfo( + id="./x/y", + kind="folder", + name="y", + root=testroot, + relpath=fix_path("./x/y"), + parentid="./x", + ), + ParentInfo( + id="./x/y/z", + kind="folder", + name="z", + root=testroot, + relpath=fix_path("./x/y/z"), + parentid="./x/y", + ), + ParentInfo( + id="./x/y/z/test_eggs.py", + kind="file", + name="test_eggs.py", + root=testroot, + relpath=fix_relpath(relfile), + parentid="./x/y/z", + ), + ], + ) + + def test_nested_suite_simple(self): + testroot = fix_path("/a/b/c") + relfile = fix_path("./test_eggs.py") + alltests = [ + SingleTestInfo( + id=relfile + "::TestOuter::TestInner::test_spam", + name="test_spam", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="TestOuter.TestInner.test_spam", + ), + source="{}:{}".format(relfile, 10), + markers=None, + parentid=relfile + "::TestOuter::TestInner", + ), + SingleTestInfo( + id=relfile + "::TestOuter::TestInner::test_eggs", + name="test_eggs", + path=SingleTestPath( + root=testroot, + relfile=relfile, + func="TestOuter.TestInner.test_eggs", + ), + source="{}:{}".format(relfile, 21), + markers=None, + parentid=relfile + "::TestOuter::TestInner", + ), + ] + allparents = [ + [ + (relfile + "::TestOuter::TestInner", "TestInner", "suite"), + (relfile + "::TestOuter", "TestOuter", "suite"), + (relfile, "test_eggs.py", "file"), + (".", testroot, "folder"), + ], + [ + (relfile + "::TestOuter::TestInner", "TestInner", "suite"), + (relfile + "::TestOuter", "TestOuter", "suite"), + (relfile, "test_eggs.py", "file"), + (".", testroot, "folder"), + ], + ] + expected = [ + test._replace(id=_fix_nodeid(test.id), parentid=_fix_nodeid(test.parentid)) + for test in alltests + ] + + discovered = DiscoveredTests() + for test, parents in zip(alltests, allparents): + discovered.add_test(test, parents) + tests = list(discovered) + parents = discovered.parents + + self.maxDiff = None + self.assertEqual(tests, expected) + self.assertEqual( + parents, + [ + ParentInfo( + id=".", + kind="folder", + name=testroot, + ), + ParentInfo( + id="./test_eggs.py", + kind="file", + name="test_eggs.py", + root=testroot, + relpath=fix_relpath(relfile), + parentid=".", + ), + ParentInfo( + id="./test_eggs.py::TestOuter", + kind="suite", + name="TestOuter", + root=testroot, + parentid="./test_eggs.py", + ), + ParentInfo( + id="./test_eggs.py::TestOuter::TestInner", + kind="suite", + name="TestInner", + root=testroot, + 
parentid="./test_eggs.py::TestOuter", + ), + ], + ) diff --git a/pythonFiles/tests/testing_tools/adapter/test_functional.py b/pythonFiles/tests/testing_tools/adapter/test_functional.py new file mode 100644 index 000000000000..153ad5508d9b --- /dev/null +++ b/pythonFiles/tests/testing_tools/adapter/test_functional.py @@ -0,0 +1,1535 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +from __future__ import absolute_import, unicode_literals + +import json +import os +import os.path +import subprocess +import sys +import unittest + +from ...__main__ import TESTING_TOOLS_ROOT +from testing_tools.adapter.util import fix_path, PATH_SEP + +# Pytest 3.7 and later uses pathlib/pathlib2 for path resolution. +try: + from pathlib import Path +except ImportError: + from pathlib2 import Path # type: ignore (for Pylance) + + +CWD = os.getcwd() +DATA_DIR = os.path.join(os.path.dirname(__file__), ".data") +SCRIPT = os.path.join(TESTING_TOOLS_ROOT, "run_adapter.py") + + +def resolve_testroot(name): + projroot = os.path.join(DATA_DIR, name) + testroot = os.path.join(projroot, "tests") + return str(Path(projroot).resolve()), str(Path(testroot).resolve()) + + +def run_adapter(cmd, tool, *cliargs): + try: + return _run_adapter(cmd, tool, *cliargs) + except subprocess.CalledProcessError as exc: + print(exc.output) + + +def _run_adapter(cmd, tool, *cliargs, **kwargs): + hidestdio = kwargs.pop("hidestdio", True) + assert not kwargs or tuple(kwargs) == ("stderr",) + kwds = kwargs + argv = [sys.executable, SCRIPT, cmd, tool, "--"] + list(cliargs) + if not hidestdio: + argv.insert(4, "--no-hide-stdio") + kwds["stderr"] = subprocess.STDOUT + argv.append("--cache-clear") + print( + "running {!r}".format(" ".join(arg.rpartition(CWD + "/")[-1] for arg in argv)) + ) + output = subprocess.check_output(argv, universal_newlines=True, **kwds) + return output + + +def fix_test_order(tests): + if sys.version_info >= (3, 6): + return tests + fixed = [] + curfile = None + group = [] + for test in tests: + if (curfile or "???") not in test["id"]: + fixed.extend(sorted(group, key=lambda t: t["id"])) + group = [] + curfile = test["id"].partition(".py::")[0] + ".py" + group.append(test) + fixed.extend(sorted(group, key=lambda t: t["id"])) + return fixed + + +def fix_source(tests, testid, srcfile, lineno): + for test in tests: + if test["id"] == testid: + break + else: + raise KeyError("test {!r} not found".format(testid)) + if not srcfile: + srcfile = test["source"].rpartition(":")[0] + test["source"] = fix_path("{}:{}".format(srcfile, lineno)) + + +def sorted_object(obj): + if isinstance(obj, dict): + return sorted((key, sorted_object(obj[key])) for key in obj.keys()) + if isinstance(obj, list): + return sorted((sorted_object(x) for x in obj)) + else: + return obj + + +# Note that these tests are skipped if util.PATH_SEP is not os.path.sep. +# This is because the functional tests should reflect the actual +# operating environment. 
+ + +class PytestTests(unittest.TestCase): + def setUp(self): + if PATH_SEP is not os.path.sep: + raise unittest.SkipTest("functional tests require unmodified env") + super(PytestTests, self).setUp() + + def complex(self, testroot): + results = COMPLEX.copy() + results["root"] = testroot + return [results] + + def test_discover_simple(self): + projroot, testroot = resolve_testroot("simple") + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + + self.maxDiff = None + self.assertEqual( + result, + [ + { + "root": projroot, + "rootid": ".", + "parents": [ + { + "id": "./tests", + "kind": "folder", + "name": "tests", + "relpath": fix_path("./tests"), + "parentid": ".", + }, + { + "id": "./tests/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/test_spam.py"), + "parentid": "./tests", + }, + ], + "tests": [ + { + "id": "./tests/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_spam.py:2"), + "markers": [], + "parentid": "./tests/test_spam.py", + }, + ], + } + ], + ) + + def test_discover_complex_default(self): + projroot, testroot = resolve_testroot("complex") + expected = self.complex(projroot) + expected[0]["tests"] = fix_test_order(expected[0]["tests"]) + if sys.version_info < (3,): + decorated = [ + "./tests/test_unittest.py::MyTests::test_skipped", + "./tests/test_unittest.py::MyTests::test_maybe_skipped", + "./tests/test_unittest.py::MyTests::test_maybe_not_skipped", + ] + for testid in decorated: + fix_source(expected[0]["tests"], testid, None, 0) + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + result[0]["tests"] = fix_test_order(result[0]["tests"]) + + self.maxDiff = None + self.assertEqual(sorted_object(result), sorted_object(expected)) + + def test_discover_complex_doctest(self): + projroot, _ = resolve_testroot("complex") + expected = self.complex(projroot) + # add in doctests from test suite + expected[0]["parents"].insert( + 3, + { + "id": "./tests/test_doctest.py", + "kind": "file", + "name": "test_doctest.py", + "relpath": fix_path("./tests/test_doctest.py"), + "parentid": "./tests", + }, + ) + expected[0]["tests"].insert( + 2, + { + "id": "./tests/test_doctest.py::tests.test_doctest", + "name": "tests.test_doctest", + "source": fix_path("./tests/test_doctest.py:1"), + "markers": [], + "parentid": "./tests/test_doctest.py", + }, + ) + # add in doctests from non-test module + expected[0]["parents"].insert( + 0, + { + "id": "./mod.py", + "kind": "file", + "name": "mod.py", + "relpath": fix_path("./mod.py"), + "parentid": ".", + }, + ) + expected[0]["tests"] = [ + { + "id": "./mod.py::mod", + "name": "mod", + "source": fix_path("./mod.py:1"), + "markers": [], + "parentid": "./mod.py", + }, + { + "id": "./mod.py::mod.Spam", + "name": "mod.Spam", + "source": fix_path("./mod.py:33"), + "markers": [], + "parentid": "./mod.py", + }, + { + "id": "./mod.py::mod.Spam.eggs", + "name": "mod.Spam.eggs", + "source": fix_path("./mod.py:43"), + "markers": [], + "parentid": "./mod.py", + }, + { + "id": "./mod.py::mod.square", + "name": "mod.square", + "source": fix_path("./mod.py:18"), + "markers": [], + "parentid": "./mod.py", + }, + ] + expected[0]["tests"] + expected[0]["tests"] = fix_test_order(expected[0]["tests"]) + if sys.version_info < (3,): + decorated = [ + "./tests/test_unittest.py::MyTests::test_skipped", + "./tests/test_unittest.py::MyTests::test_maybe_skipped", + 
"./tests/test_unittest.py::MyTests::test_maybe_not_skipped", + ] + for testid in decorated: + fix_source(expected[0]["tests"], testid, None, 0) + + out = run_adapter( + "discover", "pytest", "--rootdir", projroot, "--doctest-modules", projroot + ) + result = json.loads(out) + result[0]["tests"] = fix_test_order(result[0]["tests"]) + + self.maxDiff = None + self.assertEqual(sorted_object(result), sorted_object(expected)) + + def test_discover_not_found(self): + projroot, testroot = resolve_testroot("notests") + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + + self.maxDiff = None + self.assertEqual(result, []) + # TODO: Expect the following instead? + # self.assertEqual(result, [{ + # 'root': projroot, + # 'rootid': '.', + # 'parents': [], + # 'tests': [], + # }]) + + @unittest.skip("broken in CI") + def test_discover_bad_args(self): + projroot, testroot = resolve_testroot("simple") + + with self.assertRaises(subprocess.CalledProcessError) as cm: + _run_adapter( + "discover", + "pytest", + "--spam", + "--rootdir", + projroot, + testroot, + stderr=subprocess.STDOUT, + ) + self.assertIn("(exit code 4)", cm.exception.output) + + def test_discover_syntax_error(self): + projroot, testroot = resolve_testroot("syntax-error") + + with self.assertRaises(subprocess.CalledProcessError) as cm: + _run_adapter( + "discover", + "pytest", + "--rootdir", + projroot, + testroot, + stderr=subprocess.STDOUT, + ) + self.assertIn("(exit code 2)", cm.exception.output) + + def test_discover_normcase(self): + projroot, testroot = resolve_testroot("NormCase") + + out = run_adapter("discover", "pytest", "--rootdir", projroot, testroot) + result = json.loads(out) + + self.maxDiff = None + self.assertTrue(projroot.endswith("NormCase")) + self.assertEqual( + result, + [ + { + "root": projroot, + "rootid": ".", + "parents": [ + { + "id": "./tests", + "kind": "folder", + "name": "tests", + "relpath": fix_path("./tests"), + "parentid": ".", + }, + { + "id": "./tests/A", + "kind": "folder", + "name": "A", + "relpath": fix_path("./tests/A"), + "parentid": "./tests", + }, + { + "id": "./tests/A/b", + "kind": "folder", + "name": "b", + "relpath": fix_path("./tests/A/b"), + "parentid": "./tests/A", + }, + { + "id": "./tests/A/b/C", + "kind": "folder", + "name": "C", + "relpath": fix_path("./tests/A/b/C"), + "parentid": "./tests/A/b", + }, + { + "id": "./tests/A/b/C/test_Spam.py", + "kind": "file", + "name": "test_Spam.py", + "relpath": fix_path("./tests/A/b/C/test_Spam.py"), + "parentid": "./tests/A/b/C", + }, + ], + "tests": [ + { + "id": "./tests/A/b/C/test_Spam.py::test_okay", + "name": "test_okay", + "source": fix_path("./tests/A/b/C/test_Spam.py:2"), + "markers": [], + "parentid": "./tests/A/b/C/test_Spam.py", + }, + ], + } + ], + ) + + +COMPLEX = { + "root": None, + "rootid": ".", + "parents": [ + # + { + "id": "./tests", + "kind": "folder", + "name": "tests", + "relpath": fix_path("./tests"), + "parentid": ".", + }, + # +++ + { + "id": "./tests/test_42-43.py", + "kind": "file", + "name": "test_42-43.py", + "relpath": fix_path("./tests/test_42-43.py"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_42.py", + "kind": "file", + "name": "test_42.py", + "relpath": fix_path("./tests/test_42.py"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_doctest.txt", + "kind": "file", + "name": "test_doctest.txt", + "relpath": fix_path("./tests/test_doctest.txt"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_foo.py", + "kind": 
"file", + "name": "test_foo.py", + "relpath": fix_path("./tests/test_foo.py"), + "parentid": "./tests", + }, + # +++ + { + "id": "./tests/test_mixed.py", + "kind": "file", + "name": "test_mixed.py", + "relpath": fix_path("./tests/test_mixed.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_mixed.py::MyTests", + "kind": "suite", + "name": "MyTests", + "parentid": "./tests/test_mixed.py", + }, + { + "id": "./tests/test_mixed.py::TestMySuite", + "kind": "suite", + "name": "TestMySuite", + "parentid": "./tests/test_mixed.py", + }, + # +++ + { + "id": "./tests/test_pytest.py", + "kind": "file", + "name": "test_pytest.py", + "relpath": fix_path("./tests/test_pytest.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_pytest.py::TestEggs", + "kind": "suite", + "name": "TestEggs", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestParam", + "kind": "suite", + "name": "TestParam", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest.py::TestParam", + }, + { + "id": "./tests/test_pytest.py::TestParamAll", + "kind": "suite", + "name": "TestParamAll", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest.py::TestParamAll", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_spam_13", + "kind": "function", + "name": "test_spam_13", + "parentid": "./tests/test_pytest.py::TestParamAll", + }, + { + "id": "./tests/test_pytest.py::TestSpam", + "kind": "suite", + "name": "TestSpam", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestSpam::TestHam", + "kind": "suite", + "name": "TestHam", + "parentid": "./tests/test_pytest.py::TestSpam", + }, + { + "id": "./tests/test_pytest.py::TestSpam::TestHam::TestEggs", + "kind": "suite", + "name": "TestEggs", + "parentid": "./tests/test_pytest.py::TestSpam::TestHam", + }, + { + "id": "./tests/test_pytest.py::test_fixture_param", + "kind": "function", + "name": "test_fixture_param", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_01", + "kind": "function", + "name": "test_param_01", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_11", + "kind": "function", + "name": "test_param_11", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers", + "kind": "function", + "name": "test_param_13_markers", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13_repeat", + "kind": "function", + "name": "test_param_13_repeat", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped", + "kind": "function", + "name": "test_param_13_skipped", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13", + "kind": "function", + "name": "test_param_23_13", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises", + "kind": "function", + "name": "test_param_23_raises", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_33", 
+ "kind": "function", + "name": "test_param_33", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids", + "kind": "function", + "name": "test_param_33_ids", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture", + "kind": "function", + "name": "test_param_fixture", + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_mark_fixture", + "kind": "function", + "name": "test_param_mark_fixture", + "parentid": "./tests/test_pytest.py", + }, + # +++ + { + "id": "./tests/test_pytest_param.py", + "kind": "file", + "name": "test_pytest_param.py", + "relpath": fix_path("./tests/test_pytest_param.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll", + "kind": "suite", + "name": "TestParamAll", + "parentid": "./tests/test_pytest_param.py", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest_param.py::TestParamAll", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13", + "kind": "function", + "name": "test_spam_13", + "parentid": "./tests/test_pytest_param.py::TestParamAll", + }, + { + "id": "./tests/test_pytest_param.py::test_param_13", + "kind": "function", + "name": "test_param_13", + "parentid": "./tests/test_pytest_param.py", + }, + # +++ + { + "id": "./tests/test_unittest.py", + "kind": "file", + "name": "test_unittest.py", + "relpath": fix_path("./tests/test_unittest.py"), + "parentid": "./tests", + }, + { + "id": "./tests/test_unittest.py::MyTests", + "kind": "suite", + "name": "MyTests", + "parentid": "./tests/test_unittest.py", + }, + { + "id": "./tests/test_unittest.py::OtherTests", + "kind": "suite", + "name": "OtherTests", + "parentid": "./tests/test_unittest.py", + }, + ## + { + "id": "./tests/v", + "kind": "folder", + "name": "v", + "relpath": fix_path("./tests/v"), + "parentid": "./tests", + }, + ## +++ + { + "id": "./tests/v/test_eggs.py", + "kind": "file", + "name": "test_eggs.py", + "relpath": fix_path("./tests/v/test_eggs.py"), + "parentid": "./tests/v", + }, + { + "id": "./tests/v/test_eggs.py::TestSimple", + "kind": "suite", + "name": "TestSimple", + "parentid": "./tests/v/test_eggs.py", + }, + ## +++ + { + "id": "./tests/v/test_ham.py", + "kind": "file", + "name": "test_ham.py", + "relpath": fix_path("./tests/v/test_ham.py"), + "parentid": "./tests/v", + }, + ## +++ + { + "id": "./tests/v/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/v/test_spam.py"), + "parentid": "./tests/v", + }, + ## + { + "id": "./tests/w", + "kind": "folder", + "name": "w", + "relpath": fix_path("./tests/w"), + "parentid": "./tests", + }, + ## +++ + { + "id": "./tests/w/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/w/test_spam.py"), + "parentid": "./tests/w", + }, + ## +++ + { + "id": "./tests/w/test_spam_ex.py", + "kind": "file", + "name": "test_spam_ex.py", + "relpath": fix_path("./tests/w/test_spam_ex.py"), + "parentid": "./tests/w", + }, + ## + { + "id": "./tests/x", + "kind": "folder", + "name": "x", + "relpath": fix_path("./tests/x"), + "parentid": "./tests", + }, + ### + { + "id": "./tests/x/y", + "kind": "folder", + "name": "y", + "relpath": fix_path("./tests/x/y"), + "parentid": "./tests/x", + }, + #### + { + "id": "./tests/x/y/z", + "kind": "folder", + "name": "z", + "relpath": fix_path("./tests/x/y/z"), + 
"parentid": "./tests/x/y", + }, + ##### + { + "id": "./tests/x/y/z/a", + "kind": "folder", + "name": "a", + "relpath": fix_path("./tests/x/y/z/a"), + "parentid": "./tests/x/y/z", + }, + ##### +++ + { + "id": "./tests/x/y/z/a/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/x/y/z/a/test_spam.py"), + "parentid": "./tests/x/y/z/a", + }, + ##### + { + "id": "./tests/x/y/z/b", + "kind": "folder", + "name": "b", + "relpath": fix_path("./tests/x/y/z/b"), + "parentid": "./tests/x/y/z", + }, + ##### +++ + { + "id": "./tests/x/y/z/b/test_spam.py", + "kind": "file", + "name": "test_spam.py", + "relpath": fix_path("./tests/x/y/z/b/test_spam.py"), + "parentid": "./tests/x/y/z/b", + }, + #### +++ + { + "id": "./tests/x/y/z/test_ham.py", + "kind": "file", + "name": "test_ham.py", + "relpath": fix_path("./tests/x/y/z/test_ham.py"), + "parentid": "./tests/x/y/z", + }, + ], + "tests": [ + ########## + { + "id": "./tests/test_42-43.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_42-43.py:2"), + "markers": [], + "parentid": "./tests/test_42-43.py", + }, + ##### + { + "id": "./tests/test_42.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_42.py:2"), + "markers": [], + "parentid": "./tests/test_42.py", + }, + ##### + { + "id": "./tests/test_doctest.txt::test_doctest.txt", + "name": "test_doctest.txt", + "source": fix_path("./tests/test_doctest.txt:1"), + "markers": [], + "parentid": "./tests/test_doctest.txt", + }, + ##### + { + "id": "./tests/test_foo.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_foo.py:3"), + "markers": [], + "parentid": "./tests/test_foo.py", + }, + ##### + { + "id": "./tests/test_mixed.py::test_top_level", + "name": "test_top_level", + "source": fix_path("./tests/test_mixed.py:5"), + "markers": [], + "parentid": "./tests/test_mixed.py", + }, + { + "id": "./tests/test_mixed.py::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_mixed.py:9"), + "markers": ["skip"], + "parentid": "./tests/test_mixed.py", + }, + { + "id": "./tests/test_mixed.py::TestMySuite::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_mixed.py:16"), + "markers": [], + "parentid": "./tests/test_mixed.py::TestMySuite", + }, + { + "id": "./tests/test_mixed.py::MyTests::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_mixed.py:22"), + "markers": [], + "parentid": "./tests/test_mixed.py::MyTests", + }, + { + "id": "./tests/test_mixed.py::MyTests::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_mixed.py:25"), + "markers": ["skip"], + "parentid": "./tests/test_mixed.py::MyTests", + }, + ##### + { + "id": "./tests/test_pytest.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:6"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_failure", + "name": "test_failure", + "source": fix_path("./tests/test_pytest.py:10"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_runtime_skipped", + "name": "test_runtime_skipped", + "source": fix_path("./tests/test_pytest.py:14"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_runtime_failed", + "name": "test_runtime_failed", + "source": fix_path("./tests/test_pytest.py:18"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": 
"./tests/test_pytest.py::test_raises", + "name": "test_raises", + "source": fix_path("./tests/test_pytest.py:22"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_pytest.py:26"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_maybe_skipped", + "name": "test_maybe_skipped", + "source": fix_path("./tests/test_pytest.py:31"), + "markers": ["skip-if"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_known_failure", + "name": "test_known_failure", + "source": fix_path("./tests/test_pytest.py:36"), + "markers": ["expected-failure"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_warned", + "name": "test_warned", + "source": fix_path("./tests/test_pytest.py:41"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_custom_marker", + "name": "test_custom_marker", + "source": fix_path("./tests/test_pytest.py:46"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_multiple_markers", + "name": "test_multiple_markers", + "source": fix_path("./tests/test_pytest.py:51"), + "markers": ["expected-failure", "skip", "skip-if"], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_dynamic_1", + "name": "test_dynamic_1", + "source": fix_path("./tests/test_pytest.py:62"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_dynamic_2", + "name": "test_dynamic_2", + "source": fix_path("./tests/test_pytest.py:62"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_dynamic_3", + "name": "test_dynamic_3", + "source": fix_path("./tests/test_pytest.py:62"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::TestSpam::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:70"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestSpam", + }, + { + "id": "./tests/test_pytest.py::TestSpam::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_pytest.py:73"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::TestSpam", + }, + { + "id": "./tests/test_pytest.py::TestSpam::TestHam::TestEggs::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:81"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestSpam::TestHam::TestEggs", + }, + { + "id": "./tests/test_pytest.py::TestEggs::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:93"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestEggs", + }, + { + "id": "./tests/test_pytest.py::test_param_01[]", + "name": "test_param_01[]", + "source": fix_path("./tests/test_pytest.py:103"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_01", + }, + { + "id": "./tests/test_pytest.py::test_param_11[x0]", + "name": "test_param_11[x0]", + "source": fix_path("./tests/test_pytest.py:108"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_11", + }, + { + "id": "./tests/test_pytest.py::test_param_13[x0]", + "name": "test_param_13[x0]", + "source": fix_path("./tests/test_pytest.py:113"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13", 
+ }, + { + "id": "./tests/test_pytest.py::test_param_13[x1]", + "name": "test_param_13[x1]", + "source": fix_path("./tests/test_pytest.py:113"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13", + }, + { + "id": "./tests/test_pytest.py::test_param_13[x2]", + "name": "test_param_13[x2]", + "source": fix_path("./tests/test_pytest.py:113"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13", + }, + { + "id": "./tests/test_pytest.py::test_param_13_repeat[x0]", + "name": "test_param_13_repeat[x0]", + "source": fix_path("./tests/test_pytest.py:118"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13_repeat", + }, + { + "id": "./tests/test_pytest.py::test_param_13_repeat[x1]", + "name": "test_param_13_repeat[x1]", + "source": fix_path("./tests/test_pytest.py:118"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13_repeat", + }, + { + "id": "./tests/test_pytest.py::test_param_13_repeat[x2]", + "name": "test_param_13_repeat[x2]", + "source": fix_path("./tests/test_pytest.py:118"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13_repeat", + }, + { + "id": "./tests/test_pytest.py::test_param_33[1-1-1]", + "name": "test_param_33[1-1-1]", + "source": fix_path("./tests/test_pytest.py:123"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33", + }, + { + "id": "./tests/test_pytest.py::test_param_33[3-4-5]", + "name": "test_param_33[3-4-5]", + "source": fix_path("./tests/test_pytest.py:123"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33", + }, + { + "id": "./tests/test_pytest.py::test_param_33[0-0-0]", + "name": "test_param_33[0-0-0]", + "source": fix_path("./tests/test_pytest.py:123"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids[v1]", + "name": "test_param_33_ids[v1]", + "source": fix_path("./tests/test_pytest.py:128"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33_ids", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids[v2]", + "name": "test_param_33_ids[v2]", + "source": fix_path("./tests/test_pytest.py:128"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33_ids", + }, + { + "id": "./tests/test_pytest.py::test_param_33_ids[v3]", + "name": "test_param_33_ids[v3]", + "source": fix_path("./tests/test_pytest.py:128"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_33_ids", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[1-1-z0]", + "name": "test_param_23_13[1-1-z0]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[1-1-z1]", + "name": "test_param_23_13[1-1-z1]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[1-1-z2]", + "name": "test_param_23_13[1-1-z2]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[3-4-z0]", + "name": "test_param_23_13[3-4-z0]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[3-4-z1]", + "name": "test_param_23_13[3-4-z1]", + 
"source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[3-4-z2]", + "name": "test_param_23_13[3-4-z2]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[0-0-z0]", + "name": "test_param_23_13[0-0-z0]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[0-0-z1]", + "name": "test_param_23_13[0-0-z1]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_23_13[0-0-z2]", + "name": "test_param_23_13[0-0-z2]", + "source": fix_path("./tests/test_pytest.py:134"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_13", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers[x0]", + "name": "test_param_13_markers[x0]", + "source": fix_path("./tests/test_pytest.py:140"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_13_markers", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers[???]", + "name": "test_param_13_markers[???]", + "source": fix_path("./tests/test_pytest.py:140"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_markers", + }, + { + "id": "./tests/test_pytest.py::test_param_13_markers[2]", + "name": "test_param_13_markers[2]", + "source": fix_path("./tests/test_pytest.py:140"), + "markers": ["expected-failure"], + "parentid": "./tests/test_pytest.py::test_param_13_markers", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped[x0]", + "name": "test_param_13_skipped[x0]", + "source": fix_path("./tests/test_pytest.py:149"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_skipped", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped[x1]", + "name": "test_param_13_skipped[x1]", + "source": fix_path("./tests/test_pytest.py:149"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_skipped", + }, + { + "id": "./tests/test_pytest.py::test_param_13_skipped[x2]", + "name": "test_param_13_skipped[x2]", + "source": fix_path("./tests/test_pytest.py:149"), + "markers": ["skip"], + "parentid": "./tests/test_pytest.py::test_param_13_skipped", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises[1-None]", + "name": "test_param_23_raises[1-None]", + "source": fix_path("./tests/test_pytest.py:155"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_raises", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises[1.0-None]", + "name": "test_param_23_raises[1.0-None]", + "source": fix_path("./tests/test_pytest.py:155"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_raises", + }, + { + "id": "./tests/test_pytest.py::test_param_23_raises[2-catch2]", + "name": "test_param_23_raises[2-catch2]", + "source": fix_path("./tests/test_pytest.py:155"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_23_raises", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_pytest.py:164"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam", + }, + { + "id": 
"./tests/test_pytest.py::TestParam::test_param_13[x0]", + "name": "test_param_13[x0]", + "source": fix_path("./tests/test_pytest.py:167"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_param_13[x1]", + "name": "test_param_13[x1]", + "source": fix_path("./tests/test_pytest.py:167"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParam::test_param_13[x2]", + "name": "test_param_13[x2]", + "source": fix_path("./tests/test_pytest.py:167"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParam::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13[x0]", + "name": "test_param_13[x0]", + "source": fix_path("./tests/test_pytest.py:175"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13[x1]", + "name": "test_param_13[x1]", + "source": fix_path("./tests/test_pytest.py:175"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_param_13[x2]", + "name": "test_param_13[x2]", + "source": fix_path("./tests/test_pytest.py:175"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_spam_13[x0]", + "name": "test_spam_13[x0]", + "source": fix_path("./tests/test_pytest.py:178"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_spam_13[x1]", + "name": "test_spam_13[x1]", + "source": fix_path("./tests/test_pytest.py:178"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest.py::TestParamAll::test_spam_13[x2]", + "name": "test_spam_13[x2]", + "source": fix_path("./tests/test_pytest.py:178"), + "markers": [], + "parentid": "./tests/test_pytest.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest.py::test_fixture", + "name": "test_fixture", + "source": fix_path("./tests/test_pytest.py:192"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_mark_fixture", + "name": "test_mark_fixture", + "source": fix_path("./tests/test_pytest.py:196"), + "markers": [], + "parentid": "./tests/test_pytest.py", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture[x0]", + "name": "test_param_fixture[x0]", + "source": fix_path("./tests/test_pytest.py:201"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture[x1]", + "name": "test_param_fixture[x1]", + "source": fix_path("./tests/test_pytest.py:201"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_fixture[x2]", + "name": "test_param_fixture[x2]", + "source": fix_path("./tests/test_pytest.py:201"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_mark_fixture[x0]", + "name": "test_param_mark_fixture[x0]", + "source": fix_path("./tests/test_pytest.py:207"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_mark_fixture", + }, + { + "id": 
"./tests/test_pytest.py::test_param_mark_fixture[x1]", + "name": "test_param_mark_fixture[x1]", + "source": fix_path("./tests/test_pytest.py:207"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_mark_fixture", + }, + { + "id": "./tests/test_pytest.py::test_param_mark_fixture[x2]", + "name": "test_param_mark_fixture[x2]", + "source": fix_path("./tests/test_pytest.py:207"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_param_mark_fixture", + }, + { + "id": "./tests/test_pytest.py::test_fixture_param[spam]", + "name": "test_fixture_param[spam]", + "source": fix_path("./tests/test_pytest.py:216"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_fixture_param", + }, + { + "id": "./tests/test_pytest.py::test_fixture_param[eggs]", + "name": "test_fixture_param[eggs]", + "source": fix_path("./tests/test_pytest.py:216"), + "markers": [], + "parentid": "./tests/test_pytest.py::test_fixture_param", + }, + ###### + { + "id": "./tests/test_pytest_param.py::test_param_13[x0]", + "name": "test_param_13[x0]", + "source": fix_path("./tests/test_pytest_param.py:8"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::test_param_13[x1]", + "name": "test_param_13[x1]", + "source": fix_path("./tests/test_pytest_param.py:8"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::test_param_13[x2]", + "name": "test_param_13[x2]", + "source": fix_path("./tests/test_pytest_param.py:8"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13[x0]", + "name": "test_param_13[x0]", + "source": fix_path("./tests/test_pytest_param.py:14"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13[x1]", + "name": "test_param_13[x1]", + "source": fix_path("./tests/test_pytest_param.py:14"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_param_13[x2]", + "name": "test_param_13[x2]", + "source": fix_path("./tests/test_pytest_param.py:14"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_param_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13[x0]", + "name": "test_spam_13[x0]", + "source": fix_path("./tests/test_pytest_param.py:17"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13[x1]", + "name": "test_spam_13[x1]", + "source": fix_path("./tests/test_pytest_param.py:17"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_spam_13", + }, + { + "id": "./tests/test_pytest_param.py::TestParamAll::test_spam_13[x2]", + "name": "test_spam_13[x2]", + "source": fix_path("./tests/test_pytest_param.py:17"), + "markers": [], + "parentid": "./tests/test_pytest_param.py::TestParamAll::test_spam_13", + }, + ###### + { + "id": "./tests/test_unittest.py::MyTests::test_dynamic_", + "name": "test_dynamic_", + "source": fix_path("./tests/test_unittest.py:54"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_failure", + "name": "test_failure", + "source": 
fix_path("./tests/test_unittest.py:34"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_known_failure", + "name": "test_known_failure", + "source": fix_path("./tests/test_unittest.py:37"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_maybe_not_skipped", + "name": "test_maybe_not_skipped", + "source": fix_path("./tests/test_unittest.py:17"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_maybe_skipped", + "name": "test_maybe_skipped", + "source": fix_path("./tests/test_unittest.py:13"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_unittest.py:6"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_skipped", + "name": "test_skipped", + "source": fix_path("./tests/test_unittest.py:9"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_skipped_inside", + "name": "test_skipped_inside", + "source": fix_path("./tests/test_unittest.py:21"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_with_nested_subtests", + "name": "test_with_nested_subtests", + "source": fix_path("./tests/test_unittest.py:46"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::MyTests::test_with_subtests", + "name": "test_with_subtests", + "source": fix_path("./tests/test_unittest.py:41"), + "markers": [], + "parentid": "./tests/test_unittest.py::MyTests", + }, + { + "id": "./tests/test_unittest.py::OtherTests::test_simple", + "name": "test_simple", + "source": fix_path("./tests/test_unittest.py:61"), + "markers": [], + "parentid": "./tests/test_unittest.py::OtherTests", + }, + ########### + { + "id": "./tests/v/test_eggs.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_eggs.py", + }, + { + "id": "./tests/v/test_eggs.py::TestSimple::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:8"), + "markers": [], + "parentid": "./tests/v/test_eggs.py::TestSimple", + }, + ###### + { + "id": "./tests/v/test_ham.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_ham.py", + }, + { + "id": "./tests/v/test_ham.py::test_not_hard", + "name": "test_not_hard", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_ham.py", + }, + ###### + { + "id": "./tests/v/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/v/spam.py:2"), + "markers": [], + "parentid": "./tests/v/test_spam.py", + }, + { + "id": "./tests/v/test_spam.py::test_simpler", + "name": "test_simpler", + "source": fix_path("./tests/v/test_spam.py:4"), + "markers": [], + "parentid": "./tests/v/test_spam.py", + }, + ########### + { + "id": "./tests/w/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/w/test_spam.py:4"), + "markers": [], + "parentid": "./tests/w/test_spam.py", + }, + { + "id": 
"./tests/w/test_spam_ex.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/w/test_spam_ex.py:4"), + "markers": [], + "parentid": "./tests/w/test_spam_ex.py", + }, + ########### + { + "id": "./tests/x/y/z/test_ham.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/x/y/z/test_ham.py:2"), + "markers": [], + "parentid": "./tests/x/y/z/test_ham.py", + }, + ###### + { + "id": "./tests/x/y/z/a/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/x/y/z/a/test_spam.py:11"), + "markers": [], + "parentid": "./tests/x/y/z/a/test_spam.py", + }, + { + "id": "./tests/x/y/z/b/test_spam.py::test_simple", + "name": "test_simple", + "source": fix_path("./tests/x/y/z/b/test_spam.py:7"), + "markers": [], + "parentid": "./tests/x/y/z/b/test_spam.py", + }, + ], +} diff --git a/pythonFiles/tests/unittestadapter/.data/discovery_empty.py b/pythonFiles/tests/unittestadapter/.data/discovery_empty.py new file mode 100644 index 000000000000..9af5071303ce --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/discovery_empty.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class DiscoveryEmpty(unittest.TestCase): + """Test class for the test_empty_discovery test. + + The discover_tests function should return a dictionary with a "success" status, no errors, and no test tree + if unittest discovery was performed successfully but no tests were found. + """ + + def something(self) -> bool: + return True diff --git a/pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py b/pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py new file mode 100644 index 000000000000..42f84f046760 --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/discovery_error/file_one.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + +import something_else # type: ignore + + +class DiscoveryErrorOne(unittest.TestCase): + """Test class for the test_error_discovery test. + + The discover_tests function should return a dictionary with an "error" status, the discovered tests, and a list of errors + if unittest discovery failed at some point. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/discovery_error/file_two.py b/pythonFiles/tests/unittestadapter/.data/discovery_error/file_two.py new file mode 100644 index 000000000000..5d6d54f886a1 --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/discovery_error/file_two.py @@ -0,0 +1,18 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class DiscoveryErrorTwo(unittest.TestCase): + """Test class for the test_error_discovery test. + + The discover_tests function should return a dictionary with an "error" status, the discovered tests, and a list of errors + if unittest discovery failed at some point. 
+ """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/discovery_simple.py b/pythonFiles/tests/unittestadapter/.data/discovery_simple.py new file mode 100644 index 000000000000..1859436d5b5b --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/discovery_simple.py @@ -0,0 +1,18 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class DiscoverySimple(unittest.TestCase): + """Test class for the test_simple_discovery test. + + The discover_tests function should return a dictionary with a "success" status, no errors, and a test tree + if unittest discovery was performed successfully. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/utils_decorated_tree.py b/pythonFiles/tests/unittestadapter/.data/utils_decorated_tree.py new file mode 100644 index 000000000000..90fdfc89a27b --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/utils_decorated_tree.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest +from functools import wraps + + +def my_decorator(f): + @wraps(f) + def wrapper(*args, **kwds): + print("Calling decorated function") + return f(*args, **kwds) + + return wrapper + + +class TreeOne(unittest.TestCase): + """Test class for the test_build_decorated_tree test. + + build_test_tree should build a test tree with these test cases. + """ + + @my_decorator + def test_one(self) -> None: + self.assertGreater(2, 1) + + @my_decorator + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/file_one.py b/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/file_one.py new file mode 100644 index 000000000000..84f7fefc4ebd --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/utils_nested_cases/file_one.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class CaseTwoFileOne(unittest.TestCase): + """Test class for the test_nested_test_cases test. + + get_test_case should return tests from the test suites in this folder. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/utils_simple_cases.py b/pythonFiles/tests/unittestadapter/.data/utils_simple_cases.py new file mode 100644 index 000000000000..fb3ae7eb7909 --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/utils_simple_cases.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class CaseOne(unittest.TestCase): + """Test class for the test_simple_test_cases test. + + get_test_case should return tests from the test suite. 
+ """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py b/pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py new file mode 100644 index 000000000000..6db51a4fd80b --- /dev/null +++ b/pythonFiles/tests/unittestadapter/.data/utils_simple_tree.py @@ -0,0 +1,17 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import unittest + + +class TreeOne(unittest.TestCase): + """Test class for the test_build_simple_tree test. + + build_test_tree should build a test tree with these test cases. + """ + + def test_one(self) -> None: + self.assertGreater(2, 1) + + def test_two(self) -> None: + self.assertNotEqual(2, 1) diff --git a/pythonFiles/tests/unittestadapter/test_discovery.py b/pythonFiles/tests/unittestadapter/test_discovery.py new file mode 100644 index 000000000000..7d7db772a4a4 --- /dev/null +++ b/pythonFiles/tests/unittestadapter/test_discovery.py @@ -0,0 +1,233 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +import os +import pathlib +from typing import List + +import pytest +from unittestadapter.discovery import discover_tests +from unittestadapter.utils import TestNodeTypeEnum, parse_unittest_args + +from . import expected_discovery_test_output +from .helpers import TEST_DATA_PATH, is_same_tree + + +@pytest.mark.parametrize( + "args, expected", + [ + ( + ["-s", "something", "-p", "other*", "-t", "else"], + ("something", "other*", "else", 1, None, None), + ), + ( + [ + "--start-directory", + "foo", + "--pattern", + "bar*", + "--top-level-directory", + "baz", + ], + ("foo", "bar*", "baz", 1, None, None), + ), + ( + ["--foo", "something"], + (".", "test*.py", None, 1, None, None), + ), + ( + ["--foo", "something", "-v"], + (".", "test*.py", None, 2, None, None), + ), + ( + ["--foo", "something", "-f"], + (".", "test*.py", None, 1, True, None), + ), + ( + ["--foo", "something", "--verbose", "-f"], + (".", "test*.py", None, 2, True, None), + ), + ( + ["--foo", "something", "-q", "--failfast"], + (".", "test*.py", None, 0, True, None), + ), + ( + ["--foo", "something", "--quiet"], + (".", "test*.py", None, 0, None, None), + ), + ( + ["--foo", "something", "--quiet", "--locals"], + (".", "test*.py", None, 0, None, True), + ), + ], +) +def test_parse_unittest_args(args: List[str], expected: List[str]) -> None: + """The parse_unittest_args function should return values for the start_dir, pattern, and top_level_dir arguments + when passed as command-line options, and ignore unrecognized arguments. + """ + actual = parse_unittest_args(args) + + assert actual == expected + + +def test_simple_discovery() -> None: + """The discover_tests function should return a dictionary with a "success" status, a uuid, no errors, and a test tree + if unittest discovery was performed successfully. 
+    """
+    start_dir = os.fsdecode(TEST_DATA_PATH)
+    pattern = "discovery_simple*"
+    file_path = os.fsdecode(pathlib.PurePath(TEST_DATA_PATH / "discovery_simple.py"))
+
+    expected = {
+        "path": start_dir,
+        "type_": TestNodeTypeEnum.folder,
+        "name": ".data",
+        "children": [
+            {
+                "name": "discovery_simple.py",
+                "type_": TestNodeTypeEnum.file,
+                "path": file_path,
+                "children": [
+                    {
+                        "name": "DiscoverySimple",
+                        "path": file_path,
+                        "type_": TestNodeTypeEnum.class_,
+                        "children": [
+                            {
+                                "name": "test_one",
+                                "path": file_path,
+                                "type_": TestNodeTypeEnum.test,
+                                "lineno": "14",
+                                "id_": file_path
+                                + "\\"
+                                + "DiscoverySimple"
+                                + "\\"
+                                + "test_one",
+                            },
+                            {
+                                "name": "test_two",
+                                "path": file_path,
+                                "type_": TestNodeTypeEnum.test,
+                                "lineno": "17",
+                                "id_": file_path
+                                + "\\"
+                                + "DiscoverySimple"
+                                + "\\"
+                                + "test_two",
+                            },
+                        ],
+                        "id_": file_path + "\\" + "DiscoverySimple",
+                    }
+                ],
+                "id_": file_path,
+            }
+        ],
+        "id_": start_dir,
+    }
+
+    uuid = "some-uuid"
+    actual = discover_tests(start_dir, pattern, None, uuid)
+
+    assert actual["status"] == "success"
+    assert is_same_tree(actual.get("tests"), expected)
+    assert "error" not in actual
+
+
+def test_empty_discovery() -> None:
+    """The discover_tests function should return a dictionary with a "success" status, a uuid, no errors, and no test tree
+    if unittest discovery was performed successfully but no tests were found.
+    """
+    start_dir = os.fsdecode(TEST_DATA_PATH)
+    pattern = "discovery_empty*"
+
+    uuid = "some-uuid"
+    actual = discover_tests(start_dir, pattern, None, uuid)
+
+    assert actual["status"] == "success"
+    assert "tests" in actual
+    assert "error" not in actual
+
+
+def test_error_discovery() -> None:
+    """The discover_tests function should return a dictionary with an "error" status, a uuid, the discovered tests, and a list of errors
+    if unittest discovery failed at some point.
+    """
+    # Discover tests in .data/discovery_error/.
+    start_path = pathlib.PurePath(TEST_DATA_PATH / "discovery_error")
+    start_dir = os.fsdecode(start_path)
+    pattern = "file*"
+
+    file_path = os.fsdecode(start_path / "file_two.py")
+
+    expected = {
+        "path": start_dir,
+        "type_": TestNodeTypeEnum.folder,
+        "name": "discovery_error",
+        "children": [
+            {
+                "name": "file_two.py",
+                "type_": TestNodeTypeEnum.file,
+                "path": file_path,
+                "children": [
+                    {
+                        "name": "DiscoveryErrorTwo",
+                        "path": file_path,
+                        "type_": TestNodeTypeEnum.class_,
+                        "children": [
+                            {
+                                "name": "test_one",
+                                "path": file_path,
+                                "type_": TestNodeTypeEnum.test,
+                                "lineno": "14",
+                                "id_": file_path
+                                + "\\"
+                                + "DiscoveryErrorTwo"
+                                + "\\"
+                                + "test_one",
+                            },
+                            {
+                                "name": "test_two",
+                                "path": file_path,
+                                "type_": TestNodeTypeEnum.test,
+                                "lineno": "17",
+                                "id_": file_path
+                                + "\\"
+                                + "DiscoveryErrorTwo"
+                                + "\\"
+                                + "test_two",
+                            },
+                        ],
+                        "id_": file_path + "\\" + "DiscoveryErrorTwo",
+                    }
+                ],
+                "id_": file_path,
+            }
+        ],
+        "id_": start_dir,
+    }
+
+    uuid = "some-uuid"
+    actual = discover_tests(start_dir, pattern, None, uuid)
+
+    assert actual["status"] == "error"
+    assert is_same_tree(expected, actual.get("tests"))
+    assert len(actual.get("error", [])) == 1
+
+
+def test_unit_skip() -> None:
+    """The discover_tests function should return a dictionary with a "success" status, a uuid, no errors, and a test tree
+    if unittest discovery was performed and found a test marked as skipped in one file and another file skipped at the file level.
+    """
+    start_dir = os.fsdecode(TEST_DATA_PATH / "unittest_skip")
+    pattern = "unittest_*"
+
+    uuid = "some-uuid"
+    actual = discover_tests(start_dir, pattern, None, uuid)
+
+    assert actual["status"] == "success"
+    assert "tests" in actual
+    assert is_same_tree(
+        actual.get("tests"),
+        expected_discovery_test_output.skip_unittest_folder_discovery_output,
+    )
+    assert "error" not in actual
diff --git a/pythonFiles/tests/unittestadapter/test_execution.py b/pythonFiles/tests/unittestadapter/test_execution.py
new file mode 100644
index 000000000000..7d11c656b57b
--- /dev/null
+++ b/pythonFiles/tests/unittestadapter/test_execution.py
@@ -0,0 +1,275 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+import os
+import pathlib
+import sys
+
+import pytest
+
+script_dir = pathlib.Path(__file__).parent.parent
+sys.path.insert(0, os.fspath(script_dir / "lib" / "python"))
+
+from unittestadapter.execution import run_tests
+
+TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data"
+
+
+def test_no_ids_run() -> None:
+    """This test runs on an empty array of test_ids, therefore it should return
+    an empty dict for the result.
+    """
+    start_dir: str = os.fspath(TEST_DATA_PATH)
+    testids = []
+    pattern = "discovery_simple*"
+    actual = run_tests(start_dir, testids, pattern, None, "fake-uuid", 1, None)
+    assert actual
+    assert all(item in actual for item in ("cwd", "status"))
+    assert actual["status"] == "success"
+    assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
+    if actual["result"] is not None:
+        assert len(actual["result"]) == 0
+    else:
+        raise AssertionError("actual['result'] is None")
+
+
+def test_single_ids_run() -> None:
+    """This test runs on a single test_id, therefore it should return
+    a dict with a single key-value pair for the result.
+
+    This single test passes so the outcome should be 'success'.
+    """
+    id = "discovery_simple.DiscoverySimple.test_one"
+    actual = run_tests(
+        os.fspath(TEST_DATA_PATH),
+        [id],
+        "discovery_simple*",
+        None,
+        "fake-uuid",
+        1,
+        None,
+    )
+    assert actual
+    assert all(item in actual for item in ("cwd", "status"))
+    assert actual["status"] == "success"
+    assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
+    assert actual["result"] is not None
+    result = actual["result"]
+    assert len(result) == 1
+    assert id in result
+    id_result = result[id]
+    assert id_result is not None
+    assert "outcome" in id_result
+    assert id_result["outcome"] == "success"
+
+
+def test_subtest_run() -> None:
+    """This test runs on the test_subtest file, which has a single method, test_even,
+    that uses unittest subtests.
+
+    The actual result of run should return a dict payload with 6 entries for the 6 subtests.
+ """ + id = "test_subtest.NumbersTest.test_even" + actual = run_tests( + os.fspath(TEST_DATA_PATH), + [id], + "test_subtest.py", + None, + "fake-uuid", + 1, + None, + ) + subtests_ids = [ + "test_subtest.NumbersTest.test_even (i=0)", + "test_subtest.NumbersTest.test_even (i=1)", + "test_subtest.NumbersTest.test_even (i=2)", + "test_subtest.NumbersTest.test_even (i=3)", + "test_subtest.NumbersTest.test_even (i=4)", + "test_subtest.NumbersTest.test_even (i=5)", + ] + assert actual + assert all(item in actual for item in ("cwd", "status")) + assert actual["status"] == "success" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH) + assert actual["result"] is not None + result = actual["result"] + assert len(result) == 6 + for id in subtests_ids: + assert id in result + + +@pytest.mark.parametrize( + "test_ids, pattern, cwd, expected_outcome", + [ + ( + [ + "test_add.TestAddFunction.test_add_negative_numbers", + "test_add.TestAddFunction.test_add_positive_numbers", + ], + "test_add.py", + os.fspath(TEST_DATA_PATH / "unittest_folder"), + "success", + ), + ( + [ + "test_add.TestAddFunction.test_add_negative_numbers", + "test_add.TestAddFunction.test_add_positive_numbers", + "test_subtract.TestSubtractFunction.test_subtract_negative_numbers", + "test_subtract.TestSubtractFunction.test_subtract_positive_numbers", + ], + "test*", + os.fspath(TEST_DATA_PATH / "unittest_folder"), + "success", + ), + ( + [ + "pattern_a_test.DiscoveryA.test_one_a", + "pattern_a_test.DiscoveryA.test_two_a", + ], + "*test", + os.fspath(TEST_DATA_PATH / "two_patterns"), + "success", + ), + ( + [ + "test_pattern_b.DiscoveryB.test_one_b", + "test_pattern_b.DiscoveryB.test_two_b", + ], + "test_*", + os.fspath(TEST_DATA_PATH / "two_patterns"), + "success", + ), + ( + [ + "file_one.CaseTwoFileOne.test_one", + "file_one.CaseTwoFileOne.test_two", + "folder.file_two.CaseTwoFileTwo.test_one", + "folder.file_two.CaseTwoFileTwo.test_two", + ], + "*", + os.fspath(TEST_DATA_PATH / "utils_nested_cases"), + "success", + ), + ( + [ + "test_two_classes.ClassOne.test_one", + "test_two_classes.ClassTwo.test_two", + ], + "test_two_classes.py", + os.fspath(TEST_DATA_PATH), + "success", + ), + ], +) +def test_multiple_ids_run(test_ids, pattern, cwd, expected_outcome) -> None: + """ + The following are all successful tests of different formats. + + # 1. Two tests with the `pattern` specified as a file + # 2. Two test files in the same folder called `unittest_folder` + # 3. A folder with two different test file patterns, this test gathers pattern `*test` + # 4. A folder with two different test file patterns, this test gathers pattern `test_*` + # 5. A nested structure where a test file is on the same level as a folder containing a test file + # 6. Test file with two test classes + + All tests should have the outcome of `success`. 
+    """
+    actual = run_tests(cwd, test_ids, pattern, None, "fake-uuid", 1, None)
+    assert actual
+    assert all(item in actual for item in ("cwd", "status"))
+    assert actual["status"] == "success"
+    assert actual["cwd"] == cwd
+    assert actual["result"] is not None
+    result = actual["result"]
+    assert len(result) == len(test_ids)
+    for test_id in test_ids:
+        assert test_id in result
+        id_result = result[test_id]
+        assert id_result is not None
+        assert "outcome" in id_result
+        assert id_result["outcome"] == expected_outcome
+    assert True
+
+
+def test_failed_tests():
+    """This test runs on a single file `test_fail_simple` with two tests that fail."""
+    test_ids = [
+        "test_fail_simple.RunFailSimple.test_one_fail",
+        "test_fail_simple.RunFailSimple.test_two_fail",
+    ]
+    actual = run_tests(
+        os.fspath(TEST_DATA_PATH),
+        test_ids,
+        "test_fail_simple*",
+        None,
+        "fake-uuid",
+        1,
+        None,
+    )
+    assert actual
+    assert all(item in actual for item in ("cwd", "status"))
+    assert actual["status"] == "success"
+    assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
+    assert actual["result"] is not None
+    result = actual["result"]
+    assert len(result) == len(test_ids)
+    for test_id in test_ids:
+        assert test_id in result
+        id_result = result[test_id]
+        assert id_result is not None
+        assert "outcome" in id_result
+        assert id_result["outcome"] == "failure"
+        assert "message" in id_result and "traceback" in id_result
+        assert "2 not greater than 3" in str(id_result["message"]) or "1 == 1" in str(
+            id_result["traceback"]
+        )
+    assert True
+
+
+def test_unknown_id():
+    """This test runs on an unknown test_id, therefore it should return
+    an error as the outcome when it attempts to find the given test.
+    """
+    test_ids = ["unknown_id"]
+    actual = run_tests(
+        os.fspath(TEST_DATA_PATH),
+        test_ids,
+        "test_fail_simple*",
+        None,
+        "fake-uuid",
+        1,
+        None,
+    )
+    assert actual
+    assert all(item in actual for item in ("cwd", "status"))
+    assert actual["status"] == "success"
+    assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
+    assert actual["result"] is not None
+    result = actual["result"]
+    assert len(result) == len(test_ids)
+    assert "unittest.loader._FailedTest.unknown_id" in result
+    id_result = result["unittest.loader._FailedTest.unknown_id"]
+    assert id_result is not None
+    assert "outcome" in id_result
+    assert id_result["outcome"] == "error"
+    assert "message" in id_result and "traceback" in id_result
+
+
+def test_incorrect_path():
+    """This test runs on a non-existent path, therefore it should return
+    an error as the outcome when it attempts to find the given folder.
+ """ + test_ids = ["unknown_id"] + actual = run_tests( + os.fspath(TEST_DATA_PATH / "unknown_folder"), + test_ids, + "test_fail_simple*", + None, + "fake-uuid", + 1, + None, + ) + assert actual + assert all(item in actual for item in ("cwd", "status", "error")) + assert actual["status"] == "error" + assert actual["cwd"] == os.fspath(TEST_DATA_PATH / "unknown_folder") diff --git a/src/client/api.ts b/src/client/api.ts index 81a5f676cc22..aaaba540af23 100644 --- a/src/client/api.ts +++ b/src/client/api.ts @@ -12,7 +12,7 @@ import { ILanguageServerOutputChannel } from './activation/types'; import { PythonExtension } from './api/types'; import { isTestExecution, PYTHON_LANGUAGE } from './common/constants'; import { IConfigurationService, Resource } from './common/types'; -import { getDebugpyLauncherArgs, getDebugpyPackagePath } from './debugger/extension/adapter/remoteLaunchers'; +import { getDebugpyLauncherArgs } from './debugger/extension/adapter/remoteLaunchers'; import { IInterpreterService } from './interpreter/contracts'; import { IServiceContainer, IServiceManager } from './ioc/types'; import { JupyterExtensionIntegration } from './jupyter/jupyterIntegration'; @@ -22,6 +22,7 @@ import { buildEnvironmentApi } from './environmentApi'; import { ApiForPylance } from './pylanceApi'; import { getTelemetryReporter } from './telemetry'; import { TensorboardExtensionIntegration } from './tensorBoard/tensorboardIntegration'; +import { getDebugpyPath } from './debugger/pythonDebugger'; export function buildApi( ready: Promise, @@ -122,7 +123,7 @@ export function buildApi( }); }, async getDebuggerPackagePath(): Promise { - return getDebugpyPackagePath(); + return getDebugpyPath(); }, }, settings: { diff --git a/src/client/debugger/extension/adapter/remoteLaunchers.ts b/src/client/debugger/extension/adapter/remoteLaunchers.ts index 80e0289e3ad8..0746cdc2328c 100644 --- a/src/client/debugger/extension/adapter/remoteLaunchers.ts +++ b/src/client/debugger/extension/adapter/remoteLaunchers.ts @@ -8,7 +8,7 @@ import { EXTENSION_ROOT_DIR } from '../../../common/constants'; import '../../../common/extensions'; const pathToPythonLibDir = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python'); -const pathToDebugger = path.join(pathToPythonLibDir, 'debugpy'); +// const pathToDebugger = path.join(pathToPythonLibDir, 'debugpy'); type RemoteDebugOptions = { host: string; @@ -25,7 +25,3 @@ export function getDebugpyLauncherArgs(options: RemoteDebugOptions, debuggerPath ...waitArgs, ]; } - -export function getDebugpyPackagePath(): string { - return pathToDebugger; -} diff --git a/src/client/debugger/pythonDebugger.ts b/src/client/debugger/pythonDebugger.ts new file mode 100644 index 000000000000..3450e95f3cee --- /dev/null +++ b/src/client/debugger/pythonDebugger.ts @@ -0,0 +1,30 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +import { extensions } from 'vscode'; + +interface IPythonDebuggerExtensionApi { + debug: { + getDebuggerPackagePath(): Promise; + }; +} + +async function activateExtension() { + const extension = extensions.getExtension('ms-python.debugpy'); + if (extension) { + if (!extension.isActive) { + await extension.activate(); + } + } + return extension; +} + +async function getPythonDebuggerExtensionAPI(): Promise { + const extension = await activateExtension(); + return extension?.exports as IPythonDebuggerExtensionApi; +} + +export async function getDebugpyPath(): Promise { + const api = await getPythonDebuggerExtensionAPI(); + return api?.debug.getDebuggerPackagePath() ?? ''; +} diff --git a/src/client/jupyter/jupyterIntegration.ts b/src/client/jupyter/jupyterIntegration.ts index ec030f1133f6..69583b744da9 100644 --- a/src/client/jupyter/jupyterIntegration.ts +++ b/src/client/jupyter/jupyterIntegration.ts @@ -11,7 +11,6 @@ import type { SemVer } from 'semver'; import { IContextKeyManager, IWorkspaceService } from '../common/application/types'; import { JUPYTER_EXTENSION_ID, PYLANCE_EXTENSION_ID } from '../common/constants'; import { GLOBAL_MEMENTO, IExtensions, IMemento, Resource } from '../common/types'; -import { getDebugpyPackagePath } from '../debugger/extension/adapter/remoteLaunchers'; import { IEnvironmentActivationService } from '../interpreter/activation/types'; import { IInterpreterQuickPickItem, IInterpreterSelector } from '../interpreter/configuration/types'; import { @@ -22,6 +21,7 @@ import { } from '../interpreter/contracts'; import { PylanceApi } from '../activation/node/pylanceApi'; import { ExtensionContextKey } from '../common/application/contextKeys'; +import { getDebugpyPath } from '../debugger/pythonDebugger'; import type { Environment } from '../api/types'; type PythonApiForJupyterExtension = { @@ -110,7 +110,7 @@ export class JupyterExtensionIntegration { this.interpreterSelector.getAllSuggestions(resource), getKnownSuggestions: (resource: Resource): IInterpreterQuickPickItem[] => this.interpreterSelector.getSuggestions(resource), - getDebuggerPath: async () => dirname(getDebugpyPackagePath()), + getDebuggerPath: async () => dirname(await getDebugpyPath()), getInterpreterPathSelectedForJupyterServer: () => this.globalState.get('INTERPRETER_PATH_SELECTED_FOR_JUPYTER_SERVER'), registerInterpreterStatusFilter: this.interpreterDisplay.registerVisibilityFilter.bind( diff --git a/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts b/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts index 2a75f6316a09..2531ce83191e 100644 --- a/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts +++ b/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts @@ -51,11 +51,11 @@ suite('External debugpy Debugger Launcher', () => { }); }); -suite('Path To Debugger Package', () => { - const pathToPythonLibDir = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python'); - test('Path to debugpy debugger package', () => { - const actual = launchers.getDebugpyPackagePath(); - const expected = path.join(pathToPythonLibDir, 'debugpy'); - expect(actual).to.be.deep.equal(expected); - }); -}); +// suite('Path To Debugger Package', () => { +// const pathToPythonLibDir = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python'); +// test('Path to debugpy debugger package', () => { +// const actual = launchers.getDebugpyPackagePath(); +// const expected = path.join(pathToPythonLibDir, 'debugpy'); +// expect(actual).to.be.deep.equal(expected); +// }); 
+// }); diff --git a/src/test/pythonFiles/dummy.py b/src/test/pythonFiles/dummy.py new file mode 100644 index 000000000000..10f13768abe0 --- /dev/null +++ b/src/test/pythonFiles/dummy.py @@ -0,0 +1 @@ +#dummy file to be opened by Test VS Code instance, so that Python Configuration (workspace configuration will be initialized) \ No newline at end of file From e8297ed3377b603a677eefc72e7419f3d1d733de Mon Sep 17 00:00:00 2001 From: Paula Camargo Date: Tue, 9 Apr 2024 13:46:11 -0700 Subject: [PATCH 04/11] fix tests --- src/client/debugger/extension/adapter/factory.ts | 9 +++------ .../debugger/extension/adapter/remoteLaunchers.ts | 12 ++++++------ src/test/debugger/extension/adapter/adapter.test.ts | 4 ++-- .../extension/adapter/remoteLaunchers.unit.test.ts | 2 +- src/test/debugger/utils.ts | 10 +++++----- 5 files changed, 17 insertions(+), 20 deletions(-) diff --git a/src/client/debugger/extension/adapter/factory.ts b/src/client/debugger/extension/adapter/factory.ts index cfc8cf91aba3..e02810f7d3a1 100644 --- a/src/client/debugger/extension/adapter/factory.ts +++ b/src/client/debugger/extension/adapter/factory.ts @@ -26,6 +26,7 @@ import { Common, Interpreters } from '../../../common/utils/localize'; import { IPersistentStateFactory } from '../../../common/types'; import { Commands } from '../../../common/constants'; import { ICommandManager } from '../../../common/application/types'; +import { getDebugpyPath } from '../../pythonDebugger'; // persistent state names, exported to make use of in testing export enum debugStateKeys { @@ -90,13 +91,9 @@ export class DebugAdapterDescriptorFactory implements IDebugAdapterDescriptorFac traceLog(`DAP Server launched with command: ${executable} ${args.join(' ')}`); return new DebugAdapterExecutable(executable, args); } - + const debugpyPath = await getDebugpyPath() const debuggerAdapterPathToUse = path.join( - EXTENSION_ROOT_DIR, - 'python_files', - 'lib', - 'python', - 'debugpy', + debugpyPath, 'adapter', ); diff --git a/src/client/debugger/extension/adapter/remoteLaunchers.ts b/src/client/debugger/extension/adapter/remoteLaunchers.ts index 0746cdc2328c..af79f2c64578 100644 --- a/src/client/debugger/extension/adapter/remoteLaunchers.ts +++ b/src/client/debugger/extension/adapter/remoteLaunchers.ts @@ -3,12 +3,8 @@ 'use strict'; -import * as path from 'path'; -import { EXTENSION_ROOT_DIR } from '../../../common/constants'; import '../../../common/extensions'; - -const pathToPythonLibDir = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python'); -// const pathToDebugger = path.join(pathToPythonLibDir, 'debugpy'); +import { getDebugpyPath } from '../../pythonDebugger'; type RemoteDebugOptions = { host: string; @@ -16,7 +12,11 @@ type RemoteDebugOptions = { waitUntilDebuggerAttaches: boolean; }; -export function getDebugpyLauncherArgs(options: RemoteDebugOptions, debuggerPath: string = pathToDebugger) { +export async function getDebugpyLauncherArgs(options: RemoteDebugOptions, debuggerPath?: string) { + if (!debuggerPath){ + debuggerPath = await getDebugpyPath(); + } + const waitArgs = options.waitUntilDebuggerAttaches ? 
['--wait-for-client'] : []; return [ debuggerPath.fileToCommandArgumentForPythonExt(), diff --git a/src/test/debugger/extension/adapter/adapter.test.ts b/src/test/debugger/extension/adapter/adapter.test.ts index 2f60290897af..dd0e9d560bca 100644 --- a/src/test/debugger/extension/adapter/adapter.test.ts +++ b/src/test/debugger/extension/adapter/adapter.test.ts @@ -70,7 +70,7 @@ suite('Debugger Integration', () => { } const [configName, scriptArgs] = tests[kind]; test(kind, async () => { - const session = fix.resolveDebugger(configName, file, scriptArgs, workspaceRoot); + const session = await fix.resolveDebugger(configName, file, scriptArgs, workspaceRoot); await session.start(); // Any debugger ops would go here. await new Promise((r) => setTimeout(r, 300)); // 0.3 seconds @@ -93,7 +93,7 @@ suite('Debugger Integration', () => { } const [configName, scriptArgs] = tests[kind]; test(kind, async () => { - const session = fix.resolveDebugger(configName, file, scriptArgs, workspaceRoot); + const session = await fix.resolveDebugger(configName, file, scriptArgs, workspaceRoot); const bp = session.addBreakpoint(file, 21); // line: "time.sleep()" await session.start(); await session.waitForBreakpoint(bp); diff --git a/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts b/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts index 2531ce83191e..dfce1aafc63a 100644 --- a/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts +++ b/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts @@ -5,7 +5,7 @@ import { expect } from 'chai'; import * as path from 'path'; -import { EXTENSION_ROOT_DIR } from '../../../../client/common/constants'; +// import { EXTENSION_ROOT_DIR } from '../../../../client/common/constants'; import '../../../../client/common/extensions'; import * as launchers from '../../../../client/debugger/extension/adapter/remoteLaunchers'; diff --git a/src/test/debugger/utils.ts b/src/test/debugger/utils.ts index 4a41489940b8..749adb359597 100644 --- a/src/test/debugger/utils.ts +++ b/src/test/debugger/utils.ts @@ -277,12 +277,12 @@ class DebuggerSession { } export class DebuggerFixture extends PythonFixture { - public resolveDebugger( + public async resolveDebugger( configName: string, file: string, scriptArgs: string[], wsRoot?: vscode.WorkspaceFolder, - ): DebuggerSession { + ): Promise { const config = getConfig(configName); let proc: Proc | undefined; if (config.request === 'launch') { @@ -292,7 +292,7 @@ export class DebuggerFixture extends PythonFixture { // XXX set the file in the current vscode editor? } else if (config.request === 'attach') { if (config.port) { - proc = this.runDebugger(config.port, file, ...scriptArgs); + proc = await this.runDebugger(config.port, file, ...scriptArgs); if (wsRoot && config.name === 'attach to a local port') { config.pathMappings.localRoot = wsRoot.uri.fsPath; } @@ -352,8 +352,8 @@ export class DebuggerFixture extends PythonFixture { } } - public runDebugger(port: number, filename: string, ...scriptArgs: string[]) { - const args = getDebugpyLauncherArgs({ + public async runDebugger(port: number, filename: string, ...scriptArgs: string[]) { + const args = await getDebugpyLauncherArgs({ host: 'localhost', port: port, // This causes problems if we set it to true. 
From b1a0536605536e8607a53d5adfacec7955938813 Mon Sep 17 00:00:00 2001 From: Paula Camargo Date: Tue, 9 Apr 2024 15:31:20 -0700 Subject: [PATCH 05/11] fix lint --- src/client/debugger/extension/adapter/factory.ts | 7 ++----- src/client/debugger/extension/adapter/remoteLaunchers.ts | 2 +- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/client/debugger/extension/adapter/factory.ts b/src/client/debugger/extension/adapter/factory.ts index e02810f7d3a1..b955cddaff01 100644 --- a/src/client/debugger/extension/adapter/factory.ts +++ b/src/client/debugger/extension/adapter/factory.ts @@ -91,11 +91,8 @@ export class DebugAdapterDescriptorFactory implements IDebugAdapterDescriptorFac traceLog(`DAP Server launched with command: ${executable} ${args.join(' ')}`); return new DebugAdapterExecutable(executable, args); } - const debugpyPath = await getDebugpyPath() - const debuggerAdapterPathToUse = path.join( - debugpyPath, - 'adapter', - ); + const debugpyPath = await getDebugpyPath(); + const debuggerAdapterPathToUse = path.join(debugpyPath, 'adapter'); const args = command.concat([debuggerAdapterPathToUse, ...logArgs]); traceLog(`DAP Server launched with command: ${executable} ${args.join(' ')}`); diff --git a/src/client/debugger/extension/adapter/remoteLaunchers.ts b/src/client/debugger/extension/adapter/remoteLaunchers.ts index af79f2c64578..f68f747a8a8c 100644 --- a/src/client/debugger/extension/adapter/remoteLaunchers.ts +++ b/src/client/debugger/extension/adapter/remoteLaunchers.ts @@ -13,7 +13,7 @@ type RemoteDebugOptions = { }; export async function getDebugpyLauncherArgs(options: RemoteDebugOptions, debuggerPath?: string) { - if (!debuggerPath){ + if (!debuggerPath) { debuggerPath = await getDebugpyPath(); } From 69142b7abb37620ed48dbd85745c95bba0d9c731 Mon Sep 17 00:00:00 2001 From: Paula Camargo Date: Tue, 9 Apr 2024 17:10:24 -0700 Subject: [PATCH 06/11] fix unit tests --- .../extension/adapter/factory.unit.test.ts | 8 ++++++-- .../extension/adapter/remoteLaunchers.unit.test.ts | 14 ++------------ 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/src/test/debugger/extension/adapter/factory.unit.test.ts b/src/test/debugger/extension/adapter/factory.unit.test.ts index 172c2715b086..949036a3a0ca 100644 --- a/src/test/debugger/extension/adapter/factory.unit.test.ts +++ b/src/test/debugger/extension/adapter/factory.unit.test.ts @@ -28,6 +28,7 @@ import * as windowApis from '../../../../client/common/vscodeApis/windowApis'; import { PersistentState, PersistentStateFactory } from '../../../../client/common/persistentState'; import { ICommandManager } from '../../../../client/common/application/types'; import { CommandManager } from '../../../../client/common/application/commandManager'; +import * as pythonDebugger from '../../../../client/debugger/pythonDebugger'; use(chaiAsPromised); @@ -39,9 +40,11 @@ suite('Debugging - Adapter Factory', () => { let showErrorMessageStub: sinon.SinonStub; let readJSONSyncStub: sinon.SinonStub; let commandManager: ICommandManager; + let getDebugpyPathStub: sinon.SinonStub; const nodeExecutable = undefined; - const debugAdapterPath = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python', 'debugpy', 'adapter'); + const debugpyPath = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python', 'debugpy') + const debugAdapterPath = path.join(debugpyPath, 'adapter'); const pythonPath = path.join('path', 'to', 'python', 'interpreter'); const interpreter = { architecture: Architecture.Unknown, @@ -75,7 +78,8 @@ suite('Debugging - 
Adapter Factory', () => { stateFactory = mock(PersistentStateFactory); state = mock(PersistentState) as PersistentState; commandManager = mock(CommandManager); - + getDebugpyPathStub = sinon.stub(pythonDebugger, 'getDebugpyPath'); + getDebugpyPathStub.resolves(debugpyPath); showErrorMessageStub = sinon.stub(windowApis, 'showErrorMessage'); when( diff --git a/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts b/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts index dfce1aafc63a..e8e2cbd5d15d 100644 --- a/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts +++ b/src/test/debugger/extension/adapter/remoteLaunchers.unit.test.ts @@ -5,7 +5,6 @@ import { expect } from 'chai'; import * as path from 'path'; -// import { EXTENSION_ROOT_DIR } from '../../../../client/common/constants'; import '../../../../client/common/extensions'; import * as launchers from '../../../../client/debugger/extension/adapter/remoteLaunchers'; @@ -24,7 +23,7 @@ suite('External debugpy Debugger Launcher', () => { ].forEach((testParams) => { suite(testParams.testName, async () => { test('Test remote debug launcher args (and do not wait for debugger to attach)', async () => { - const args = launchers.getDebugpyLauncherArgs( + const args = await launchers.getDebugpyLauncherArgs( { host: 'something', port: 1234, @@ -36,7 +35,7 @@ suite('External debugpy Debugger Launcher', () => { expect(args).to.be.deep.equal(expectedArgs); }); test('Test remote debug launcher args (and wait for debugger to attach)', async () => { - const args = launchers.getDebugpyLauncherArgs( + const args = await launchers.getDebugpyLauncherArgs( { host: 'something', port: 1234, @@ -50,12 +49,3 @@ suite('External debugpy Debugger Launcher', () => { }); }); }); - -// suite('Path To Debugger Package', () => { -// const pathToPythonLibDir = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python'); -// test('Path to debugpy debugger package', () => { -// const actual = launchers.getDebugpyPackagePath(); -// const expected = path.join(pathToPythonLibDir, 'debugpy'); -// expect(actual).to.be.deep.equal(expected); -// }); -// }); From 63f8a5a5a09269aa19eb554764df04e981c1199e Mon Sep 17 00:00:00 2001 From: Paula Camargo Date: Tue, 9 Apr 2024 18:00:52 -0700 Subject: [PATCH 07/11] format-fix --- src/test/debugger/extension/adapter/factory.unit.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/debugger/extension/adapter/factory.unit.test.ts b/src/test/debugger/extension/adapter/factory.unit.test.ts index 949036a3a0ca..6204bb835479 100644 --- a/src/test/debugger/extension/adapter/factory.unit.test.ts +++ b/src/test/debugger/extension/adapter/factory.unit.test.ts @@ -43,7 +43,7 @@ suite('Debugging - Adapter Factory', () => { let getDebugpyPathStub: sinon.SinonStub; const nodeExecutable = undefined; - const debugpyPath = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python', 'debugpy') + const debugpyPath = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python', 'debugpy'); const debugAdapterPath = path.join(debugpyPath, 'adapter'); const pythonPath = path.join('path', 'to', 'python', 'interpreter'); const interpreter = { From 7e8000547283049595d61450c4731ca89fee27ba Mon Sep 17 00:00:00 2001 From: Paula Camargo Date: Wed, 10 Apr 2024 12:47:54 -0700 Subject: [PATCH 08/11] fix functional test --- src/test/api.functional.test.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/test/api.functional.test.ts b/src/test/api.functional.test.ts index 
851d56c00e07..eea0fb920b15 100644 --- a/src/test/api.functional.test.ts +++ b/src/test/api.functional.test.ts @@ -5,6 +5,7 @@ import { assert, expect } from 'chai'; import * as path from 'path'; +import * as sinon from 'sinon'; import { instance, mock, when } from 'ts-mockito'; import { buildApi } from '../client/api'; import { ConfigurationService } from '../client/common/configuration/service'; @@ -17,6 +18,7 @@ import { ServiceContainer } from '../client/ioc/container'; import { ServiceManager } from '../client/ioc/serviceManager'; import { IServiceContainer, IServiceManager } from '../client/ioc/types'; import { IDiscoveryAPI } from '../client/pythonEnvironments/base/locator'; +import * as pythonDebugger from '../client/debugger/pythonDebugger'; suite('Extension API', () => { const debuggerPath = path.join(EXTENSION_ROOT_DIR, 'python_files', 'lib', 'python', 'debugpy'); @@ -29,6 +31,7 @@ suite('Extension API', () => { let interpreterService: IInterpreterService; let discoverAPI: IDiscoveryAPI; let environmentVariablesProvider: IEnvironmentVariablesProvider; + let getDebugpyPathStub: sinon.SinonStub; setup(() => { serviceContainer = mock(ServiceContainer); @@ -47,6 +50,12 @@ suite('Extension API', () => { ); when(serviceContainer.get(IInterpreterService)).thenReturn(instance(interpreterService)); when(serviceContainer.get(IDisposableRegistry)).thenReturn([]); + getDebugpyPathStub = sinon.stub(pythonDebugger, 'getDebugpyPath'); + getDebugpyPathStub.resolves(debuggerPath); + }); + + teardown(() => { + sinon.restore(); }); test('Test debug launcher args (no-wait)', async () => { From cf4fea981aa9595f92eef97dd16ae86e4f223bd6 Mon Sep 17 00:00:00 2001 From: Paula Camargo Date: Wed, 10 Apr 2024 15:27:14 -0700 Subject: [PATCH 09/11] Add error when debugpy is empty --- src/client/debugger/extension/adapter/factory.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/client/debugger/extension/adapter/factory.ts b/src/client/debugger/extension/adapter/factory.ts index b955cddaff01..546414699971 100644 --- a/src/client/debugger/extension/adapter/factory.ts +++ b/src/client/debugger/extension/adapter/factory.ts @@ -15,7 +15,7 @@ import { } from 'vscode'; import { EXTENSION_ROOT_DIR } from '../../../constants'; import { IInterpreterService } from '../../../interpreter/contracts'; -import { traceLog, traceVerbose } from '../../../logging'; +import { traceError, traceLog, traceVerbose } from '../../../logging'; import { PythonEnvironment } from '../../../pythonEnvironments/info'; import { sendTelemetryEvent } from '../../../telemetry'; import { EventName } from '../../../telemetry/constants'; @@ -92,6 +92,10 @@ export class DebugAdapterDescriptorFactory implements IDebugAdapterDescriptorFac return new DebugAdapterExecutable(executable, args); } const debugpyPath = await getDebugpyPath(); + if (!debugpyPath) { + traceError('Could not find debugpy path.'); + throw new Error('Could not find debugpy path.'); + } const debuggerAdapterPathToUse = path.join(debugpyPath, 'adapter'); const args = command.concat([debuggerAdapterPathToUse, ...logArgs]); From fe82fea007613f72e493ec55b43a24c006b82b2a Mon Sep 17 00:00:00 2001 From: Paula Camargo Date: Wed, 10 Apr 2024 16:11:59 -0700 Subject: [PATCH 10/11] remove debugpy installation --- .github/ISSUE_TEMPLATE/config.yml | 6 +- .github/actions/build-vsix/action.yml | 3 +- .github/actions/smoke-tests/action.yml | 1 - .github/workflows/build.yml | 14 ---- .github/workflows/pr-check.yml | 19 ------ build/azure-pipeline.pre-release.yml | 3 +- 
build/azure-pipeline.stable.yml | 3 +- noxfile.py | 7 -- python_files/install_debugpy.py | 66 ------------------- python_files/pyproject.toml | 2 - python_files/tests/debug_adapter/__init__.py | 2 - .../debug_adapter/test_install_debugpy.py | 25 ------- 12 files changed, 6 insertions(+), 145 deletions(-) delete mode 100644 python_files/install_debugpy.py delete mode 100644 python_files/tests/debug_adapter/__init__.py delete mode 100644 python_files/tests/debug_adapter/test_install_debugpy.py diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index eaacc33b8d8d..c966f6bde856 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -9,9 +9,9 @@ contact_links: - name: 'Jupyter' url: https://github.com/microsoft/vscode-jupyter/issues about: 'For issues relating to the Jupyter extension (including the interactive window)' - - name: 'Debugpy' - url: https://github.com/microsoft/debugpy/issues - about: 'For issues relating to the debugpy debugger' + - name: 'Python Debugger' + url: https://github.com/microsoft/vscode-python-debugger/issues + about: 'For issues relating to the Python debugger' - name: Help/Support url: https://github.com/microsoft/vscode-python/discussions/categories/q-a about: 'Having trouble with the extension? Need help getting something to work?' diff --git a/.github/actions/build-vsix/action.yml b/.github/actions/build-vsix/action.yml index 5b8a569889a8..5eb05e6175d9 100644 --- a/.github/actions/build-vsix/action.yml +++ b/.github/actions/build-vsix/action.yml @@ -46,10 +46,9 @@ runs: with: options: '-t ./python_files/lib/python --implementation py' - - name: Install debugpy and get-pip + - name: Get-pip run: | python -m pip --disable-pip-version-check install packaging - python ./python_files/install_debugpy.py python ./python_files/download_get_pip.py shell: bash diff --git a/.github/actions/smoke-tests/action.yml b/.github/actions/smoke-tests/action.yml index 7eaa2483942a..cc2912115176 100644 --- a/.github/actions/smoke-tests/action.yml +++ b/.github/actions/smoke-tests/action.yml @@ -39,7 +39,6 @@ runs: - name: pip install system test requirements run: | python -m pip install --upgrade -r build/test-requirements.txt - python -m pip --disable-pip-version-check install -t ./python_files/lib/python --implementation py --no-deps --upgrade --pre debugpy shell: bash # Bits from the VSIX are reused by smokeTest.ts to speed things up. diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1791e9797fa7..928a679dc1a9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -97,7 +97,6 @@ jobs: - name: Install other Python requirements run: | - python -m pip --disable-pip-version-check install -t ./python_files/lib/python --no-cache-dir --implementation py --no-deps --upgrade --pre debugpy python -m pip install --upgrade -r build/test-requirements.txt - name: Run Pyright @@ -195,11 +194,6 @@ jobs: python ./python_files/download_get_pip.py shell: bash - - name: Install debugpy - run: | - # We need to have debugpy so that tests relying on it keep passing, but we don't need install_debugpy's logic in the test phase. 
- python -m pip --disable-pip-version-check install -t ./python_files/lib/python --no-cache-dir --implementation py --no-deps --upgrade --pre debugpy - - name: Install core Python requirements uses: brettcannon/pip-secure-install@v1 with: @@ -217,14 +211,6 @@ jobs: - name: Install test requirements run: python -m pip install --upgrade -r build/test-requirements.txt - - name: Install debugpy wheels (Python ${{ matrix.python }}) - run: | - python -m pip install wheel - python -m pip install -r build/build-install-requirements.txt - python ./python_files/install_debugpy.py - shell: bash - if: matrix.test-suite == 'debugger' - - name: Install functional test requirements run: python -m pip install --upgrade -r ./build/functional-test-requirements.txt if: matrix.test-suite == 'functional' diff --git a/.github/workflows/pr-check.yml b/.github/workflows/pr-check.yml index 46c061d3cc1c..a5a94ddd1d5d 100644 --- a/.github/workflows/pr-check.yml +++ b/.github/workflows/pr-check.yml @@ -71,7 +71,6 @@ jobs: - name: Install other Python requirements run: | - python -m pip --disable-pip-version-check install -t ./python_files/lib/python --no-cache-dir --implementation py --no-deps --upgrade --pre debugpy python -m pip install --upgrade -r build/test-requirements.txt - name: Run Pyright @@ -176,11 +175,6 @@ jobs: with: python-version: ${{ matrix.python }} - - name: Install debugpy - run: | - # We need to have debugpy so that tests relying on it keep passing, but we don't need install_debugpy's logic in the test phase. - python -m pip --disable-pip-version-check install -t ./python_files/lib/python --no-cache-dir --implementation py --no-deps --upgrade --pre debugpy - - name: Download get-pip.py run: | python -m pip install wheel @@ -203,14 +197,6 @@ jobs: - name: Install test requirements run: python -m pip install --upgrade -r build/test-requirements.txt - - name: Install debugpy wheels (Python ${{ matrix.python }}) - run: | - python -m pip install wheel - python -m pip --disable-pip-version-check install -r build/build-install-requirements.txt - python ./python_files/install_debugpy.py - shell: bash - if: matrix.test-suite == 'debugger' - - name: Install functional test requirements run: python -m pip install --upgrade -r ./build/functional-test-requirements.txt if: matrix.test-suite == 'functional' @@ -408,11 +394,6 @@ jobs: requirements-file: './python_files/jedilsp_requirements/requirements.txt' options: '-t ./python_files/lib/jedilsp --implementation py' - - name: Install debugpy - run: | - # We need to have debugpy so that tests relying on it keep passing, but we don't need install_debugpy's logic in the test phase. 
- python -m pip --disable-pip-version-check install -t ./python_files/lib/python --implementation py --no-deps --upgrade --pre debugpy - - name: Install test requirements run: python -m pip install --upgrade -r build/test-requirements.txt diff --git a/build/azure-pipeline.pre-release.yml b/build/azure-pipeline.pre-release.yml index bee373845e2c..1b7b7a614010 100644 --- a/build/azure-pipeline.pre-release.yml +++ b/build/azure-pipeline.pre-release.yml @@ -54,9 +54,8 @@ extends: - script: | python -m pip --disable-pip-version-check install -r build/build-install-requirements.txt - python ./python_files/install_debugpy.py python ./python_files/download_get_pip.py - displayName: Install debugpy and get-pip.py + displayName: Get-pip.py - script: | python -m pip install --no-deps --require-hashes --only-binary :all: -t ./python_files/lib/python --implementation py -r ./requirements.txt diff --git a/build/azure-pipeline.stable.yml b/build/azure-pipeline.stable.yml index 754a820e36e1..05e284ec92eb 100644 --- a/build/azure-pipeline.stable.yml +++ b/build/azure-pipeline.stable.yml @@ -49,9 +49,8 @@ extends: - script: | python -m pip --disable-pip-version-check install -r build/build-install-requirements.txt - python ./python_files/install_debugpy.py python ./python_files/download_get_pip.py - displayName: Install debugpy and get-pip.py + displayName: Get-pip.py - script: | python -m pip install --no-deps --require-hashes --only-binary :all: -t ./python_files/lib/python --implementation py -r ./requirements.txt diff --git a/noxfile.py b/noxfile.py index aa6d0253c660..7eb2da93cfe3 100644 --- a/noxfile.py +++ b/noxfile.py @@ -32,13 +32,6 @@ def install_python_libs(session: nox.Session): session.install("packaging") - # Install debugger - session.run( - "python", - "./python_files/install_debugpy.py", - env={"PYTHONPATH": "./python_files/lib/temp"}, - ) - # Download get-pip script session.run( "python", diff --git a/python_files/install_debugpy.py b/python_files/install_debugpy.py deleted file mode 100644 index e38ca82230c9..000000000000 --- a/python_files/install_debugpy.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. - -import io -import json -import os -import urllib.request as url_lib -import zipfile - -from packaging.version import parse as version_parser - -EXTENSION_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -DEBUGGER_DEST = os.path.join(EXTENSION_ROOT, "python_files", "lib", "python") -DEBUGGER_PACKAGE = "debugpy" -DEBUGGER_PYTHON_ABI_VERSIONS = ("cp310",) -DEBUGGER_VERSION = "1.8.1" # can also be "latest" - - -def _contains(s, parts=()): - return any(p in s for p in parts) - - -def _get_package_data(): - json_uri = "https://pypi.org/pypi/{0}/json".format(DEBUGGER_PACKAGE) - # Response format: https://warehouse.readthedocs.io/api-reference/json/#project - # Release metadata format: https://github.com/pypa/interoperability-peps/blob/master/pep-0426-core-metadata.rst - with url_lib.urlopen(json_uri) as response: - return json.loads(response.read()) - - -def _get_debugger_wheel_urls(data, version): - return list( - r["url"] - for r in data["releases"][version] - if _contains(r["url"], DEBUGGER_PYTHON_ABI_VERSIONS) - ) - - -def _download_and_extract(root, url, version): - root = os.getcwd() if root is None or root == "." 
else root - print(url) - with url_lib.urlopen(url) as response: - data = response.read() - with zipfile.ZipFile(io.BytesIO(data), "r") as wheel: - for zip_info in wheel.infolist(): - # Ignore dist info since we are merging multiple wheels - if ".dist-info/" in zip_info.filename: - continue - print("\t" + zip_info.filename) - wheel.extract(zip_info.filename, root) - - -def main(root): - data = _get_package_data() - - if DEBUGGER_VERSION == "latest": - use_version = max(data["releases"].keys(), key=version_parser) - else: - use_version = DEBUGGER_VERSION - - for url in _get_debugger_wheel_urls(data, use_version): - _download_and_extract(root, url, use_version) - - -if __name__ == "__main__": - main(DEBUGGER_DEST) diff --git a/python_files/pyproject.toml b/python_files/pyproject.toml index 9e8f55910e70..0f1b0f466940 100644 --- a/python_files/pyproject.toml +++ b/python_files/pyproject.toml @@ -16,7 +16,6 @@ extraPaths = ['lib/python', 'lib/jedilsp'] ignore = [ # Ignore all pre-existing code with issues 'get-pip.py', - 'install_debugpy.py', 'tensorboard_launcher.py', 'testlauncher.py', 'visualstudio_py_testlauncher.py', @@ -24,7 +23,6 @@ ignore = [ 'testing_tools/adapter/util.py', 'testing_tools/adapter/pytest/_discovery.py', 'testing_tools/adapter/pytest/_pytest_item.py', - 'tests/debug_adapter/test_install_debugpy.py', 'tests/testing_tools/adapter/.data', 'tests/testing_tools/adapter/test___main__.py', 'tests/testing_tools/adapter/test_discovery.py', diff --git a/python_files/tests/debug_adapter/__init__.py b/python_files/tests/debug_adapter/__init__.py deleted file mode 100644 index 5b7f7a925cc0..000000000000 --- a/python_files/tests/debug_adapter/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. diff --git a/python_files/tests/debug_adapter/test_install_debugpy.py b/python_files/tests/debug_adapter/test_install_debugpy.py deleted file mode 100644 index f72e1089aaab..000000000000 --- a/python_files/tests/debug_adapter/test_install_debugpy.py +++ /dev/null @@ -1,25 +0,0 @@ -import os - - -def _check_binaries(dir_path): - expected_endswith = ( - "win_amd64.pyd", - "win32.pyd", - "darwin.so", - "x86_64-linux-gnu.so", - ) - - binaries = list(p for p in os.listdir(dir_path) if p.endswith(expected_endswith)) - - assert len(binaries) == len(expected_endswith) - - -def test_install_debugpy(tmpdir): - import install_debugpy - - install_debugpy.main(str(tmpdir)) - dir_path = os.path.join(str(tmpdir), "debugpy", "_vendored", "pydevd", "_pydevd_bundle") - _check_binaries(dir_path) - - dir_path = os.path.join(str(tmpdir), "debugpy", "_vendored", "pydevd", "_pydevd_frame_eval") - _check_binaries(dir_path) From 967718e3afce4156e9498b2aec5744c688f29621 Mon Sep 17 00:00:00 2001 From: Paula Camargo Date: Tue, 16 Apr 2024 01:19:18 -0700 Subject: [PATCH 11/11] fix actions name --- .github/actions/build-vsix/action.yml | 2 +- .github/workflows/build.yml | 2 +- .github/workflows/pr-check.yml | 2 +- build/azure-pipeline.pre-release.yml | 2 +- build/azure-pipeline.stable.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/actions/build-vsix/action.yml b/.github/actions/build-vsix/action.yml index 830263bea2cd..03279fa5fbdd 100644 --- a/.github/actions/build-vsix/action.yml +++ b/.github/actions/build-vsix/action.yml @@ -40,7 +40,7 @@ runs: run: python -m pip install wheel nox shell: bash - - name: Install Python Extension dependencies (jedi, debugpy, etc.) 
+ - name: Install Python Extension dependencies (jedi, etc.) run: nox --session install_python_libs shell: bash diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2f23d329ca82..d07f6165315e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -194,7 +194,7 @@ jobs: - name: Install build pre-requisite run: python -m pip install wheel nox - - name: Install Python Extension dependencies (jedi, debugpy, etc.) + - name: Install Python Extension dependencies (jedi, etc.) run: nox --session install_python_libs - name: Install test requirements diff --git a/.github/workflows/pr-check.yml b/.github/workflows/pr-check.yml index 7b30c68d5860..f7102dd5342a 100644 --- a/.github/workflows/pr-check.yml +++ b/.github/workflows/pr-check.yml @@ -182,7 +182,7 @@ jobs: - name: Install build pre-requisite run: python -m pip install wheel nox - - name: Install Python Extension dependencies (jedi, debugpy, etc.) + - name: Install Python Extension dependencies (jedi, etc.) run: nox --session install_python_libs - name: Install test requirements diff --git a/build/azure-pipeline.pre-release.yml b/build/azure-pipeline.pre-release.yml index cf7e9ef18a28..cf8659f5c41f 100644 --- a/build/azure-pipeline.pre-release.yml +++ b/build/azure-pipeline.pre-release.yml @@ -54,7 +54,7 @@ extends: - script: | nox --session install_python_libs - displayName: Install debugpy, Jedi, get-pip, etc + displayName: Install Jedi, get-pip, etc - script: | python ./build/update_ext_version.py --for-publishing diff --git a/build/azure-pipeline.stable.yml b/build/azure-pipeline.stable.yml index 5a0701df9108..589000b9f4b3 100644 --- a/build/azure-pipeline.stable.yml +++ b/build/azure-pipeline.stable.yml @@ -49,7 +49,7 @@ extends: - script: | nox --session install_python_libs - displayName: Install debugpy, Jedi, get-pip, etc + displayName: Install Jedi, get-pip, etc - script: | python ./build/update_ext_version.py --release --for-publishing