|
| 1 | +// Copyright (c) Microsoft Corporation. All rights reserved. |
| 2 | +// Licensed under the MIT License. |
| 3 | + |
| 4 | +'use strict'; |
| 5 | + |
| 6 | +import { expect, use } from 'chai'; |
| 7 | +import * as chaipromise from 'chai-as-promised'; |
| 8 | +import * as typeMoq from 'typemoq'; |
| 9 | +import { CancellationToken, OutputChannel, Uri } from 'vscode'; |
| 10 | +import { getOSType } from '../../..//client/common/platform/osinfo'; |
| 11 | +import { IApplicationShell, ICommandManager } from '../../../client/common/application/types'; |
| 12 | +import { OSType } from '../../../client/common/platform/types'; |
| 13 | +import { IServiceContainer } from '../../../client/ioc/types'; |
| 14 | +import { TestsHelper } from '../../../client/unittests/common/testUtils'; |
| 15 | +import { TestFlatteningVisitor } from '../../../client/unittests/common/testVisitors/flatteningVisitor'; |
| 16 | +import { FlattenedTestFunction, TestDiscoveryOptions, Tests } from '../../../client/unittests/common/types'; |
| 17 | +import { TestsParser as PyTestsParser } from '../../../client/unittests/pytest/services/parserService'; |
| 18 | +import { PytestDataPlatformType, pytestScenarioData } from './pytest_unittest_parser_data'; |
| 19 | + |
| 20 | +use(chaipromise); |
| 21 | + |
| 22 | +// The PyTest test parsing is done via the stdout result of the |
| 23 | +// `pytest --collect-only` command. |
| 24 | +// |
| 25 | +// There are a few limitations with this approach, the largest issue is mixing |
| 26 | +// package and non-package style codebases (stdout does not give subdir |
| 27 | +// information of tests in a package when __init__.py is not present). |
| 28 | +// |
| 29 | +// However, to test all of the various layouts that are available, we have |
| 30 | +// created a JSON structure that defines all the tests - see file |
| 31 | +// `pytest_unittest_parser_data.ts` in this folder. |
| 32 | +suite('Unit Tests - PyTest - Test Parser used in discovery', () => { |
| 33 | + |
| 34 | + // Build tests for the test data that is relevant for this platform. |
| 35 | + const testPlatformType: PytestDataPlatformType = |
| 36 | + getOSType() === OSType.Windows ? |
| 37 | + PytestDataPlatformType.Windows : PytestDataPlatformType.NonWindows; |
| 38 | + |
| 39 | + pytestScenarioData.forEach((testScenario) => { |
| 40 | + if (testPlatformType === testScenario.platform) { |
| 41 | + |
| 42 | + const testDescription: string = |
| 43 | + `PyTest${testScenario.pytest_version_spec}: ${testScenario.description}`; |
| 44 | + |
| 45 | + test(testDescription, async () => { |
| 46 | + // Setup the service container for use by the parser. |
| 47 | + const serviceContainer = typeMoq.Mock.ofType<IServiceContainer>(); |
| 48 | + const appShell = typeMoq.Mock.ofType<IApplicationShell>(); |
| 49 | + const cmdMgr = typeMoq.Mock.ofType<ICommandManager>(); |
| 50 | + serviceContainer.setup(s => s.get(typeMoq.It.isValue(IApplicationShell), typeMoq.It.isAny())) |
| 51 | + .returns(() => { |
| 52 | + return appShell.object; |
| 53 | + }); |
| 54 | + serviceContainer.setup(s => s.get(typeMoq.It.isValue(ICommandManager), typeMoq.It.isAny())) |
| 55 | + .returns(() => { |
| 56 | + return cmdMgr.object; |
| 57 | + }); |
| 58 | + |
| 59 | + // Create mocks used in the test discovery setup. |
| 60 | + const outChannel = typeMoq.Mock.ofType<OutputChannel>(); |
| 61 | + const cancelToken = typeMoq.Mock.ofType<CancellationToken>(); |
| 62 | + cancelToken.setup(c => c.isCancellationRequested).returns(() => false); |
| 63 | + const wsFolder = typeMoq.Mock.ofType<Uri>(); |
| 64 | + |
| 65 | + // Create the test options for the mocked-up test. All data is either |
| 66 | + // mocked or is taken from the JSON test data itself. |
| 67 | + const options: TestDiscoveryOptions = { |
| 68 | + args: [], |
| 69 | + cwd: testScenario.rootdir, |
| 70 | + ignoreCache: true, |
| 71 | + outChannel: outChannel.object, |
| 72 | + token: cancelToken.object, |
| 73 | + workspaceFolder: wsFolder.object |
| 74 | + }; |
| 75 | + |
| 76 | + // Setup the parser. |
| 77 | + const testFlattener: TestFlatteningVisitor = new TestFlatteningVisitor(); |
| 78 | + const testHlp: TestsHelper = new TestsHelper(testFlattener, serviceContainer.object); |
| 79 | + const parser = new PyTestsParser(testHlp); |
| 80 | + |
| 81 | + // Each test scenario has a 'stdout' member that is an array of |
| 82 | + // stdout lines. Join them here such that the parser can operate |
| 83 | + // on stdout-like data. |
| 84 | + const stdout: string = testScenario.stdout.join('\n'); |
| 85 | + |
| 86 | + const parsedTests: Tests = parser.parse(stdout, options); |
| 87 | + |
| 88 | + // Now we can actually perform tests. |
| 89 | + expect(parsedTests).is.not.equal( |
| 90 | + undefined, |
| 91 | + 'Should have gotten tests extracted from the parsed pytest result content.'); |
| 92 | + |
| 93 | + expect(parsedTests.testFunctions.length).equals( |
| 94 | + testScenario.functionCount, |
| 95 | + `Parsed pytest summary contained ${testScenario.functionCount} test functions.`); |
| 96 | + |
| 97 | + testScenario.test_functions.forEach((funcName: string) => { |
| 98 | + const findAllTests: FlattenedTestFunction[] | undefined = parsedTests.testFunctions.filter( |
| 99 | + (tstFunc: FlattenedTestFunction) => { |
| 100 | + return tstFunc.testFunction.nameToRun === funcName; |
| 101 | + }); |
| 102 | + // Each test identified in the testScenario should exist once and only once. |
| 103 | + expect(findAllTests).is.not.equal(undefined, `Could not find "${funcName}" in tests.`); |
| 104 | + expect(findAllTests.length).is.equal(1, 'There should be exactly one instance of each test.'); |
| 105 | + }); |
| 106 | + |
| 107 | + }); |
| 108 | + } |
| 109 | + }); |
| 110 | +}); |