tests/config_edge_test.py (3 additions, 0 deletions)
@@ -6,6 +6,7 @@
from commit_check.config import load_config


@pytest.mark.benchmark
def test_load_config_invalid_toml():
"""Test handling of invalid TOML syntax."""
invalid_toml = b"""
@@ -23,6 +24,7 @@ def test_load_config_invalid_toml():
os.unlink(f.name)


@pytest.mark.benchmark
def test_load_config_file_permission_error():
"""Test handling of file permission errors."""
config_content = b"""
@@ -45,6 +47,7 @@ def test_load_config_file_permission_error():
os.unlink(f.name)


@pytest.mark.benchmark
def test_tomli_import_fallback():
"""Test the tomli import fallback when tomllib is not available."""
# We need to test the import fallback behavior
tests/config_fallback_test.py (2 additions, 0 deletions)
@@ -3,9 +3,11 @@
import sys
import tempfile
import os
import pytest
from unittest.mock import patch


@pytest.mark.benchmark
def test_config_tomli_fallback_direct():
"""Test config.py fallback to tomli by manipulating imports."""

tests/config_import_test.py (3 additions, 0 deletions)
@@ -3,8 +3,10 @@
import tempfile
import os
from unittest.mock import patch
import pytest


@pytest.mark.benchmark
def test_tomli_import_fallback_simulation():
"""Test tomli import fallback by simulating the ImportError condition."""

@@ -63,6 +65,7 @@ def load(f):
return __import__(name, *args, **kwargs)


@pytest.mark.benchmark
def test_import_paths_coverage():
"""Ensure both import paths are conceptually tested."""
# This test verifies that both the tomllib and tomli code paths
tests/config_test.py (9 additions, 0 deletions)
@@ -9,6 +9,7 @@


class TestConfig:
@pytest.mark.benchmark
def test_load_config_with_path_hint(self):
"""Test loading config with explicit path hint."""
config_content = b"""
@@ -28,6 +29,7 @@ def test_load_config_with_path_hint(self):
finally:
os.unlink(f.name)

@pytest.mark.benchmark
def test_load_config_with_nonexistent_path_hint(self):
"""Test loading config when path hint doesn't exist - should raise FileNotFoundError."""
# Test that specifying a nonexistent config file raises an error
@@ -36,6 +38,7 @@ def test_load_config_with_nonexistent_path_hint(self):
):
load_config("nonexistent.toml")

@pytest.mark.benchmark
def test_load_config_default_cchk_toml(self):
"""Test loading config from default cchk.toml path."""
config_content = b"""
@@ -55,6 +58,7 @@ def test_load_config_default_cchk_toml(self):
finally:
os.chdir(original_cwd)

@pytest.mark.benchmark
def test_load_config_default_commit_check_toml(self):
"""Test loading config from default commit-check.toml path."""
config_content = b"""
@@ -74,6 +78,7 @@ def test_load_config_default_commit_check_toml(self):
finally:
os.chdir(original_cwd)

@pytest.mark.benchmark
def test_load_config_file_not_found(self):
"""Test returning empty config when no default config files exist."""
original_cwd = os.getcwd()
@@ -86,6 +91,7 @@ def test_load_config_file_not_found(self):
finally:
os.chdir(original_cwd)

@pytest.mark.benchmark
def test_load_config_file_not_found_with_invalid_path_hint(self):
"""Test FileNotFoundError when specified path hint doesn't exist."""
original_cwd = os.getcwd()
@@ -100,12 +106,14 @@ def test_load_config_file_not_found_with_invalid_path_hint(self):
finally:
os.chdir(original_cwd)

@pytest.mark.benchmark
def test_default_config_paths_constant(self):
"""Test that DEFAULT_CONFIG_PATHS contains expected paths."""
assert len(DEFAULT_CONFIG_PATHS) == 2
assert Path("cchk.toml") in DEFAULT_CONFIG_PATHS
assert Path("commit-check.toml") in DEFAULT_CONFIG_PATHS

@pytest.mark.benchmark
def test_toml_load_function_exists(self):
"""Test that toml_load function is properly set up."""
from commit_check.config import toml_load
@@ -128,6 +136,7 @@ def test_toml_load_function_exists(self):
finally:
os.unlink(f.name)

@pytest.mark.benchmark
def test_tomli_import_fallback(self):
"""Test that tomli is imported when tomllib is not available (lines 10-13)."""
import sys
tests/engine_comprehensive_test.py (18 additions, 0 deletions)
@@ -17,16 +17,19 @@
CommitTypeValidator,
)
from commit_check.rule_builder import ValidationRule
import pytest


class TestValidationResult:
@pytest.mark.benchmark
def test_validation_result_values(self):
"""Test ValidationResult enum values."""
assert ValidationResult.PASS == 0
assert ValidationResult.FAIL == 1


class TestValidationContext:
@pytest.mark.benchmark
def test_validation_context_creation(self):
"""Test ValidationContext creation."""
context = ValidationContext()
@@ -41,6 +44,7 @@ def test_validation_context_creation(self):


class TestCommitMessageValidator:
@pytest.mark.benchmark
def test_commit_message_validator_creation(self):
"""Test CommitMessageValidator creation."""
rule = ValidationRule(
@@ -53,6 +57,7 @@ def test_commit_message_validator_creation(self):
assert validator.rule == rule

@patch("commit_check.engine.has_commits")
@pytest.mark.benchmark
def test_commit_message_validator_with_stdin(self, mock_has_commits):
"""Test CommitMessageValidator with stdin text."""
mock_has_commits.return_value = True
@@ -71,6 +76,7 @@ def test_commit_message_validator_with_stdin(self, mock_has_commits):

@patch("commit_check.engine.get_commit_info")
@patch("commit_check.engine.has_commits")
@pytest.mark.benchmark
def test_commit_message_validator_failure(
self, mock_has_commits, mock_get_commit_info
):
@@ -92,6 +98,7 @@ def test_commit_message_validator_failure(
mock_print.assert_called_once()

@patch("commit_check.engine.has_commits")
@pytest.mark.benchmark
def test_commit_message_validator_skip_validation(self, mock_has_commits):
"""Test CommitMessageValidator skips when no commits and no stdin."""
mock_has_commits.return_value = False
@@ -110,6 +117,7 @@ def test_commit_message_validator_skip_validation(self, mock_has_commits):


class TestSubjectCapitalizationValidator:
@pytest.mark.benchmark
def test_subject_capitalization_pass(self):
"""Test SubjectCapitalizationValidator pass case."""
rule = ValidationRule(
@@ -125,6 +133,7 @@ def test_subject_capitalization_pass(self):
result = validator.validate(context)
assert result == ValidationResult.PASS

@pytest.mark.benchmark
def test_subject_capitalization_fail(self):
"""Test SubjectCapitalizationValidator fail case."""
rule = ValidationRule(
@@ -144,6 +153,7 @@ def test_subject_capitalization_fail(self):


class TestSubjectImperativeValidator:
@pytest.mark.benchmark
def test_subject_imperative_pass(self):
"""Test SubjectImperativeValidator pass case."""
rule = ValidationRule(
@@ -159,6 +169,7 @@ def test_subject_imperative_pass(self):
result = validator.validate(context)
assert result == ValidationResult.PASS

@pytest.mark.benchmark
def test_subject_imperative_fail(self):
"""Test SubjectImperativeValidator fail case."""
rule = ValidationRule(
@@ -178,6 +189,7 @@ def test_subject_imperative_fail(self):


class TestSubjectLengthValidator:
@pytest.mark.benchmark
def test_subject_length_pass(self):
"""Test SubjectLengthValidator pass case."""
rule = ValidationRule(
@@ -193,6 +205,7 @@ def test_subject_length_pass(self):
result = validator.validate(context)
assert result == ValidationResult.PASS

@pytest.mark.benchmark
def test_subject_length_fail(self):
"""Test SubjectLengthValidator fail case."""
rule = ValidationRule(
@@ -212,6 +225,7 @@ def test_subject_length_fail(self):


class TestValidationEngine:
@pytest.mark.benchmark
def test_validation_engine_creation(self):
"""Test ValidationEngine creation."""
rules = [
@@ -225,6 +239,7 @@ def test_validation_engine_creation(self):
engine = ValidationEngine(rules)
assert engine.rules == rules

@pytest.mark.benchmark
def test_validation_engine_validator_map(self):
"""Test ValidationEngine VALIDATOR_MAP contains expected mappings."""
engine = ValidationEngine([])
@@ -252,6 +267,7 @@ def test_validation_engine_validator_map(self):
for check, validator_class in expected_mappings.items():
assert engine.VALIDATOR_MAP[check] == validator_class

@pytest.mark.benchmark
def test_validation_engine_validate_all_pass(self):
"""Test ValidationEngine validate_all with all passing rules."""
rules = [
@@ -269,6 +285,7 @@ def test_validation_engine_validate_all_pass(self):
result = engine.validate_all(context)
assert result == ValidationResult.PASS

@pytest.mark.benchmark
def test_validation_engine_validate_all_fail(self):
"""Test ValidationEngine validate_all with failing rule."""
rules = [
@@ -286,6 +303,7 @@ def test_validation_engine_validate_all_fail(self):
result = engine.validate_all(context)
assert result == ValidationResult.FAIL

@pytest.mark.benchmark
def test_validation_engine_unknown_validator(self):
"""Test ValidationEngine with unknown validator type."""
rules = [
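Note on the marker added throughout these commits: the diff alone does not show whether `benchmark` comes from a plugin such as pytest-benchmark or is a project-defined marker used only to tag a subset of tests for selection. If it is project-defined, it would normally be registered in the pytest configuration so that `pytest -m benchmark` works without unknown-marker warnings. A minimal sketch, assuming a standalone test module and a hypothetical test name:

# Illustrative sketch only -- not part of the diff above; the test below is
# hypothetical.  If `benchmark` is a project-defined marker (rather than one
# supplied by a plugin such as pytest-benchmark), it would typically be
# registered in the pytest configuration, e.g. in pyproject.toml:
#
#   [tool.pytest.ini_options]
#   markers = ["benchmark: tests included in the benchmark run"]
#
import pytest


@pytest.mark.benchmark
def test_example_tagged_for_benchmarking():
    """Hypothetical test selected by `pytest -m benchmark`."""
    assert sum(range(10)) == 45

With the marker in place, `pytest -m benchmark` runs only the marked tests and `pytest -m "not benchmark"` excludes them, which appears to be the intent of tagging these test modules.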