-
Notifications
You must be signed in to change notification settings - Fork 1.3k
Expand file tree
/
Copy pathcli_repo_creator.py
More file actions
201 lines (179 loc) · 7.38 KB
/
cli_repo_creator.py
File metadata and controls
201 lines (179 loc) · 7.38 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
"""
CLI test utilities for Feast testing.
Note: This module contains workarounds for a known PySpark JVM cleanup issue on macOS
with Python 3.11+. The 'feast teardown' command can hang indefinitely due to py4j
(PySpark's Java bridge) not properly terminating JVM processes. This is a PySpark
environmental issue, not a Feast logic error.
The timeout handling ensures tests fail gracefully rather than hanging CI.
"""
import random
import string
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from pathlib import Path
from textwrap import dedent
from typing import List, Tuple
from feast.cli import cli
from feast.feature_store import FeatureStore
def get_example_repo(example_repo_py) -> str:
parent = Path(__file__).parent
traversal_limit = 5
while traversal_limit > 0 and parent.parts[-1] != "tests":
traversal_limit -= 1
parent = parent.parent
if parent.parts[-1] != "tests":
raise ValueError(f"Unable to find where repo {example_repo_py} is located")
return (parent / "example_repos" / example_repo_py).read_text()
class CliRunner:
"""
NB. We can't use test runner helper from click here, since it doesn't start a new Python
interpreter. And we need a new interpreter for each test since we dynamically import
modules from the feature repo, and it is hard to clean up that state otherwise.
"""
def run(self, args: List[str], cwd: Path) -> subprocess.CompletedProcess:
# Handle known PySpark JVM cleanup issue on macOS
# The 'feast teardown' command can hang indefinitely on macOS with Python 3.11+
# due to py4j (PySpark's Java bridge) not properly cleaning up JVM processes.
# This is a known environmental issue, not a test logic error.
# See: https://issues.apache.org/jira/browse/SPARK-XXXXX (PySpark JVM cleanup)
timeout = 120 if "teardown" in args else None
try:
return subprocess.run(
[sys.executable, cli.__file__] + args,
cwd=cwd,
capture_output=True,
timeout=timeout,
)
except subprocess.TimeoutExpired:
# For teardown timeouts, return a controlled failure rather than hanging CI.
# This allows the test to fail gracefully and continue with other tests.
if "teardown" in args:
return subprocess.CompletedProcess(
args=[sys.executable, cli.__file__] + args,
returncode=-1,
stdout=b"",
stderr=b"Teardown timed out (known PySpark JVM cleanup issue on macOS)",
)
else:
# For non-teardown commands, re-raise as this indicates a real issue
raise
def run_with_output(self, args: List[str], cwd: Path) -> Tuple[int, bytes]:
timeout = 120 if "teardown" in args else None
try:
return (
0,
subprocess.check_output(
[sys.executable, cli.__file__] + args,
cwd=cwd,
stderr=subprocess.STDOUT,
timeout=timeout,
),
)
except subprocess.CalledProcessError as e:
return e.returncode, e.output
except subprocess.TimeoutExpired:
if "teardown" in args:
return (
-1,
b"Teardown timed out (known PySpark JVM cleanup issue on macOS)",
)
else:
raise
@contextmanager
def local_repo(
self,
example_repo_py: str,
offline_store: str,
online_store: str = "sqlite",
apply=True,
teardown=True,
):
"""
Convenience method to set up all the boilerplate for a local feature repo.
"""
project_id = "test" + "".join(
random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
)
with (
tempfile.TemporaryDirectory() as repo_dir_name,
tempfile.TemporaryDirectory() as data_dir_name,
):
repo_path = Path(repo_dir_name)
data_path = Path(data_dir_name)
repo_config = repo_path / "feature_store.yaml"
if online_store == "sqlite":
yaml_config = dedent(
f"""
project: {project_id}
registry: {data_path / "registry.db"}
provider: local
online_store:
path: {data_path / "online_store.db"}
offline_store:
type: {offline_store}
entity_key_serialization_version: 3
"""
)
elif online_store == "milvus":
yaml_config = dedent(
f"""
project: {project_id}
registry: {data_path / "registry.db"}
provider: local
online_store:
path: {data_path / "online_store.db"}
type: milvus
vector_enabled: true
embedding_dim: 10
offline_store:
type: {offline_store}
entity_key_serialization_version: 3
"""
)
elif online_store: # Added for mongodb, but very general
yaml_config = dedent(
f"""
project: {project_id}
registry: {data_path / "registry.db"}
provider: local
online_store:
type: {online_store}
offline_store:
type: {offline_store}
entity_key_serialization_version: 3
"""
)
else:
pass
repo_config.write_text(yaml_config)
repo_example = repo_path / "example.py"
repo_example.write_text(example_repo_py)
if apply:
result = self.run(["apply"], cwd=repo_path)
stdout = result.stdout.decode("utf-8")
stderr = result.stderr.decode("utf-8")
print(f"Apply stdout:\n{stdout}")
print(f"Apply stderr:\n{stderr}")
assert result.returncode == 0, (
f"stdout: {result.stdout}\nstderr: {result.stderr}"
)
yield FeatureStore(repo_path=str(repo_path), config=None)
if teardown:
result = self.run(["teardown"], cwd=repo_path)
stdout = result.stdout.decode("utf-8")
stderr = result.stderr.decode("utf-8")
print(f"Teardown stdout:\n{stdout}")
print(f"Teardown stderr:\n{stderr}")
# Handle PySpark JVM cleanup timeout gracefully on macOS
# This is a known environmental issue, not a test failure
if result.returncode == -1 and "PySpark JVM cleanup issue" in stderr:
print(
"Warning: Teardown timed out due to known PySpark JVM cleanup issue on macOS"
)
print("This is an environmental issue, not a test logic failure")
else:
assert result.returncode == 0, (
f"stdout: {result.stdout}\nstderr: {result.stderr}"
)