feat: Offline store update `pull_all_from_table_or_query` to make timestamp field optional #5281
Changes from all commits (20 commits).
```diff
@@ -72,10 +72,10 @@ def _should_validate(self):
     def build(self) -> ExecutionPlan:
         last_node = self.build_source_node()

-        # PIT join entities to the feature data, and perform filtering
-        if isinstance(self.task, HistoricalRetrievalTask):
-            last_node = self.build_join_node(last_node)
+        # Join entity_df with source if needed
+        last_node = self.build_join_node(last_node)

         # PIT filter, TTL, and user-defined filter
         last_node = self.build_filter_node(last_node)

         if self._should_aggregate():
```

**Author (Collaborator):** Without an `entity_df` provided, the join node is just a pass-through node.
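A minimal sketch of that pass-through behavior, assuming the join node can check the execution context for an `entity_df`; class and attribute names here are illustrative, not the actual `LocalJoinNode` implementation:

```python
class PassThroughJoinNode:
    """Illustrative only: mirrors the behavior described above, not
    the real LocalJoinNode in feast.infra.compute_engines.local.nodes."""

    def __init__(self, input_node, join_keys):
        self.input_node = input_node
        self.join_keys = join_keys

    def execute(self, context):
        feature_df = self.input_node.execute(context)
        entity_df = getattr(context, "entity_df", None)
        if entity_df is None:
            # Materialization tasks carry no entity_df: there is nothing
            # to join against, so forward the upstream table unchanged.
            return feature_df
        # Historical retrieval: join feature rows onto the entity rows.
        return entity_df.merge(feature_df, on=self.join_keys, how="left")
```

This is why the `isinstance(self.task, HistoricalRetrievalTask)` guard above could be dropped: the node itself degrades to a no-op when no entities are supplied.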
```diff
@@ -2,7 +2,6 @@
 from feast.infra.common.materialization_job import MaterializationTask
 from feast.infra.common.retrieval_task import HistoricalRetrievalTask
-from feast.infra.compute_engines.dag.plan import ExecutionPlan
 from feast.infra.compute_engines.feature_builder import FeatureBuilder
 from feast.infra.compute_engines.local.backends.base import DataFrameBackend
 from feast.infra.compute_engines.local.nodes import (
@@ -95,25 +94,3 @@ def build_output_nodes(self, input_node):
         node = LocalOutputNode("output")
         node.add_input(input_node)
         self.nodes.append(node)
-
-    def build(self) -> ExecutionPlan:
-        last_node = self.build_source_node()
-
-        if isinstance(self.task, HistoricalRetrievalTask):
-            last_node = self.build_join_node(last_node)
-
-        last_node = self.build_filter_node(last_node)
-
-        if self._should_aggregate():
-            last_node = self.build_aggregation_node(last_node)
-        elif isinstance(self.task, HistoricalRetrievalTask):
-            last_node = self.build_dedup_node(last_node)
-
-        if self._should_transform():
-            last_node = self.build_transformation_node(last_node)
-
-        if self._should_validate():
-            last_node = self.build_validation_node(last_node)
-
-        self.build_output_nodes(last_node)
-        return ExecutionPlan(self.nodes)
```

**Author (Collaborator):** We can remove this `build()` override, as it is now the same as the parent's.
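Removing the override leans on the template-method structure of `FeatureBuilder`: the parent's `build()` fixes the node order once, and subclasses customize only the `build_*_node` hooks. A toy illustration of why the deletion is safe (names hypothetical, not Feast's classes):

```python
class Builder:
    # Template method: the pipeline order lives only in the parent.
    def build(self):
        return [self.build_source(), self.build_filter()]

    def build_source(self):
        return "generic-source"

    def build_filter(self):
        return "generic-filter"


class LocalBuilder(Builder):
    # No build() override: only the hooks differ, so the inherited
    # build() produces the same plan the deleted copy would have.
    def build_source(self):
        return "local-source"


assert LocalBuilder().build() == ["local-source", "generic-filter"]
```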
```diff
@@ -1,8 +1,9 @@
-from datetime import timedelta
+from datetime import datetime, timedelta
 from typing import Optional

 import pyarrow as pa

+from feast.data_source import DataSource
 from feast.infra.compute_engines.dag.context import ExecutionContext
 from feast.infra.compute_engines.local.arrow_table_value import ArrowTableValue
 from feast.infra.compute_engines.local.backends.base import DataFrameBackend
@@ -15,14 +16,40 @@
 class LocalSourceReadNode(LocalNode):
-    def __init__(self, name: str, feature_view, task):
+    def __init__(
+        self,
+        name: str,
+        source: DataSource,
+        start_time: Optional[datetime] = None,
+        end_time: Optional[datetime] = None,
+    ):
         super().__init__(name)
-        self.feature_view = feature_view
-        self.task = task
+        self.source = source
+        self.start_time = start_time
+        self.end_time = end_time

     def execute(self, context: ExecutionContext) -> ArrowTableValue:
-        # TODO : Implement the logic to read from offline store
-        return ArrowTableValue(data=pa.Table.from_pandas(context.entity_df))
+        offline_store = context.offline_store
+        (
+            join_key_columns,
+            feature_name_columns,
+            timestamp_field,
+            created_timestamp_column,
+        ) = context.column_info
+
+        # 📥 Reuse Feast's robust query resolver
+        retrieval_job = offline_store.pull_all_from_table_or_query(
+            config=context.repo_config,
+            data_source=self.source,
+            join_key_columns=join_key_columns,
+            feature_name_columns=feature_name_columns,
+            timestamp_field=timestamp_field,
+            created_timestamp_column=created_timestamp_column,
+            start_date=self.start_time,
+            end_date=self.end_time,
+        )
+        arrow_table = retrieval_job.to_arrow()
+        return ArrowTableValue(data=arrow_table)


 class LocalJoinNode(LocalNode):
```

**Author (Collaborator):** Reuse the offline store's `pull_all_from_table_or_query` here rather than reimplementing the read.
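Downstream, a builder can now construct the read node with or without a time window. A hedged usage sketch; the `feature_view` object and the surrounding `build_source_node()` wiring are assumed, not shown in this diff:

```python
from datetime import datetime, timedelta

from feast.infra.compute_engines.local.nodes import LocalSourceReadNode

source = feature_view.batch_source  # assumed: a feast DataSource

# Bounded read, e.g. incremental materialization over the last day.
bounded = LocalSourceReadNode(
    "source",
    source,
    start_time=datetime.utcnow() - timedelta(days=1),
    end_time=datetime.utcnow(),
)

# Unbounded read: start_time/end_time default to None, so the offline
# store applies no timestamp filter when pulling the source table.
unbounded = LocalSourceReadNode("source", source)
```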
```diff
@@ -52,6 +52,7 @@
     BigQuerySource,
     SavedDatasetBigQueryStorage,
 )
+from .offline_utils import get_timestamp_filter_sql

 try:
     from google.api_core import client_info as http_client_info
@@ -188,8 +189,9 @@ def pull_all_from_table_or_query(
         join_key_columns: List[str],
         feature_name_columns: List[str],
         timestamp_field: str,
-        start_date: datetime,
-        end_date: datetime,
+        created_timestamp_column: Optional[str] = None,
+        start_date: Optional[datetime] = None,
+        end_date: Optional[datetime] = None,
     ) -> RetrievalJob:
         assert isinstance(config.offline_store, BigQueryOfflineStoreConfig)
         assert isinstance(data_source, BigQuerySource)
@@ -201,15 +203,26 @@ def pull_all_from_table_or_query(
             project=project_id,
             location=config.offline_store.location,
         )

+        timestamp_fields = [timestamp_field]
+        if created_timestamp_column:
+            timestamp_fields.append(created_timestamp_column)
         field_string = ", ".join(
             BigQueryOfflineStore._escape_query_columns(join_key_columns)
             + BigQueryOfflineStore._escape_query_columns(feature_name_columns)
-            + [timestamp_field]
+            + timestamp_fields
         )
+        timestamp_filter = get_timestamp_filter_sql(
+            start_date,
+            end_date,
+            timestamp_field,
+            quote_fields=False,
+            cast_style="timestamp_func",
+        )
         query = f"""
             SELECT {field_string}
             FROM {from_expression}
-            WHERE {timestamp_field} BETWEEN TIMESTAMP('{start_date}') AND TIMESTAMP('{end_date}')
+            WHERE {timestamp_filter}
         """
         return BigQueryRetrievalJob(
             query=query,
```

**Author (Collaborator):** Major change: the timestamp filter is now built by the shared `get_timestamp_filter_sql` helper from `offline_utils` rather than a hard-coded `BETWEEN` clause.
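With the new defaults, a caller can omit the time window entirely. A hedged usage sketch; the `config` and `source` objects are assumed to exist, and the column names are illustrative:

```python
# Hypothetical call site: `config` is a RepoConfig, `source` a BigQuerySource.
job = BigQueryOfflineStore.pull_all_from_table_or_query(
    config=config,
    data_source=source,
    join_key_columns=["driver_id"],
    feature_name_columns=["conv_rate"],
    timestamp_field="event_timestamp",
    # created_timestamp_column, start_date, and end_date all default to
    # None, so this reads the full table with no timestamp filter.
)
df = job.to_df()
```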
**Review comment:** The retrieval job doesn't have to provide a start or end date.
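That is the crux of the change: with both bounds optional, the generated `WHERE` clause must degrade gracefully. This diff doesn't include `get_timestamp_filter_sql` itself, so the following is only a plausible sketch of the behavior such a helper needs, not Feast's actual implementation:

```python
from datetime import datetime
from typing import Optional


def timestamp_filter_sql_sketch(
    start_date: Optional[datetime],
    end_date: Optional[datetime],
    timestamp_field: str,
) -> str:
    """Illustrative stand-in for get_timestamp_filter_sql; the real
    helper also takes quote_fields and cast_style arguments."""
    clauses = []
    if start_date is not None:
        clauses.append(f"{timestamp_field} >= TIMESTAMP('{start_date}')")
    if end_date is not None:
        clauses.append(f"{timestamp_field} <= TIMESTAMP('{end_date}')")
    # With neither bound set, the filter must be a no-op so that
    # pull_all_from_table_or_query can scan the whole table.
    return " AND ".join(clauses) if clauses else "TRUE"


print(timestamp_filter_sql_sketch(None, None, "event_timestamp"))  # -> TRUE
```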