
CombineOverlappingPeriods

Bases: Node

CombineOverlappingPeriods takes overlapping and consecutive time periods in the source table and combines them into a single time period, with a single start and end date, per patient. For example, if a patient has two visits with the same start and end date, they are combined into one visit. If a patient has two visits with overlapping or consecutive dates, they are combined into one visit spanning the earliest start date to the latest end date. This is useful for creating a single time period per patient, e.g. admission/discharge periods or vaccination periods.
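
A minimal usage sketch, assuming the import path shown in the source reference below; the domain name "VISIT" and the in-memory table are hypothetical, and any source table with PERSON_ID, START_DATE, and END_DATE columns works:

from datetime import date

import ibis

from phenex.derived_tables.combine_overlapping_periods import CombineOverlappingPeriods

# Hypothetical in-memory source table: patient 1 has two overlapping visits
visits = ibis.memtable(
    {
        "PERSON_ID": [1, 1, 2],
        "START_DATE": [date(2020, 1, 1), date(2020, 1, 4), date(2020, 3, 1)],
        "END_DATE": [date(2020, 1, 5), date(2020, 1, 10), date(2020, 3, 2)],
    }
)

combiner = CombineOverlappingPeriods(domain="VISIT")
merged = combiner.execute(tables={"VISIT": visits})
# Patient 1's two visits merge into one period: 2020-01-01 through 2020-01-10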

Source code in phenex/derived_tables/combine_overlapping_periods.py
class CombineOverlappingPeriods(Node):
    """
    CombineOverlappingPeriods takes overlapping and consecutive time periods in the source table and combines them into a single time period, with a single start and end date, per patient. For example, if a patient has two visits with the same start and end date, they are combined into one visit. If a patient has two visits with overlapping or consecutive dates, they are combined into one visit spanning the earliest start date to the latest end date.
    This is useful for creating a single time period per patient, e.g. admission/discharge periods or vaccination periods.
    """

    def __init__(
        self,
        domain: str,
        categorical_filter: Optional["CategoricalFilter"] = None,
        **kwargs
    ):
        self.domain = domain
        self.categorical_filter = categorical_filter
        super(CombineOverlappingPeriods, self).__init__(**kwargs)

    def _execute(
        self,
        tables: Dict[str, Table],
    ) -> "Table":
        # get the appropriate table
        table = tables[self.domain]
        if self.categorical_filter is not None:
            # apply the categorical filter to the table
            table = self.categorical_filter.autojoin_filter(table, tables)

        # subset to only the relevant columns and sort
        table = table.select("PERSON_ID", "START_DATE", "END_DATE")

        # Step 1: Sort by PERSON_ID, START_DATE, END_DATE
        table = table.order_by(["PERSON_ID", "START_DATE", "END_DATE"])

        # Step 1.5: Remove intervals that are wholly contained within other intervals
        # For each interval, check if there's another interval that completely contains it

        # Self-join to find containing intervals
        table_alias = table.view()  # Create an alias for self-join

        # Find intervals that are wholly contained within another interval
        contained_intervals = (
            table.inner_join(
                table_alias,
                [
                    table.PERSON_ID == table_alias.PERSON_ID,
                    table.START_DATE >= table_alias.START_DATE,
                    table.END_DATE <= table_alias.END_DATE,
                    # Not the same interval (at least one bound is strictly contained)
                    (table.START_DATE > table_alias.START_DATE)
                    | (table.END_DATE < table_alias.END_DATE),
                ],
            )
            .select(table.PERSON_ID, table.START_DATE, table.END_DATE)
            .distinct()
        )

        # Remove wholly contained intervals
        table = table.anti_join(
            contained_intervals, ["PERSON_ID", "START_DATE", "END_DATE"]
        )

        # Re-sort after filtering
        table = table.order_by(["PERSON_ID", "START_DATE", "END_DATE"])

        # Step 2: Create lag window to get previous end date for each person
        prev_end = table.END_DATE.lag(1).over(
            ibis.window(
                group_by=table.PERSON_ID, order_by=[table.START_DATE, table.END_DATE]
            )
        )

        # Step 3: Determine if current period starts a new group
        # New group starts if there's no previous period or if there's a gap > 1 day
        # This matches the original pandas logic: start > merged[-1][1] + pd.Timedelta(days=1)
        table = table.mutate(
            prev_end_date=prev_end,
            new_group=ibis.case()
            .when(prev_end.isnull(), 1)  # First record for person
            .when(
                table.START_DATE > (prev_end + ibis.interval(days=1)), 1
            )  # Gap > 1 day
            .else_(0)  # Overlapping, consecutive, or same day periods
            .end(),
        )

        # Step 4: Create group IDs using cumulative sum within each person
        # Create a row number first, then use it to calculate running sum properly
        table = table.mutate(
            row_num=ibis.row_number().over(
                ibis.window(
                    group_by=table.PERSON_ID,
                    order_by=[table.START_DATE, table.END_DATE],
                )
            )
        )

        # Now calculate cumulative sum of new_group within each person
        table = table.mutate(
            group_num=table.new_group.sum().over(
                ibis.window(
                    group_by=table.PERSON_ID,
                    order_by=table.row_num,
                    rows=(None, 0),  # Unbounded preceding to current row
                )
            )
        )

        # Create a composite group ID to ensure uniqueness across persons
        table = table.mutate(
            group_id=table.PERSON_ID.cast("string")
            + "_"
            + table.group_num.cast("string")
        )

        # Step 5: Aggregate by person and group to get merged periods
        result = table.group_by(["PERSON_ID", "group_id"]).aggregate(
            START_DATE=table.START_DATE.min(), END_DATE=table.END_DATE.max()
        )

        # Step 6: Select final columns and sort
        result = result.select("PERSON_ID", "START_DATE", "END_DATE").order_by(
            ["PERSON_ID", "START_DATE"]
        )

        return result

dependencies property

Recursively collect all dependencies of a node (including dependencies of dependencies).

Returns:

Set[Node]: The set of Node objects on which this Node depends.
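
For example, a sketch with a hypothetical node my_node:

for dep in my_node.dependencies:
    print(dep.name)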

dependency_graph property

Build a dependency graph where each node maps to its direct dependencies (children).

Returns:

Dict[Node, Set[Node]]: A mapping of each Node to its child Nodes.
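
For example, a sketch with a hypothetical node my_node:

for node, children in my_node.dependency_graph.items():
    print(node.name, "->", sorted(child.name for child in children))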

execution_metadata property

Retrieve the full execution metadata row for this node from the local DuckDB database.

Returns:

pandas.DataFrame: A table containing NODE_NAME, NODE_HASH, NODE_PARAMS, EXECUTION_PARAMS, EXECUTION_START_TIME, EXECUTION_END_TIME, and EXECUTION_DURATION for the execution of this node, or None if the node has never been executed.
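
For example, a sketch assuming my_node (hypothetical) has been executed at least once:

meta = my_node.execution_metadata
if meta is not None:
    print(meta[["NODE_NAME", "EXECUTION_DURATION"]])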

reverse_dependency_graph property

Build a reverse dependency graph where each node maps to nodes that depend on it (parents).

Returns:

Dict[Node, Set[Node]]: A mapping of each Node to its parent Nodes.

clear_cache(con=None, recursive=False)

Clear the cached state for this node, forcing re-execution on the next call to execute().

This method removes the node's hash from the node states table and optionally drops the materialized table from the database. After calling this method, the node will be treated as if it has never been executed before.

Parameters:

con (Optional[object]): Database connector. If provided, clears only runs with matching execution context and drops the materialized table. If None, clears all runs for the node. Default: None

recursive (bool): If True, also clears the cache for all child nodes recursively. Default: False
Example
# Clear all cached runs for a single node
my_node.clear_cache()

# Clear runs with specific execution context and drop materialized table
my_node.clear_cache(con=my_connector)

# Clear cache for node and all its dependencies
my_node.clear_cache(recursive=True)
Source code in phenex/node.py
def clear_cache(self, con: Optional[object] = None, recursive: bool = False):
    """
    Clear the cached state for this node, forcing re-execution on the next call to execute().

    This method removes the node's hash from the node states table and optionally drops the materialized table from the database. After calling this method, the node will be treated as if it has never been executed before.

    Parameters:
        con: Database connector. If provided, clears only runs with matching execution context and drops the materialized table. If None, clears all runs for the node.
        recursive: If True, also clear the cache for all child nodes recursively. Defaults to False.

    Example:
        ```python
        # Clear all cached runs for a single node
        my_node.clear_cache()

        # Clear runs with specific execution context and drop materialized table
        my_node.clear_cache(con=my_connector)

        # Clear cache for node and all its dependencies
        my_node.clear_cache(recursive=True)
        ```
    """
    # Delegate all logic to NodeManager
    return Node._node_manager.clear_cache(self, con=con, recursive=recursive)

execute(tables=None, con=None, overwrite=False, lazy_execution=False, n_threads=1)

Executes the Node computation for the current node and its dependencies.

Lazy Execution

When lazy_execution=True, nodes are only recomputed if changes are detected. The system tracks:

1. Node definition changes: detected by hashing the node's parameters (from to_dict()) and class name
2. Execution environment changes: detected by tracking source/destination database configurations

A node will be rerun if any of the following hold:

- The node's defining parameters have changed (different hash than the last execution)
- The database connector's source or destination databases have changed
- The node has never been executed before

If no changes are detected, the node uses its cached result from the database instead of recomputing.

Requirements for lazy execution:

- A database connector (con) must be provided to store and retrieve cached results
- overwrite=True must be set to allow updating existing cached tables

State tracking is maintained in a local DuckDB database (the __PHENEX_META__NODE_STATES table) that stores:

- Node hashes, parameters, and execution metadata
- Database connector configuration used during execution
- Execution timing information
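
A usage sketch, assuming my_connector is a database connector and domain_tables maps domain names to Table objects (both hypothetical):

# First call executes the node and its dependencies and materializes results
result = my_node.execute(
    tables=domain_tables,
    con=my_connector,
    overwrite=True,
    lazy_execution=True,
)

# An identical second call detects no changes and reuses the cached tables
result = my_node.execute(
    tables=domain_tables,
    con=my_connector,
    overwrite=True,
    lazy_execution=True,
)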

Parameters:

tables (Dict[str, Table]): A dictionary mapping domains to Table objects. Default: None

con (Optional[object]): Connection to database for materializing outputs. If provided, outputs from the node and all children nodes will be materialized (written) to the database using the connector. Required for lazy_execution. Default: None

overwrite (bool): If True, overwrites any existing tables found in the database while writing. If False, throws an error when an existing table is found. Has no effect if con is not passed. Must be True when using lazy_execution. Default: False

lazy_execution (bool): If True, only re-executes nodes when changes are detected in either the node definition or execution environment. Requires con to be provided. Default: False

n_threads (int): Max number of Nodes to execute simultaneously when this node has multiple children. Default: 1

Returns:

Table: The resulting table for this node. Also accessible through self.table after calling self.execute().

Raises:

ValueError: If lazy_execution=True but overwrite=False or con=None.

Source code in phenex/node.py
def execute(
    self,
    tables: Dict[str, Table] = None,
    con: Optional[object] = None,
    overwrite: bool = False,
    lazy_execution: bool = False,
    n_threads: int = 1,
) -> Table:
    """
    Executes the Node computation for the current node and its dependencies.

    Lazy Execution:
        When lazy_execution=True, nodes are only recomputed if changes are detected. The system tracks:
        1. Node definition changes: Detected by hashing the node's parameters (from to_dict()) and class name
        2. Execution environment changes: Detected by tracking source/destination database configurations

        A node will be rerun if either:
        - The node's defining parameters have changed (different hash than last execution)
        - The database connector's source or destination databases have changed
        - The node has never been executed before

        If no changes are detected, the node uses its cached result from the database instead of recomputing.

        Requirements for lazy execution:
        - A database connector (con) must be provided to store and retrieve cached results
        - overwrite=True must be set to allow updating existing cached tables

        State tracking is maintained in a local DuckDB database (__PHENEX_META__NODE_STATES table) that stores:
        - Node hashes, parameters, and execution metadata
        - Database connector configuration used during execution
        - Execution timing information

    Parameters:
        tables: A dictionary mapping domains to Table objects.
        con: Connection to database for materializing outputs. If provided, outputs from the node and all children nodes will be materialized (written) to the database using the connector. Required for lazy_execution.
        overwrite: If True, will overwrite any existing tables found in the database while writing. If False, will throw an error when an existing table is found. Has no effect if con is not passed. Must be True when using lazy_execution.
        lazy_execution: If True, only re-executes nodes when changes are detected in either the node definition or execution environment. Defaults to False. Requires con to be provided.
        n_threads: Max number of Node's to execute simultaneously when this node has multiple children.

    Returns:
        Table: The resulting table for this node. Also accessible through self.table after calling self.execute().

    Raises:
        ValueError: If lazy_execution=True but overwrite=False or con=None.
    """
    # Handle None tables
    if tables is None:
        tables = {}

    # Build dependency graph for all dependencies
    all_deps = self.dependencies
    nodes = {node.name: node for node in all_deps}
    nodes[self.name] = self  # Add self to the nodes

    # Build dependency and reverse graphs
    dependency_graph = self._build_dependency_graph(nodes)
    reverse_graph = self._build_reverse_graph(dependency_graph)

    # Track completion status and results
    completed = set()
    completion_lock = threading.Lock()
    worker_exceptions = []  # Track exceptions from worker threads
    stop_all_workers = (
        threading.Event()
    )  # Signal to stop all workers on first error

    # Track in-degree for scheduling
    in_degree = {}
    for node_name, dependencies in dependency_graph.items():
        in_degree[node_name] = len(dependencies)
    for node_name in nodes:
        if node_name not in in_degree:
            in_degree[node_name] = 0

    # Queue for nodes ready to execute
    ready_queue = queue.Queue()

    # Add nodes with no dependencies to ready queue
    for node_name, degree in in_degree.items():
        if degree == 0:
            ready_queue.put(node_name)

    def worker():
        """Worker function for thread pool"""
        while not stop_all_workers.is_set():
            try:
                node_name = ready_queue.get(timeout=1)
                # timeout forces to wait 1 second to avoid busy waiting
                if node_name is None:  # Sentinel value to stop worker
                    break
            except queue.Empty:
                continue

            try:
                logger.info(
                    f"Thread {threading.current_thread().name}: executing node '{node_name}'"
                )
                node = nodes[node_name]

                # Execute the node (without recursive child execution since we handle dependencies here)
                if lazy_execution:
                    if not overwrite:
                        raise ValueError(
                            "lazy_execution only works with overwrite=True."
                        )
                    if con is None:
                        raise ValueError(
                            "A DatabaseConnector is required for lazy execution."
                        )

                    if Node._node_manager.should_rerun(node, con):
                        # Time the execution
                        node.lastexecution_start_time = datetime.now()
                        table = node._execute(tables)

                        if (
                            table is not None
                        ):  # Only create table if _execute returns something
                            con.create_table(table, node_name, overwrite=overwrite)
                            table = con.get_dest_table(node_name)

                        node.lastexecution_end_time = datetime.now()
                        node.lastexecution_duration = (
                            node.lastexecution_end_time
                            - node.lastexecution_start_time
                        ).total_seconds()

                        Node._node_manager.update_run_params(node, con)
                    else:
                        table = con.get_dest_table(node_name)
                else:
                    # Time the execution
                    node.lastexecution_start_time = datetime.now()
                    table = node._execute(tables)

                    if (
                        con and table is not None
                    ):  # Only create table if _execute returns something
                        con.create_table(table, node_name, overwrite=overwrite)
                        table = con.get_dest_table(node_name)

                    node.lastexecution_end_time = datetime.now()
                    node.lastexecution_duration = (
                        node.lastexecution_end_time - node.lastexecution_start_time
                    ).total_seconds()

                node.table = table

                with completion_lock:
                    completed.add(node_name)

                    # Update in-degree for dependent nodes and add ready ones to queue
                    for dependent in reverse_graph.get(node_name, set()):
                        in_degree[dependent] -= 1
                        if in_degree[dependent] == 0:
                            # Check if all dependencies are completed
                            deps_completed = all(
                                dep in completed
                                for dep in dependency_graph.get(dependent, set())
                            )
                            if deps_completed:
                                ready_queue.put(dependent)

                # Log completion with timing info
                if node.lastexecution_duration is not None:
                    logger.info(
                        f"Thread {threading.current_thread().name}: completed node '{node_name}' "
                        f"in {node.lastexecution_duration:.3f} seconds"
                    )
                else:
                    logger.info(
                        f"Thread {threading.current_thread().name}: completed node '{node_name}' (cached)"
                    )

            except Exception as e:
                logger.error(f"Error executing node '{node_name}': {str(e)}")
                with completion_lock:
                    # Store exception for main thread
                    worker_exceptions.append(e)
                    # Signal all workers to stop immediately and exit worker loop
                    stop_all_workers.set()
                    break
            finally:
                ready_queue.task_done()

    # Start worker threads
    threads = []
    for i in range(min(n_threads, len(nodes))):
        thread = threading.Thread(target=worker, name=f"PhenexWorker-{i}")
        thread.daemon = True
        thread.start()
        threads.append(thread)

    # Wait for all nodes to complete or for an error to occur
    while (
        len(completed) < len(nodes)
        and not worker_exceptions
        and not stop_all_workers.is_set()
    ):
        threading.Event().wait(0.1)  # Small delay to prevent busy waiting

    if not stop_all_workers.is_set():
        # Time to stop workers and cleanup
        stop_all_workers.set()

    # Check if any worker thread had an exception
    if worker_exceptions:
        # Signal workers to stop
        for _ in threads:
            ready_queue.put(None)
        # Wait for threads to finish
        for thread in threads:
            thread.join(timeout=1)
        # Re-raise the first exception
        raise worker_exceptions[0]

    # Signal workers to stop and wait for them
    for _ in threads:
        ready_queue.put(None)  # Sentinel value to stop workers

    for thread in threads:
        thread.join(timeout=1)

    logger.info(
        f"Node '{self.name}': completed multithreaded execution of {len(nodes)} nodes"
    )
    return self.table

to_dict()

Return a dictionary representation of the Node. The dictionary must contain all dependencies of the Node such that if anything in self.to_dict() changes, the Node must be recomputed.
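
For example, a sketch with a hypothetical node my_node; per the lazy-execution notes above, this dictionary (together with the class name) is what gets hashed for change detection:

config = my_node.to_dict()
# Changing any value in this dictionary changes the node's hash,
# forcing re-execution on the next lazy_execution=True call.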

Source code in phenex/node.py
def to_dict(self):
    """
    Return a dictionary representation of the Node. The dictionary must contain all dependencies of the Node such that if anything in self.to_dict() changes, the Node must be recomputed.
    """
    return to_dict(self)

visualize_dependencies()

Create a text visualization of the dependency graph for this node and its dependencies.

Returns:

str: A text representation of the dependency graph.
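
For example, a sketch with hypothetical node names; the output format follows the source below:

print(my_node.visualize_dependencies())
# Dependencies for Node 'my_node':
#   codelist_node (no dependencies)
#   my_node depends on: codelist_node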

Source code in phenex/node.py
def visualize_dependencies(self) -> str:
    """
    Create a text visualization of the dependency graph for this node and its dependencies.

    Returns:
        str: A text representation of the dependency graph
    """
    lines = [f"Dependencies for Node '{self.name}':"]

    # Get all dependencies
    all_deps = self.dependencies
    nodes = {node.name: node for node in all_deps}
    nodes[self.name] = self  # Add self to the nodes

    # Build dependency graph
    dependency_graph = self._build_dependency_graph(nodes)

    for node_name in sorted(nodes.keys()):
        dependencies = dependency_graph.get(node_name, set())
        if dependencies:
            deps_str = ", ".join(sorted(dependencies))
            lines.append(f"  {node_name} depends on: {deps_str}")
        else:
            lines.append(f"  {node_name} (no dependencies)")

    return "\n".join(lines)