
graflo.hq

High-level orchestration modules for graflo.

This package provides high-level orchestration classes that coordinate multiple components for graph database operations.

Caster

Main class for data casting and ingestion.

This class handles the process of casting data into graph structures and ingesting them into the database. It supports batch processing, parallel execution, and various data formats.

Attributes:

    schema: Schema configuration for the graph
    ingestion_params: IngestionParams instance controlling ingestion behavior
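
Example (an illustrative sketch, not taken from the source: my_schema, my_db_config and my_patterns are placeholders, and only the Caster import path is confirmed by this page; where IngestionParams, DBConfig and Patterns are exported from is an assumption about your graflo install):

    from graflo.hq.caster import Caster

    # Build a Caster around an existing Schema, tune ingestion behaviour,
    # then write the resulting graph into a target database.
    caster = Caster(
        schema=my_schema,                                        # graflo Schema instance
        ingestion_params=IngestionParams(batch_size=1000, n_cores=4),
    )
    caster.ingest(target_db_config=my_db_config, patterns=my_patterns)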

Source code in graflo/hq/caster.py
class Caster:
    """Main class for data casting and ingestion.

    This class handles the process of casting data into graph structures and
    ingesting them into the database. It supports batch processing, parallel
    execution, and various data formats.

    Attributes:
        schema: Schema configuration for the graph
        ingestion_params: IngestionParams instance controlling ingestion behavior
    """

    def __init__(
        self,
        schema: Schema,
        ingestion_params: IngestionParams | None = None,
        **kwargs,
    ):
        """Initialize the caster with schema and configuration.

        Args:
            schema: Schema configuration for the graph
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, creates IngestionParams from kwargs or uses defaults
            **kwargs: Additional configuration options (for backward compatibility):
                - clear_data: Whether to clear existing data before ingestion
                - n_cores: Number of CPU cores/threads to use for parallel processing
                - max_items: Maximum number of items to process
                - batch_size: Size of batches for processing
                - dry: Whether to perform a dry run
        """
        if ingestion_params is None:
            # Create IngestionParams from kwargs or use defaults
            ingestion_params = IngestionParams(**kwargs)
        self.ingestion_params = ingestion_params
        self.schema = schema

    @staticmethod
    def _datetime_range_where_sql(
        datetime_after: str | None,
        datetime_before: str | None,
        date_column: str,
    ) -> str:
        """Build SQL WHERE fragment for [datetime_after, datetime_before) via FilterExpression.

        Returns empty string if both bounds are None; otherwise uses column with >= and <.
        """
        if not datetime_after and not datetime_before:
            return ""
        parts: list[FilterExpression] = []
        if datetime_after is not None:
            parts.append(
                FilterExpression(
                    kind="leaf",
                    field=date_column,
                    cmp_operator=ComparisonOperator.GE,
                    value=[datetime_after],
                )
            )
        if datetime_before is not None:
            parts.append(
                FilterExpression(
                    kind="leaf",
                    field=date_column,
                    cmp_operator=ComparisonOperator.LT,
                    value=[datetime_before],
                )
            )
        if len(parts) == 1:
            return cast(str, parts[0](kind=ExpressionFlavor.SQL))
        expr = FilterExpression(
            kind="composite",
            operator=LogicalOperator.AND,
            deps=parts,
        )
        return cast(str, expr(kind=ExpressionFlavor.SQL))

    @staticmethod
    def discover_files(
        fpath: Path | str, pattern: FilePattern, limit_files=None
    ) -> list[Path]:
        """Discover files matching a pattern in a directory.

        Args:
            fpath: Path to search in (should be the directory containing files)
            pattern: Pattern to match files against
            limit_files: Optional limit on number of files to return

        Returns:
            list[Path]: List of matching file paths

        Raises:
            AssertionError: If pattern.sub_path is None
        """
        assert pattern.sub_path is not None
        if isinstance(fpath, str):
            fpath_pathlib = Path(fpath)
        else:
            fpath_pathlib = fpath

        # fpath is already the directory to search (pattern.sub_path from caller)
        # so we use it directly, not combined with pattern.sub_path again
        files = [
            f
            for f in fpath_pathlib.iterdir()
            if f.is_file()
            and (
                True
                if pattern.regex is None
                else re.search(pattern.regex, f.name) is not None
            )
        ]

        if limit_files is not None:
            files = files[:limit_files]

        return files

    async def cast_normal_resource(
        self, data, resource_name: str | None = None
    ) -> GraphContainer:
        """Cast data into a graph container using a resource.

        Args:
            data: Data to cast
            resource_name: Optional name of the resource to use

        Returns:
            GraphContainer: Container with cast graph data
        """
        rr = self.schema.fetch_resource(resource_name)

        # Process documents in parallel using asyncio
        semaphore = asyncio.Semaphore(self.ingestion_params.n_cores)

        async def process_doc(doc):
            async with semaphore:
                return await asyncio.to_thread(rr, doc)

        docs = await asyncio.gather(*[process_doc(doc) for doc in data])

        graph = GraphContainer.from_docs_list(docs)
        return graph

    async def process_batch(
        self,
        batch,
        resource_name: str | None,
        conn_conf: None | DBConfig = None,
    ):
        """Process a batch of data.

        Args:
            batch: Batch of data to process
            resource_name: Optional name of the resource to use
            conn_conf: Optional database connection configuration
        """
        gc = await self.cast_normal_resource(batch, resource_name=resource_name)

        if conn_conf is not None:
            await self.push_db(gc=gc, conn_conf=conn_conf, resource_name=resource_name)

    async def process_data_source(
        self,
        data_source: AbstractDataSource,
        resource_name: str | None = None,
        conn_conf: None | DBConfig = None,
    ):
        """Process a data source.

        Args:
            data_source: Data source to process
            resource_name: Optional name of the resource (overrides data_source.resource_name)
            conn_conf: Optional database connection configuration
        """
        # Use provided resource_name or fall back to data_source's resource_name
        actual_resource_name = resource_name or data_source.resource_name

        # Use pattern-specific limit if available, otherwise use global max_items
        limit = getattr(data_source, "_pattern_limit", None)
        if limit is None:
            limit = self.ingestion_params.max_items

        for batch in data_source.iter_batches(
            batch_size=self.ingestion_params.batch_size, limit=limit
        ):
            await self.process_batch(
                batch, resource_name=actual_resource_name, conn_conf=conn_conf
            )

    async def process_resource(
        self,
        resource_instance: (
            Path | str | list[dict] | list[list] | pd.DataFrame | dict[str, Any]
        ),
        resource_name: str | None,
        conn_conf: None | DBConfig = None,
        **kwargs,
    ):
        """Process a resource instance from configuration or direct data.

        This method accepts either:
        1. A configuration dictionary with 'source_type' and data source parameters
        2. A file path (Path or str) - creates FileDataSource
        3. In-memory data (list[dict], list[list], or pd.DataFrame) - creates InMemoryDataSource

        Args:
            resource_instance: Configuration dict, file path, or in-memory data.
                Configuration dict format:
                - {"source_type": "file", "path": "data.json"}
                - {"source_type": "api", "config": {"url": "https://..."}}
                - {"source_type": "sql", "config": {"connection_string": "...", "query": "..."}}
                - {"source_type": "in_memory", "data": [...]}
            resource_name: Optional name of the resource
            conn_conf: Optional database connection configuration
            **kwargs: Additional arguments passed to data source creation
                (e.g., columns for list[list], encoding for files)
        """
        # Handle configuration dictionary
        if isinstance(resource_instance, dict):
            config = resource_instance.copy()
            # Merge with kwargs (kwargs take precedence)
            config.update(kwargs)
            data_source = DataSourceFactory.create_data_source_from_config(config)
        # Handle file paths
        elif isinstance(resource_instance, (Path, str)):
            # File path - create FileDataSource
            # Extract only valid file data source parameters with proper typing
            file_type: str | ChunkerType | None = cast(
                str | ChunkerType | None, kwargs.get("file_type", None)
            )
            encoding: EncodingType = cast(
                EncodingType, kwargs.get("encoding", EncodingType.UTF_8)
            )
            sep: str | None = cast(str | None, kwargs.get("sep", None))
            data_source = DataSourceFactory.create_file_data_source(
                path=resource_instance,
                file_type=file_type,
                encoding=encoding,
                sep=sep,
            )
        # Handle in-memory data
        else:
            # In-memory data - create InMemoryDataSource
            # Extract only valid in-memory data source parameters with proper typing
            columns: list[str] | None = cast(
                list[str] | None, kwargs.get("columns", None)
            )
            data_source = DataSourceFactory.create_in_memory_data_source(
                data=resource_instance,
                columns=columns,
            )

        data_source.resource_name = resource_name

        # Process using the data source
        await self.process_data_source(
            data_source=data_source,
            resource_name=resource_name,
            conn_conf=conn_conf,
        )

    async def push_db(
        self,
        gc: GraphContainer,
        conn_conf: DBConfig,
        resource_name: str | None,
    ):
        """Push graph container data to the database.

        Args:
            gc: Graph container with data to push
            conn_conf: Database connection configuration
            resource_name: Optional name of the resource
        """
        vc = self.schema.vertex_config
        resource = self.schema.fetch_resource(resource_name)

        # Push vertices in parallel (with configurable concurrency control to prevent deadlocks)
        # Some databases can deadlock when multiple transactions modify the same nodes
        # Use a semaphore to limit concurrent operations based on max_concurrent_db_ops
        max_concurrent = (
            self.ingestion_params.max_concurrent_db_ops
            if self.ingestion_params.max_concurrent_db_ops is not None
            else self.ingestion_params.n_cores
        )
        vertex_semaphore = asyncio.Semaphore(max_concurrent)

        async def push_vertex(vcol: str, data: list[dict]):
            async with vertex_semaphore:

                def _push_vertex_sync():
                    with ConnectionManager(connection_config=conn_conf) as db_client:
                        # blank nodes: push and get back their keys  {"_key": ...}
                        if vcol in vc.blank_vertices:
                            query0 = db_client.insert_return_batch(
                                data, vc.vertex_dbname(vcol)
                            )
                            cursor = db_client.execute(query0)
                            return vcol, [item for item in cursor]
                        else:
                            db_client.upsert_docs_batch(
                                data,
                                vc.vertex_dbname(vcol),
                                vc.index(vcol),
                                update_keys="doc",
                                filter_uniques=True,
                                dry=self.ingestion_params.dry,
                            )
                            return vcol, None

                return await asyncio.to_thread(_push_vertex_sync)

        # Process all vertices in parallel (with semaphore limiting concurrency for Neo4j)
        vertex_results = await asyncio.gather(
            *[push_vertex(vcol, data) for vcol, data in gc.vertices.items()]
        )

        # Update blank vertices with returned keys
        for vcol, result in vertex_results:
            if result is not None:
                gc.vertices[vcol] = result

        # update edge misc with blank node edges
        for vcol in vc.blank_vertices:
            for edge_id, edge in self.schema.edge_config.edges_items():
                vfrom, vto, relation = edge_id
                if vcol == vfrom or vcol == vto:
                    if edge_id not in gc.edges:
                        gc.edges[edge_id] = []
                    gc.edges[edge_id].extend(
                        [
                            (x, y, {})
                            for x, y in zip(gc.vertices[vfrom], gc.vertices[vto])
                        ]
                    )

        # Process extra weights
        async def process_extra_weights():
            def _process_extra_weights_sync():
                with ConnectionManager(connection_config=conn_conf) as db_client:
                    # currently works only on item level
                    for edge in resource.extra_weights:
                        if edge.weights is None:
                            continue
                        for weight in edge.weights.vertices:
                            if weight.name in vc.vertex_set:
                                index_fields = vc.index(weight.name)

                                if (
                                    not self.ingestion_params.dry
                                    and weight.name in gc.vertices
                                ):
                                    weights_per_item = (
                                        db_client.fetch_present_documents(
                                            class_name=vc.vertex_dbname(weight.name),
                                            batch=gc.vertices[weight.name],
                                            match_keys=index_fields.fields,
                                            keep_keys=weight.fields,
                                        )
                                    )

                                    for j, item in enumerate(gc.linear):
                                        weights = weights_per_item[j]

                                        for ee in item[edge.edge_id]:
                                            weight_collection_attached = {
                                                weight.cfield(k): v
                                                for k, v in weights[0].items()
                                            }
                                            ee.update(weight_collection_attached)
                            else:
                                logger.error(f"{weight.name} not a valid vertex")

            await asyncio.to_thread(_process_extra_weights_sync)

        await process_extra_weights()

        # Push edges in parallel (with configurable concurrency control to prevent deadlocks)
        # Some databases can deadlock when multiple transactions modify the same nodes/relationships
        # Use a semaphore to limit concurrent operations based on max_concurrent_db_ops
        edge_semaphore = asyncio.Semaphore(max_concurrent)

        async def push_edge(edge_id: tuple, edge: Edge):
            async with edge_semaphore:

                def _push_edge_sync():
                    with ConnectionManager(connection_config=conn_conf) as db_client:
                        for ee in gc.loop_over_relations(edge_id):
                            _, _, relation = ee
                            if not self.ingestion_params.dry:
                                data = gc.edges[ee]
                                db_client.insert_edges_batch(
                                    docs_edges=data,
                                    source_class=vc.vertex_dbname(edge.source),
                                    target_class=vc.vertex_dbname(edge.target),
                                    relation_name=relation,
                                    match_keys_source=vc.index(edge.source).fields,
                                    match_keys_target=vc.index(edge.target).fields,
                                    filter_uniques=False,
                                    dry=self.ingestion_params.dry,
                                    collection_name=edge.database_name,
                                )

                await asyncio.to_thread(_push_edge_sync)

        # Process all edges in parallel (with semaphore limiting concurrency for Neo4j)
        await asyncio.gather(
            *[
                push_edge(edge_id, edge)
                for edge_id, edge in self.schema.edge_config.edges_items()
            ]
        )

    async def process_with_queue(
        self, tasks: asyncio.Queue, conn_conf: DBConfig | None = None
    ):
        """Process tasks from a queue.

        Args:
            tasks: Async queue of tasks to process
            conn_conf: Optional database connection configuration
        """
        # Sentinel value to signal completion
        SENTINEL = None

        while True:
            try:
                # Get task from queue (will wait if queue is empty)
                task = await tasks.get()

                # Check for sentinel value
                if task is SENTINEL:
                    tasks.task_done()
                    break

                # Support both (Path, str) tuples and DataSource instances
                if isinstance(task, tuple) and len(task) == 2:
                    filepath, resource_name = task
                    await self.process_resource(
                        resource_instance=filepath,
                        resource_name=resource_name,
                        conn_conf=conn_conf,
                    )
                elif isinstance(task, AbstractDataSource):
                    await self.process_data_source(
                        data_source=task, conn_conf=conn_conf
                    )
                tasks.task_done()
            except Exception as e:
                logger.error(f"Error processing task: {e}", exc_info=True)
                tasks.task_done()
                break

    @staticmethod
    def normalize_resource(
        data: pd.DataFrame | list[list] | list[dict], columns: list[str] | None = None
    ) -> list[dict]:
        """Normalize resource data into a list of dictionaries.

        Args:
            data: Data to normalize (DataFrame, list of lists, or list of dicts)
            columns: Optional column names for list data

        Returns:
            list[dict]: Normalized data as list of dictionaries

        Raises:
            ValueError: If columns is not provided for list data
        """
        if isinstance(data, pd.DataFrame):
            columns = data.columns.tolist()
            _data = data.values.tolist()
        elif data and isinstance(data[0], list):
            _data = cast(list[list], data)  # Tell mypy this is list[list]
            if columns is None:
                raise ValueError("columns should be set")
        else:
            return cast(list[dict], data)  # Tell mypy this is list[dict]
        rows_dressed = [{k: v for k, v in zip(columns, item)} for item in _data]
        return rows_dressed

    async def ingest_data_sources(
        self,
        data_source_registry: DataSourceRegistry,
        conn_conf: DBConfig,
        ingestion_params: IngestionParams | None = None,
    ):
        """Ingest data from data sources in a registry.

        Note: Schema definition should be handled separately via GraphEngine.define_schema()
        before calling this method.

        Args:
            data_source_registry: Registry containing data sources mapped to resources
            conn_conf: Database connection configuration
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, uses default IngestionParams()
        """
        if ingestion_params is None:
            ingestion_params = IngestionParams()

        # Update ingestion params (may override defaults set in __init__)
        self.ingestion_params = ingestion_params
        init_only = ingestion_params.init_only

        if init_only:
            logger.info("ingest execution bound to init")
            sys.exit(0)

        # Collect all data sources
        tasks: list[AbstractDataSource] = []
        for resource_name in self.schema._resources.keys():
            data_sources = data_source_registry.get_data_sources(resource_name)
            if data_sources:
                logger.info(
                    f"For resource name {resource_name} {len(data_sources)} data sources were found"
                )
                tasks.extend(data_sources)

        with Timer() as klepsidra:
            if self.ingestion_params.n_cores > 1:
                # Use asyncio for parallel processing
                queue_tasks: asyncio.Queue = asyncio.Queue()
                for item in tasks:
                    await queue_tasks.put(item)

                # Add sentinel values to signal workers to stop
                for _ in range(self.ingestion_params.n_cores):
                    await queue_tasks.put(None)

                # Create worker tasks
                worker_tasks = [
                    self.process_with_queue(queue_tasks, conn_conf=conn_conf)
                    for _ in range(self.ingestion_params.n_cores)
                ]

                # Run all workers in parallel
                await asyncio.gather(*worker_tasks)
            else:
                for data_source in tasks:
                    await self.process_data_source(
                        data_source=data_source, conn_conf=conn_conf
                    )
        logger.info(f"Processing took {klepsidra.elapsed:.1f} sec")

    def _register_file_sources(
        self,
        registry: DataSourceRegistry,
        resource_name: str,
        pattern: FilePattern,
        ingestion_params: IngestionParams,
    ) -> None:
        """Register file data sources for a resource.

        Args:
            registry: Data source registry to add sources to
            resource_name: Name of the resource
            pattern: File pattern configuration
            ingestion_params: Ingestion parameters
        """
        if pattern.sub_path is None:
            logger.warning(
                f"FilePattern for resource '{resource_name}' has no sub_path, skipping"
            )
            return

        path_obj = pattern.sub_path.expanduser()
        files = Caster.discover_files(
            path_obj, limit_files=ingestion_params.limit_files, pattern=pattern
        )
        logger.info(f"For resource name {resource_name} {len(files)} files were found")

        for file_path in files:
            file_source = DataSourceFactory.create_file_data_source(path=file_path)
            registry.register(file_source, resource_name=resource_name)

    def _register_sql_table_sources(
        self,
        registry: DataSourceRegistry,
        resource_name: str,
        pattern: TablePattern,
        patterns: "Patterns",
        ingestion_params: IngestionParams,
    ) -> None:
        """Register SQL table data sources for a resource.

        Uses SQLDataSource with batch processing (cursors) instead of loading
        all data into memory. This is efficient for large tables.

        Args:
            registry: Data source registry to add sources to
            resource_name: Name of the resource
            pattern: Table pattern configuration
            patterns: Patterns instance for accessing configs
            ingestion_params: Ingestion parameters
        """
        postgres_config = patterns.get_postgres_config(resource_name)
        if postgres_config is None:
            logger.warning(
                f"PostgreSQL table '{resource_name}' has no connection config, skipping"
            )
            return

        table_info = patterns.get_table_info(resource_name)
        if table_info is None:
            logger.warning(
                f"Could not get table info for resource '{resource_name}', skipping"
            )
            return

        table_name, schema_name = table_info
        effective_schema = schema_name or postgres_config.schema_name or "public"

        try:
            # Build base query
            query = f'SELECT * FROM "{effective_schema}"."{table_name}"'
            where_parts: list[str] = []
            pattern_where = pattern.build_where_clause()
            if pattern_where:
                where_parts.append(pattern_where)
            # Ingestion datetime range [datetime_after, datetime_before)
            date_column = pattern.date_field or ingestion_params.datetime_column
            if (
                ingestion_params.datetime_after or ingestion_params.datetime_before
            ) and date_column:
                datetime_where = Caster._datetime_range_where_sql(
                    ingestion_params.datetime_after,
                    ingestion_params.datetime_before,
                    date_column,
                )
                if datetime_where:
                    where_parts.append(datetime_where)
            elif ingestion_params.datetime_after or ingestion_params.datetime_before:
                logger.warning(
                    "datetime_after/datetime_before set but no date column: "
                    "set TablePattern.date_field or IngestionParams.datetime_column for resource %s",
                    resource_name,
                )
            if where_parts:
                query += " WHERE " + " AND ".join(where_parts)

            # Get SQLAlchemy connection string from PostgresConfig
            connection_string = postgres_config.to_sqlalchemy_connection_string()

            # Create SQLDataSource with pagination for efficient batch processing
            # Note: max_items limit is handled by SQLDataSource.iter_batches() limit parameter
            sql_config = SQLConfig(
                connection_string=connection_string,
                query=query,
                pagination=True,
                page_size=ingestion_params.batch_size,  # Use batch_size for page size
            )
            sql_source = SQLDataSource(config=sql_config)

            # Register the SQL data source (it will be processed in batches)
            registry.register(sql_source, resource_name=resource_name)

            logger.info(
                f"Created SQL data source for table '{effective_schema}.{table_name}' "
                f"mapped to resource '{resource_name}' (will process in batches of {ingestion_params.batch_size})"
            )
        except Exception as e:
            logger.error(
                f"Failed to create data source for PostgreSQL table '{resource_name}': {e}",
                exc_info=True,
            )

    def _build_registry_from_patterns(
        self,
        patterns: "Patterns",
        ingestion_params: IngestionParams,
    ) -> DataSourceRegistry:
        """Build data source registry from patterns.

        Args:
            patterns: Patterns instance mapping resources to data sources
            ingestion_params: Ingestion parameters

        Returns:
            DataSourceRegistry with registered data sources
        """
        registry = DataSourceRegistry()

        for resource in self.schema.resources:
            resource_name = resource.name
            resource_type = patterns.get_resource_type(resource_name)

            if resource_type is None:
                logger.warning(
                    f"No resource type found for resource '{resource_name}', skipping"
                )
                continue

            pattern = patterns.patterns.get(resource_name)
            if pattern is None:
                logger.warning(
                    f"No pattern found for resource '{resource_name}', skipping"
                )
                continue

            if resource_type == ResourceType.FILE:
                if not isinstance(pattern, FilePattern):
                    logger.warning(
                        f"Pattern for resource '{resource_name}' is not a FilePattern, skipping"
                    )
                    continue
                self._register_file_sources(
                    registry, resource_name, pattern, ingestion_params
                )

            elif resource_type == ResourceType.SQL_TABLE:
                if not isinstance(pattern, TablePattern):
                    logger.warning(
                        f"Pattern for resource '{resource_name}' is not a TablePattern, skipping"
                    )
                    continue
                self._register_sql_table_sources(
                    registry, resource_name, pattern, patterns, ingestion_params
                )

            else:
                logger.warning(
                    f"Unsupported resource type '{resource_type}' for resource '{resource_name}', skipping"
                )

        return registry

    def ingest(
        self,
        target_db_config: DBConfig,
        patterns: "Patterns | None" = None,
        ingestion_params: IngestionParams | None = None,
    ):
        """Ingest data into the graph database.

        This is the main ingestion method that takes:
        - Schema: Graph structure (already set in Caster)
        - OutputConfig: Target graph database configuration
        - Patterns: Mapping of resources to physical data sources
        - IngestionParams: Parameters controlling the ingestion process

        Args:
            target_db_config: Target database connection configuration (for writing graph)
            patterns: Patterns instance mapping resources to data sources
                If None, defaults to empty Patterns()
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, uses default IngestionParams()
        """
        # Normalize parameters
        patterns = patterns or Patterns()
        ingestion_params = ingestion_params or IngestionParams()

        # Initialize vertex config with correct field types based on database type
        db_flavor = target_db_config.connection_type
        self.schema.vertex_config.db_flavor = db_flavor
        self.schema.vertex_config.finish_init()
        # Initialize edge config after vertex config is fully initialized
        self.schema.edge_config.finish_init(self.schema.vertex_config)

        # Build registry from patterns
        registry = self._build_registry_from_patterns(patterns, ingestion_params)

        # Ingest data sources
        asyncio.run(
            self.ingest_data_sources(
                data_source_registry=registry,
                conn_conf=target_db_config,
                ingestion_params=ingestion_params,
            )
        )

__init__(schema, ingestion_params=None, **kwargs)

Initialize the caster with schema and configuration.

Parameters:

    schema (Schema): Schema configuration for the graph. Required.
    ingestion_params (IngestionParams | None): IngestionParams instance with ingestion configuration. If None, creates IngestionParams from kwargs or uses defaults. Default: None.
    **kwargs: Additional configuration options (for backward compatibility). Default: {}.
        - clear_data: Whether to clear existing data before ingestion
        - n_cores: Number of CPU cores/threads to use for parallel processing
        - max_items: Maximum number of items to process
        - batch_size: Size of batches for processing
        - dry: Whether to perform a dry run
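
Example (sketch; my_schema is a placeholder Schema instance and the IngestionParams import path is an assumption):

    # Explicit parameters object
    caster = Caster(
        schema=my_schema,
        ingestion_params=IngestionParams(batch_size=500, n_cores=2),
    )

    # Backward-compatible form: the kwargs are forwarded to IngestionParams(**kwargs)
    caster = Caster(schema=my_schema, batch_size=500, n_cores=2, dry=True)
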
Source code in graflo/hq/caster.py
def __init__(
    self,
    schema: Schema,
    ingestion_params: IngestionParams | None = None,
    **kwargs,
):
    """Initialize the caster with schema and configuration.

    Args:
        schema: Schema configuration for the graph
        ingestion_params: IngestionParams instance with ingestion configuration.
            If None, creates IngestionParams from kwargs or uses defaults
        **kwargs: Additional configuration options (for backward compatibility):
            - clear_data: Whether to clear existing data before ingestion
            - n_cores: Number of CPU cores/threads to use for parallel processing
            - max_items: Maximum number of items to process
            - batch_size: Size of batches for processing
            - dry: Whether to perform a dry run
    """
    if ingestion_params is None:
        # Create IngestionParams from kwargs or use defaults
        ingestion_params = IngestionParams(**kwargs)
    self.ingestion_params = ingestion_params
    self.schema = schema

cast_normal_resource(data, resource_name=None) async

Cast data into a graph container using a resource.

Parameters:

    data: Data to cast. Required.
    resource_name (str | None): Optional name of the resource to use. Default: None.

Returns:

    GraphContainer: Container with cast graph data
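
Example (sketch; caster is an initialized Caster and "person" is a placeholder resource name):

    import asyncio

    docs = [{"name": "Ada", "born": 1815}, {"name": "Alan", "born": 1912}]

    async def cast_docs():
        # Documents are cast concurrently, bounded by ingestion_params.n_cores
        return await caster.cast_normal_resource(docs, resource_name="person")

    graph = asyncio.run(cast_docs())
    print(graph.vertices.keys())   # vertex collections produced by the cast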

Source code in graflo/hq/caster.py
async def cast_normal_resource(
    self, data, resource_name: str | None = None
) -> GraphContainer:
    """Cast data into a graph container using a resource.

    Args:
        data: Data to cast
        resource_name: Optional name of the resource to use

    Returns:
        GraphContainer: Container with cast graph data
    """
    rr = self.schema.fetch_resource(resource_name)

    # Process documents in parallel using asyncio
    semaphore = asyncio.Semaphore(self.ingestion_params.n_cores)

    async def process_doc(doc):
        async with semaphore:
            return await asyncio.to_thread(rr, doc)

    docs = await asyncio.gather(*[process_doc(doc) for doc in data])

    graph = GraphContainer.from_docs_list(docs)
    return graph

discover_files(fpath, pattern, limit_files=None) staticmethod

Discover files matching a pattern in a directory.

Parameters:

    fpath (Path | str): Path to search in (should be the directory containing files). Required.
    pattern (FilePattern): Pattern to match files against. Required.
    limit_files: Optional limit on number of files to return. Default: None.

Returns:

    list[Path]: List of matching file paths

Raises:

    AssertionError: If pattern.sub_path is None
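
Example (sketch; the FilePattern constructor arguments shown here are assumptions based on the attributes used above, sub_path and regex):

    from pathlib import Path

    data_dir = Path("./data")
    pattern = FilePattern(sub_path=data_dir, regex=r"\.csv$")

    # Return at most 10 regular files in ./data whose names match the regex
    files = Caster.discover_files(data_dir, pattern=pattern, limit_files=10)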

Source code in graflo/hq/caster.py
@staticmethod
def discover_files(
    fpath: Path | str, pattern: FilePattern, limit_files=None
) -> list[Path]:
    """Discover files matching a pattern in a directory.

    Args:
        fpath: Path to search in (should be the directory containing files)
        pattern: Pattern to match files against
        limit_files: Optional limit on number of files to return

    Returns:
        list[Path]: List of matching file paths

    Raises:
        AssertionError: If pattern.sub_path is None
    """
    assert pattern.sub_path is not None
    if isinstance(fpath, str):
        fpath_pathlib = Path(fpath)
    else:
        fpath_pathlib = fpath

    # fpath is already the directory to search (pattern.sub_path from caller)
    # so we use it directly, not combined with pattern.sub_path again
    files = [
        f
        for f in fpath_pathlib.iterdir()
        if f.is_file()
        and (
            True
            if pattern.regex is None
            else re.search(pattern.regex, f.name) is not None
        )
    ]

    if limit_files is not None:
        files = files[:limit_files]

    return files

ingest(target_db_config, patterns=None, ingestion_params=None)

Ingest data into the graph database.

This is the main ingestion method that takes:

- Schema: Graph structure (already set in Caster)
- OutputConfig: Target graph database configuration
- Patterns: Mapping of resources to physical data sources
- IngestionParams: Parameters controlling the ingestion process

Parameters:

    target_db_config (DBConfig): Target database connection configuration (for writing graph). Required.
    patterns (Patterns | None): Patterns instance mapping resources to data sources. If None, defaults to empty Patterns(). Default: None.
    ingestion_params (IngestionParams | None): IngestionParams instance with ingestion configuration. If None, uses default IngestionParams(). Default: None.
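
Example (end-to-end sketch; my_schema, my_db_config and my_patterns are placeholders, and the IngestionParams import path is an assumption):

    caster = Caster(schema=my_schema)

    # Patterns maps each resource name to a FilePattern or TablePattern;
    # an empty Patterns() would simply register no data sources.
    caster.ingest(
        target_db_config=my_db_config,
        patterns=my_patterns,
        ingestion_params=IngestionParams(batch_size=1000, n_cores=4),
    )
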
Source code in graflo/hq/caster.py
def ingest(
    self,
    target_db_config: DBConfig,
    patterns: "Patterns | None" = None,
    ingestion_params: IngestionParams | None = None,
):
    """Ingest data into the graph database.

    This is the main ingestion method that takes:
    - Schema: Graph structure (already set in Caster)
    - OutputConfig: Target graph database configuration
    - Patterns: Mapping of resources to physical data sources
    - IngestionParams: Parameters controlling the ingestion process

    Args:
        target_db_config: Target database connection configuration (for writing graph)
        patterns: Patterns instance mapping resources to data sources
            If None, defaults to empty Patterns()
        ingestion_params: IngestionParams instance with ingestion configuration.
            If None, uses default IngestionParams()
    """
    # Normalize parameters
    patterns = patterns or Patterns()
    ingestion_params = ingestion_params or IngestionParams()

    # Initialize vertex config with correct field types based on database type
    db_flavor = target_db_config.connection_type
    self.schema.vertex_config.db_flavor = db_flavor
    self.schema.vertex_config.finish_init()
    # Initialize edge config after vertex config is fully initialized
    self.schema.edge_config.finish_init(self.schema.vertex_config)

    # Build registry from patterns
    registry = self._build_registry_from_patterns(patterns, ingestion_params)

    # Ingest data sources
    asyncio.run(
        self.ingest_data_sources(
            data_source_registry=registry,
            conn_conf=target_db_config,
            ingestion_params=ingestion_params,
        )
    )

ingest_data_sources(data_source_registry, conn_conf, ingestion_params=None) async

Ingest data from data sources in a registry.

Note: Schema definition should be handled separately via GraphEngine.define_schema() before calling this method.

Parameters:

    data_source_registry (DataSourceRegistry): Registry containing data sources mapped to resources. Required.
    conn_conf (DBConfig): Database connection configuration. Required.
    ingestion_params (IngestionParams | None): IngestionParams instance with ingestion configuration. If None, uses default IngestionParams(). Default: None.
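
Example (sketch; register() and create_file_data_source() usage mirrors the source above, while import locations for DataSourceRegistry and DataSourceFactory, the file path, "person" and my_db_config are assumptions/placeholders):

    import asyncio

    registry = DataSourceRegistry()
    source = DataSourceFactory.create_file_data_source(path="data/people.json")
    registry.register(source, resource_name="person")

    asyncio.run(
        caster.ingest_data_sources(
            data_source_registry=registry,
            conn_conf=my_db_config,
            ingestion_params=IngestionParams(n_cores=2, batch_size=500),
        )
    )
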
Source code in graflo/hq/caster.py
async def ingest_data_sources(
    self,
    data_source_registry: DataSourceRegistry,
    conn_conf: DBConfig,
    ingestion_params: IngestionParams | None = None,
):
    """Ingest data from data sources in a registry.

    Note: Schema definition should be handled separately via GraphEngine.define_schema()
    before calling this method.

    Args:
        data_source_registry: Registry containing data sources mapped to resources
        conn_conf: Database connection configuration
        ingestion_params: IngestionParams instance with ingestion configuration.
            If None, uses default IngestionParams()
    """
    if ingestion_params is None:
        ingestion_params = IngestionParams()

    # Update ingestion params (may override defaults set in __init__)
    self.ingestion_params = ingestion_params
    init_only = ingestion_params.init_only

    if init_only:
        logger.info("ingest execution bound to init")
        sys.exit(0)

    # Collect all data sources
    tasks: list[AbstractDataSource] = []
    for resource_name in self.schema._resources.keys():
        data_sources = data_source_registry.get_data_sources(resource_name)
        if data_sources:
            logger.info(
                f"For resource name {resource_name} {len(data_sources)} data sources were found"
            )
            tasks.extend(data_sources)

    with Timer() as klepsidra:
        if self.ingestion_params.n_cores > 1:
            # Use asyncio for parallel processing
            queue_tasks: asyncio.Queue = asyncio.Queue()
            for item in tasks:
                await queue_tasks.put(item)

            # Add sentinel values to signal workers to stop
            for _ in range(self.ingestion_params.n_cores):
                await queue_tasks.put(None)

            # Create worker tasks
            worker_tasks = [
                self.process_with_queue(queue_tasks, conn_conf=conn_conf)
                for _ in range(self.ingestion_params.n_cores)
            ]

            # Run all workers in parallel
            await asyncio.gather(*worker_tasks)
        else:
            for data_source in tasks:
                await self.process_data_source(
                    data_source=data_source, conn_conf=conn_conf
                )
    logger.info(f"Processing took {klepsidra.elapsed:.1f} sec")

normalize_resource(data, columns=None) staticmethod

Normalize resource data into a list of dictionaries.

Parameters:

    data (DataFrame | list[list] | list[dict]): Data to normalize (DataFrame, list of lists, or list of dicts). Required.
    columns (list[str] | None): Optional column names for list data. Default: None.

Returns:

    list[dict]: Normalized data as list of dictionaries

Raises:

    ValueError: If columns is not provided for list data
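
Example of the three accepted input shapes:

    import pandas as pd

    # list of lists requires explicit column names
    Caster.normalize_resource([[1, "Ada"], [2, "Alan"]], columns=["id", "name"])
    # -> [{'id': 1, 'name': 'Ada'}, {'id': 2, 'name': 'Alan'}]

    # a DataFrame supplies its own columns
    Caster.normalize_resource(pd.DataFrame({"id": [1, 2], "name": ["Ada", "Alan"]}))

    # a list of dicts is returned unchanged
    Caster.normalize_resource([{"id": 1, "name": "Ada"}])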

Source code in graflo/hq/caster.py
@staticmethod
def normalize_resource(
    data: pd.DataFrame | list[list] | list[dict], columns: list[str] | None = None
) -> list[dict]:
    """Normalize resource data into a list of dictionaries.

    Args:
        data: Data to normalize (DataFrame, list of lists, or list of dicts)
        columns: Optional column names for list data

    Returns:
        list[dict]: Normalized data as list of dictionaries

    Raises:
        ValueError: If columns is not provided for list data
    """
    if isinstance(data, pd.DataFrame):
        columns = data.columns.tolist()
        _data = data.values.tolist()
    elif data and isinstance(data[0], list):
        _data = cast(list[list], data)  # Tell mypy this is list[list]
        if columns is None:
            raise ValueError("columns should be set")
    else:
        return cast(list[dict], data)  # Tell mypy this is list[dict]
    rows_dressed = [{k: v for k, v in zip(columns, item)} for item in _data]
    return rows_dressed

process_batch(batch, resource_name, conn_conf=None) async

Process a batch of data.

Parameters:

    batch: Batch of data to process. Required.
    resource_name (str | None): Optional name of the resource to use. Required.
    conn_conf (None | DBConfig): Optional database connection configuration. Default: None.
Source code in graflo/hq/caster.py
async def process_batch(
    self,
    batch,
    resource_name: str | None,
    conn_conf: None | DBConfig = None,
):
    """Process a batch of data.

    Args:
        batch: Batch of data to process
        resource_name: Optional name of the resource to use
        conn_conf: Optional database connection configuration
    """
    gc = await self.cast_normal_resource(batch, resource_name=resource_name)

    if conn_conf is not None:
        await self.push_db(gc=gc, conn_conf=conn_conf, resource_name=resource_name)

process_data_source(data_source, resource_name=None, conn_conf=None) async

Process a data source.

Parameters:

    data_source (AbstractDataSource): Data source to process. Required.
    resource_name (str | None): Optional name of the resource (overrides data_source.resource_name). Default: None.
    conn_conf (None | DBConfig): Optional database connection configuration. Default: None.
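
Example (sketch; the SQLConfig/SQLDataSource usage mirrors _register_sql_table_sources above, while the connection string, query, resource name and my_db_config are placeholders):

    import asyncio

    sql_source = SQLDataSource(
        config=SQLConfig(
            connection_string="postgresql+psycopg2://user:pass@host/db",
            query="SELECT * FROM people",
            pagination=True,
            page_size=1000,          # one page per processed batch
        )
    )

    asyncio.run(
        caster.process_data_source(
            data_source=sql_source,
            resource_name="person",
            conn_conf=my_db_config,
        )
    )
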
Source code in graflo/hq/caster.py
async def process_data_source(
    self,
    data_source: AbstractDataSource,
    resource_name: str | None = None,
    conn_conf: None | DBConfig = None,
):
    """Process a data source.

    Args:
        data_source: Data source to process
        resource_name: Optional name of the resource (overrides data_source.resource_name)
        conn_conf: Optional database connection configuration
    """
    # Use provided resource_name or fall back to data_source's resource_name
    actual_resource_name = resource_name or data_source.resource_name

    # Use pattern-specific limit if available, otherwise use global max_items
    limit = getattr(data_source, "_pattern_limit", None)
    if limit is None:
        limit = self.ingestion_params.max_items

    for batch in data_source.iter_batches(
        batch_size=self.ingestion_params.batch_size, limit=limit
    ):
        await self.process_batch(
            batch, resource_name=actual_resource_name, conn_conf=conn_conf
        )

process_resource(resource_instance, resource_name, conn_conf=None, **kwargs) async

Process a resource instance from configuration or direct data.

This method accepts either:

1. A configuration dictionary with 'source_type' and data source parameters
2. A file path (Path or str) - creates FileDataSource
3. In-memory data (list[dict], list[list], or pd.DataFrame) - creates InMemoryDataSource

Parameters:

    resource_instance (Path | str | list[dict] | list[list] | DataFrame | dict[str, Any]): Configuration dict, file path, or in-memory data. Required.
        Configuration dict format:
        - {"source_type": "file", "path": "data.json"}
        - {"source_type": "api", "config": {"url": "https://..."}}
        - {"source_type": "sql", "config": {"connection_string": "...", "query": "..."}}
        - {"source_type": "in_memory", "data": [...]}
    resource_name (str | None): Optional name of the resource. Required.
    conn_conf (None | DBConfig): Optional database connection configuration. Default: None.
    **kwargs: Additional arguments passed to data source creation (e.g., columns for list[list], encoding for files). Default: {}.
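
Example (sketch of the three accepted input shapes; file paths, the "person" resource name and my_db_config are placeholders):

    import asyncio

    # 1. configuration dictionary
    asyncio.run(caster.process_resource(
        {"source_type": "file", "path": "data.json"},
        resource_name="person",
        conn_conf=my_db_config,
    ))

    # 2. file path (extra kwargs such as sep/encoding are forwarded)
    asyncio.run(caster.process_resource(
        "data/people.csv", resource_name="person", conn_conf=my_db_config, sep=",",
    ))

    # 3. in-memory rows (columns required for list[list])
    asyncio.run(caster.process_resource(
        [[1, "Ada"], [2, "Alan"]],
        resource_name="person",
        conn_conf=my_db_config,
        columns=["id", "name"],
    ))
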
Source code in graflo/hq/caster.py
async def process_resource(
    self,
    resource_instance: (
        Path | str | list[dict] | list[list] | pd.DataFrame | dict[str, Any]
    ),
    resource_name: str | None,
    conn_conf: None | DBConfig = None,
    **kwargs,
):
    """Process a resource instance from configuration or direct data.

    This method accepts either:
    1. A configuration dictionary with 'source_type' and data source parameters
    2. A file path (Path or str) - creates FileDataSource
    3. In-memory data (list[dict], list[list], or pd.DataFrame) - creates InMemoryDataSource

    Args:
        resource_instance: Configuration dict, file path, or in-memory data.
            Configuration dict format:
            - {"source_type": "file", "path": "data.json"}
            - {"source_type": "api", "config": {"url": "https://..."}}
            - {"source_type": "sql", "config": {"connection_string": "...", "query": "..."}}
            - {"source_type": "in_memory", "data": [...]}
        resource_name: Optional name of the resource
        conn_conf: Optional database connection configuration
        **kwargs: Additional arguments passed to data source creation
            (e.g., columns for list[list], encoding for files)
    """
    # Handle configuration dictionary
    if isinstance(resource_instance, dict):
        config = resource_instance.copy()
        # Merge with kwargs (kwargs take precedence)
        config.update(kwargs)
        data_source = DataSourceFactory.create_data_source_from_config(config)
    # Handle file paths
    elif isinstance(resource_instance, (Path, str)):
        # File path - create FileDataSource
        # Extract only valid file data source parameters with proper typing
        file_type: str | ChunkerType | None = cast(
            str | ChunkerType | None, kwargs.get("file_type", None)
        )
        encoding: EncodingType = cast(
            EncodingType, kwargs.get("encoding", EncodingType.UTF_8)
        )
        sep: str | None = cast(str | None, kwargs.get("sep", None))
        data_source = DataSourceFactory.create_file_data_source(
            path=resource_instance,
            file_type=file_type,
            encoding=encoding,
            sep=sep,
        )
    # Handle in-memory data
    else:
        # In-memory data - create InMemoryDataSource
        # Extract only valid in-memory data source parameters with proper typing
        columns: list[str] | None = cast(
            list[str] | None, kwargs.get("columns", None)
        )
        data_source = DataSourceFactory.create_in_memory_data_source(
            data=resource_instance,
            columns=columns,
        )

    data_source.resource_name = resource_name

    # Process using the data source
    await self.process_data_source(
        data_source=data_source,
        resource_name=resource_name,
        conn_conf=conn_conf,
    )

process_with_queue(tasks, conn_conf=None) async

Process tasks from a queue.

Parameters:

    tasks (Queue): Async queue of tasks to process. Required.
    conn_conf (DBConfig | None): Optional database connection configuration. Default: None.
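
Example (sketch; mirrors how ingest_data_sources drives its workers, with the file paths, "person" and my_db_config as placeholders):

    import asyncio
    from pathlib import Path

    async def run_two_workers():
        queue: asyncio.Queue = asyncio.Queue()
        await queue.put((Path("data/a.json"), "person"))
        await queue.put((Path("data/b.json"), "person"))
        for _ in range(2):
            await queue.put(None)        # one sentinel per worker

        await asyncio.gather(
            caster.process_with_queue(queue, conn_conf=my_db_config),
            caster.process_with_queue(queue, conn_conf=my_db_config),
        )

    asyncio.run(run_two_workers())
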
Source code in graflo/hq/caster.py
async def process_with_queue(
    self, tasks: asyncio.Queue, conn_conf: DBConfig | None = None
):
    """Process tasks from a queue.

    Args:
        tasks: Async queue of tasks to process
        conn_conf: Optional database connection configuration
    """
    # Sentinel value to signal completion
    SENTINEL = None

    while True:
        try:
            # Get task from queue (will wait if queue is empty)
            task = await tasks.get()

            # Check for sentinel value
            if task is SENTINEL:
                tasks.task_done()
                break

            # Support both (Path, str) tuples and DataSource instances
            if isinstance(task, tuple) and len(task) == 2:
                filepath, resource_name = task
                await self.process_resource(
                    resource_instance=filepath,
                    resource_name=resource_name,
                    conn_conf=conn_conf,
                )
            elif isinstance(task, AbstractDataSource):
                await self.process_data_source(
                    data_source=task, conn_conf=conn_conf
                )
            tasks.task_done()
        except Exception as e:
            logger.error(f"Error processing task: {e}", exc_info=True)
            tasks.task_done()
            break

push_db(gc, conn_conf, resource_name) async

Push graph container data to the database.

Parameters:

    gc (GraphContainer): Graph container with data to push. Required.
    conn_conf (DBConfig): Database connection configuration. Required.
    resource_name (str | None): Optional name of the resource. Required.
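
Example (sketch; this is essentially what process_batch does internally, with "person" and my_db_config as placeholders):

    import asyncio

    async def cast_and_push(batch):
        gc = await caster.cast_normal_resource(batch, resource_name="person")
        await caster.push_db(gc=gc, conn_conf=my_db_config, resource_name="person")

    asyncio.run(cast_and_push([{"name": "Ada"}, {"name": "Alan"}]))
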
Source code in graflo/hq/caster.py
async def push_db(
    self,
    gc: GraphContainer,
    conn_conf: DBConfig,
    resource_name: str | None,
):
    """Push graph container data to the database.

    Args:
        gc: Graph container with data to push
        conn_conf: Database connection configuration
        resource_name: Optional name of the resource
    """
    vc = self.schema.vertex_config
    resource = self.schema.fetch_resource(resource_name)

    # Push vertices in parallel (with configurable concurrency control to prevent deadlocks)
    # Some databases can deadlock when multiple transactions modify the same nodes
    # Use a semaphore to limit concurrent operations based on max_concurrent_db_ops
    max_concurrent = (
        self.ingestion_params.max_concurrent_db_ops
        if self.ingestion_params.max_concurrent_db_ops is not None
        else self.ingestion_params.n_cores
    )
    vertex_semaphore = asyncio.Semaphore(max_concurrent)

    async def push_vertex(vcol: str, data: list[dict]):
        async with vertex_semaphore:

            def _push_vertex_sync():
                with ConnectionManager(connection_config=conn_conf) as db_client:
                    # blank nodes: push and get back their keys  {"_key": ...}
                    if vcol in vc.blank_vertices:
                        query0 = db_client.insert_return_batch(
                            data, vc.vertex_dbname(vcol)
                        )
                        cursor = db_client.execute(query0)
                        return vcol, [item for item in cursor]
                    else:
                        db_client.upsert_docs_batch(
                            data,
                            vc.vertex_dbname(vcol),
                            vc.index(vcol),
                            update_keys="doc",
                            filter_uniques=True,
                            dry=self.ingestion_params.dry,
                        )
                        return vcol, None

            return await asyncio.to_thread(_push_vertex_sync)

    # Process all vertices in parallel (with semaphore limiting concurrency for Neo4j)
    vertex_results = await asyncio.gather(
        *[push_vertex(vcol, data) for vcol, data in gc.vertices.items()]
    )

    # Update blank vertices with returned keys
    for vcol, result in vertex_results:
        if result is not None:
            gc.vertices[vcol] = result

    # update edge misc with blank node edges
    for vcol in vc.blank_vertices:
        for edge_id, edge in self.schema.edge_config.edges_items():
            vfrom, vto, relation = edge_id
            if vcol == vfrom or vcol == vto:
                if edge_id not in gc.edges:
                    gc.edges[edge_id] = []
                gc.edges[edge_id].extend(
                    [
                        (x, y, {})
                        for x, y in zip(gc.vertices[vfrom], gc.vertices[vto])
                    ]
                )

    # Process extra weights
    async def process_extra_weights():
        def _process_extra_weights_sync():
            with ConnectionManager(connection_config=conn_conf) as db_client:
                # currently works only on item level
                for edge in resource.extra_weights:
                    if edge.weights is None:
                        continue
                    for weight in edge.weights.vertices:
                        if weight.name in vc.vertex_set:
                            index_fields = vc.index(weight.name)

                            if (
                                not self.ingestion_params.dry
                                and weight.name in gc.vertices
                            ):
                                weights_per_item = (
                                    db_client.fetch_present_documents(
                                        class_name=vc.vertex_dbname(weight.name),
                                        batch=gc.vertices[weight.name],
                                        match_keys=index_fields.fields,
                                        keep_keys=weight.fields,
                                    )
                                )

                                for j, item in enumerate(gc.linear):
                                    weights = weights_per_item[j]

                                    for ee in item[edge.edge_id]:
                                        weight_collection_attached = {
                                            weight.cfield(k): v
                                            for k, v in weights[0].items()
                                        }
                                        ee.update(weight_collection_attached)
                        else:
                            logger.error(f"{weight.name} not a valid vertex")

        await asyncio.to_thread(_process_extra_weights_sync)

    await process_extra_weights()

    # Push edges in parallel (with configurable concurrency control to prevent deadlocks)
    # Some databases can deadlock when multiple transactions modify the same nodes/relationships
    # Use a semaphore to limit concurrent operations based on max_concurrent_db_ops
    edge_semaphore = asyncio.Semaphore(max_concurrent)

    async def push_edge(edge_id: tuple, edge: Edge):
        async with edge_semaphore:

            def _push_edge_sync():
                with ConnectionManager(connection_config=conn_conf) as db_client:
                    for ee in gc.loop_over_relations(edge_id):
                        _, _, relation = ee
                        if not self.ingestion_params.dry:
                            data = gc.edges[ee]
                            db_client.insert_edges_batch(
                                docs_edges=data,
                                source_class=vc.vertex_dbname(edge.source),
                                target_class=vc.vertex_dbname(edge.target),
                                relation_name=relation,
                                match_keys_source=vc.index(edge.source).fields,
                                match_keys_target=vc.index(edge.target).fields,
                                filter_uniques=False,
                                dry=self.ingestion_params.dry,
                                collection_name=edge.database_name,
                            )

            await asyncio.to_thread(_push_edge_sync)

    # Process all edges in parallel (with semaphore limiting concurrency for Neo4j)
    await asyncio.gather(
        *[
            push_edge(edge_id, edge)
            for edge_id, edge in self.schema.edge_config.edges_items()
        ]
    )
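
The concurrency control used above (a semaphore gating blocking database calls run through asyncio.to_thread) can be shown in isolation. A minimal sketch with no graflo dependencies; do_blocking_write is a hypothetical stand-in for a synchronous database call:

import asyncio
import time

def do_blocking_write(batch: list[int]) -> int:
    # hypothetical synchronous DB call; here it just sleeps
    time.sleep(0.05)
    return len(batch)

async def push_all(batches: list[list[int]], max_concurrent: int) -> list[int]:
    semaphore = asyncio.Semaphore(max_concurrent)   # cap concurrent DB operations

    async def push_one(batch: list[int]) -> int:
        async with semaphore:                       # at most max_concurrent run at once
            return await asyncio.to_thread(do_blocking_write, batch)

    return await asyncio.gather(*[push_one(b) for b in batches])

# max_concurrent=1 serializes writes, the setting recommended for databases
# that deadlock under concurrent transactions (see max_concurrent_db_ops)
print(asyncio.run(push_all([[1, 2], [3], [4, 5, 6]], max_concurrent=1)))  # [2, 1, 3]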

GraphEngine

Orchestrator for graph database operations.

GraphEngine coordinates schema inference, pattern creation, schema definition, and data ingestion, providing a unified interface for working with graph databases.

The typical workflow is:

1. infer_schema() - Infer schema from source database (if possible)
2. create_patterns() - Create patterns mapping resources to data sources (if possible)
3. define_schema() - Define schema in target database (if possible and necessary)
4. ingest() - Ingest data into the target database
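
A minimal sketch of that workflow. The import path and the construction of PostgresConfig and DBConfig are assumptions (their fields are documented elsewhere), so connection details are left as placeholders:

from graflo.hq import GraphEngine, IngestionParams   # import path assumed from the module layout

engine = GraphEngine()                    # target_db_flavor defaults to DBType.ARANGO

postgres_config = PostgresConfig(...)     # source connection details omitted
target_db_config = DBConfig(...)          # target connection details omitted

schema = engine.infer_schema(postgres_config)        # 1. infer schema from the source
patterns = engine.create_patterns(postgres_config)   # 2. map resources to source tables
engine.define_schema(schema, target_db_config)       # 3. define schema in the target
engine.ingest(                                       # 4. ingest the data
    schema=schema,
    target_db_config=target_db_config,
    patterns=patterns,
    ingestion_params=IngestionParams(batch_size=10000),
)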

Attributes:

Name Type Description
target_db_flavor

Target database flavor for schema sanitization

resource_mapper

ResourceMapper instance for pattern creation

Source code in graflo/hq/graph_engine.py
class GraphEngine:
    """Orchestrator for graph database operations.

    GraphEngine coordinates schema inference, pattern creation, schema definition,
    and data ingestion, providing a unified interface for working with graph databases.

    The typical workflow is:
    1. infer_schema() - Infer schema from source database (if possible)
    2. create_patterns() - Create patterns mapping resources to data sources (if possible)
    3. define_schema() - Define schema in target database (if possible and necessary)
    4. ingest() - Ingest data into the target database

    Attributes:
        target_db_flavor: Target database flavor for schema sanitization
        resource_mapper: ResourceMapper instance for pattern creation
    """

    def __init__(
        self,
        target_db_flavor: DBType = DBType.ARANGO,
    ):
        """Initialize the GraphEngine.

        Args:
            target_db_flavor: Target database flavor for schema sanitization
        """
        self.target_db_flavor = target_db_flavor
        self.resource_mapper = ResourceMapper()

    def introspect(
        self,
        postgres_config: PostgresConfig,
        schema_name: str | None = None,
    ) -> SchemaIntrospectionResult:
        """Introspect PostgreSQL schema and return a serializable result.

        Args:
            postgres_config: PostgresConfig instance
            schema_name: Schema name to introspect (defaults to config schema_name or 'public')

        Returns:
            SchemaIntrospectionResult: Introspection result (vertex_tables, edge_tables,
                raw_tables, schema_name) suitable for serialization.
        """
        with PostgresConnection(postgres_config) as postgres_conn:
            inferencer = InferenceManager(
                conn=postgres_conn,
                target_db_flavor=self.target_db_flavor,
            )
            return inferencer.introspect(schema_name=schema_name)

    def infer_schema(
        self,
        postgres_config: PostgresConfig,
        schema_name: str | None = None,
        fuzzy_threshold: float = 0.8,
        discard_disconnected_vertices: bool = False,
    ) -> Schema:
        """Infer a graflo Schema from PostgreSQL database.

        Args:
            postgres_config: PostgresConfig instance
            schema_name: Schema name to introspect (defaults to config schema_name or 'public')
            fuzzy_threshold: Similarity threshold for fuzzy matching (0.0 to 1.0, default 0.8)
            discard_disconnected_vertices: If True, remove vertices that do not take part in
                any relation (and resources/actors that reference them). Default False.

        Returns:
            Schema: Inferred schema with vertices, edges, and resources
        """
        with PostgresConnection(postgres_config) as postgres_conn:
            inferencer = InferenceManager(
                conn=postgres_conn,
                target_db_flavor=self.target_db_flavor,
                fuzzy_threshold=fuzzy_threshold,
            )
            schema = inferencer.infer_complete_schema(schema_name=schema_name)
        if discard_disconnected_vertices:
            schema.remove_disconnected_vertices()
        return schema

    def create_patterns(
        self,
        postgres_config: PostgresConfig,
        schema_name: str | None = None,
        datetime_columns: dict[str, str] | None = None,
    ) -> Patterns:
        """Create Patterns from PostgreSQL tables.

        Args:
            postgres_config: PostgresConfig instance
            schema_name: Schema name to introspect
            datetime_columns: Optional mapping of resource/table name to datetime
                column name for date-range filtering (sets date_field per
                TablePattern). Use with IngestionParams.datetime_after /
                datetime_before.

        Returns:
            Patterns: Patterns object with TablePattern instances for all tables
        """
        with PostgresConnection(postgres_config) as postgres_conn:
            return self.resource_mapper.create_patterns_from_postgres(
                conn=postgres_conn,
                schema_name=schema_name,
                datetime_columns=datetime_columns,
            )

    def define_schema(
        self,
        schema: Schema,
        target_db_config: DBConfig,
        recreate_schema: bool = False,
    ) -> None:
        """Define schema in the target database.

        This method handles database/schema creation and initialization.
        Some databases don't require explicit schema definition (e.g., Neo4j),
        but this method ensures the database is properly initialized.

        If the schema/graph already exists and recreate_schema is False (default),
        init_db raises SchemaExistsError and the script halts.

        Args:
            schema: Schema configuration for the graph
            target_db_config: Target database connection configuration
            recreate_schema: If True, drop existing schema and define new one.
                If False and schema/graph already exists, raises SchemaExistsError.
        """
        # If effective_schema is not set, use schema.general.name as fallback
        if (
            target_db_config.can_be_target()
            and target_db_config.effective_schema is None
        ):
            schema_name = schema.general.name
            # Map to the appropriate field based on DB type
            if target_db_config.connection_type == DBType.TIGERGRAPH:
                # TigerGraph uses 'schema_name' field
                target_db_config.schema_name = schema_name
            else:
                # ArangoDB, Neo4j use 'database' field (which maps to effective_schema)
                target_db_config.database = schema_name

        # Ensure schema's vertex_config reflects target DB so Edge.finish_init()
        # applies DB-specific defaults (e.g. TigerGraph default relation name)
        schema.vertex_config.db_flavor = target_db_config.connection_type

        # Initialize database with schema definition
        # init_db() handles database/schema creation automatically
        # It checks if the database exists and creates it if needed
        with ConnectionManager(connection_config=target_db_config) as db_client:
            db_client.init_db(schema, recreate_schema)

    def define_and_ingest(
        self,
        schema: Schema,
        target_db_config: DBConfig,
        patterns: "Patterns | None" = None,
        ingestion_params: IngestionParams | None = None,
        recreate_schema: bool | None = None,
        clear_data: bool | None = None,
    ) -> None:
        """Define schema and ingest data into the graph database in one operation.

        This is a convenience method that chains define_schema() and ingest().
        It's the recommended way to set up and populate a graph database.

        Args:
            schema: Schema configuration for the graph
            target_db_config: Target database connection configuration
            patterns: Patterns instance mapping resources to data sources.
                If None, defaults to empty Patterns()
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, uses default IngestionParams()
            recreate_schema: If True, drop existing schema and define new one.
                If None, defaults to False. When False and schema already exists,
                define_schema raises SchemaExistsError and the script halts.
            clear_data: If True, remove existing data before ingestion (schema unchanged).
                If None, uses ingestion_params.clear_data.
        """
        ingestion_params = ingestion_params or IngestionParams()
        if clear_data is None:
            clear_data = ingestion_params.clear_data
        if recreate_schema is None:
            recreate_schema = False

        # Define schema first (halts with SchemaExistsError if schema exists and recreate_schema is False)
        self.define_schema(
            schema=schema,
            target_db_config=target_db_config,
            recreate_schema=recreate_schema,
        )

        # Then ingest data (clear_data is applied inside ingest() when ingestion_params.clear_data)
        ingestion_params = ingestion_params.model_copy(
            update={"clear_data": clear_data}
        )
        self.ingest(
            schema=schema,
            target_db_config=target_db_config,
            patterns=patterns,
            ingestion_params=ingestion_params,
        )

    def ingest(
        self,
        schema: Schema,
        target_db_config: DBConfig,
        patterns: "Patterns | None" = None,
        ingestion_params: IngestionParams | None = None,
    ) -> None:
        """Ingest data into the graph database.

        If ingestion_params.clear_data is True, removes all existing data
        (without touching the schema) before ingestion.

        Args:
            schema: Schema configuration for the graph
            target_db_config: Target database connection configuration
            patterns: Patterns instance mapping resources to data sources.
                If None, defaults to empty Patterns()
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, uses default IngestionParams()
        """
        ingestion_params = ingestion_params or IngestionParams()
        if ingestion_params.clear_data:
            with ConnectionManager(connection_config=target_db_config) as db_client:
                db_client.clear_data(schema)
        caster = Caster(schema=schema, ingestion_params=ingestion_params)
        caster.ingest(
            target_db_config=target_db_config,
            patterns=patterns or Patterns(),
            ingestion_params=ingestion_params,
        )

__init__(target_db_flavor=DBType.ARANGO)

Initialize the GraphEngine.

Parameters:

Name Type Description Default
target_db_flavor DBType

Target database flavor for schema sanitization

ARANGO
Source code in graflo/hq/graph_engine.py
def __init__(
    self,
    target_db_flavor: DBType = DBType.ARANGO,
):
    """Initialize the GraphEngine.

    Args:
        target_db_flavor: Target database flavor for schema sanitization
    """
    self.target_db_flavor = target_db_flavor
    self.resource_mapper = ResourceMapper()

create_patterns(postgres_config, schema_name=None, datetime_columns=None)

Create Patterns from PostgreSQL tables.

Parameters:

Name Type Description Default
postgres_config PostgresConfig

PostgresConfig instance

required
schema_name str | None

Schema name to introspect

None
datetime_columns dict[str, str] | None

Optional mapping of resource/table name to datetime column name for date-range filtering (sets date_field per TablePattern). Use with IngestionParams.datetime_after / datetime_before.

None

Returns:

Name Type Description
Patterns Patterns

Patterns object with TablePattern instances for all tables

Source code in graflo/hq/graph_engine.py
def create_patterns(
    self,
    postgres_config: PostgresConfig,
    schema_name: str | None = None,
    datetime_columns: dict[str, str] | None = None,
) -> Patterns:
    """Create Patterns from PostgreSQL tables.

    Args:
        postgres_config: PostgresConfig instance
        schema_name: Schema name to introspect
        datetime_columns: Optional mapping of resource/table name to datetime
            column name for date-range filtering (sets date_field per
            TablePattern). Use with IngestionParams.datetime_after /
            datetime_before.

    Returns:
        Patterns: Patterns object with TablePattern instances for all tables
    """
    with PostgresConnection(postgres_config) as postgres_conn:
        return self.resource_mapper.create_patterns_from_postgres(
            conn=postgres_conn,
            schema_name=schema_name,
            datetime_columns=datetime_columns,
        )
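
Continuing the sketch above, datetime_columns pins a datetime column per table for incremental loads; combined with IngestionParams.datetime_after / datetime_before, the half-open range [after, before) is ingested. Table and column names here are illustrative:

patterns = engine.create_patterns(
    postgres_config,
    schema_name="public",
    datetime_columns={"orders": "created_at"},   # sets date_field on the "orders" TablePattern
)

params = IngestionParams(
    datetime_after="2024-01-01T00:00:00",    # inclusive lower bound
    datetime_before="2024-02-01T00:00:00",   # exclusive upper bound
)
engine.ingest(schema, target_db_config, patterns=patterns, ingestion_params=params)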

define_and_ingest(schema, target_db_config, patterns=None, ingestion_params=None, recreate_schema=None, clear_data=None)

Define schema and ingest data into the graph database in one operation.

This is a convenience method that chains define_schema() and ingest(). It's the recommended way to set up and populate a graph database.

Parameters:

Name Type Description Default
schema Schema

Schema configuration for the graph

required
target_db_config DBConfig

Target database connection configuration

required
patterns Patterns | None

Patterns instance mapping resources to data sources. If None, defaults to empty Patterns()

None
ingestion_params IngestionParams | None

IngestionParams instance with ingestion configuration. If None, uses default IngestionParams()

None
recreate_schema bool | None

If True, drop existing schema and define new one. If None, defaults to False. When False and schema already exists, define_schema raises SchemaExistsError and the script halts.

None
clear_data bool | None

If True, remove existing data before ingestion (schema unchanged). If None, uses ingestion_params.clear_data.

None
Source code in graflo/hq/graph_engine.py
def define_and_ingest(
    self,
    schema: Schema,
    target_db_config: DBConfig,
    patterns: "Patterns | None" = None,
    ingestion_params: IngestionParams | None = None,
    recreate_schema: bool | None = None,
    clear_data: bool | None = None,
) -> None:
    """Define schema and ingest data into the graph database in one operation.

    This is a convenience method that chains define_schema() and ingest().
    It's the recommended way to set up and populate a graph database.

    Args:
        schema: Schema configuration for the graph
        target_db_config: Target database connection configuration
        patterns: Patterns instance mapping resources to data sources.
            If None, defaults to empty Patterns()
        ingestion_params: IngestionParams instance with ingestion configuration.
            If None, uses default IngestionParams()
        recreate_schema: If True, drop existing schema and define new one.
            If None, defaults to False. When False and schema already exists,
            define_schema raises SchemaExistsError and the script halts.
        clear_data: If True, remove existing data before ingestion (schema unchanged).
            If None, uses ingestion_params.clear_data.
    """
    ingestion_params = ingestion_params or IngestionParams()
    if clear_data is None:
        clear_data = ingestion_params.clear_data
    if recreate_schema is None:
        recreate_schema = False

    # Define schema first (halts with SchemaExistsError if schema exists and recreate_schema is False)
    self.define_schema(
        schema=schema,
        target_db_config=target_db_config,
        recreate_schema=recreate_schema,
    )

    # Then ingest data (clear_data is applied inside ingest() when ingestion_params.clear_data)
    ingestion_params = ingestion_params.model_copy(
        update={"clear_data": clear_data}
    )
    self.ingest(
        schema=schema,
        target_db_config=target_db_config,
        patterns=patterns,
        ingestion_params=ingestion_params,
    )
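
Continuing the same sketch, schema definition and ingestion can be collapsed into a single call:

engine.define_and_ingest(
    schema=schema,
    target_db_config=target_db_config,
    patterns=patterns,
    ingestion_params=IngestionParams(n_cores=4),
    recreate_schema=True,   # drop and redefine the schema if it already exists
    clear_data=False,       # keep any data already present
)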

define_schema(schema, target_db_config, recreate_schema=False)

Define schema in the target database.

This method handles database/schema creation and initialization. Some databases don't require explicit schema definition (e.g., Neo4j), but this method ensures the database is properly initialized.

If the schema/graph already exists and recreate_schema is False (default), init_db raises SchemaExistsError and the script halts.

Parameters:

Name Type Description Default
schema Schema

Schema configuration for the graph

required
target_db_config DBConfig

Target database connection configuration

required
recreate_schema bool

If True, drop existing schema and define new one. If False and schema/graph already exists, raises SchemaExistsError.

False
Source code in graflo/hq/graph_engine.py
def define_schema(
    self,
    schema: Schema,
    target_db_config: DBConfig,
    recreate_schema: bool = False,
) -> None:
    """Define schema in the target database.

    This method handles database/schema creation and initialization.
    Some databases don't require explicit schema definition (e.g., Neo4j),
    but this method ensures the database is properly initialized.

    If the schema/graph already exists and recreate_schema is False (default),
    init_db raises SchemaExistsError and the script halts.

    Args:
        schema: Schema configuration for the graph
        target_db_config: Target database connection configuration
        recreate_schema: If True, drop existing schema and define new one.
            If False and schema/graph already exists, raises SchemaExistsError.
    """
    # If effective_schema is not set, use schema.general.name as fallback
    if (
        target_db_config.can_be_target()
        and target_db_config.effective_schema is None
    ):
        schema_name = schema.general.name
        # Map to the appropriate field based on DB type
        if target_db_config.connection_type == DBType.TIGERGRAPH:
            # TigerGraph uses 'schema_name' field
            target_db_config.schema_name = schema_name
        else:
            # ArangoDB, Neo4j use 'database' field (which maps to effective_schema)
            target_db_config.database = schema_name

    # Ensure schema's vertex_config reflects target DB so Edge.finish_init()
    # applies DB-specific defaults (e.g. TigerGraph default relation name)
    schema.vertex_config.db_flavor = target_db_config.connection_type

    # Initialize database with schema definition
    # init_db() handles database/schema creation automatically
    # It checks if the database exists and creates it if needed
    with ConnectionManager(connection_config=target_db_config) as db_client:
        db_client.init_db(schema, recreate_schema)
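
Since init_db raises SchemaExistsError when the schema already exists and recreate_schema is False, idempotent setup scripts typically catch it. A sketch; the import location of SchemaExistsError is an assumption:

try:
    engine.define_schema(schema, target_db_config)   # recreate_schema defaults to False
except SchemaExistsError:                            # exact import path not shown here
    print("Schema already defined; pass recreate_schema=True to drop and redefine it.")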

infer_schema(postgres_config, schema_name=None, fuzzy_threshold=0.8, discard_disconnected_vertices=False)

Infer a graflo Schema from PostgreSQL database.

Parameters:

Name Type Description Default
postgres_config PostgresConfig

PostgresConfig instance

required
schema_name str | None

Schema name to introspect (defaults to config schema_name or 'public')

None
fuzzy_threshold float

Similarity threshold for fuzzy matching (0.0 to 1.0, default 0.8)

0.8
discard_disconnected_vertices bool

If True, remove vertices that do not take part in any relation (and resources/actors that reference them). Default False.

False

Returns:

Name Type Description
Schema Schema

Inferred schema with vertices, edges, and resources

Source code in graflo/hq/graph_engine.py
def infer_schema(
    self,
    postgres_config: PostgresConfig,
    schema_name: str | None = None,
    fuzzy_threshold: float = 0.8,
    discard_disconnected_vertices: bool = False,
) -> Schema:
    """Infer a graflo Schema from PostgreSQL database.

    Args:
        postgres_config: PostgresConfig instance
        schema_name: Schema name to introspect (defaults to config schema_name or 'public')
        fuzzy_threshold: Similarity threshold for fuzzy matching (0.0 to 1.0, default 0.8)
        discard_disconnected_vertices: If True, remove vertices that do not take part in
            any relation (and resources/actors that reference them). Default False.

    Returns:
        Schema: Inferred schema with vertices, edges, and resources
    """
    with PostgresConnection(postgres_config) as postgres_conn:
        inferencer = InferenceManager(
            conn=postgres_conn,
            target_db_flavor=self.target_db_flavor,
            fuzzy_threshold=fuzzy_threshold,
        )
        schema = inferencer.infer_complete_schema(schema_name=schema_name)
    if discard_disconnected_vertices:
        schema.remove_disconnected_vertices()
    return schema
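
A sketch of the two tuning knobs: a lower fuzzy_threshold makes column-to-vertex matching more permissive, and discard_disconnected_vertices drops vertices that appear in no relation:

schema = engine.infer_schema(
    postgres_config,
    schema_name="public",
    fuzzy_threshold=0.7,                  # more permissive than the 0.8 default
    discard_disconnected_vertices=True,   # remove isolated vertices and their resources
)
print([vertex.name for vertex in schema.vertex_config.vertices])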

ingest(schema, target_db_config, patterns=None, ingestion_params=None)

Ingest data into the graph database.

If ingestion_params.clear_data is True, removes all existing data (without touching the schema) before ingestion.

Parameters:

Name Type Description Default
schema Schema

Schema configuration for the graph

required
target_db_config DBConfig

Target database connection configuration

required
patterns Patterns | None

Patterns instance mapping resources to data sources. If None, defaults to empty Patterns()

None
ingestion_params IngestionParams | None

IngestionParams instance with ingestion configuration. If None, uses default IngestionParams()

None
Source code in graflo/hq/graph_engine.py
def ingest(
    self,
    schema: Schema,
    target_db_config: DBConfig,
    patterns: "Patterns | None" = None,
    ingestion_params: IngestionParams | None = None,
) -> None:
    """Ingest data into the graph database.

    If ingestion_params.clear_data is True, removes all existing data
    (without touching the schema) before ingestion.

    Args:
        schema: Schema configuration for the graph
        target_db_config: Target database connection configuration
        patterns: Patterns instance mapping resources to data sources.
            If None, defaults to empty Patterns()
        ingestion_params: IngestionParams instance with ingestion configuration.
            If None, uses default IngestionParams()
    """
    ingestion_params = ingestion_params or IngestionParams()
    if ingestion_params.clear_data:
        with ConnectionManager(connection_config=target_db_config) as db_client:
            db_client.clear_data(schema)
    caster = Caster(schema=schema, ingestion_params=ingestion_params)
    caster.ingest(
        target_db_config=target_db_config,
        patterns=patterns or Patterns(),
        ingestion_params=ingestion_params,
    )
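
A dry run exercises the full casting pipeline without touching the database, which is useful for validating a schema and patterns before a real load:

engine.ingest(
    schema=schema,
    target_db_config=target_db_config,
    patterns=patterns,
    ingestion_params=IngestionParams(dry=True, max_items=1000),   # no writes, first 1000 items per resource
)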

introspect(postgres_config, schema_name=None)

Introspect PostgreSQL schema and return a serializable result.

Parameters:

Name Type Description Default
postgres_config PostgresConfig

PostgresConfig instance

required
schema_name str | None

Schema name to introspect (defaults to config schema_name or 'public')

None

Returns:

Name Type Description
SchemaIntrospectionResult SchemaIntrospectionResult

Introspection result (vertex_tables, edge_tables, raw_tables, schema_name) suitable for serialization.

Source code in graflo/hq/graph_engine.py
def introspect(
    self,
    postgres_config: PostgresConfig,
    schema_name: str | None = None,
) -> SchemaIntrospectionResult:
    """Introspect PostgreSQL schema and return a serializable result.

    Args:
        postgres_config: PostgresConfig instance
        schema_name: Schema name to introspect (defaults to config schema_name or 'public')

    Returns:
        SchemaIntrospectionResult: Introspection result (vertex_tables, edge_tables,
            raw_tables, schema_name) suitable for serialization.
    """
    with PostgresConnection(postgres_config) as postgres_conn:
        inferencer = InferenceManager(
            conn=postgres_conn,
            target_db_flavor=self.target_db_flavor,
        )
        return inferencer.introspect(schema_name=schema_name)
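
A sketch of inspecting the source layout before inferring anything; the attributes accessed below (schema_name, vertex_tables, edge_tables, and each table's .name) match how they are used in ResourceMapper further down:

result = engine.introspect(postgres_config, schema_name="public")
print(result.schema_name)
print([table.name for table in result.vertex_tables])
print([table.name for table in result.edge_tables])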

InferenceManager

Inference manager for PostgreSQL sources.

Source code in graflo/hq/inferencer.py
class InferenceManager:
    """Inference manager for PostgreSQL sources."""

    def __init__(
        self,
        conn: PostgresConnection,
        target_db_flavor: DBType = DBType.ARANGO,
        fuzzy_threshold: float = 0.8,
    ):
        """Initialize the PostgreSQL inference manager.

        Args:
            conn: PostgresConnection instance
            target_db_flavor: Target database flavor for schema sanitization
            fuzzy_threshold: Similarity threshold for fuzzy matching (0.0 to 1.0, default 0.8)
        """
        self.target_db_flavor = target_db_flavor
        self.sanitizer = SchemaSanitizer(target_db_flavor)
        self.conn = conn
        self.inferencer = PostgresSchemaInferencer(
            db_flavor=target_db_flavor, conn=conn
        )
        self.mapper = PostgresResourceMapper(fuzzy_threshold=fuzzy_threshold)

    def introspect(self, schema_name: str | None = None) -> SchemaIntrospectionResult:
        """Introspect PostgreSQL schema.

        Args:
            schema_name: Schema name to introspect

        Returns:
            SchemaIntrospectionResult: PostgreSQL schema introspection result
        """
        return self.conn.introspect_schema(schema_name=schema_name)

    def infer_schema(
        self, introspection_result, schema_name: str | None = None
    ) -> Schema:
        """Infer graflo Schema from PostgreSQL introspection result.

        Args:
            introspection_result: SchemaIntrospectionResult from PostgreSQL
            schema_name: Schema name (optional, may be inferred from result)

        Returns:
            Schema: Inferred schema with vertices and edges
        """
        return self.inferencer.infer_schema(
            introspection_result, schema_name=schema_name
        )

    def create_resources(
        self, introspection_result, schema: Schema
    ) -> list["Resource"]:
        """Create Resources from PostgreSQL introspection result.

        Args:
            introspection_result: SchemaIntrospectionResult from PostgreSQL
            schema: Existing Schema object

        Returns:
            list[Resource]: List of Resources for PostgreSQL tables
        """
        return self.mapper.create_resources_from_tables(
            introspection_result,
            schema.vertex_config,
            schema.edge_config,
            vertex_attribute_mappings=self.sanitizer.vertex_attribute_mappings,
            fuzzy_threshold=self.mapper.fuzzy_threshold,
        )

    def infer_complete_schema(self, schema_name: str | None = None) -> Schema:
        """Infer a complete Schema from source and sanitize for target.

        This is a convenience method that:
        1. Introspects the source schema
        2. Infers the graflo Schema
        3. Sanitizes for the target database flavor
        4. Creates and adds resources
        5. Re-initializes the schema

        Args:
            schema_name: Schema name to introspect (source-specific)

        Returns:
            Schema: Complete inferred schema with vertices, edges, and resources
        """
        # Introspect the schema
        introspection_result = self.introspect(schema_name=schema_name)

        # Infer schema
        schema = self.infer_schema(introspection_result, schema_name=schema_name)

        # Sanitize for target database flavor
        schema = self.sanitizer.sanitize(schema)

        # Create and add resources
        resources = self.create_resources(introspection_result, schema)
        schema.resources = resources

        # Re-initialize to set up resource mappings
        schema.finish_init()

        return schema

    def create_resources_for_schema(
        self, schema: Schema, schema_name: str | None = None
    ) -> list["Resource"]:
        """Create Resources from source for an existing schema.

        Args:
            schema: Existing Schema object
            schema_name: Schema name to introspect (source-specific)

        Returns:
            list[Resource]: List of Resources for the source
        """
        # Introspect the schema
        introspection_result = self.introspect(schema_name=schema_name)

        # Create resources
        return self.create_resources(introspection_result, schema)

__init__(conn, target_db_flavor=DBType.ARANGO, fuzzy_threshold=0.8)

Initialize the PostgreSQL inference manager.

Parameters:

Name Type Description Default
conn PostgresConnection

PostgresConnection instance

required
target_db_flavor DBType

Target database flavor for schema sanitization

ARANGO
fuzzy_threshold float

Similarity threshold for fuzzy matching (0.0 to 1.0, default 0.8)

0.8
Source code in graflo/hq/inferencer.py
def __init__(
    self,
    conn: PostgresConnection,
    target_db_flavor: DBType = DBType.ARANGO,
    fuzzy_threshold: float = 0.8,
):
    """Initialize the PostgreSQL inference manager.

    Args:
        conn: PostgresConnection instance
        target_db_flavor: Target database flavor for schema sanitization
        fuzzy_threshold: Similarity threshold for fuzzy matching (0.0 to 1.0, default 0.8)
    """
    self.target_db_flavor = target_db_flavor
    self.sanitizer = SchemaSanitizer(target_db_flavor)
    self.conn = conn
    self.inferencer = PostgresSchemaInferencer(
        db_flavor=target_db_flavor, conn=conn
    )
    self.mapper = PostgresResourceMapper(fuzzy_threshold=fuzzy_threshold)

create_resources(introspection_result, schema)

Create Resources from PostgreSQL introspection result.

Parameters:

Name Type Description Default
introspection_result

SchemaIntrospectionResult from PostgreSQL

required
schema Schema

Existing Schema object

required

Returns:

Type Description
list[Resource]

list[Resource]: List of Resources for PostgreSQL tables

Source code in graflo/hq/inferencer.py
def create_resources(
    self, introspection_result, schema: Schema
) -> list["Resource"]:
    """Create Resources from PostgreSQL introspection result.

    Args:
        introspection_result: SchemaIntrospectionResult from PostgreSQL
        schema: Existing Schema object

    Returns:
        list[Resource]: List of Resources for PostgreSQL tables
    """
    return self.mapper.create_resources_from_tables(
        introspection_result,
        schema.vertex_config,
        schema.edge_config,
        vertex_attribute_mappings=self.sanitizer.vertex_attribute_mappings,
        fuzzy_threshold=self.mapper.fuzzy_threshold,
    )

create_resources_for_schema(schema, schema_name=None)

Create Resources from source for an existing schema.

Parameters:

Name Type Description Default
schema Schema

Existing Schema object

required
schema_name str | None

Schema name to introspect (source-specific)

None

Returns:

Type Description
list[Resource]

list[Resource]: List of Resources for the source

Source code in graflo/hq/inferencer.py
def create_resources_for_schema(
    self, schema: Schema, schema_name: str | None = None
) -> list["Resource"]:
    """Create Resources from source for an existing schema.

    Args:
        schema: Existing Schema object
        schema_name: Schema name to introspect (source-specific)

    Returns:
        list[Resource]: List of Resources for the source
    """
    # Introspect the schema
    introspection_result = self.introspect(schema_name=schema_name)

    # Create resources
    return self.create_resources(introspection_result, schema)

infer_complete_schema(schema_name=None)

Infer a complete Schema from source and sanitize for target.

This is a convenience method that:

1. Introspects the source schema
2. Infers the graflo Schema
3. Sanitizes for the target database flavor
4. Creates and adds resources
5. Re-initializes the schema

Parameters:

Name Type Description Default
schema_name str | None

Schema name to introspect (source-specific)

None

Returns:

Name Type Description
Schema Schema

Complete inferred schema with vertices, edges, and resources

Source code in graflo/hq/inferencer.py
def infer_complete_schema(self, schema_name: str | None = None) -> Schema:
    """Infer a complete Schema from source and sanitize for target.

    This is a convenience method that:
    1. Introspects the source schema
    2. Infers the graflo Schema
    3. Sanitizes for the target database flavor
    4. Creates and adds resources
    5. Re-initializes the schema

    Args:
        schema_name: Schema name to introspect (source-specific)

    Returns:
        Schema: Complete inferred schema with vertices, edges, and resources
    """
    # Introspect the schema
    introspection_result = self.introspect(schema_name=schema_name)

    # Infer schema
    schema = self.infer_schema(introspection_result, schema_name=schema_name)

    # Sanitize for target database flavor
    schema = self.sanitizer.sanitize(schema)

    # Create and add resources
    resources = self.create_resources(introspection_result, schema)
    schema.resources = resources

    # Re-initialize to set up resource mappings
    schema.finish_init()

    return schema
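
GraphEngine.infer_schema wraps exactly this call; using InferenceManager directly mirrors what GraphEngine does above (DBType and PostgresConnection imports omitted):

with PostgresConnection(postgres_config) as conn:
    inferencer = InferenceManager(
        conn=conn,
        target_db_flavor=DBType.ARANGO,
        fuzzy_threshold=0.8,
    )
    schema = inferencer.infer_complete_schema(schema_name="public")
print(len(schema.resources))   # resources attached in step 4 above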

infer_schema(introspection_result, schema_name=None)

Infer graflo Schema from PostgreSQL introspection result.

Parameters:

Name Type Description Default
introspection_result

SchemaIntrospectionResult from PostgreSQL

required
schema_name str | None

Schema name (optional, may be inferred from result)

None

Returns:

Name Type Description
Schema Schema

Inferred schema with vertices and edges

Source code in graflo/hq/inferencer.py
def infer_schema(
    self, introspection_result, schema_name: str | None = None
) -> Schema:
    """Infer graflo Schema from PostgreSQL introspection result.

    Args:
        introspection_result: SchemaIntrospectionResult from PostgreSQL
        schema_name: Schema name (optional, may be inferred from result)

    Returns:
        Schema: Inferred schema with vertices and edges
    """
    return self.inferencer.infer_schema(
        introspection_result, schema_name=schema_name
    )

introspect(schema_name=None)

Introspect PostgreSQL schema.

Parameters:

Name Type Description Default
schema_name str | None

Schema name to introspect

None

Returns:

Name Type Description
SchemaIntrospectionResult SchemaIntrospectionResult

PostgreSQL schema introspection result

Source code in graflo/hq/inferencer.py
def introspect(self, schema_name: str | None = None) -> SchemaIntrospectionResult:
    """Introspect PostgreSQL schema.

    Args:
        schema_name: Schema name to introspect

    Returns:
        SchemaIntrospectionResult: PostgreSQL schema introspection result
    """
    return self.conn.introspect_schema(schema_name=schema_name)

IngestionParams

Bases: BaseModel

Parameters for controlling the ingestion process.

Attributes:

Name Type Description
clear_data bool

If True, remove all existing graph data before ingestion without changing the schema.

n_cores int

Number of CPU cores/threads to use for parallel processing

max_items int | None

Maximum number of items to process per resource (applies to all data sources)

batch_size int

Size of batches for processing

dry bool

Whether to perform a dry run (no database changes)

init_only bool

Whether to only initialize the database without ingestion

limit_files int | None

Optional limit on number of files to process

max_concurrent_db_ops int | None

Maximum number of concurrent database operations (for vertices/edges). If None, uses n_cores. Set to 1 to prevent deadlocks in databases that don't handle concurrent transactions well (e.g., Neo4j). Database-independent setting.

datetime_after str | None

Inclusive lower bound for datetime filtering (ISO format). Rows with date_column >= datetime_after are included. Used with SQL/table sources.

datetime_before str | None

Exclusive upper bound for datetime filtering (ISO format). Rows with date_column < datetime_before are included. Range is [datetime_after, datetime_before).

datetime_column str | None

Default column name for datetime filtering when the pattern does not specify date_field. Per-table override: set date_field on TablePattern (or FilePattern).

Source code in graflo/hq/caster.py
class IngestionParams(BaseModel):
    """Parameters for controlling the ingestion process.

    Attributes:
        clear_data: If True, remove all existing graph data before ingestion without
            changing the schema.
        n_cores: Number of CPU cores/threads to use for parallel processing
        max_items: Maximum number of items to process per resource (applies to all data sources)
        batch_size: Size of batches for processing
        dry: Whether to perform a dry run (no database changes)
        init_only: Whether to only initialize the database without ingestion
        limit_files: Optional limit on number of files to process
        max_concurrent_db_ops: Maximum number of concurrent database operations (for vertices/edges).
            If None, uses n_cores. Set to 1 to prevent deadlocks in databases that don't handle
            concurrent transactions well (e.g., Neo4j). Database-independent setting.
        datetime_after: Inclusive lower bound for datetime filtering (ISO format).
            Rows with date_column >= datetime_after are included. Used with SQL/table sources.
        datetime_before: Exclusive upper bound for datetime filtering (ISO format).
            Rows with date_column < datetime_before are included. Range is [datetime_after, datetime_before).
        datetime_column: Default column name for datetime filtering when the pattern does not
            specify date_field. Per-table override: set date_field on TablePattern (or FilePattern).
    """

    clear_data: bool = False
    n_cores: int = 1
    max_items: int | None = None
    batch_size: int = 10000
    dry: bool = False
    init_only: bool = False
    limit_files: int | None = None
    max_concurrent_db_ops: int | None = None
    datetime_after: str | None = None
    datetime_before: str | None = None
    datetime_column: str | None = None
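
A sketch combining the concurrency and date-filtering fields (values are illustrative):

params = IngestionParams(
    n_cores=4,                     # parallel processing threads
    batch_size=20000,
    max_concurrent_db_ops=1,       # serialize DB writes, e.g. for Neo4j
    datetime_after="2024-01-01",   # inclusive lower bound
    datetime_before="2024-02-01",  # exclusive upper bound: [after, before)
    datetime_column="updated_at",  # fallback when a pattern sets no date_field
)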

ResourceMapper

Maps different data sources to Patterns for graph ingestion.

This class provides methods to create Patterns from various data sources, enabling a unified interface for pattern creation regardless of the source type.

Source code in graflo/hq/resource_mapper.py
class ResourceMapper:
    """Maps different data sources to Patterns for graph ingestion.

    This class provides methods to create Patterns from various data sources,
    enabling a unified interface for pattern creation regardless of the source type.
    """

    def create_patterns_from_postgres(
        self,
        conn: PostgresConnection,
        schema_name: str | None = None,
        datetime_columns: dict[str, str] | None = None,
    ) -> Patterns:
        """Create Patterns from PostgreSQL tables.

        Args:
            conn: PostgresConnection instance
            schema_name: Schema name to introspect
            datetime_columns: Optional mapping of resource/table name to datetime
                column name for date-range filtering (sets date_field on each
                TablePattern). Used with IngestionParams.datetime_after /
                datetime_before.

        Returns:
            Patterns: Patterns object with TablePattern instances for all tables
        """
        # Introspect the schema
        introspection_result = conn.introspect_schema(schema_name=schema_name)

        # Create patterns
        patterns = Patterns()

        # Get schema name
        effective_schema = schema_name or introspection_result.schema_name

        # Store the connection config
        config_key = "default"
        patterns.postgres_configs[(config_key, effective_schema)] = conn.config

        date_cols = datetime_columns or {}

        # Add patterns for vertex tables
        for table_info in introspection_result.vertex_tables:
            table_name = table_info.name
            table_pattern = TablePattern(
                table_name=table_name,
                schema_name=effective_schema,
                resource_name=table_name,
                date_field=date_cols.get(table_name),
            )
            patterns.table_patterns[table_name] = table_pattern
            patterns.postgres_table_configs[table_name] = (
                config_key,
                effective_schema,
                table_name,
            )

        # Add patterns for edge tables
        for table_info in introspection_result.edge_tables:
            table_name = table_info.name
            table_pattern = TablePattern(
                table_name=table_name,
                schema_name=effective_schema,
                resource_name=table_name,
                date_field=date_cols.get(table_name),
            )
            patterns.table_patterns[table_name] = table_pattern
            patterns.postgres_table_configs[table_name] = (
                config_key,
                effective_schema,
                table_name,
            )

        return patterns

create_patterns_from_postgres(conn, schema_name=None, datetime_columns=None)

Create Patterns from PostgreSQL tables.

Parameters:

Name Type Description Default
conn PostgresConnection

PostgresConnection instance

required
schema_name str | None

Schema name to introspect

None
datetime_columns dict[str, str] | None

Optional mapping of resource/table name to datetime column name for date-range filtering (sets date_field on each TablePattern). Used with IngestionParams.datetime_after / datetime_before.

None

Returns:

Name Type Description
Patterns Patterns

Patterns object with TablePattern instances for all tables

Source code in graflo/hq/resource_mapper.py
def create_patterns_from_postgres(
    self,
    conn: PostgresConnection,
    schema_name: str | None = None,
    datetime_columns: dict[str, str] | None = None,
) -> Patterns:
    """Create Patterns from PostgreSQL tables.

    Args:
        conn: PostgresConnection instance
        schema_name: Schema name to introspect
        datetime_columns: Optional mapping of resource/table name to datetime
            column name for date-range filtering (sets date_field on each
            TablePattern). Used with IngestionParams.datetime_after /
            datetime_before.

    Returns:
        Patterns: Patterns object with TablePattern instances for all tables
    """
    # Introspect the schema
    introspection_result = conn.introspect_schema(schema_name=schema_name)

    # Create patterns
    patterns = Patterns()

    # Get schema name
    effective_schema = schema_name or introspection_result.schema_name

    # Store the connection config
    config_key = "default"
    patterns.postgres_configs[(config_key, effective_schema)] = conn.config

    date_cols = datetime_columns or {}

    # Add patterns for vertex tables
    for table_info in introspection_result.vertex_tables:
        table_name = table_info.name
        table_pattern = TablePattern(
            table_name=table_name,
            schema_name=effective_schema,
            resource_name=table_name,
            date_field=date_cols.get(table_name),
        )
        patterns.table_patterns[table_name] = table_pattern
        patterns.postgres_table_configs[table_name] = (
            config_key,
            effective_schema,
            table_name,
        )

    # Add patterns for edge tables
    for table_info in introspection_result.edge_tables:
        table_name = table_info.name
        table_pattern = TablePattern(
            table_name=table_name,
            schema_name=effective_schema,
            resource_name=table_name,
            date_field=date_cols.get(table_name),
        )
        patterns.table_patterns[table_name] = table_pattern
        patterns.postgres_table_configs[table_name] = (
            config_key,
            effective_schema,
            table_name,
        )

    return patterns
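
A sketch of what the returned Patterns object holds, following the assignments in the method above (the table name "orders" is illustrative):

mapper = ResourceMapper()
with PostgresConnection(postgres_config) as conn:
    patterns = mapper.create_patterns_from_postgres(conn, schema_name="public")

print(list(patterns.table_patterns))              # one TablePattern per vertex/edge table
print(patterns.postgres_table_configs["orders"])  # ("default", "public", "orders")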

SchemaSanitizer

Sanitizes schema attributes to avoid reserved words and normalize indexes.

This class handles:

- Sanitizing vertex names and field names to avoid reserved words
- Normalizing vertex indexes for TigerGraph (ensuring consistent indexes for edges with the same relation)
- Applying field index mappings to resources
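
Usage mirrors what InferenceManager does internally (see infer_complete_schema above); a minimal sketch:

sanitizer = SchemaSanitizer(DBType.TIGERGRAPH)   # loads the TigerGraph reserved-word list
schema = sanitizer.sanitize(schema)              # renames reserved vertex and field names
print(sanitizer.vertex_attribute_mappings)       # vertex name -> {original field: sanitized field}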

Source code in graflo/hq/sanitizer.py
class SchemaSanitizer:
    """Sanitizes schema attributes to avoid reserved words and normalize indexes.

    This class handles:
    - Sanitizing vertex names and field names to avoid reserved words
    - Normalizing vertex indexes for TigerGraph (ensuring consistent indexes
      for edges with the same relation)
    - Applying field index mappings to resources
    """

    def __init__(self, db_flavor: DBType):
        """Initialize the schema sanitizer.

        Args:
            db_flavor: Target database flavor to load reserved words for
        """
        self.db_flavor = db_flavor
        self.reserved_words = load_reserved_words(db_flavor)
        self.vertex_attribute_mappings: defaultdict[str, dict[str, str]] = defaultdict(
            dict
        )
        self.vertex_mappings: dict[str, str] = {}

    def sanitize(self, schema: Schema) -> Schema:
        """Sanitize attribute names and vertex names in the schema to avoid reserved words.

        This method modifies:
        - Field names in vertices and edges
        - Vertex names themselves
        - Edge source/target/by references to vertices
        - Resource apply lists that reference vertices

        The sanitization is deterministic: the same input always produces the same output.

        Args:
            schema: The schema to sanitize

        Returns:
            Schema with sanitized attribute names and vertex names
        """
        if not self.reserved_words:
            # No reserved words to check, return schema as-is
            return schema

        # First pass: Sanitize vertex dbnames
        for vertex in schema.vertex_config.vertices:
            if vertex.dbname is None:
                continue
            dbname = vertex.dbname
            sanitized_vertex_name = sanitize_attribute_name(
                dbname, self.reserved_words, suffix=f"_{VERTEX_SUFFIX}"
            )
            if sanitized_vertex_name != dbname:
                logger.debug(
                    f"Sanitizing vertex name '{dbname}' -> '{sanitized_vertex_name}'"
                )
                self.vertex_mappings[dbname] = sanitized_vertex_name
                vertex.dbname = sanitized_vertex_name

        # Second pass: Sanitize vertex field names
        for vertex in schema.vertex_config.vertices:
            for field in vertex.fields:
                original_name = field.name
                sanitized_name = sanitize_attribute_name(
                    original_name, self.reserved_words
                )
                if sanitized_name != original_name:
                    self.vertex_attribute_mappings[vertex.name][original_name] = (
                        sanitized_name
                    )
                    logger.debug(
                        f"Sanitizing field name '{original_name}' -> '{sanitized_name}' "
                        f"in vertex '{vertex.name}'"
                    )
                    field.name = sanitized_name

            for index in vertex.indexes:
                index.fields = [
                    self.vertex_attribute_mappings[vertex.name].get(item, item)
                    for item in index.fields
                ]

        vertex_names = {vertex.dbname for vertex in schema.vertex_config.vertices}

        for edge in schema.edge_config.edges:
            if not edge.relation:
                continue

            original = edge.relation_dbname
            if original is None:
                continue

            # First pass: sanitize against reserved words
            sanitized = sanitize_attribute_name(
                original,
                self.reserved_words,
                suffix=f"_{RELATION_SUFFIX}",
            )

            # Second pass: avoid collision with vertex names
            if sanitized in vertex_names:
                base = f"{sanitized}_{RELATION_SUFFIX}"
                candidate = base
                counter = 1

                while candidate in vertex_names:
                    candidate = f"{base}_{counter}"
                    counter += 1

                sanitized = candidate

            # Update only if needed
            if sanitized != original:
                edge.relation_dbname = sanitized

        # Third pass: Normalize edge indexes for TigerGraph
        # TigerGraph requires that edges with the same relation have consistent source and target indexes
        # 1) group edges by relation
        # 2) check that for each group specified by relation the sources have the same index
        #    and separately the targets have the same index
        # 3) if this is not the case, identify the most popular index
        # 4) for vertices that don't comply with the chosen source/target index, prepare a mapping
        #    and rename the relevant field indexes
        field_index_mappings: dict[
            str, dict[str, str]
        ] = {}  # vertex_name -> {old_field: new_field}

        if schema.vertex_config.db_flavor == DBType.TIGERGRAPH:
            # Group edges by relation
            edges_by_relation: dict[str | None, list[Edge]] = {}
            for edge in schema.edge_config.edges:
                # Use sanitized dbname when grouping by relation for TigerGraph
                relation = (
                    edge.relation_dbname
                    if edge.relation_dbname is not None
                    else edge.relation
                )
                if relation not in edges_by_relation:
                    edges_by_relation[relation] = []
                edges_by_relation[relation].append(edge)

            # Process each relation group
            for relation, relation_edges in edges_by_relation.items():
                if len(relation_edges) <= 1:
                    # Only one edge with this relation, no normalization needed
                    continue

                # Collect all vertex/index pairs using a list to capture all occurrences
                # This handles cases where a vertex appears multiple times in edges for the same relation
                source_vertex_indexes: list[tuple[str, tuple[str, ...]]] = []
                target_vertex_indexes: list[tuple[str, tuple[str, ...]]] = []

                for edge in relation_edges:
                    source_vertex = edge.source
                    target_vertex = edge.target

                    # Get primary index for source vertex
                    source_index = schema.vertex_config.index(source_vertex)
                    source_vertex_indexes.append(
                        (source_vertex, tuple(source_index.fields))
                    )

                    # Get primary index for target vertex
                    target_index = schema.vertex_config.index(target_vertex)
                    target_vertex_indexes.append(
                        (target_vertex, tuple(target_index.fields))
                    )

                # Normalize source indexes
                self._normalize_vertex_indexes(
                    source_vertex_indexes,
                    relation,
                    schema,
                    field_index_mappings,
                    "source",
                )

                # Normalize target indexes
                self._normalize_vertex_indexes(
                    target_vertex_indexes,
                    relation,
                    schema,
                    field_index_mappings,
                    "target",
                )

        # Fourth pass: apply the field maps from edge/relation index normalization to resources:
        # their transforms are updated so that old index field names map to the ones chosen in the previous step
        if field_index_mappings:
            for resource in schema.resources:
                self._apply_field_index_mappings_to_resource(
                    resource, field_index_mappings
                )

        return schema

    def _normalize_vertex_indexes(
        self,
        vertex_indexes: list[tuple[str, tuple[str, ...]]],
        relation: str | None,
        schema: Schema,
        field_index_mappings: dict[str, dict[str, str]],
        role: str,  # "source" or "target" for logging
    ) -> None:
        """Normalize vertex indexes to use the most popular index pattern.

        For vertices that don't match the most popular index, this method:
        1. Creates field mappings (old_field -> new_field)
        2. Updates vertex indexes to match the most popular pattern
        3. Adds new fields to vertices if needed
        4. Removes old fields that are being replaced

        Args:
            vertex_indexes: List of (vertex_name, index_fields_tuple) pairs
            relation: Relation name for logging
            schema: Schema to update
            field_index_mappings: Dictionary to update with field mappings
            role: "source" or "target" for logging purposes
        """
        if not vertex_indexes:
            return

        # Extract unique vertex/index pairs (a vertex might appear multiple times)
        vertex_index_dict: dict[str, tuple[str, ...]] = {}
        for vertex_name, index_fields in vertex_indexes:
            # Only store first occurrence - we'll normalize all vertices together
            if vertex_name not in vertex_index_dict:
                vertex_index_dict[vertex_name] = index_fields

        # Check if all indexes are consistent
        indexes_list = list(vertex_index_dict.values())
        indexes_set = set(indexes_list)
        indexes_consistent = len(indexes_set) == 1

        if indexes_consistent:
            # All indexes are the same, no normalization needed
            return

        # Find most popular index
        index_counter = Counter(indexes_list)
        most_popular_index = index_counter.most_common(1)[0][0]

        # Normalize vertices that don't match
        for vertex_name, index_fields in vertex_index_dict.items():
            if index_fields == most_popular_index:
                continue

            # Initialize mappings for this vertex if needed
            if vertex_name not in field_index_mappings:
                field_index_mappings[vertex_name] = {}

            # Map old fields to new fields
            old_fields = list(index_fields)
            new_fields = list(most_popular_index)

            # Create field-to-field mapping
            # If lengths match, map positionally; otherwise map first field to first field
            if len(old_fields) == len(new_fields):
                for old_field, new_field in zip(old_fields, new_fields):
                    if old_field != new_field:
                        # Update existing mapping if it exists, otherwise create new one
                        field_index_mappings[vertex_name][old_field] = new_field
            else:
                # If lengths don't match, map the first field
                if old_fields and new_fields:
                    if old_fields[0] != new_fields[0]:
                        field_index_mappings[vertex_name][old_fields[0]] = new_fields[0]

            # Update vertex index and fields
            vertex = schema.vertex_config[vertex_name]
            existing_field_names = {f.name for f in vertex.fields}

            # Add new fields that don't exist
            for new_field in most_popular_index:
                if new_field not in existing_field_names:
                    vertex.fields.append(Field(name=new_field, type=None))
                    existing_field_names.add(new_field)

            # Remove old fields that are being replaced (not in new index)
            fields_to_remove = [
                f
                for f in vertex.fields
                if f.name in old_fields and f.name not in new_fields
            ]
            for field_to_remove in fields_to_remove:
                vertex.fields.remove(field_to_remove)

            # Update vertex index to match the most popular one
            vertex.indexes[0].fields = list(most_popular_index)

            logger.debug(
                f"Normalizing {role} index for vertex '{vertex_name}' in relation '{relation}': "
                f"{old_fields} -> {new_fields}"
            )

    def _apply_field_index_mappings_to_resource(
        self, resource: Resource, field_index_mappings: dict[str, dict[str, str]]
    ) -> None:
        """Apply field index mappings to TransformActor instances in a resource.

        For vertices that had their indexes normalized, this method updates TransformActor
        instances to map old field names to new field names in their Transform.map attribute.
        Only updates TransformActors where the vertex is confirmed to be created at that level
        (via VertexActor).

        Args:
            resource: The resource to update
            field_index_mappings: Dictionary mapping vertex names to field mappings
                                 (old_field -> new_field)
        """
        from graflo.architecture.actor import (
            ActorWrapper,
            DescendActor,
            TransformActor,
            VertexActor,
        )

        def collect_vertices_at_level(wrappers: list[ActorWrapper]) -> set[str]:
            """Collect vertices created by VertexActor instances at the current level only.

            Does not recurse into nested structures - only collects vertices from
            the immediate level.

            Args:
                wrappers: List of ActorWrapper instances

            Returns:
                set[str]: Set of vertex names created at this level
            """
            vertices = set()
            for wrapper in wrappers:
                if isinstance(wrapper.actor, VertexActor):
                    vertices.add(wrapper.actor.name)
            return vertices

        def update_transform_actor_maps(
            wrapper: ActorWrapper, parent_vertices: set[str] | None = None
        ) -> set[str]:
            """Recursively update TransformActor instances with field index mappings.

            Args:
                wrapper: ActorWrapper instance to process
                parent_vertices: Set of vertices available from parent levels (for nested structures)

            Returns:
                set[str]: Set of all vertices available at this level (including parent)
            """
            if parent_vertices is None:
                parent_vertices = set()

            # Collect vertices created at this level
            current_level_vertices = set()
            if isinstance(wrapper.actor, VertexActor):
                current_level_vertices.add(wrapper.actor.name)

            # All available vertices = current level + parent levels
            all_available_vertices = current_level_vertices | parent_vertices

            # Process TransformActor if present
            if isinstance(wrapper.actor, TransformActor):
                transform_actor: TransformActor = wrapper.actor

                def apply_mappings_to_transform(
                    mappings: dict[str, str],
                    vertex_name: str,
                    actor: TransformActor,
                ) -> None:
                    """Apply field mappings to TransformActor's transform.map attribute.

                    Args:
                        mappings: Dictionary mapping old field names to new field names
                        vertex_name: Name of the vertex these mappings belong to (for logging)
                        actor: The TransformActor instance to update
                    """
                    transform = actor.t
                    if transform.map:
                        # Update existing map: rewrite values that still refer to old field names
                        for map_key, map_value in transform.map.items():
                            if isinstance(map_value, str) and map_value in mappings:
                                transform.map[map_key] = mappings[map_value]

                        # If the new field is not yet a target of the map, add an old -> new entry
                        for k, v in mappings.items():
                            if v not in transform.map.values():
                                transform.map[k] = v
                    else:
                        # Create new map with all mappings
                        transform.map = mappings.copy()

                    # Update Transform object IO to reflect map edits
                    actor.t._init_io_from_map(force_init=True)

                    logger.debug(
                        f"Updated TransformActor map in resource '{resource.resource_name}' "
                        f"for vertex '{vertex_name}': {mappings}"
                    )

                target_vertex = transform_actor.vertex

                if isinstance(target_vertex, str):
                    # TransformActor has explicit target_vertex
                    if (
                        target_vertex in field_index_mappings
                        and target_vertex in all_available_vertices
                    ):
                        mappings = field_index_mappings[target_vertex]
                        if mappings:
                            apply_mappings_to_transform(
                                mappings, target_vertex, transform_actor
                            )
                        else:
                            logger.debug(
                                f"Skipping TransformActor for vertex '{target_vertex}' "
                                f"in resource '{resource.resource_name}': no mappings needed"
                            )
                    else:
                        logger.debug(
                            f"Skipping TransformActor for vertex '{target_vertex}' "
                            f"in resource '{resource.resource_name}': vertex not created at this level"
                        )
                else:
                    # TransformActor has no target_vertex
                    # Apply mappings from all available vertices (parent and current level)
                    # since transformed fields will be attributed to those vertices
                    applied_any = False
                    for vertex in all_available_vertices:
                        if vertex in field_index_mappings:
                            mappings = field_index_mappings[vertex]
                            if mappings:
                                apply_mappings_to_transform(
                                    mappings, vertex, transform_actor
                                )
                                applied_any = True

                    if not applied_any:
                        logger.debug(
                            f"Skipping TransformActor without target_vertex "
                            f"in resource '{resource.resource_name}': "
                            f"no mappings found for available vertices {all_available_vertices}"
                        )

            # Recursively process nested structures (DescendActor)
            if isinstance(wrapper.actor, DescendActor):
                # Collect vertices from all descendants at this level
                descendant_vertices = collect_vertices_at_level(
                    wrapper.actor.descendants
                )
                all_available_vertices |= descendant_vertices

                # Recursively process each descendant
                for descendant_wrapper in wrapper.actor.descendants:
                    nested_vertices = update_transform_actor_maps(
                        descendant_wrapper, parent_vertices=all_available_vertices
                    )
                    # Merge nested vertices into available vertices
                    all_available_vertices |= nested_vertices

            return all_available_vertices

        # Process the root ActorWrapper if it exists
        if hasattr(resource, "root") and resource.root is not None:
            update_transform_actor_maps(resource.root)
        else:
            logger.warning(
                f"Resource '{resource.resource_name}' does not have a root ActorWrapper. "
                f"Skipping field index mapping updates."
            )
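
To make the map-rewriting step above concrete, here is a minimal, stand-alone sketch of the same idea operating on a plain dict. The helper name apply_field_mappings is hypothetical; the real code mutates Transform.map on a TransformActor and then re-initializes its IO, which is omitted here.

def apply_field_mappings(transform_map: dict[str, str], mappings: dict[str, str]) -> dict[str, str]:
    # Rewrite values that point at old field names, then add any old -> new
    # pair whose new field is not yet a target of the map.
    updated = {key: mappings.get(value, value) for key, value in transform_map.items()}
    for old_field, new_field in mappings.items():
        if new_field not in updated.values():
            updated[old_field] = new_field
    return updated

# "raw_id" was mapped to "id"; after index normalization "id" becomes "uid".
assert apply_field_mappings({"raw_id": "id"}, {"id": "uid"}) == {"raw_id": "uid"}
# With an empty map, the mappings are simply copied in.
assert apply_field_mappings({}, {"id": "uid"}) == {"id": "uid"}
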

__init__(db_flavor)

Initialize the schema sanitizer.

Parameters:

Name Type Description Default
db_flavor DBType

Target database flavor to load reserved words for

required
Source code in graflo/hq/sanitizer.py
def __init__(self, db_flavor: DBType):
    """Initialize the schema sanitizer.

    Args:
        db_flavor: Target database flavor to load reserved words for
    """
    self.db_flavor = db_flavor
    self.reserved_words = load_reserved_words(db_flavor)
    self.vertex_attribute_mappings: defaultdict[str, dict[str, str]] = defaultdict(
        dict
    )
    self.vertex_mappings: dict[str, str] = {}
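
The vertex_attribute_mappings attribute uses a defaultdict of dicts so that later passes can record per-vertex field renames without first checking whether a vertex already has an entry. A minimal sketch of that bookkeeping pattern follows; the vertex names and renames below are hypothetical.

from collections import defaultdict

vertex_attribute_mappings: defaultdict[str, dict[str, str]] = defaultdict(dict)

# Record renames for one vertex; the inner dict is created on first access.
vertex_attribute_mappings["person"]["type"] = "type_attr"   # hypothetical rename
vertex_attribute_mappings["person"]["from"] = "from_attr"   # hypothetical rename

assert vertex_attribute_mappings["person"] == {"type": "type_attr", "from": "from_attr"}
assert vertex_attribute_mappings["company"] == {}  # auto-created, still empty
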

sanitize(schema)

Sanitize attribute names and vertex names in the schema to avoid reserved words.

This method modifies:

- Field names in vertices and edges
- Vertex names themselves
- Edge source/target/by references to vertices
- Resource apply lists that reference vertices

The sanitization is deterministic: the same input always produces the same output.
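
For illustration, the determinism comes down to sanitization being a pure function of the name and the reserved-word set. The helper below only mimics the idea behind sanitize_attribute_name; the exact suffix and case handling used by the library are assumptions here.

def suffix_if_reserved(name: str, reserved_words: set[str], suffix: str = "_attr") -> str:
    # Append a fixed suffix when the name collides with a reserved word;
    # otherwise return it unchanged, so repeated calls give the same result.
    return f"{name}{suffix}" if name.lower() in reserved_words else name

reserved = {"type", "from", "to"}
assert suffix_if_reserved("type", reserved) == "type_attr"
assert suffix_if_reserved("title", reserved) == "title"
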

Parameters:

Name Type Description Default
schema Schema

The schema to sanitize

required

Returns:

Type Description
Schema

Schema with sanitized attribute names and vertex names

Source code in graflo/hq/sanitizer.py
def sanitize(self, schema: Schema) -> Schema:
    """Sanitize attribute names and vertex names in the schema to avoid reserved words.

    This method modifies:
    - Field names in vertices and edges
    - Vertex names themselves
    - Edge source/target/by references to vertices
    - Resource apply lists that reference vertices

    The sanitization is deterministic: the same input always produces the same output.

    Args:
        schema: The schema to sanitize

    Returns:
        Schema with sanitized attribute names and vertex names
    """
    if not self.reserved_words:
        # No reserved words to check, return schema as-is
        return schema

    # First pass: Sanitize vertex dbnames
    for vertex in schema.vertex_config.vertices:
        if vertex.dbname is None:
            continue
        dbname = vertex.dbname
        sanitized_vertex_name = sanitize_attribute_name(
            dbname, self.reserved_words, suffix=f"_{VERTEX_SUFFIX}"
        )
        if sanitized_vertex_name != dbname:
            logger.debug(
                f"Sanitizing vertex name '{dbname}' -> '{sanitized_vertex_name}'"
            )
            self.vertex_mappings[dbname] = sanitized_vertex_name
            vertex.dbname = sanitized_vertex_name

    # Second pass: Sanitize vertex field names
    for vertex in schema.vertex_config.vertices:
        for field in vertex.fields:
            original_name = field.name
            sanitized_name = sanitize_attribute_name(
                original_name, self.reserved_words
            )
            if sanitized_name != original_name:
                self.vertex_attribute_mappings[vertex.name][original_name] = (
                    sanitized_name
                )
                logger.debug(
                    f"Sanitizing field name '{original_name}' -> '{sanitized_name}' "
                    f"in vertex '{vertex.name}'"
                )
                field.name = sanitized_name

        for index in vertex.indexes:
            index.fields = [
                self.vertex_attribute_mappings[vertex.name].get(item, item)
                for item in index.fields
            ]

    vertex_names = {vertex.dbname for vertex in schema.vertex_config.vertices}

    for edge in schema.edge_config.edges:
        if not edge.relation:
            continue

        original = edge.relation_dbname
        if original is None:
            continue

        # Step 1: sanitize the relation name against reserved words
        sanitized = sanitize_attribute_name(
            original,
            self.reserved_words,
            suffix=f"_{RELATION_SUFFIX}",
        )

        # Step 2: avoid collision with vertex names
        if sanitized in vertex_names:
            base = f"{sanitized}_{RELATION_SUFFIX}"
            candidate = base
            counter = 1

            while candidate in vertex_names:
                candidate = f"{base}_{counter}"
                counter += 1

            sanitized = candidate

        # Update only if needed
        if sanitized != original:
            edge.relation_dbname = sanitized

    # Third pass: Normalize edge indexes for TigerGraph
    # TigerGraph requires that edges sharing the same relation have consistent source and target indexes
    # 1) group edges by relation
    # 2) check that, within each relation group, the sources all share the same index
    #    and, separately, the targets all share the same index
    # 3) if this is not the case, identify the most popular index
    # 4) for vertices that don't comply with the chosen source/target index, prepare a mapping
    #    and rename the relevant index fields
    field_index_mappings: dict[
        str, dict[str, str]
    ] = {}  # vertex_name -> {old_field: new_field}

    if schema.vertex_config.db_flavor == DBType.TIGERGRAPH:
        # Group edges by relation
        edges_by_relation: dict[str | None, list[Edge]] = {}
        for edge in schema.edge_config.edges:
            # Use sanitized dbname when grouping by relation for TigerGraph
            relation = (
                edge.relation_dbname
                if edge.relation_dbname is not None
                else edge.relation
            )
            if relation not in edges_by_relation:
                edges_by_relation[relation] = []
            edges_by_relation[relation].append(edge)

        # Process each relation group
        for relation, relation_edges in edges_by_relation.items():
            if len(relation_edges) <= 1:
                # Only one edge with this relation, no normalization needed
                continue

            # Collect all vertex/index pairs using a list to capture all occurrences
            # This handles cases where a vertex appears multiple times in edges for the same relation
            source_vertex_indexes: list[tuple[str, tuple[str, ...]]] = []
            target_vertex_indexes: list[tuple[str, tuple[str, ...]]] = []

            for edge in relation_edges:
                source_vertex = edge.source
                target_vertex = edge.target

                # Get primary index for source vertex
                source_index = schema.vertex_config.index(source_vertex)
                source_vertex_indexes.append(
                    (source_vertex, tuple(source_index.fields))
                )

                # Get primary index for target vertex
                target_index = schema.vertex_config.index(target_vertex)
                target_vertex_indexes.append(
                    (target_vertex, tuple(target_index.fields))
                )

            # Normalize source indexes
            self._normalize_vertex_indexes(
                source_vertex_indexes,
                relation,
                schema,
                field_index_mappings,
                "source",
            )

            # Normalize target indexes
            self._normalize_vertex_indexes(
                target_vertex_indexes,
                relation,
                schema,
                field_index_mappings,
                "target",
            )

    # Fourth pass: apply the field maps from edge/relation index normalization to resources:
    # their transforms are updated so that old index field names map to the ones chosen in the previous step
    if field_index_mappings:
        for resource in schema.resources:
            self._apply_field_index_mappings_to_resource(
                resource, field_index_mappings
            )

    return schema
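
The most-popular-index selection that _normalize_vertex_indexes performs for each relation group can be sketched in isolation. The helper below is illustrative only: it ignores the schema mutation and logging that the real method does, and the vertex names in the example are hypothetical.

from collections import Counter

def pick_majority_index(
    vertex_indexes: dict[str, tuple[str, ...]],
) -> tuple[tuple[str, ...], dict[str, dict[str, str]]]:
    # Return the most common index tuple and, for vertices whose index
    # deviates from it, a per-vertex old -> new field rename mapping.
    counter = Counter(vertex_indexes.values())
    majority = counter.most_common(1)[0][0]
    mappings: dict[str, dict[str, str]] = {}
    for vertex, fields in vertex_indexes.items():
        if fields == majority:
            continue
        if len(fields) == len(majority):
            # Same arity: map positionally, skipping fields that already match.
            renames = {old: new for old, new in zip(fields, majority) if old != new}
        else:
            # Different arity: fall back to mapping the first field only.
            renames = {fields[0]: majority[0]} if fields and majority else {}
        if renames:
            mappings[vertex] = renames
    return majority, mappings

# Two source vertices index on "uid", one on "id" -> "id" is remapped to "uid".
majority, maps = pick_majority_index(
    {"person": ("uid",), "company": ("uid",), "device": ("id",)}
)
assert majority == ("uid",)
assert maps == {"device": {"id": "uid"}}
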