Skip to content

graflo.hq

High-level orchestration modules for graflo.

This package provides high-level orchestration classes that coordinate multiple components for graph database operations.

CastBatchResult

Bases: BaseModel

Outcome of casting a batch through a resource (possibly with skipped documents).

Source code in graflo/hq/ingestion_parameters.py
class CastBatchResult(BaseModel):
    """Outcome of casting a batch through a resource (possibly with skipped documents)."""

    # GraphContainer is not a pydantic type, so arbitrary types must be allowed.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Graph built from the documents that cast successfully.
    graph: GraphContainer
    # Per-document failures collected when on_doc_error != "fail"; empty otherwise.
    failures: list[DocCastFailure] = Field(default_factory=list)

Caster

Main class for data casting and ingestion.

This class handles the process of casting data into graph structures and ingesting them into the database. It supports batch processing, parallel execution, and various data formats.

Attributes:

Name Type Description
schema

Schema configuration for the graph

ingestion_params

IngestionParams instance controlling ingestion behavior

Source code in graflo/hq/caster.py
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
class Caster:
    """Main class for data casting and ingestion.

    This class handles the process of casting data into graph structures and
    ingesting them into the database. It supports batch processing, parallel
    execution, and various data formats.

    Attributes:
        schema: Schema configuration for the graph
        ingestion_params: IngestionParams instance controlling ingestion behavior
    """

    def __init__(
        self,
        schema: Schema,
        ingestion_model: IngestionModel,
        ingestion_params: IngestionParams | None = None,
        **kwargs,
    ):
        """Initialize the caster with schema and configuration.

        Args:
            schema: Schema configuration for the graph
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, creates IngestionParams from kwargs or uses defaults
            **kwargs: Additional configuration options (for backward compatibility):
                - clear_data: Whether to clear existing data before ingestion
                - n_cores: Number of CPU cores/threads to use for parallel processing
                - max_items: Maximum number of items to process
                - batch_size: Size of batches for processing
                - dry: Whether to perform a dry run
        """
        if ingestion_params is None:
            ingestion_params = IngestionParams(**kwargs)
        self.ingestion_params = ingestion_params
        self.schema = schema
        self.ingestion_model = ingestion_model
        # Vertex-name filter resolved per-ingest by _resolve_ingestion_scope();
        # None means "no filtering".
        self._allowed_vertex_names: set[str] | None = None
        # Running count of per-document cast failures, guarded by the lock below.
        self._doc_cast_error_total = 0
        # Serializes failure-sink writes and the error-budget check across
        # concurrent batch tasks.
        self._doc_cast_error_io_lock = asyncio.Lock()
        self._failure_sinks = failure_sinks_from_ingestion_params(ingestion_params)
        self._bulk_coordinator = BulkSessionCoordinator(schema=self.schema)
        # Set for the duration of ingest_data_sources(); reset in its finally block.
        self._ingest_bindings: Bindings | None = None
        self._connection_provider: ConnectionProvider = EmptyConnectionProvider()

    # ------------------------------------------------------------------
    # Casting
    # ------------------------------------------------------------------

    async def _ensure_bulk_session(self, conn_conf: DBConfig) -> str | None:
        """Return active native bulk session id, starting one if needed."""
        return await self._bulk_coordinator.ensure_session(conn_conf)

    async def _finalize_bulk_session(self, conn_conf: DBConfig) -> None:
        """Finalize any active bulk session using the current ingest-scoped bindings."""
        await self._bulk_coordinator.finalize(
            conn_conf,
            bindings=self._ingest_bindings,
            connection_provider=self._connection_provider,
        )

    async def _persist_doc_failures(self, failures: list[DocCastFailure]) -> None:
        """Write document-cast failures to sinks and enforce the error budget.

        Raises:
            DocErrorBudgetExceeded: when the accumulated failure count exceeds
                ``ingestion_params.max_doc_errors``.
        """
        if not failures:
            return
        params = self.ingestion_params

        # Lock ensures sink writes and the budget check are atomic with
        # respect to other concurrently-processed batches.
        async with self._doc_cast_error_io_lock:
            for sink in self._failure_sinks:
                await sink.write_failures(failures)

            self._doc_cast_error_total += len(failures)
            if params.max_doc_errors is not None:
                if self._doc_cast_error_total > params.max_doc_errors:
                    raise DocErrorBudgetExceeded(
                        total_failures=self._doc_cast_error_total,
                        limit=params.max_doc_errors,
                        doc_error_sink_path=params.doc_error_sink_path,
                    )

        # No sinks configured: fall back to logging each failure.
        if not self._failure_sinks:
            for fail in failures:
                logger.error(
                    "Document cast failure resource=%s doc_index=%s %s: %s",
                    fail.resource_name,
                    fail.doc_index,
                    fail.exception_type,
                    fail.message,
                    extra={"doc_cast_failure": fail.model_dump(mode="json")},
                )

    async def cast_normal_resource(
        self, data, resource_name: str | None = None
    ) -> CastBatchResult:
        """Cast data into a graph container using a resource.

        Args:
            data: Iterable of documents to cast
            resource_name: Optional name of the resource to use

        Returns:
            CastBatchResult with graph and any per-document failures (empty when
            ``on_doc_error`` is ``fail`` and the batch succeeds).
        """
        rr = self.ingestion_model.fetch_resource(resource_name)
        resolved_name = rr.name
        params = self.ingestion_params

        # Bound the number of concurrently running to_thread casts.
        semaphore = asyncio.Semaphore(params.n_cores)

        async def process_doc(doc: dict[str, Any]) -> Any:
            # The resource callable is presumably CPU-bound / blocking, hence
            # off-loaded to a worker thread.
            async with semaphore:
                return await asyncio.to_thread(rr, doc)

        if params.on_doc_error == "fail":
            # Fail-fast path: any exception propagates out of gather().
            coros = [process_doc(doc) for doc in data]
            docs = await asyncio.gather(*coros)
            graph = GraphContainer.from_docs_list(docs)
            _filter_graph_container_by_vertices_inplace(
                graph, allowed_vertex_names=self._allowed_vertex_names
            )
            return CastBatchResult(graph=graph, failures=[])

        # Collect-failures path: materialize docs so failures can be indexed.
        doc_list = list(data)
        raw = await asyncio.gather(
            *[process_doc(doc) for doc in doc_list],
            return_exceptions=True,
        )
        docs: list[Any] = []
        failures: list[DocCastFailure] = []
        for i, item in enumerate(raw):
            doc_raw = doc_list[i]
            # Non-dict documents are captured by repr for the failure preview.
            doc = (
                doc_raw
                if isinstance(doc_raw, dict)
                else {"_source_repr": repr(doc_raw)}
            )

            # Cancellation and interpreter-exit signals are never swallowed.
            if isinstance(item, asyncio.CancelledError):
                raise item
            if isinstance(item, (KeyboardInterrupt, SystemExit)):
                raise item
            if isinstance(item, BaseException):
                failures.append(
                    _doc_failure_from_exception(
                        resource_name=resolved_name,
                        doc_index=i,
                        doc=doc,
                        exc=item,
                        doc_keys=params.doc_error_preview_keys,
                        doc_preview_max_bytes=params.doc_error_preview_max_bytes,
                    )
                )
                continue
            docs.append(item)

        # May raise DocErrorBudgetExceeded if the cumulative budget is blown.
        await self._persist_doc_failures(failures)

        graph = GraphContainer.from_docs_list(docs)
        _filter_graph_container_by_vertices_inplace(
            graph, allowed_vertex_names=self._allowed_vertex_names
        )
        return CastBatchResult(graph=graph, failures=failures)

    # ------------------------------------------------------------------
    # Processing pipeline
    # ------------------------------------------------------------------

    async def process_batch(
        self,
        batch,
        resource_name: str | None,
        conn_conf: None | DBConfig = None,
    ) -> None:
        """Process a batch of data.

        Args:
            batch: Batch of data to process
            resource_name: Optional name of the resource to use
            conn_conf: Optional database connection configuration
        """
        result = await self.cast_normal_resource(batch, resource_name=resource_name)
        if result.failures:
            # Summarize: log the count and only the first failure's details.
            logger.warning(
                "Resource %r batch had %d document cast failure(s); first: %s: %s",
                result.failures[0].resource_name,
                len(result.failures),
                result.failures[0].exception_type,
                result.failures[0].message,
            )
        gc = result.graph

        # Without a connection config the batch is cast but never written.
        if conn_conf is not None:
            writer = self._make_db_writer()
            bulk_sid = await self._ensure_bulk_session(conn_conf)
            await writer.write(
                gc=gc,
                conn_conf=conn_conf,
                resource_name=resource_name,
                bulk_session_id=bulk_sid,
            )

    async def process_data_source(
        self,
        data_source: AbstractDataSource,
        resource_name: str | None = None,
        conn_conf: None | DBConfig = None,
    ) -> None:
        """Process a data source.

        Args:
            data_source: Data source to process
            resource_name: Optional name of the resource (overrides data_source.resource_name)
            conn_conf: Optional database connection configuration
        """
        actual_resource_name = resource_name or data_source.resource_name

        # Same semantics as AbstractDataSource.iter_batches(limit=...).
        limit = self.ingestion_params.max_items
        batch_prefetch = self.ingestion_params.batch_prefetch
        # Bounded queue provides backpressure: the producer prefetches at most
        # `batch_prefetch` batches ahead of the consumer.
        queue: asyncio.Queue[list[dict] | object] = asyncio.Queue(
            maxsize=batch_prefetch
        )
        sentinel = object()
        fetch_error: Exception | None = None

        batches_iter = data_source.iter_batches(
            batch_size=self.ingestion_params.batch_size,
            limit=limit,
        )

        def _next_batch_or_sentinel() -> list[dict] | object:
            # Runs in a worker thread; converts iterator exhaustion into the
            # sentinel so the async side never has to catch StopIteration.
            try:
                return next(batches_iter)
            except StopIteration:
                return sentinel

        async def _produce_batches() -> None:
            nonlocal fetch_error
            try:
                while True:
                    item = await asyncio.to_thread(_next_batch_or_sentinel)
                    await queue.put(item)
                    if item is sentinel:
                        return
            except asyncio.CancelledError:
                raise
            except Exception as exc:
                # Record the fetch error and unblock the consumer; the error
                # is re-raised after the consumer loop finishes.
                fetch_error = exc
                await queue.put(sentinel)

        producer_task = asyncio.create_task(_produce_batches())
        process_error: Exception | None = None
        try:
            while True:
                item = await queue.get()
                if item is sentinel:
                    break
                batch = cast(list[dict], item)
                await self.process_batch(
                    batch,
                    resource_name=actual_resource_name,
                    conn_conf=conn_conf,
                )
        except Exception as exc:
            process_error = exc
            raise
        finally:
            # On consumer failure, cancel the producer (it may be blocked on a
            # full queue that will never drain); always await it to completion.
            if process_error is not None and not producer_task.done():
                producer_task.cancel()
            try:
                await producer_task
            except asyncio.CancelledError:
                pass

        if fetch_error is not None:
            raise fetch_error

    async def process_resource(
        self,
        resource_instance: (
            Path | str | list[dict] | list[list] | pd.DataFrame | dict[str, Any]
        ),
        resource_name: str | None,
        conn_conf: None | DBConfig = None,
        **kwargs,
    ) -> None:
        """Process a resource instance from configuration or direct data.

        This method accepts either:
        1. A configuration dictionary with 'source_type' and data source parameters
        2. A file path (Path or str) - creates FileDataSource
        3. In-memory data (list[dict], list[list], or pd.DataFrame) - creates InMemoryDataSource

        Args:
            resource_instance: Configuration dict, file path, or in-memory data.
                Configuration dict format:
                - {"source_type": "file", "path": "data.json"}
                - {"source_type": "api", "config": {"url": "https://..."}}
                - {"source_type": "sql", "config": {"connection_string": "...", "query": "..."}}
                - {"source_type": "in_memory", "data": [...]}
            resource_name: Optional name of the resource
            conn_conf: Optional database connection configuration
            **kwargs: Additional arguments passed to data source creation
                (e.g., columns for list[list], encoding for files)
        """
        if isinstance(resource_instance, dict):
            # Config dict: kwargs override keys from the dict itself.
            config = resource_instance.copy()
            config.update(kwargs)
            data_source = DataSourceFactory.create_data_source_from_config(config)
        elif isinstance(resource_instance, (Path, str)):
            file_type: str | ChunkerType | None = cast(
                str | ChunkerType | None, kwargs.get("file_type", None)
            )
            encoding: EncodingType = cast(
                EncodingType, kwargs.get("encoding", EncodingType.UTF_8)
            )
            sep: str | None = cast(str | None, kwargs.get("sep", None))
            data_source = DataSourceFactory.create_file_data_source(
                path=resource_instance,
                file_type=file_type,
                encoding=encoding,
                sep=sep,
            )
        else:
            # In-memory data (list[dict], list[list], or DataFrame).
            columns: list[str] | None = cast(
                list[str] | None, kwargs.get("columns", None)
            )
            data_source = DataSourceFactory.create_in_memory_data_source(
                data=resource_instance,
                columns=columns,
            )

        data_source.resource_name = resource_name

        await self.process_data_source(
            data_source=data_source,
            resource_name=resource_name,
            conn_conf=conn_conf,
        )

    # ------------------------------------------------------------------
    # Queue-based processing
    # ------------------------------------------------------------------

    async def process_with_queue(
        self, tasks: asyncio.Queue, conn_conf: DBConfig | None = None
    ) -> None:
        """Process tasks from a queue.

        Args:
            tasks: Async queue of tasks to process
            conn_conf: Optional database connection configuration
        """
        # One None sentinel per worker signals shutdown (see ingest_data_sources).
        SENTINEL = None

        while True:
            try:
                task = await tasks.get()

                if task is SENTINEL:
                    tasks.task_done()
                    break

                if isinstance(task, tuple) and len(task) == 2:
                    filepath, resource_name = task
                    await self.process_resource(
                        resource_instance=filepath,
                        resource_name=resource_name,
                        conn_conf=conn_conf,
                    )
                elif isinstance(task, AbstractDataSource):
                    await self.process_data_source(
                        data_source=task, conn_conf=conn_conf
                    )
                tasks.task_done()
            except Exception as e:
                # NOTE(review): a failure stops this worker entirely (break),
                # leaving remaining queued tasks for the other workers.
                logger.error(f"Error processing task: {e}", exc_info=True)
                tasks.task_done()
                break

    # ------------------------------------------------------------------
    # Normalization utility
    # ------------------------------------------------------------------

    @staticmethod
    def normalize_resource(
        data: pd.DataFrame | list[list] | list[dict], columns: list[str] | None = None
    ) -> list[dict]:
        """Normalize resource data into a list of dictionaries.

        Args:
            data: Data to normalize (DataFrame, list of lists, or list of dicts)
            columns: Optional column names for list data

        Returns:
            list[dict]: Normalized data as list of dictionaries

        Raises:
            ValueError: If columns is not provided for list data
        """
        if isinstance(data, pd.DataFrame):
            # DataFrame carries its own column names; ignore the argument.
            columns = data.columns.tolist()
            _data = data.values.tolist()
        elif data and isinstance(data[0], list):
            _data = cast(list[list], data)
            if columns is None:
                raise ValueError("columns should be set")
        else:
            # Already list[dict] (or empty): pass through unchanged.
            return cast(list[dict], data)
        rows_dressed = [{k: v for k, v in zip(columns, item)} for item in _data]
        return rows_dressed

    async def ingest_data_sources(
        self,
        data_source_registry: DataSourceRegistry,
        conn_conf: DBConfig,
        ingestion_params: IngestionParams | None = None,
        allowed_resource_names: set[str] | None = None,
        bindings: Bindings | None = None,
        connection_provider: ConnectionProvider | None = None,
    ) -> None:
        """Ingest data from data sources in a registry.

        Note: Schema definition should be handled separately via GraphEngine.define_schema()
        before calling this method.

        Args:
            data_source_registry: Registry containing data sources mapped to resources
            conn_conf: Database connection configuration
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, uses default IngestionParams()
            bindings: Optional manifest bindings (used to resolve S3 staging proxies).
            connection_provider: Runtime credential provider for source connectors and S3.
        """
        if ingestion_params is None:
            ingestion_params = IngestionParams()

        self.ingestion_params = ingestion_params
        # Error budget is per-ingest; reset the cumulative counter.
        self._doc_cast_error_total = 0
        init_only = ingestion_params.init_only

        if init_only:
            # NOTE(review): terminates the whole process, not just this call.
            logger.info("ingest execution bound to init")
            sys.exit(0)

        self._ingest_bindings = bindings
        self._connection_provider = connection_provider or EmptyConnectionProvider()
        try:
            tasks: list[AbstractDataSource] = []
            for resource_name in self.ingestion_model._resources.keys():
                if (
                    allowed_resource_names is not None
                    and resource_name not in allowed_resource_names
                ):
                    continue
                data_sources = data_source_registry.get_data_sources(resource_name)
                if data_sources:
                    logger.info(
                        f"For resource name {resource_name} {len(data_sources)} data sources were found"
                    )
                    tasks.extend(data_sources)

            with Timer() as klepsidra:
                if self.ingestion_params.n_cores > 1:
                    # Fan out over n_cores workers; one None sentinel per worker.
                    queue_tasks: asyncio.Queue = asyncio.Queue()
                    for item in tasks:
                        await queue_tasks.put(item)

                    for _ in range(self.ingestion_params.n_cores):
                        await queue_tasks.put(None)

                    worker_tasks = [
                        self.process_with_queue(queue_tasks, conn_conf=conn_conf)
                        for _ in range(self.ingestion_params.n_cores)
                    ]

                    await asyncio.gather(*worker_tasks)
                else:
                    # Single-core: process sources sequentially.
                    for data_source in tasks:
                        await self.process_data_source(
                            data_source=data_source, conn_conf=conn_conf
                        )
            logger.info(f"Processing took {klepsidra.elapsed:.1f} sec")
        finally:
            # Always finalize the bulk session and drop ingest-scoped state.
            await self._finalize_bulk_session(conn_conf)
            self._ingest_bindings = None
            self._connection_provider = EmptyConnectionProvider()

    def ingest(
        self,
        target_db_config: DBConfig,
        bindings: Bindings | None = None,
        ingestion_params: IngestionParams | None = None,
        connection_provider: ConnectionProvider | None = None,
    ) -> None:
        """Ingest data into the graph database.

        This is the main ingestion method that takes:
        - Schema: Graph structure (already set in Caster)
        - OutputConfig: Target graph database configuration
        - Bindings: Mapping of resources to physical data sources
        - IngestionParams: Parameters controlling the ingestion process

        Args:
            target_db_config: Target database connection configuration (for writing graph)
            bindings: Bindings instance mapping resources to data sources
                If None, defaults to empty Bindings()
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, uses default IngestionParams()
        """
        bindings = bindings or Bindings()
        ingestion_params = ingestion_params or IngestionParams()

        db_flavor = target_db_config.connection_type
        self.schema.db_profile.db_flavor = db_flavor
        self.schema.finish_init()

        # Resolves resource filter (returned) and vertex filter (stored on self).
        allowed_resource_names = self._resolve_ingestion_scope(ingestion_params)

        self.ingestion_model.finish_init(
            self.schema.core_schema,
            strict_references=ingestion_params.strict_references,
            dynamic_edge_feedback=ingestion_params.dynamic_edges,
            allowed_vertex_names=self._allowed_vertex_names,
            target_db_flavor=db_flavor,
        )

        registry = RegistryBuilder(self.schema, self.ingestion_model).build(
            bindings,
            ingestion_params,
            connection_provider=connection_provider or EmptyConnectionProvider(),
            strict=ingestion_params.strict_registry,
        )

        # Synchronous entry point: drives the async pipeline to completion.
        asyncio.run(
            self.ingest_data_sources(
                data_source_registry=registry,
                conn_conf=target_db_config,
                ingestion_params=ingestion_params,
                allowed_resource_names=allowed_resource_names,
                bindings=bindings,
                connection_provider=connection_provider or EmptyConnectionProvider(),
            )
        )

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _resolve_ingestion_scope(
        self, ingestion_params: IngestionParams
    ) -> set[str] | None:
        """Resolve and validate resource/vertex filters for ingestion.

        Resolution order is resources first, then vertices.

        Returns:
            The allowed resource names, or None when no resource filter is set.
            The vertex filter is stored on ``self._allowed_vertex_names``.

        Raises:
            ValueError: If a requested resource or vertex name is unknown.
        """
        if ingestion_params.resources is not None:
            known_resources = set(self.ingestion_model._resources.keys())
            requested_resources = set(ingestion_params.resources)
            unknown_resources = requested_resources - known_resources
            if unknown_resources:
                raise ValueError(
                    "Unknown resources in ingestion_params.resources: "
                    + ", ".join(sorted(unknown_resources))
                )
            allowed_resource_names: set[str] | None = requested_resources
        else:
            allowed_resource_names = None

        if ingestion_params.vertices is not None:
            known_vertices = {
                v.name for v in self.schema.core_schema.vertex_config.vertices
            }
            requested_vertices = set(ingestion_params.vertices)
            unknown_vertices = requested_vertices - known_vertices
            if unknown_vertices:
                raise ValueError(
                    "Unknown vertices in ingestion_params.vertices: "
                    + ", ".join(sorted(unknown_vertices))
                )
            self._allowed_vertex_names = requested_vertices
        else:
            self._allowed_vertex_names = None

        return allowed_resource_names

    def _make_db_writer(self) -> DBWriter:
        """Create a :class:`DBWriter` from the current ingestion params."""
        # DB-op concurrency falls back to n_cores when not set explicitly.
        max_concurrent = (
            self.ingestion_params.max_concurrent_db_ops
            if self.ingestion_params.max_concurrent_db_ops is not None
            else self.ingestion_params.n_cores
        )
        return DBWriter(
            schema=self.schema,
            ingestion_model=self.ingestion_model,
            dry=self.ingestion_params.dry,
            max_concurrent=max_concurrent,
        )
__init__(schema, ingestion_model, ingestion_params=None, **kwargs)

Initialize the caster with schema and configuration.

Parameters:

Name Type Description Default
schema Schema

Schema configuration for the graph

required
ingestion_params IngestionParams | None

IngestionParams instance with ingestion configuration. If None, creates IngestionParams from kwargs or uses defaults

None
**kwargs

Additional configuration options (for backward compatibility):

- `clear_data`: Whether to clear existing data before ingestion
- `n_cores`: Number of CPU cores/threads to use for parallel processing
- `max_items`: Maximum number of items to process
- `batch_size`: Size of batches for processing
- `dry`: Whether to perform a dry run

{}
Source code in graflo/hq/caster.py
def __init__(
    self,
    schema: Schema,
    ingestion_model: IngestionModel,
    ingestion_params: IngestionParams | None = None,
    **kwargs,
):
    """Initialize the caster with schema and configuration.

    Args:
        schema: Schema configuration for the graph
        ingestion_params: IngestionParams instance with ingestion configuration.
            If None, creates IngestionParams from kwargs or uses defaults
        **kwargs: Additional configuration options (for backward compatibility):
            - clear_data: Whether to clear existing data before ingestion
            - n_cores: Number of CPU cores/threads to use for parallel processing
            - max_items: Maximum number of items to process
            - batch_size: Size of batches for processing
            - dry: Whether to perform a dry run
    """
    if ingestion_params is None:
        ingestion_params = IngestionParams(**kwargs)
    self.ingestion_params = ingestion_params
    self.schema = schema
    self.ingestion_model = ingestion_model
    # Vertex filter resolved per-ingest; None means no filtering.
    self._allowed_vertex_names: set[str] | None = None
    # Cumulative per-document failure count, guarded by the lock below.
    self._doc_cast_error_total = 0
    self._doc_cast_error_io_lock = asyncio.Lock()
    self._failure_sinks = failure_sinks_from_ingestion_params(ingestion_params)
    self._bulk_coordinator = BulkSessionCoordinator(schema=self.schema)
    # Ingest-scoped state; populated during ingest_data_sources() only.
    self._ingest_bindings: Bindings | None = None
    self._connection_provider: ConnectionProvider = EmptyConnectionProvider()
cast_normal_resource(data, resource_name=None) async

Cast data into a graph container using a resource.

Parameters:

Name Type Description Default
data

Iterable of documents to cast

required
resource_name str | None

Optional name of the resource to use

None

Returns:

Type Description
CastBatchResult

CastBatchResult with graph and any per-document failures (empty when

CastBatchResult

on_doc_error is fail and the batch succeeds).

Source code in graflo/hq/caster.py
async def cast_normal_resource(
    self, data, resource_name: str | None = None
) -> CastBatchResult:
    """Cast data into a graph container using a resource.

    Args:
        data: Iterable of documents to cast
        resource_name: Optional name of the resource to use

    Returns:
        CastBatchResult with graph and any per-document failures (empty when
        ``on_doc_error`` is ``fail`` and the batch succeeds).
    """
    rr = self.ingestion_model.fetch_resource(resource_name)
    resolved_name = rr.name
    params = self.ingestion_params

    # Bound the number of concurrently running to_thread casts.
    semaphore = asyncio.Semaphore(params.n_cores)

    async def process_doc(doc: dict[str, Any]) -> Any:
        async with semaphore:
            return await asyncio.to_thread(rr, doc)

    if params.on_doc_error == "fail":
        # Fail-fast path: any exception propagates out of gather().
        coros = [process_doc(doc) for doc in data]
        docs = await asyncio.gather(*coros)
        graph = GraphContainer.from_docs_list(docs)
        _filter_graph_container_by_vertices_inplace(
            graph, allowed_vertex_names=self._allowed_vertex_names
        )
        return CastBatchResult(graph=graph, failures=[])

    # Collect-failures path: materialize docs so failures can be indexed.
    doc_list = list(data)
    raw = await asyncio.gather(
        *[process_doc(doc) for doc in doc_list],
        return_exceptions=True,
    )
    docs: list[Any] = []
    failures: list[DocCastFailure] = []
    for i, item in enumerate(raw):
        doc_raw = doc_list[i]
        # Non-dict documents are captured by repr for the failure preview.
        doc = (
            doc_raw
            if isinstance(doc_raw, dict)
            else {"_source_repr": repr(doc_raw)}
        )

        # Cancellation and interpreter-exit signals are never swallowed.
        if isinstance(item, asyncio.CancelledError):
            raise item
        if isinstance(item, (KeyboardInterrupt, SystemExit)):
            raise item
        if isinstance(item, BaseException):
            failures.append(
                _doc_failure_from_exception(
                    resource_name=resolved_name,
                    doc_index=i,
                    doc=doc,
                    exc=item,
                    doc_keys=params.doc_error_preview_keys,
                    doc_preview_max_bytes=params.doc_error_preview_max_bytes,
                )
            )
            continue
        docs.append(item)

    # May raise DocErrorBudgetExceeded if the cumulative budget is blown.
    await self._persist_doc_failures(failures)

    graph = GraphContainer.from_docs_list(docs)
    _filter_graph_container_by_vertices_inplace(
        graph, allowed_vertex_names=self._allowed_vertex_names
    )
    return CastBatchResult(graph=graph, failures=failures)

ingest(target_db_config, bindings=None, ingestion_params=None, connection_provider=None)

Ingest data into the graph database.

This is the main ingestion method that takes:

- Schema: Graph structure (already set in Caster)
- OutputConfig: Target graph database configuration
- Bindings: Mapping of resources to physical data sources
- IngestionParams: Parameters controlling the ingestion process

Parameters:

Name Type Description Default
target_db_config DBConfig

Target database connection configuration (for writing graph)

required
bindings Bindings | None

Bindings instance mapping resources to data sources If None, defaults to empty Bindings()

None
ingestion_params IngestionParams | None

IngestionParams instance with ingestion configuration. If None, uses default IngestionParams()

None
Source code in graflo/hq/caster.py
def ingest(
    self,
    target_db_config: DBConfig,
    bindings: Bindings | None = None,
    ingestion_params: IngestionParams | None = None,
    connection_provider: ConnectionProvider | None = None,
):
    """Ingest data into the graph database.

    This is the main ingestion method that takes:
    - Schema: Graph structure (already set in Caster)
    - OutputConfig: Target graph database configuration
    - Bindings: Mapping of resources to physical data sources
    - IngestionParams: Parameters controlling the ingestion process

    Args:
        target_db_config: Target database connection configuration (for writing graph)
        bindings: Bindings instance mapping resources to data sources
            If None, defaults to empty Bindings()
        ingestion_params: IngestionParams instance with ingestion configuration.
            If None, uses default IngestionParams()
        connection_provider: Runtime credential provider for source connectors
            and S3. If None, an EmptyConnectionProvider is used.
    """
    bindings = bindings or Bindings()
    ingestion_params = ingestion_params or IngestionParams()
    # Resolve the provider default once; it is used both for registry
    # building and for the ingestion run itself.
    provider = connection_provider or EmptyConnectionProvider()

    # Align schema with the target database flavor before building anything.
    db_flavor = target_db_config.connection_type
    self.schema.db_profile.db_flavor = db_flavor
    self.schema.finish_init()

    allowed_resource_names = self._resolve_ingestion_scope(ingestion_params)

    self.ingestion_model.finish_init(
        self.schema.core_schema,
        strict_references=ingestion_params.strict_references,
        dynamic_edge_feedback=ingestion_params.dynamic_edges,
        allowed_vertex_names=self._allowed_vertex_names,
        target_db_flavor=db_flavor,
    )

    registry = RegistryBuilder(self.schema, self.ingestion_model).build(
        bindings,
        ingestion_params,
        connection_provider=provider,
        strict=ingestion_params.strict_registry,
    )

    asyncio.run(
        self.ingest_data_sources(
            data_source_registry=registry,
            conn_conf=target_db_config,
            ingestion_params=ingestion_params,
            allowed_resource_names=allowed_resource_names,
            bindings=bindings,
            connection_provider=provider,
        )
    )

ingest_data_sources(data_source_registry, conn_conf, ingestion_params=None, allowed_resource_names=None, bindings=None, connection_provider=None) async

Ingest data from data sources in a registry.

Note: Schema definition should be handled separately via GraphEngine.define_schema() before calling this method.

Parameters:

Name Type Description Default
data_source_registry DataSourceRegistry

Registry containing data sources mapped to resources

required
conn_conf DBConfig

Database connection configuration

required
ingestion_params IngestionParams | None

IngestionParams instance with ingestion configuration. If None, uses default IngestionParams()

None
bindings Bindings | None

Optional manifest bindings (used to resolve S3 staging proxies).

None
connection_provider ConnectionProvider | None

Runtime credential provider for source connectors and S3.

None
Source code in graflo/hq/caster.py
async def ingest_data_sources(
    self,
    data_source_registry: DataSourceRegistry,
    conn_conf: DBConfig,
    ingestion_params: IngestionParams | None = None,
    allowed_resource_names: set[str] | None = None,
    bindings: Bindings | None = None,
    connection_provider: ConnectionProvider | None = None,
):
    """Ingest data from data sources in a registry.

    Note: Schema definition should be handled separately via GraphEngine.define_schema()
    before calling this method.

    Args:
        data_source_registry: Registry containing data sources mapped to resources
        conn_conf: Database connection configuration
        ingestion_params: IngestionParams instance with ingestion configuration.
            If None, uses default IngestionParams()
        allowed_resource_names: When given, only resources whose name is in
            this set are ingested; other registry entries are skipped.
        bindings: Optional manifest bindings (used to resolve S3 staging proxies).
        connection_provider: Runtime credential provider for source connectors and S3.
    """
    if ingestion_params is None:
        ingestion_params = IngestionParams()

    self.ingestion_params = ingestion_params
    self._doc_cast_error_total = 0
    init_only = ingestion_params.init_only

    if init_only:
        # init_only short-circuits the run and exits the interpreter.
        logger.info("ingest execution bound to init")
        sys.exit(0)

    self._ingest_bindings = bindings
    self._connection_provider = connection_provider or EmptyConnectionProvider()
    try:
        # Flatten all data sources of all (allowed) resources into one task list.
        tasks: list[AbstractDataSource] = []
        for resource_name in self.ingestion_model._resources.keys():
            if (
                allowed_resource_names is not None
                and resource_name not in allowed_resource_names
            ):
                continue
            data_sources = data_source_registry.get_data_sources(resource_name)
            if data_sources:
                logger.info(
                    f"For resource name {resource_name} {len(data_sources)} data sources were found"
                )
                tasks.extend(data_sources)

        with Timer() as klepsidra:
            if self.ingestion_params.n_cores > 1:
                # Multi-worker path: one shared queue, one None sentinel per
                # worker so each worker loop terminates exactly once.
                queue_tasks: asyncio.Queue = asyncio.Queue()
                for item in tasks:
                    await queue_tasks.put(item)

                for _ in range(self.ingestion_params.n_cores):
                    await queue_tasks.put(None)

                worker_tasks = [
                    self.process_with_queue(queue_tasks, conn_conf=conn_conf)
                    for _ in range(self.ingestion_params.n_cores)
                ]

                await asyncio.gather(*worker_tasks)
            else:
                # Single-worker path: process sources sequentially.
                for data_source in tasks:
                    await self.process_data_source(
                        data_source=data_source, conn_conf=conn_conf
                    )
        logger.info(f"Processing took {klepsidra.elapsed:.1f} sec")
    finally:
        # Always finalize the bulk session and drop per-run state, even on error.
        await self._finalize_bulk_session(conn_conf)
        self._ingest_bindings = None
        self._connection_provider = EmptyConnectionProvider()

normalize_resource(data, columns=None) staticmethod

Normalize resource data into a list of dictionaries.

Parameters:

Name Type Description Default
data DataFrame | list[list] | list[dict]

Data to normalize (DataFrame, list of lists, or list of dicts)

required
columns list[str] | None

Optional column names for list data

None

Returns:

Type Description
list[dict]

list[dict]: Normalized data as list of dictionaries

Raises:

Type Description
ValueError

If columns is not provided for list data

Source code in graflo/hq/caster.py
@staticmethod
def normalize_resource(
    data: pd.DataFrame | list[list] | list[dict], columns: list[str] | None = None
) -> list[dict]:
    """Normalize resource data into a list of dictionaries.

    Args:
        data: Data to normalize (DataFrame, list of lists, or list of dicts)
        columns: Optional column names for list data

    Returns:
        list[dict]: Normalized data as list of dictionaries

    Raises:
        ValueError: If columns is not provided for list data
    """
    if isinstance(data, pd.DataFrame):
        columns = data.columns.tolist()
        _data = data.values.tolist()
    elif data and isinstance(data[0], list):
        _data = cast(list[list], data)
        if columns is None:
            raise ValueError("columns should be set")
    else:
        return cast(list[dict], data)
    rows_dressed = [{k: v for k, v in zip(columns, item)} for item in _data]
    return rows_dressed

process_batch(batch, resource_name, conn_conf=None) async

Process a batch of data.

Parameters:

Name Type Description Default
batch

Batch of data to process

required
resource_name str | None

Optional name of the resource to use

required
conn_conf None | DBConfig

Optional database connection configuration

None
Source code in graflo/hq/caster.py
async def process_batch(
    self,
    batch,
    resource_name: str | None,
    conn_conf: None | DBConfig = None,
):
    """Cast one batch into a graph and optionally persist it.

    Args:
        batch: Batch of data to process
        resource_name: Optional name of the resource to use
        conn_conf: Optional database connection configuration
    """
    cast_result = await self.cast_normal_resource(batch, resource_name=resource_name)

    failures = cast_result.failures
    if failures:
        first = failures[0]
        logger.warning(
            "Resource %r batch had %d document cast failure(s); first: %s: %s",
            first.resource_name,
            len(failures),
            first.exception_type,
            first.message,
        )

    if conn_conf is None:
        # No target database configured: casting (and failure logging) is all.
        return

    writer = self._make_db_writer()
    bulk_sid = await self._ensure_bulk_session(conn_conf)
    await writer.write(
        gc=cast_result.graph,
        conn_conf=conn_conf,
        resource_name=resource_name,
        bulk_session_id=bulk_sid,
    )

process_data_source(data_source, resource_name=None, conn_conf=None) async

Process a data source.

Parameters:

Name Type Description Default
data_source AbstractDataSource

Data source to process

required
resource_name str | None

Optional name of the resource (overrides data_source.resource_name)

None
conn_conf None | DBConfig

Optional database connection configuration

None
Source code in graflo/hq/caster.py
async def process_data_source(
    self,
    data_source: AbstractDataSource,
    resource_name: str | None = None,
    conn_conf: None | DBConfig = None,
):
    """Process a data source.

    Batches are produced on a worker thread and consumed on the event loop,
    with at most ``batch_prefetch`` batches buffered between the two.

    Args:
        data_source: Data source to process
        resource_name: Optional name of the resource (overrides data_source.resource_name)
        conn_conf: Optional database connection configuration
    """
    actual_resource_name = resource_name or data_source.resource_name

    # Same semantics as AbstractDataSource.iter_batches(limit=...).
    limit = self.ingestion_params.max_items
    batch_prefetch = self.ingestion_params.batch_prefetch
    # Bounded queue applies backpressure: the producer blocks once
    # batch_prefetch batches are waiting.
    queue: asyncio.Queue[list[dict] | object] = asyncio.Queue(
        maxsize=batch_prefetch
    )
    # Unique end-of-stream marker (a batch value could legitimately be falsy).
    sentinel = object()
    fetch_error: Exception | None = None

    batches_iter = data_source.iter_batches(
        batch_size=self.ingestion_params.batch_size,
        limit=limit,
    )

    def _next_batch_or_sentinel() -> list[dict] | object:
        # Runs on a worker thread; maps StopIteration to the sentinel so the
        # async producer can detect normal exhaustion.
        try:
            return next(batches_iter)
        except StopIteration:
            return sentinel

    async def _produce_batches() -> None:
        nonlocal fetch_error
        try:
            while True:
                item = await asyncio.to_thread(_next_batch_or_sentinel)
                await queue.put(item)
                if item is sentinel:
                    return
        except asyncio.CancelledError:
            raise
        except Exception as exc:
            # Record the fetch error and unblock the consumer; the error is
            # re-raised after the consumer loop finishes.
            fetch_error = exc
            await queue.put(sentinel)

    producer_task = asyncio.create_task(_produce_batches())
    process_error: Exception | None = None
    try:
        while True:
            item = await queue.get()
            if item is sentinel:
                break
            batch = cast(list[dict], item)
            await self.process_batch(
                batch,
                resource_name=actual_resource_name,
                conn_conf=conn_conf,
            )
    except Exception as exc:
        process_error = exc
        raise
    finally:
        # On consumer failure, cancel the (possibly blocked) producer;
        # otherwise just await its normal exit.
        if process_error is not None and not producer_task.done():
            producer_task.cancel()
        try:
            await producer_task
        except asyncio.CancelledError:
            pass

    if fetch_error is not None:
        raise fetch_error

process_resource(resource_instance, resource_name, conn_conf=None, **kwargs) async

Process a resource instance from configuration or direct data.

This method accepts one of the following:

1. A configuration dictionary with 'source_type' and data source parameters
2. A file path (Path or str) - creates FileDataSource
3. In-memory data (list[dict], list[list], or pd.DataFrame) - creates InMemoryDataSource

Parameters:

Name Type Description Default
resource_instance Path | str | list[dict] | list[list] | DataFrame | dict[str, Any]

Configuration dict, file path, or in-memory data. Configuration dict format: - {"source_type": "file", "path": "data.json"} - {"source_type": "api", "config": {"url": "https://..."}} - {"source_type": "sql", "config": {"connection_string": "...", "query": "..."}} - {"source_type": "in_memory", "data": [...]}

required
resource_name str | None

Optional name of the resource

required
conn_conf None | DBConfig

Optional database connection configuration

None
**kwargs

Additional arguments passed to data source creation (e.g., columns for list[list], encoding for files)

{}
Source code in graflo/hq/caster.py
async def process_resource(
    self,
    resource_instance: (
        Path | str | list[dict] | list[list] | pd.DataFrame | dict[str, Any]
    ),
    resource_name: str | None,
    conn_conf: None | DBConfig = None,
    **kwargs,
):
    """Process a resource given as a config dict, a file path, or in-memory data.

    Accepted forms of *resource_instance*:
    1. A configuration dictionary with 'source_type' and data source parameters
    2. A file path (Path or str) - creates FileDataSource
    3. In-memory data (list[dict], list[list], or pd.DataFrame) - creates InMemoryDataSource

    Args:
        resource_instance: Configuration dict, file path, or in-memory data.
            Configuration dict format:
            - {"source_type": "file", "path": "data.json"}
            - {"source_type": "api", "config": {"url": "https://..."}}
            - {"source_type": "sql", "config": {"connection_string": "...", "query": "..."}}
            - {"source_type": "in_memory", "data": [...]}
        resource_name: Optional name of the resource
        conn_conf: Optional database connection configuration
        **kwargs: Additional arguments passed to data source creation
            (e.g., columns for list[list], encoding for files)
    """
    if isinstance(resource_instance, dict):
        # Explicit configuration: kwargs override entries of the config dict.
        merged_config = {**resource_instance, **kwargs}
        data_source = DataSourceFactory.create_data_source_from_config(merged_config)
    elif isinstance(resource_instance, (Path, str)):
        # File path: pull file-specific options out of kwargs.
        data_source = DataSourceFactory.create_file_data_source(
            path=resource_instance,
            file_type=cast(
                str | ChunkerType | None, kwargs.get("file_type", None)
            ),
            encoding=cast(
                EncodingType, kwargs.get("encoding", EncodingType.UTF_8)
            ),
            sep=cast(str | None, kwargs.get("sep", None)),
        )
    else:
        # In-memory data (list of dicts/lists or DataFrame).
        data_source = DataSourceFactory.create_in_memory_data_source(
            data=resource_instance,
            columns=cast(list[str] | None, kwargs.get("columns", None)),
        )

    data_source.resource_name = resource_name

    await self.process_data_source(
        data_source=data_source,
        resource_name=resource_name,
        conn_conf=conn_conf,
    )

process_with_queue(tasks, conn_conf=None) async

Process tasks from a queue.

Parameters:

Name Type Description Default
tasks Queue

Async queue of tasks to process

required
conn_conf DBConfig | None

Optional database connection configuration

None
Source code in graflo/hq/caster.py
async def process_with_queue(
    self, tasks: asyncio.Queue, conn_conf: DBConfig | None = None
):
    """Worker loop: consume tasks from a queue until a sentinel is seen.

    Args:
        tasks: Async queue of tasks; a ``None`` item is the shutdown sentinel.
        conn_conf: Optional database connection configuration
    """
    SENTINEL = None

    while True:
        # Fetch OUTSIDE the try block: previously a failure inside get()
        # would fall into the handler and call task_done() with no matching
        # get(), raising ValueError and masking the original error.
        task = await tasks.get()
        try:
            if task is SENTINEL:
                break

            if isinstance(task, tuple) and len(task) == 2:
                # Legacy task shape: (filepath, resource_name).
                filepath, resource_name = task
                await self.process_resource(
                    resource_instance=filepath,
                    resource_name=resource_name,
                    conn_conf=conn_conf,
                )
            elif isinstance(task, AbstractDataSource):
                await self.process_data_source(
                    data_source=task, conn_conf=conn_conf
                )
            # Unrecognized task types are silently skipped (as before).
        except Exception as e:
            logger.error(f"Error processing task: {e}", exc_info=True)
            # Stop this worker on the first failure (matches previous
            # behavior); gather() only waits for workers to return, so the
            # leftover sentinel in the queue is harmless.
            break
        finally:
            # Exactly one task_done() per successful get(), on every path
            # (normal, sentinel, error).
            tasks.task_done()

ConnectionProvider

Bases: Protocol

Resolve runtime source connection/auth configuration.

New connector-centric resolution (preferred):

- `get_generalized_conn_config` takes a connector and returns the generalized runtime config.

Legacy helpers (kept for backwards compatibility):

- `get_postgres_config`
- `get_sparql_auth`

Source code in graflo/hq/connection_provider.py
class ConnectionProvider(Protocol):
    """Resolve runtime source connection/auth configuration.

    New connector-centric resolution (preferred):
    - :meth:`get_generalized_conn_config` takes a connector and returns the
      generalized runtime config.

    Legacy helpers (kept for backwards compatibility):
    - :meth:`get_postgres_config`
    - :meth:`get_sparql_auth`

    All methods are protocol stubs: per their annotations, implementations
    may return ``None`` when a request cannot be resolved.
    """

    def get_generalized_conn_config(
        self, connector: ResourceConnector
    ) -> GeneralizedConnConfig | None:
        """Return generalized runtime config for a connector."""

    def get_postgres_config(
        self, resource_name: str, connector: TableConnector
    ) -> PostgresConfig | None:
        """Return source DB config for a SQL table resource (legacy)."""

    def get_sparql_auth(
        self, resource_name: str, connector: SparqlConnector
    ) -> SparqlAuth | None:
        """Return source auth payload for a SPARQL resource (legacy)."""

    def get_generalized_config_by_proxy(
        self, conn_proxy: str
    ) -> GeneralizedConnConfig | None:
        """Resolve a non-secret proxy name to runtime config (S3, etc.)."""

get_generalized_config_by_proxy(conn_proxy)

Resolve a non-secret proxy name to runtime config (S3, etc.).

Source code in graflo/hq/connection_provider.py
def get_generalized_config_by_proxy(
    self, conn_proxy: str
) -> GeneralizedConnConfig | None:
    """Resolve a non-secret proxy name to runtime config (S3, etc.).

    Protocol stub: implementations return the resolved config, or None
    when the proxy cannot be resolved.
    """

get_generalized_conn_config(connector)

Return generalized runtime config for a connector.

Source code in graflo/hq/connection_provider.py
def get_generalized_conn_config(
    self, connector: ResourceConnector
) -> GeneralizedConnConfig | None:
    """Return generalized runtime config for a connector.

    Protocol stub: implementations return the config, or None when the
    connector cannot be resolved.
    """

get_postgres_config(resource_name, connector)

Return source DB config for a SQL table resource (legacy).

Source code in graflo/hq/connection_provider.py
def get_postgres_config(
    self, resource_name: str, connector: TableConnector
) -> PostgresConfig | None:
    """Return source DB config for a SQL table resource (legacy).

    Protocol stub: implementations return the config, or None when it
    cannot be resolved for this resource.
    """

get_sparql_auth(resource_name, connector)

Return source auth payload for a SPARQL resource (legacy).

Source code in graflo/hq/connection_provider.py
def get_sparql_auth(
    self, resource_name: str, connector: SparqlConnector
) -> SparqlAuth | None:
    """Return source auth payload for a SPARQL resource (legacy).

    Protocol stub: implementations return the auth payload, or None when
    it cannot be resolved for this resource.
    """

DBWriter

Push :class:GraphContainer data to the target graph database.

The orchestrator (e.g. :class:Caster) must initialize schema and ingestion_model for the target database (db_profile.db_flavor, :meth:Schema.finish_init, :meth:IngestionModel.finish_init) before calling :meth:write; this class does not repeat that work on every batch.

Attributes:

Name Type Description
schema

Schema configuration providing vertex/edge metadata.

dry

When True no database mutations are performed.

max_concurrent

Upper bound on concurrent DB operations (semaphore size).

Source code in graflo/hq/db_writer.py
class DBWriter:
    """Push :class:`GraphContainer` data to the target graph database.

    The orchestrator (e.g. :class:`Caster`) must initialize ``schema`` and
    ``ingestion_model`` for the target database (``db_profile.db_flavor``,
    :meth:`Schema.finish_init`, :meth:`IngestionModel.finish_init`) before
    calling :meth:`write`; this class does not repeat that work on every batch.

    Attributes:
        schema: Schema configuration providing vertex/edge metadata.
        dry: When ``True`` no database mutations are performed.
        max_concurrent: Upper bound on concurrent DB operations (semaphore size).
    """

    def __init__(
        self,
        schema: Schema,
        ingestion_model: IngestionModel,
        *,
        dry: bool = False,
        max_concurrent: int = 1,
    ):
        """Create a writer bound to *schema* and *ingestion_model*.

        Args:
            schema: Schema configuration providing vertex/edge metadata.
            ingestion_model: Model used to resolve resources and edge options.
            dry: When True no database mutations are performed.
            max_concurrent: Upper bound on concurrent DB operations
                (semaphore size).
        """
        self.schema = schema
        self.ingestion_model = ingestion_model
        self.dry = dry
        self.max_concurrent = max_concurrent
        # Cached DB-flavor-aware schema view and the flavor it was built for
        # (presumably managed by _db_aware_for; confirm against that helper).
        self._schema_db_aware: SchemaDBAware | None = None
        self._schema_db_aware_flavor: DBType | None = None

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    async def write(
        self,
        gc: GraphContainer,
        conn_conf: DBConfig,
        resource_name: str | None,
        *,
        bulk_session_id: str | None = None,
    ) -> None:
        """Push *gc* to the database (vertices, extra weights, then edges).

        When *bulk_session_id* is provided, appends rows using the connection's
        native bulk interface instead of using per-record writes.

        Args:
            gc: Graph container holding the vertices and edges to persist.
            conn_conf: Target database connection configuration.
            bulk_session_id: When set, use the native bulk-append path.
            resource_name: Resource governing this write; passed to
                ``fetch_resource`` on the non-bulk path.

        .. note::
            *gc* is mutated in-place for the REST path: blank-vertex keys are
            updated and blank edges are extended after the vertex round-trip.
            The bulk path does not support blank vertices or ``extra_weights``.
        """
        if bulk_session_id:
            # Bulk path: validate, then append on a worker thread; no
            # blank-node resolution or weight enrichment happens here.
            self._validate_bulk_resource(resource_name)
            if self.dry:
                logger.debug(
                    "Dry run: would append batch to bulk session %s",
                    bulk_session_id,
                )
                return

            def _append() -> None:
                with ConnectionManager(connection_config=conn_conf) as db:
                    db.bulk_load_append(bulk_session_id, gc, self.schema)

            await asyncio.to_thread(_append)
            return

        resource = self.ingestion_model.fetch_resource(resource_name)

        # REST path: order matters — vertices first (resolves blank-node
        # keys), then blank-edge expansion, weight enrichment, finally edges.
        await self._push_vertices(gc, conn_conf)
        self._resolve_blank_edges(gc, conn_conf)
        await self._enrich_extra_weights(gc, conn_conf, resource)
        await self._push_edges(gc, conn_conf)

    def _validate_bulk_resource(self, resource_name: str | None) -> None:
        """Reject resources whose extra_weights cannot go through bulk ingest."""
        if resource_name is None:
            return
        if self.ingestion_model.fetch_resource(resource_name).extra_weights:
            raise ValueError(
                "Native bulk ingest does not support resources with extra_weights "
                "(those require DB round-trips). Use REST ingest or disable extra_weights."
            )

    # ------------------------------------------------------------------
    # Vertices
    # ------------------------------------------------------------------

    async def _push_vertices(self, gc: GraphContainer, conn_conf: DBConfig) -> None:
        """Upsert all vertex collections in *gc*, resolving blank nodes.

        Blank-vertex ids are assigned in-place (mutating the docs inside
        ``gc.vertices``) before the upsert, so the worker tasks need no
        return value. The previous implementation always returned
        ``(vcol, None)`` from each worker and then looped over the results
        with ``if result is not None`` — dead code, removed here.
        """
        vc = self._db_aware_for(conn_conf).vertex_config
        # Bound the number of simultaneous DB connections.
        semaphore = asyncio.Semaphore(self.max_concurrent)

        async def _push_one(vcol: str, data: list[dict]) -> None:
            async with semaphore:

                def _sync() -> None:
                    with ConnectionManager(connection_config=conn_conf) as db:
                        if vcol in vc.blank_vertices:
                            # Mutates `data` in place before persistence.
                            self._assign_blank_vertex_ids(
                                vcol=vcol, data=data, conn_conf=conn_conf
                            )
                        db.upsert_docs_batch(
                            data,
                            vc.vertex_dbname(vcol),
                            vc.identity_fields(vcol),
                            update_keys="doc",
                            filter_uniques=True,
                            dry=self.dry,
                        )

                await asyncio.to_thread(_sync)

        # One task per vertex collection; blocking DB work runs in threads.
        await asyncio.gather(
            *(_push_one(vcol, data) for vcol, data in gc.vertices.items())
        )

    def _assign_blank_vertex_ids(
        self, vcol: str, data: list[dict], conn_conf: DBConfig
    ) -> None:
        """Fill in missing identity values on blank vertices with fresh UUIDs.

        Mutates *data* in place: every doc whose preferred identity field is
        missing or empty receives a new UUID, which is also mirrored into the
        DB default key field ("_key" for ArangoDB, "id" otherwise) when that
        field is absent from the doc.
        """
        vertex_cfg = self._db_aware_for(conn_conf).vertex_config
        id_fields = vertex_cfg.identity_fields(vcol)
        db_key_field = (
            "_key" if conn_conf.connection_type == DBType.ARANGO else "id"
        )
        id_field = id_fields[0] if id_fields else db_key_field

        for doc in data:
            existing = doc.get(id_field)
            if existing is None or existing == "":
                new_id = str(uuid4())
                doc[id_field] = new_id
                if db_key_field != id_field and db_key_field not in doc:
                    doc[db_key_field] = new_id

    # ------------------------------------------------------------------
    # Blank-edge resolution
    # ------------------------------------------------------------------

    def _resolve_blank_edges(self, gc: GraphContainer, conn_conf: DBConfig) -> None:
        """Extend edge lists for blank vertices after their keys are resolved.

        For every schema edge touching a blank-vertex collection, pairs the
        source and target docs already present in *gc* and appends
        ``(source_doc, target_doc, {})`` triples to ``gc.edges`` in place.
        """
        vc = self._db_aware_for(conn_conf).vertex_config
        for vcol in vc.blank_vertices:
            for edge_id, _edge in self.schema.core_schema.edge_config.items():
                vfrom, vto, _relation = edge_id
                if vcol == vfrom or vcol == vto:
                    # Both endpoints must have docs in this batch to form edges.
                    if vfrom not in gc.vertices or vto not in gc.vertices:
                        continue
                    if edge_id not in gc.edges:
                        gc.edges[edge_id] = []
                    source_docs = gc.vertices[vfrom]
                    target_docs = gc.vertices[vto]
                    source_id_fields = vc.identity_fields(vfrom)
                    target_id_fields = vc.identity_fields(vto)
                    # Identity fields common to both endpoints allow joining
                    # docs by value rather than by position.
                    shared_fields = [
                        f for f in source_id_fields if f in target_id_fields
                    ]

                    if shared_fields:
                        # Value join: docs with any None key component are skipped.
                        target_by_key: dict[tuple, list[dict]] = {}
                        for target_doc in target_docs:
                            key = tuple(target_doc.get(f) for f in shared_fields)
                            if any(item is None for item in key):
                                continue
                            target_by_key.setdefault(key, []).append(target_doc)
                        for source_doc in source_docs:
                            key = tuple(source_doc.get(f) for f in shared_fields)
                            if any(item is None for item in key):
                                continue
                            for target_doc in target_by_key.get(key, []):
                                gc.edges[edge_id].append((source_doc, target_doc, {}))
                    else:
                        # Positional pairing fallback.
                        # NOTE(review): zip truncates to the shorter list —
                        # presumably the two lists are parallel here; confirm.
                        gc.edges[edge_id].extend(
                            (x, y, {}) for x, y in zip(source_docs, target_docs)
                        )

    # ------------------------------------------------------------------
    # Extra weights
    # ------------------------------------------------------------------

    async def _enrich_extra_weights(
        self, gc: GraphContainer, conn_conf: DBConfig, resource
    ) -> None:
        """Fetch extra-weight vertex data from the DB and attach to edges.

        For each ``extra_weights`` entry on *resource*, fetches the referenced
        vertex docs from the database and copies the requested properties onto
        the matching edges, mutating the edge dicts in ``gc.linear`` in place.
        Skips DB reads under ``dry``.
        """
        vc = self._db_aware_for(conn_conf).vertex_config

        def _sync():
            # One connection reused for all weight lookups in this batch;
            # runs on a worker thread (see to_thread below).
            with ConnectionManager(connection_config=conn_conf) as db:
                for entry in resource.extra_weights:
                    edge = entry.edge
                    if not entry.vertex_weights:
                        continue
                    for weight in entry.vertex_weights:
                        if weight.name not in vc.vertex_set:
                            logger.error(f"{weight.name} not a valid vertex")
                            continue
                        index_fields = vc.identity_fields(weight.name)
                        if self.dry or weight.name not in gc.vertices:
                            continue
                        weights_per_item = db.fetch_present_documents(
                            class_name=vc.vertex_dbname(weight.name),
                            batch=gc.vertices[weight.name],
                            match_keys=index_fields,
                            keep_keys=weight.properties,
                        )
                        # NOTE(review): assumes weights_per_item aligns 1:1
                        # with gc.linear and that each entry has at least one
                        # match (weights[0]) — confirm against
                        # fetch_present_documents' contract.
                        for j, item in enumerate(gc.linear):
                            weights = weights_per_item[j]
                            for ee in item[edge.edge_id]:
                                ee.update(
                                    {weight.cfield(k): v for k, v in weights[0].items()}
                                )

        await asyncio.to_thread(_sync)

    # ------------------------------------------------------------------
    # Edges
    # ------------------------------------------------------------------

    async def _push_edges(self, gc: GraphContainer, conn_conf: DBConfig) -> None:
        """Insert all edges in *gc*.

        Each key in ``gc.edges`` is a concrete ``(source, target, relation)``
        triple produced by the extraction pipeline.  We look up the matching
        schema :class:`Edge` for each key (trying an exact match first, then a
        ``relation=None`` schema entry for dynamic-relation edges) and fire one
        async task per key — one DB write per concrete relation, no inner loop.
        """
        schema_db = self._db_aware_for(conn_conf)
        vc = schema_db.vertex_config
        ec = schema_db.edge_config
        core_ec = self.schema.core_schema.edge_config
        semaphore = asyncio.Semaphore(self.max_concurrent)

        def _schema_edge_for(edge_id: tuple) -> Edge | None:
            """Return the schema Edge for a gc edge key, or None if not declared."""
            if edge_id in core_ec:
                return core_ec.edge_for(edge_id)
            # Dynamic-relation edges: schema declares (source, target, None).
            null_id = (edge_id[0], edge_id[1], None)
            if null_id in core_ec:
                return core_ec.edge_for(null_id)
            return None

        async def _push_one(edge_id: tuple, docs: list) -> None:
            """Write one edge key's docs to the DB; no-op for undeclared keys."""
            edge = _schema_edge_for(edge_id)
            if edge is None:
                return
            # Semaphore bounds the number of concurrent DB writes.
            async with semaphore:

                def _sync() -> None:
                    # Blocking DB work; executed via asyncio.to_thread below.
                    _, _, relation = edge_id
                    with ConnectionManager(connection_config=conn_conf) as db:
                        runtime = ec.runtime(edge)
                        # Declared relationship merge-property names, if any.
                        merge_props: tuple[str, ...] | None = None
                        mp = ec.relationship_merge_property_names(edge)
                        if mp:
                            merge_props = tuple(mp)
                        if not self.dry:
                            # Project logical docs into the DB-specific relation
                            # representation (TigerGraph stores the extracted
                            # relation as an edge attribute).
                            data, relation_name = self._project_edge_docs_for_db(
                                docs=docs,
                                relation=relation,
                                runtime=runtime,
                                conn_type=conn_conf.connection_type,
                            )
                            edge_kw: dict = {
                                "filter_uniques": False,
                                "dry": self.dry,
                                "collection_name": runtime.storage_name(),
                            }
                            # Flavor-specific merge/upsert knobs.
                            if conn_conf.connection_type in (
                                DBType.NEO4J,
                                DBType.FALKORDB,
                                DBType.MEMGRAPH,
                            ):
                                if merge_props is not None:
                                    edge_kw["relationship_merge_properties"] = (
                                        merge_props
                                    )
                            elif conn_conf.connection_type == DBType.ARANGO:
                                if self.ingestion_model.edges_on_duplicate == "upsert":
                                    edge_kw["on_duplicate"] = "upsert"
                                    if merge_props is not None:
                                        edge_kw["uniq_weight_fields"] = list(
                                            merge_props
                                        )
                            db.insert_edges_batch(
                                docs_edges=data,
                                source_class=vc.vertex_dbname(edge.source),
                                target_class=vc.vertex_dbname(edge.target),
                                relation_name=relation_name,
                                match_keys_source=tuple(
                                    vc.identity_fields(edge.source)
                                ),
                                match_keys_target=tuple(
                                    vc.identity_fields(edge.target)
                                ),
                                **edge_kw,
                            )

                await asyncio.to_thread(_sync)

        await asyncio.gather(
            *[_push_one(edge_id, docs) for edge_id, docs in gc.edges.items()]
        )

    def _db_aware_for(self, conn_conf: DBConfig) -> SchemaDBAware:
        """Return a cached :class:`SchemaDBAware` for *conn_conf*'s DB flavor."""
        flavor = conn_conf.connection_type
        if self._schema_db_aware is None or self._schema_db_aware_flavor != flavor:
            self._schema_db_aware = self.schema.resolve_db_aware(flavor)
            self._schema_db_aware_flavor = flavor
        return self._schema_db_aware

    def _project_edge_docs_for_db(
        self,
        *,
        docs: list,
        relation: str | None,
        runtime: EdgeRuntime,
        conn_type: DBType,
    ) -> tuple[list, str | None]:
        """Map logical edge docs into the DB-specific relation representation.

        Only TigerGraph needs a projection: it keeps the edge type stable and
        stores a dynamic extracted relation as an edge attribute.  All other
        flavors pass *docs* and *relation* through unchanged.
        """
        if conn_type != DBType.TIGERGRAPH:
            return docs, relation

        stable_name = runtime.relation_name
        attr_field = runtime.effective_relation_field
        if attr_field is None or not runtime.store_extracted_relation_as_weight:
            return docs, stable_name

        def _with_relation(weight: dict) -> dict:
            # Copy the weight dict and tag it with the extracted relation.
            enriched = dict(weight)
            if relation is not None:
                enriched[attr_field] = relation
            return enriched

        projected = [
            (source_doc, target_doc, _with_relation(weight))
            for source_doc, target_doc, weight in docs
        ]
        return projected, stable_name

write(gc, conn_conf, resource_name, *, bulk_session_id=None) async

Push gc to the database (vertices, extra weights, then edges).

When bulk_session_id is provided, appends rows using the connection's native bulk interface instead of using per-record writes.

.. note:: gc is mutated in-place for the REST path: blank-vertex keys are updated and blank edges are extended after the vertex round-trip. The bulk path does not support blank vertices or extra_weights.

Source code in graflo/hq/db_writer.py
async def write(
    self,
    gc: GraphContainer,
    conn_conf: DBConfig,
    resource_name: str | None,
    *,
    bulk_session_id: str | None = None,
) -> None:
    """Push *gc* to the database (vertices, extra weights, then edges).

    When *bulk_session_id* is provided, appends rows using the connection's
    native bulk interface instead of using per-record writes.

    .. note::
        *gc* is mutated in-place for the REST path: blank-vertex keys are
        updated and blank edges are extended after the vertex round-trip.
        The bulk path does not support blank vertices or ``extra_weights``.
    """
    if bulk_session_id:
        # Bulk path: validate the resource, then append rows to the open
        # bulk-load session and return without per-record writes.
        self._validate_bulk_resource(resource_name)
        if self.dry:
            logger.debug(
                "Dry run: would append batch to bulk session %s",
                bulk_session_id,
            )
            return

        def _append() -> None:
            # Blocking bulk append; executed via asyncio.to_thread below.
            with ConnectionManager(connection_config=conn_conf) as db:
                db.bulk_load_append(bulk_session_id, gc, self.schema)

        await asyncio.to_thread(_append)
        return

    resource = self.ingestion_model.fetch_resource(resource_name)

    # REST path: vertices first so blank-vertex keys resolve, then edges.
    await self._push_vertices(gc, conn_conf)
    self._resolve_blank_edges(gc, conn_conf)
    await self._enrich_extra_weights(gc, conn_conf, resource)
    await self._push_edges(gc, conn_conf)

DocCastFailure

Bases: BaseModel

Structured record for one source document that failed during resource casting.

Source code in graflo/hq/ingestion_parameters.py
class DocCastFailure(BaseModel):
    """Structured record for one source document that failed during resource casting."""

    # Name of the resource whose cast raised.
    resource_name: str
    # Index of the failing document within its resource's input sequence.
    doc_index: int
    # Exception class name (e.g. "ValueError") and its message.
    exception_type: str
    message: str
    traceback: str = Field(
        default="",
        description="Formatted traceback, truncated to the configured max length.",
    )
    doc_preview: Any = Field(
        default=None,
        description="Subset or truncated JSON of the source document for debugging.",
    )

DocErrorBudgetExceeded

Bases: RuntimeError

Raised when total document cast failures exceed IngestionParams.max_doc_errors.

Source code in graflo/hq/ingestion_parameters.py
class DocErrorBudgetExceeded(RuntimeError):
    """Raised when total document cast failures exceed ``IngestionParams.max_doc_errors``."""

    def __init__(
        self,
        *,
        total_failures: int,
        limit: int,
        doc_error_sink_path: Path | None,
    ) -> None:
        # Keep the raw numbers and sink path on the instance so callers can
        # report programmatically instead of parsing the message.
        self.total_failures = total_failures
        self.limit = limit
        self.doc_error_sink_path = doc_error_sink_path
        sink_display = (
            str(doc_error_sink_path)
            if doc_error_sink_path
            else "(not configured)"
        )
        super().__init__(
            f"Document error budget exceeded: {total_failures} total failures "
            f"(limit {limit}). Doc error sink (jsonl.gz): {sink_display}"
        )

DocErrorSink

Bases: Protocol

Append structured cast failures (e.g. JSONL or compressed JSONL).

Source code in graflo/hq/doc_error_sink.py
@runtime_checkable
class DocErrorSink(Protocol):
    """Append structured cast failures (e.g. JSONL or compressed JSONL)."""

    # Protocol method: concrete sinks implement the actual persistence.
    async def write_failures(self, failures: list[DocCastFailure]) -> None:
        """Persist *failures*; must be safe to call under a single async lock."""

write_failures(failures) async

Persist failures; must be safe to call under a single async lock.

Source code in graflo/hq/doc_error_sink.py
async def write_failures(self, failures: list[DocCastFailure]) -> None:
    """Persist *failures*; must be safe to call under a single async lock.

    Implementations append the structured failure records to their backing
    store (e.g. a JSONL or compressed JSONL file).
    """

EmptyConnectionProvider

No-op provider when no source credentials/config are configured.

Source code in graflo/hq/connection_provider.py
class EmptyConnectionProvider:
    """No-op provider when no source credentials/config are configured.

    Every lookup answers ``None``, signalling "no credentials available"
    without raising.
    """

    def get_generalized_conn_config(
        self, connector: ResourceConnector
    ) -> GeneralizedConnConfig | None:
        """Always ``None``: no generalized connection config is registered."""
        return None

    def get_postgres_config(
        self, resource_name: str, connector: TableConnector
    ) -> PostgresConfig | None:
        """Always ``None``: no Postgres credentials are registered."""
        return None

    def get_sparql_auth(
        self, resource_name: str, connector: SparqlConnector
    ) -> SparqlAuth | None:
        """Always ``None``: no SPARQL auth is registered."""
        return None

    def get_generalized_config_by_proxy(
        self, conn_proxy: str
    ) -> GeneralizedConnConfig | None:
        """Always ``None``: no proxy-keyed config is registered."""
        return None

GraphEngine

Orchestrator for graph database operations.

GraphEngine coordinates schema inference, connector creation, schema definition, and data ingestion, providing a unified interface for working with graph databases.

The typical workflow is: 1. infer_schema() - Infer schema from source database (if possible) 2. create_bindings() - Create bindings mapping resources to data sources (if possible) 3. define_schema() - Define schema in target database (if possible and necessary) 4. ingest() - Ingest data into the target database

Attributes:

Name Type Description
target_db_flavor

Target database flavor for schema sanitization

resource_mapper

ResourceMapper instance for connector creation

Source code in graflo/hq/graph_engine.py
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
class GraphEngine:
    """Orchestrator for graph database operations.

    GraphEngine coordinates schema inference, connector creation, schema definition,
    and data ingestion, providing a unified interface for working with graph databases.

    The typical workflow is:
    1. infer_schema() - Infer schema from source database (if possible)
    2. create_bindings() - Create bindings mapping resources to data sources (if possible)
    3. define_schema() - Define schema in target database (if possible and necessary)
    4. ingest() - Ingest data into the target database

    Attributes:
        target_db_flavor: Target database flavor for schema sanitization
        resource_mapper: ResourceMapper instance for connector creation
    """

    def __init__(
        self,
        target_db_flavor: DBType = DBType.ARANGO,
    ):
        """Initialize the GraphEngine.

        Args:
            target_db_flavor: Target database flavor for schema sanitization
        """
        self.target_db_flavor = target_db_flavor
        self.resource_mapper = ResourceMapper()
        # No-op provider until infer_manifest()/create_bindings() installs a real one.
        self.connection_provider: ConnectionProvider = EmptyConnectionProvider()

    def introspect(
        self,
        postgres_config: PostgresConfig,
        schema_name: str | None = None,
        include_raw_tables: bool = True,
    ) -> SchemaIntrospectionResult:
        """Introspect PostgreSQL schema and return a serializable result.

        Args:
            postgres_config: PostgresConfig instance
            schema_name: Schema name to introspect (defaults to config schema_name or 'public')
            include_raw_tables: Whether to include ``raw_tables`` in the
                introspection result (passed through to the inferencer).

        Returns:
            SchemaIntrospectionResult: Introspection result (vertex_tables, edge_tables,
                raw_tables, schema_name) suitable for serialization.
        """
        with PostgresConnection(postgres_config) as postgres_conn:
            inferencer = SQLInferenceManager(
                conn=postgres_conn,
                target_db_flavor=self.target_db_flavor,
            )
            return inferencer.introspect(
                schema_name=schema_name,
                include_raw_tables=include_raw_tables,
            )

    def infer_manifest(
        self,
        postgres_config: PostgresConfig,
        schema_name: str | None = None,
        fuzzy_threshold: float = 0.8,
        discard_disconnected_vertices: bool = False,
    ) -> GraphManifest:
        """Infer a GraphManifest from PostgreSQL database.

        Args:
            postgres_config: PostgresConfig instance
            schema_name: Schema name to introspect (defaults to config schema_name or 'public')
            fuzzy_threshold: Similarity threshold for fuzzy matching (0.0 to 1.0, default 0.8)
            discard_disconnected_vertices: If True, remove vertices that do not take part in
                any relation (and resources/actors that reference them). Default False.

        Returns:
            GraphManifest: Inferred manifest with schema, ingestion model, and bindings.
        """
        with PostgresConnection(postgres_config) as postgres_conn:
            inferencer = SQLInferenceManager(
                conn=postgres_conn,
                target_db_flavor=self.target_db_flavor,
                fuzzy_threshold=fuzzy_threshold,
            )
            artifacts = inferencer.infer_artifacts(schema_name=schema_name)
            schema, ingestion_model = artifacts.schema, artifacts.ingestion_model
            bindings, provider = (
                self.resource_mapper.create_bindings_with_provider_from_introspection(
                    introspection_result=artifacts.introspection_result,
                    conn=postgres_conn,
                    schema_name=schema_name,
                )
            )
            self.connection_provider = provider
        if discard_disconnected_vertices:
            # 1) Drop disconnected vertices from the schema and prune the
            #    ingestion model accordingly.
            disconnected = schema.remove_disconnected_vertices()
            ingestion_model.prune_to_graph(
                schema.core_schema, disconnected=disconnected
            )
            connected_resources = {
                resource.name for resource in ingestion_model.resources
            }
            connectors = list(bindings.connectors)
            resource_connector = list(bindings.resource_connector)
            connector_connection = list(bindings.connector_connection)

            # 2) Connectors may be referenced by hash or by name; collect both
            #    forms so mapping references can be validated against either.
            connector_refs_all = set()
            for connector in connectors:
                connector_refs_all.add(connector.hash)
                if connector.name:
                    connector_refs_all.add(connector.name)
            # 3) Keep only resource->connector mappings whose resource survived
            #    pruning and whose connector reference actually exists.
            filtered_resource_connector = []
            mapped_connector_refs = set()
            for mapping in resource_connector:
                if isinstance(mapping, dict):
                    resource_name = mapping.get("resource")
                    connector_ref = mapping.get("connector")
                else:
                    resource_name = mapping.resource
                    connector_ref = mapping.connector
                if (
                    resource_name in connected_resources
                    and isinstance(connector_ref, str)
                    and connector_ref in connector_refs_all
                ):
                    filtered_resource_connector.append(mapping)
                    mapped_connector_refs.add(connector_ref)
            # 4) A connector survives if its own resource survived or a kept
            #    mapping still points at it (by hash or name).
            filtered_connectors = [
                connector
                for connector in connectors
                if connector.resource_name in connected_resources
                or connector.hash in mapped_connector_refs
                or (
                    connector.name is not None
                    and connector.name in mapped_connector_refs
                )
            ]
            valid_connector_refs = set()
            for connector in filtered_connectors:
                valid_connector_refs.add(connector.hash)
                if connector.name:
                    valid_connector_refs.add(connector.name)
            # 5) Drop connector->connection mappings that point at removed
            #    connectors.
            filtered_connector_connection = []
            for mapping in connector_connection:
                if isinstance(mapping, dict):
                    connector_ref = mapping.get("connector")
                else:
                    connector_ref = mapping.connector
                if connector_ref in valid_connector_refs:
                    filtered_connector_connection.append(mapping)
            # 6) Rebuild Bindings from the filtered pieces.
            bindings_dict = bindings.to_dict(skip_defaults=False)
            bindings_dict["connectors"] = filtered_connectors
            bindings_dict["resource_connector"] = filtered_resource_connector
            bindings_dict["connector_connection"] = filtered_connector_connection
            bindings = Bindings.from_dict(bindings_dict)
        manifest = GraphManifest(
            graph_schema=schema, ingestion_model=ingestion_model, bindings=bindings
        )
        # Apply DB-flavor-specific sanitization a posteriori (reserved words,
        # TigerGraph identity normalization, etc.). Sanitizer is the single
        # entry point that maps `target_db_flavor` to the corresponding
        # evolution ops.
        Sanitizer(self.target_db_flavor).sanitize_manifest(manifest)
        return manifest

    def create_bindings(
        self,
        postgres_config: PostgresConfig,
        schema_name: str | None = None,
        datetime_columns: dict[str, str] | None = None,
        type_lookup_overrides: dict[str, dict] | None = None,
        include_raw_tables: bool = False,
    ) -> Bindings:
        """Create Bindings from PostgreSQL tables.

        Args:
            postgres_config: PostgresConfig instance
            schema_name: Schema name to introspect
            datetime_columns: Optional mapping of resource/table name to datetime
                column name for date-range filtering (sets date_field per
                TableConnector). Use with IngestionParams.datetime_after /
                datetime_before.
            type_lookup_overrides: Optional mapping of table name to type_lookup
                spec for edge tables where source/target types come from a
                lookup table. Each value: {table, identity, type_column,
                source, target, relation?}.
            include_raw_tables: Whether to include raw tables when creating
                connectors (passed through to the resource mapper).

        Returns:
            Bindings: Bindings object with TableConnector instances for all tables
        """
        with PostgresConnection(postgres_config) as postgres_conn:
            bindings, provider = (
                self.resource_mapper.create_bindings_with_provider_from_postgres(
                    conn=postgres_conn,
                    schema_name=schema_name,
                    datetime_columns=datetime_columns,
                    type_lookup_overrides=type_lookup_overrides,
                    include_raw_tables=include_raw_tables,
                )
            )
        # Remember the provider so ingest() can resolve source credentials.
        self.connection_provider = provider
        return bindings

    def define_schema(
        self,
        manifest: GraphManifest,
        target_db_config: DBConfig,
        recreate_schema: bool = False,
        graph_target_namespace: str | None = None,
    ) -> None:
        """Define schema in the target database.

        This method handles database/schema creation and initialization.
        Some databases don't require explicit schema definition (e.g., Neo4j),
        but this method ensures the database is properly initialized.

        If the schema/graph already exists and recreate_schema is False (default),
        init_db raises SchemaExistsError and the script halts.

        Args:
            manifest: GraphManifest with schema block.
            target_db_config: Target database connection configuration
            recreate_schema: If True, drop existing schema and define new one.
                If False and schema/graph already exists, raises SchemaExistsError.
            graph_target_namespace: Optional target graph/database/space name (e.g. temp
                schema). Overrides ``schema.db_profile.target_namespace`` and defaults
                ahead of ``schema.metadata.name`` when the config omits the namespace.
        """
        schema = manifest.require_schema()

        _ensure_graph_target_namespace(schema, target_db_config, graph_target_namespace)

        # Ensure schema reflects target DB so finish_init applies DB-specific defaults.
        schema.db_profile.db_flavor = target_db_config.connection_type
        schema.finish_init()

        # Initialize database with schema definition
        # init_db() handles database/schema creation automatically
        # It checks if the database exists and creates it if needed
        with ConnectionManager(connection_config=target_db_config) as db_client:
            db_client.init_db(schema, recreate_schema)

    def define_and_ingest(
        self,
        manifest: GraphManifest,
        target_db_config: DBConfig,
        ingestion_params: IngestionParams | None = None,
        connection_provider: ConnectionProvider | None = None,
        recreate_schema: bool | None = None,
        clear_data: bool | None = None,
        graph_target_namespace: str | None = None,
    ) -> None:
        """Define schema and ingest data into the graph database in one operation.

        This is a convenience method that chains define_schema() and ingest().
        It's the recommended way to set up and populate a graph database.

        Args:
            manifest: GraphManifest with schema/ingestion/bindings blocks.
            target_db_config: Target database connection configuration
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, uses default IngestionParams()
            connection_provider: Optional ConnectionProvider for source credentials;
                falls back to the engine's stored provider inside ingest().
            recreate_schema: If True, drop existing schema and define new one.
                If None, defaults to False. When False and schema already exists,
                define_schema raises SchemaExistsError and the script halts.
            clear_data: If True, remove existing data before ingestion (schema unchanged).
                If None, uses ingestion_params.clear_data.
            graph_target_namespace: Optional target graph/database/space name; passed
                to both ``define_schema`` and ``ingest`` for consistent resolution.
        """
        ingestion_params = ingestion_params or IngestionParams()
        if clear_data is None:
            clear_data = ingestion_params.clear_data
        if recreate_schema is None:
            recreate_schema = False

        # Define schema first (halts with SchemaExistsError if schema exists and recreate_schema is False)
        self.define_schema(
            manifest=manifest,
            target_db_config=target_db_config,
            recreate_schema=recreate_schema,
            graph_target_namespace=graph_target_namespace,
        )

        # Then ingest data (clear_data is applied inside ingest() when ingestion_params.clear_data)
        ingestion_params = ingestion_params.model_copy(
            update={"clear_data": clear_data}
        )
        self.ingest(
            manifest=manifest,
            target_db_config=target_db_config,
            ingestion_params=ingestion_params,
            connection_provider=connection_provider,
            graph_target_namespace=graph_target_namespace,
        )

    def ingest(
        self,
        manifest: GraphManifest,
        target_db_config: DBConfig,
        ingestion_params: IngestionParams | None = None,
        connection_provider: ConnectionProvider | None = None,
        graph_target_namespace: str | None = None,
    ) -> None:
        """Ingest data into the graph database.

        If ingestion_params.clear_data is True, removes all existing data
        (without touching the schema) before ingestion.

        Args:
            manifest: GraphManifest with schema/ingestion/bindings blocks.
            target_db_config: Target database connection configuration
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, uses default IngestionParams()
            connection_provider: Optional ConnectionProvider; defaults to the
                engine's stored provider when omitted.
            graph_target_namespace: Same semantics as ``define_schema``; use when
                calling ``ingest`` without a prior ``define_schema`` on this config.
        """
        schema = manifest.require_schema()
        ingestion_model = manifest.require_ingestion_model()
        bindings = manifest.bindings

        _ensure_graph_target_namespace(schema, target_db_config, graph_target_namespace)

        ingestion_params = ingestion_params or IngestionParams()
        if ingestion_params.clear_data:
            with ConnectionManager(connection_config=target_db_config) as db_client:
                clear_result = db_client.clear_data(schema)
                # Guard against async clear_data implementations: ingestion must
                # only start after data clearing has fully completed.
                if inspect.isawaitable(clear_result):
                    raise TypeError(
                        "clear_data must be synchronous so ingestion only starts "
                        "after data clearing has completed."
                    )

        caster = Caster(
            schema=schema,
            ingestion_model=ingestion_model,
            ingestion_params=ingestion_params,
        )
        caster.ingest(
            target_db_config=target_db_config,
            bindings=bindings or Bindings(),
            ingestion_params=ingestion_params,
            connection_provider=connection_provider or self.connection_provider,
        )

    # ------------------------------------------------------------------
    # RDF / SPARQL inference
    # ------------------------------------------------------------------

    def infer_schema_from_rdf(
        self,
        source: str | Path,
        *,
        endpoint_url: str | None = None,
        graph_uri: str | None = None,
        schema_name: str | None = None,
    ) -> tuple[Schema, IngestionModel]:
        """Infer a graflo Schema from an RDF / OWL ontology.

        Reads the TBox (class and property declarations) and produces
        vertices (from ``owl:Class``), fields (from ``owl:DatatypeProperty``),
        and edges (from ``owl:ObjectProperty`` with domain/range).

        Args:
            source: Path to an RDF file (e.g. ``ontology.ttl``) or a base
                URL when using *endpoint_url*.
            endpoint_url: Optional SPARQL endpoint to CONSTRUCT the
                ontology from.
            graph_uri: Named graph containing the ontology.
            schema_name: Name for the resulting schema.

        Returns:
            tuple[Schema, IngestionModel]: fully initialised schema and ingestion model.
        """
        # Imported lazily so RDF dependencies are only needed when used.
        from graflo.hq.rdf_inferencer import RdfInferenceManager

        mgr = RdfInferenceManager(target_db_flavor=self.target_db_flavor)
        return mgr.infer_schema(
            source,
            endpoint_url=endpoint_url,
            graph_uri=graph_uri,
            schema_name=schema_name,
        )

    def create_bindings_from_rdf(
        self,
        source: str | Path,
        *,
        endpoint_url: str | None = None,
        graph_uri: str | None = None,
        sparql_config: SparqlEndpointConfig | None = None,
    ) -> Bindings:
        """Create :class:`Bindings` from an RDF ontology.

        One :class:`SparqlConnector` is created per ``owl:Class`` found in the
        ontology.

        Args:
            source: Path to an RDF file or base URL.
            endpoint_url: SPARQL endpoint for the *data* (ABox).
            graph_uri: Named graph containing the data.
            sparql_config: Optional :class:`SparqlEndpointConfig` to attach
                to the resulting connectors for authentication.

        Returns:
            Bindings with SPARQL connectors for each class.
        """
        # Imported lazily so RDF dependencies are only needed when used.
        from graflo.hq.rdf_inferencer import RdfInferenceManager

        mgr = RdfInferenceManager(target_db_flavor=self.target_db_flavor)
        bindings = mgr.create_bindings(
            source,
            endpoint_url=endpoint_url,
            graph_uri=graph_uri,
        )

        if sparql_config:
            # Register one shared credential proxy for all SPARQL connectors.
            conn_proxy = "sparql_source"
            provider = InMemoryConnectionProvider()
            provider.register_generalized_config(
                conn_proxy=conn_proxy,
                config=SparqlGeneralizedConnConfig(config=sparql_config),
            )
            provider.default_sparql = sparql_config

            # Wire all SPARQL connectors to the same credential proxy.
            from graflo.architecture.contract.bindings import SparqlConnector

            for connector in bindings.connectors:
                if not isinstance(connector, SparqlConnector):
                    continue
                bindings.bind_connector_to_conn_proxy(connector, conn_proxy)
                provider.bind_connector_to_conn_proxy(
                    connector=connector, conn_proxy=conn_proxy
                )
        else:
            provider = EmptyConnectionProvider()
        self.connection_provider = provider
        return bindings

__init__(target_db_flavor=DBType.ARANGO)

Initialize the GraphEngine.

Parameters:

Name Type Description Default
target_db_flavor DBType

Target database flavor for schema sanitization

ARANGO
Source code in graflo/hq/graph_engine.py
def __init__(
    self,
    target_db_flavor: DBType = DBType.ARANGO,
):
    """Initialize the GraphEngine.

    Args:
        target_db_flavor: Target database flavor for schema sanitization
    """
    self.target_db_flavor = target_db_flavor
    self.resource_mapper = ResourceMapper()
    # No-op provider until bindings creation installs a real one.
    self.connection_provider: ConnectionProvider = EmptyConnectionProvider()

create_bindings(postgres_config, schema_name=None, datetime_columns=None, type_lookup_overrides=None, include_raw_tables=False)

Create Bindings from PostgreSQL tables.

Parameters:

Name Type Description Default
postgres_config PostgresConfig

PostgresConfig instance

required
schema_name str | None

Schema name to introspect

None
datetime_columns dict[str, str] | None

Optional mapping of resource/table name to datetime column name for date-range filtering (sets date_field per TableConnector). Use with IngestionParams.datetime_after / datetime_before.

None
type_lookup_overrides dict[str, dict] | None

Optional mapping of table name to type_lookup spec for edge tables where source/target types come from a lookup table. Each value: {table, identity, type_column, source, target, relation?}.

None

Returns:

Name Type Description
Bindings Bindings

Bindings object with TableConnector instances for all tables

Source code in graflo/hq/graph_engine.py
def create_bindings(
    self,
    postgres_config: PostgresConfig,
    schema_name: str | None = None,
    datetime_columns: dict[str, str] | None = None,
    type_lookup_overrides: dict[str, dict] | None = None,
    include_raw_tables: bool = False,
) -> Bindings:
    """Build Bindings by introspecting PostgreSQL tables.

    Args:
        postgres_config: Connection settings for the source PostgreSQL DB.
        schema_name: Schema to introspect.
        datetime_columns: Optional resource/table-name -> datetime-column
            mapping for date-range filtering (sets ``date_field`` on each
            TableConnector). Pairs with ``IngestionParams.datetime_after``
            / ``datetime_before``.
        type_lookup_overrides: Optional table-name -> type_lookup spec for
            edge tables whose source/target types come from a lookup
            table. Each spec: ``{table, identity, type_column, source,
            target, relation?}``.
        include_raw_tables: Whether raw tables are included as well.

    Returns:
        Bindings: TableConnector-backed bindings for all tables.
    """
    with PostgresConnection(postgres_config) as source_conn:
        mapping_result = (
            self.resource_mapper.create_bindings_with_provider_from_postgres(
                conn=source_conn,
                schema_name=schema_name,
                datetime_columns=datetime_columns,
                type_lookup_overrides=type_lookup_overrides,
                include_raw_tables=include_raw_tables,
            )
        )
    table_bindings, conn_provider = mapping_result
    # Remember the provider so a later ingest() can reach the source DB.
    self.connection_provider = conn_provider
    return table_bindings

create_bindings_from_rdf(source, *, endpoint_url=None, graph_uri=None, sparql_config=None)

Create :class:Bindings from an RDF ontology.

One :class:SparqlConnector is created per owl:Class found in the ontology.

Parameters:

Name Type Description Default
source str | Path

Path to an RDF file or base URL.

required
endpoint_url str | None

SPARQL endpoint for the data (ABox).

None
graph_uri str | None

Named graph containing the data.

None
sparql_config SparqlEndpointConfig | None

Optional :class:SparqlEndpointConfig to attach to the resulting connectors for authentication.

None

Returns:

Type Description
Bindings

Bindings with SPARQL connectors for each class.

Source code in graflo/hq/graph_engine.py
def create_bindings_from_rdf(
    self,
    source: str | Path,
    *,
    endpoint_url: str | None = None,
    graph_uri: str | None = None,
    sparql_config: SparqlEndpointConfig | None = None,
) -> Bindings:
    """Build :class:`Bindings` from an RDF ontology.

    A :class:`SparqlConnector` is produced for every ``owl:Class``
    declared in the ontology.

    Args:
        source: Path to an RDF file or base URL.
        endpoint_url: SPARQL endpoint serving the *data* (ABox).
        graph_uri: Named graph that holds the data.
        sparql_config: Optional :class:`SparqlEndpointConfig` attached to
            the resulting connectors for authentication.

    Returns:
        Bindings with SPARQL connectors for each class.
    """
    from graflo.hq.rdf_inferencer import RdfInferenceManager

    inference_manager = RdfInferenceManager(
        target_db_flavor=self.target_db_flavor
    )
    rdf_bindings = inference_manager.create_bindings(
        source,
        endpoint_url=endpoint_url,
        graph_uri=graph_uri,
    )

    # Without credentials there is nothing to wire up.
    if not sparql_config:
        self.connection_provider = EmptyConnectionProvider()
        return rdf_bindings

    from graflo.architecture.contract.bindings import SparqlConnector

    proxy_label = "sparql_source"
    sparql_provider = InMemoryConnectionProvider()
    sparql_provider.register_generalized_config(
        conn_proxy=proxy_label,
        config=SparqlGeneralizedConnConfig(config=sparql_config),
    )
    sparql_provider.default_sparql = sparql_config

    # Route every SPARQL connector through the same credential proxy.
    for conn in rdf_bindings.connectors:
        if isinstance(conn, SparqlConnector):
            rdf_bindings.bind_connector_to_conn_proxy(conn, proxy_label)
            sparql_provider.bind_connector_to_conn_proxy(
                connector=conn, conn_proxy=proxy_label
            )
    self.connection_provider = sparql_provider
    return rdf_bindings

define_and_ingest(manifest, target_db_config, ingestion_params=None, connection_provider=None, recreate_schema=None, clear_data=None, graph_target_namespace=None)

Define schema and ingest data into the graph database in one operation.

This is a convenience method that chains define_schema() and ingest(). It's the recommended way to set up and populate a graph database.

Parameters:

Name Type Description Default
manifest GraphManifest

GraphManifest with schema/ingestion/bindings blocks.

required
target_db_config DBConfig

Target database connection configuration

required
ingestion_params IngestionParams | None

IngestionParams instance with ingestion configuration. If None, uses default IngestionParams()

None
recreate_schema bool | None

If True, drop existing schema and define new one. If None, defaults to False. When False and schema already exists, define_schema raises SchemaExistsError and the script halts.

None
clear_data bool | None

If True, remove existing data before ingestion (schema unchanged). If None, uses ingestion_params.clear_data.

None
graph_target_namespace str | None

Optional target graph/database/space name; passed to both define_schema and ingest for consistent resolution.

None
Source code in graflo/hq/graph_engine.py
def define_and_ingest(
    self,
    manifest: GraphManifest,
    target_db_config: DBConfig,
    ingestion_params: IngestionParams | None = None,
    connection_provider: ConnectionProvider | None = None,
    recreate_schema: bool | None = None,
    clear_data: bool | None = None,
    graph_target_namespace: str | None = None,
) -> None:
    """Define the schema and ingest data in a single call.

    Convenience wrapper chaining ``define_schema()`` and ``ingest()``;
    the recommended way to set up and populate a graph database.

    Args:
        manifest: GraphManifest with schema/ingestion/bindings blocks.
        target_db_config: Target database connection configuration.
        ingestion_params: Ingestion configuration; defaults to
            ``IngestionParams()`` when None.
        connection_provider: Optional source credential provider forwarded
            to ``ingest``.
        recreate_schema: When True, drop the existing schema and define a
            new one. When None, defaults to False; with False and an
            existing schema, ``define_schema`` raises SchemaExistsError
            and the script halts.
        clear_data: When True, remove existing data before ingestion
            (schema unchanged). When None, ``ingestion_params.clear_data``
            is used.
        graph_target_namespace: Optional target graph/database/space name,
            passed to both ``define_schema`` and ``ingest`` so resolution
            stays consistent.
    """
    params = ingestion_params if ingestion_params is not None else IngestionParams()
    effective_clear = params.clear_data if clear_data is None else clear_data
    effective_recreate = False if recreate_schema is None else recreate_schema

    # Schema first: halts with SchemaExistsError if the schema exists and
    # recreation was not requested.
    self.define_schema(
        manifest=manifest,
        target_db_config=target_db_config,
        recreate_schema=effective_recreate,
        graph_target_namespace=graph_target_namespace,
    )

    # ingest() performs the data wipe itself when clear_data is set.
    self.ingest(
        manifest=manifest,
        target_db_config=target_db_config,
        ingestion_params=params.model_copy(update={"clear_data": effective_clear}),
        connection_provider=connection_provider,
        graph_target_namespace=graph_target_namespace,
    )

define_schema(manifest, target_db_config, recreate_schema=False, graph_target_namespace=None)

Define schema in the target database.

This method handles database/schema creation and initialization. Some databases don't require explicit schema definition (e.g., Neo4j), but this method ensures the database is properly initialized.

If the schema/graph already exists and recreate_schema is False (default), init_db raises SchemaExistsError and the script halts.

Parameters:

Name Type Description Default
manifest GraphManifest

GraphManifest with schema block.

required
target_db_config DBConfig

Target database connection configuration

required
recreate_schema bool

If True, drop existing schema and define new one. If False and schema/graph already exists, raises SchemaExistsError.

False
graph_target_namespace str | None

Optional target graph/database/space name (e.g. temp schema). Overrides schema.db_profile.target_namespace and defaults ahead of schema.metadata.name when the config omits the namespace.

None
Source code in graflo/hq/graph_engine.py
def define_schema(
    self,
    manifest: GraphManifest,
    target_db_config: DBConfig,
    recreate_schema: bool = False,
    graph_target_namespace: str | None = None,
) -> None:
    """Create and initialize the schema in the target database.

    Handles database/schema creation and initialization. Some databases
    need no explicit schema definition (e.g. Neo4j), but this still
    ensures the database is properly initialized.

    With ``recreate_schema=False`` (the default) and an already existing
    schema/graph, ``init_db`` raises SchemaExistsError and the script
    halts.

    Args:
        manifest: GraphManifest with a schema block.
        target_db_config: Target database connection configuration.
        recreate_schema: Drop the existing schema and define a new one
            when True; otherwise an existing schema/graph raises
            SchemaExistsError.
        graph_target_namespace: Optional target graph/database/space name
            (e.g. a temp schema). Overrides
            ``schema.db_profile.target_namespace`` and takes precedence
            over ``schema.metadata.name`` when the config omits the
            namespace.
    """
    graph_schema = manifest.require_schema()

    _ensure_graph_target_namespace(
        graph_schema, target_db_config, graph_target_namespace
    )

    # Align the schema with the target DB flavor so finish_init applies
    # DB-specific defaults.
    graph_schema.db_profile.db_flavor = target_db_config.connection_type
    graph_schema.finish_init()

    # init_db() checks whether the database exists and creates it if
    # needed, then initializes it from the schema definition.
    with ConnectionManager(connection_config=target_db_config) as client:
        client.init_db(graph_schema, recreate_schema)

infer_manifest(postgres_config, schema_name=None, fuzzy_threshold=0.8, discard_disconnected_vertices=False)

Infer a GraphManifest from PostgreSQL database.

Parameters:

Name Type Description Default
postgres_config PostgresConfig

PostgresConfig instance

required
schema_name str | None

Schema name to introspect (defaults to config schema_name or 'public')

None
fuzzy_threshold float

Similarity threshold for fuzzy matching (0.0 to 1.0, default 0.8)

0.8
discard_disconnected_vertices bool

If True, remove vertices that do not take part in any relation (and resources/actors that reference them). Default False.

False

Returns:

Name Type Description
GraphManifest GraphManifest

Inferred manifest with schema, ingestion model, and bindings.

Source code in graflo/hq/graph_engine.py
def infer_manifest(
    self,
    postgres_config: PostgresConfig,
    schema_name: str | None = None,
    fuzzy_threshold: float = 0.8,
    discard_disconnected_vertices: bool = False,
) -> GraphManifest:
    """Infer a GraphManifest from PostgreSQL database.

    Args:
        postgres_config: PostgresConfig instance
        schema_name: Schema name to introspect (defaults to config schema_name or 'public')
        fuzzy_threshold: Similarity threshold for fuzzy matching (0.0 to 1.0, default 0.8)
        discard_disconnected_vertices: If True, remove vertices that do not take part in
            any relation (and resources/actors that reference them). Default False.

    Returns:
        GraphManifest: Inferred manifest with schema, ingestion model, and bindings.
    """
    with PostgresConnection(postgres_config) as postgres_conn:
        inferencer = SQLInferenceManager(
            conn=postgres_conn,
            target_db_flavor=self.target_db_flavor,
            fuzzy_threshold=fuzzy_threshold,
        )
        artifacts = inferencer.infer_artifacts(schema_name=schema_name)
        schema, ingestion_model = artifacts.schema, artifacts.ingestion_model
        # Bindings + provider must be built while the connection is open.
        bindings, provider = (
            self.resource_mapper.create_bindings_with_provider_from_introspection(
                introspection_result=artifacts.introspection_result,
                conn=postgres_conn,
                schema_name=schema_name,
            )
        )
        self.connection_provider = provider
    if discard_disconnected_vertices:
        # Drop relation-less vertices from the schema, then prune the
        # ingestion model to the surviving graph.
        disconnected = schema.remove_disconnected_vertices()
        ingestion_model.prune_to_graph(
            schema.core_schema, disconnected=disconnected
        )
        connected_resources = {
            resource.name for resource in ingestion_model.resources
        }
        connectors = list(bindings.connectors)
        resource_connector = list(bindings.resource_connector)
        connector_connection = list(bindings.connector_connection)

        # Connectors may be referenced either by hash or (when set) by name.
        connector_refs_all = set()
        for connector in connectors:
            connector_refs_all.add(connector.hash)
            if connector.name:
                connector_refs_all.add(connector.name)
        # Keep only resource->connector rows whose resource survived the
        # pruning AND whose connector reference is resolvable.
        filtered_resource_connector = []
        mapped_connector_refs = set()
        for mapping in resource_connector:
            # Mappings may arrive as plain dicts or as model objects.
            if isinstance(mapping, dict):
                resource_name = mapping.get("resource")
                connector_ref = mapping.get("connector")
            else:
                resource_name = mapping.resource
                connector_ref = mapping.connector
            if (
                resource_name in connected_resources
                and isinstance(connector_ref, str)
                and connector_ref in connector_refs_all
            ):
                filtered_resource_connector.append(mapping)
                mapped_connector_refs.add(connector_ref)
        # A connector is kept when its own resource survived, or when a
        # kept mapping still references it (by hash or by name).
        filtered_connectors = [
            connector
            for connector in connectors
            if connector.resource_name in connected_resources
            or connector.hash in mapped_connector_refs
            or (
                connector.name is not None
                and connector.name in mapped_connector_refs
            )
        ]
        valid_connector_refs = set()
        for connector in filtered_connectors:
            valid_connector_refs.add(connector.hash)
            if connector.name:
                valid_connector_refs.add(connector.name)
        # Finally keep only connector->connection rows that point at a
        # surviving connector.
        filtered_connector_connection = []
        for mapping in connector_connection:
            if isinstance(mapping, dict):
                connector_ref = mapping.get("connector")
            else:
                connector_ref = mapping.connector
            if connector_ref in valid_connector_refs:
                filtered_connector_connection.append(mapping)
        # Rebuild Bindings from the filtered pieces via its dict round-trip.
        bindings_dict = bindings.to_dict(skip_defaults=False)
        bindings_dict["connectors"] = filtered_connectors
        bindings_dict["resource_connector"] = filtered_resource_connector
        bindings_dict["connector_connection"] = filtered_connector_connection
        bindings = Bindings.from_dict(bindings_dict)
    manifest = GraphManifest(
        graph_schema=schema, ingestion_model=ingestion_model, bindings=bindings
    )
    # Apply DB-flavor-specific sanitization a posteriori (reserved words,
    # TigerGraph identity normalization, etc.). Sanitizer is the single
    # entry point that maps `target_db_flavor` to the corresponding
    # evolution ops.
    Sanitizer(self.target_db_flavor).sanitize_manifest(manifest)
    return manifest

infer_schema_from_rdf(source, *, endpoint_url=None, graph_uri=None, schema_name=None)

Infer a graflo Schema from an RDF / OWL ontology.

Reads the TBox (class and property declarations) and produces vertices (from owl:Class), fields (from owl:DatatypeProperty), and edges (from owl:ObjectProperty with domain/range).

Parameters:

Name Type Description Default
source str | Path

Path to an RDF file (e.g. ontology.ttl) or a base URL when using endpoint_url.

required
endpoint_url str | None

Optional SPARQL endpoint to CONSTRUCT the ontology from.

None
graph_uri str | None

Named graph containing the ontology.

None
schema_name str | None

Name for the resulting schema.

None

Returns:

Type Description
tuple[Schema, IngestionModel]

tuple[Schema, IngestionModel]: fully initialised schema and ingestion model.

Source code in graflo/hq/graph_engine.py
def infer_schema_from_rdf(
    self,
    source: str | Path,
    *,
    endpoint_url: str | None = None,
    graph_uri: str | None = None,
    schema_name: str | None = None,
) -> tuple[Schema, IngestionModel]:
    """Infer a graflo Schema from an RDF / OWL ontology.

    The TBox (class and property declarations) is read to derive
    vertices (from ``owl:Class``), fields (from ``owl:DatatypeProperty``),
    and edges (from ``owl:ObjectProperty`` with domain/range).

    Args:
        source: Path to an RDF file (e.g. ``ontology.ttl``) or a base
            URL when *endpoint_url* is used.
        endpoint_url: Optional SPARQL endpoint to CONSTRUCT the
            ontology from.
        graph_uri: Named graph containing the ontology.
        schema_name: Name for the resulting schema.

    Returns:
        tuple[Schema, IngestionModel]: fully initialised schema and ingestion model.
    """
    from graflo.hq.rdf_inferencer import RdfInferenceManager

    inference_manager = RdfInferenceManager(
        target_db_flavor=self.target_db_flavor
    )
    return inference_manager.infer_schema(
        source,
        endpoint_url=endpoint_url,
        graph_uri=graph_uri,
        schema_name=schema_name,
    )

ingest(manifest, target_db_config, ingestion_params=None, connection_provider=None, graph_target_namespace=None)

Ingest data into the graph database.

If ingestion_params.clear_data is True, removes all existing data (without touching the schema) before ingestion.

Parameters:

Name Type Description Default
manifest GraphManifest

GraphManifest with schema/ingestion/bindings blocks.

required
target_db_config DBConfig

Target database connection configuration

required
ingestion_params IngestionParams | None

IngestionParams instance with ingestion configuration. If None, uses default IngestionParams()

None
graph_target_namespace str | None

Same semantics as define_schema; use when calling ingest without a prior define_schema on this config.

None
Source code in graflo/hq/graph_engine.py
def ingest(
    self,
    manifest: GraphManifest,
    target_db_config: DBConfig,
    ingestion_params: IngestionParams | None = None,
    connection_provider: ConnectionProvider | None = None,
    graph_target_namespace: str | None = None,
) -> None:
    """Ingest data into the graph database.

    When ``ingestion_params.clear_data`` is set, all existing data is
    removed first; the schema itself is left untouched.

    Args:
        manifest: GraphManifest with schema/ingestion/bindings blocks.
        target_db_config: Target database connection configuration.
        ingestion_params: Ingestion configuration; defaults to
            ``IngestionParams()`` when None.
        connection_provider: Optional source credential provider; falls
            back to ``self.connection_provider``.
        graph_target_namespace: Same semantics as ``define_schema``; use
            it when calling ``ingest`` without a prior ``define_schema``
            on this config.
    """
    graph_schema = manifest.require_schema()
    model = manifest.require_ingestion_model()
    contract_bindings = manifest.bindings

    _ensure_graph_target_namespace(
        graph_schema, target_db_config, graph_target_namespace
    )

    params = ingestion_params if ingestion_params is not None else IngestionParams()
    if params.clear_data:
        with ConnectionManager(connection_config=target_db_config) as db_client:
            outcome = db_client.clear_data(graph_schema)
            # Clearing must complete before ingestion begins, so an async
            # clear_data implementation is rejected outright.
            if inspect.isawaitable(outcome):
                raise TypeError(
                    "clear_data must be synchronous so ingestion only starts "
                    "after data clearing has completed."
                )

    Caster(
        schema=graph_schema,
        ingestion_model=model,
        ingestion_params=params,
    ).ingest(
        target_db_config=target_db_config,
        bindings=contract_bindings or Bindings(),
        ingestion_params=params,
        connection_provider=connection_provider or self.connection_provider,
    )

introspect(postgres_config, schema_name=None, include_raw_tables=True)

Introspect PostgreSQL schema and return a serializable result.

Parameters:

Name Type Description Default
postgres_config PostgresConfig

PostgresConfig instance

required
schema_name str | None

Schema name to introspect (defaults to config schema_name or 'public')

None
include_raw_tables bool

If True, include raw (unclassified) tables in the introspection result.

True

Returns:

Name Type Description
SchemaIntrospectionResult SchemaIntrospectionResult

Introspection result (vertex_tables, edge_tables, raw_tables, schema_name) suitable for serialization.

Source code in graflo/hq/graph_engine.py
def introspect(
    self,
    postgres_config: PostgresConfig,
    schema_name: str | None = None,
    include_raw_tables: bool = True,
) -> SchemaIntrospectionResult:
    """Introspect PostgreSQL schema and return a serializable result.

    Args:
        postgres_config: PostgresConfig instance
        schema_name: Schema name to introspect (defaults to config schema_name or 'public')
        include_raw_tables: If True (default), include raw (unclassified)
            tables in the introspection result.

    Returns:
        SchemaIntrospectionResult: Introspection result (vertex_tables, edge_tables,
            raw_tables, schema_name) suitable for serialization.
    """
    with PostgresConnection(postgres_config) as postgres_conn:
        inferencer = SQLInferenceManager(
            conn=postgres_conn,
            target_db_flavor=self.target_db_flavor,
        )
        return inferencer.introspect(
            schema_name=schema_name,
            include_raw_tables=include_raw_tables,
        )

InMemoryConnectionProvider

Bases: BaseModel

Simple in-memory provider for proxy-based generalized configs.

Supports two wiring modes: a new mode using `proxy_by_connector_hash` + `configs_by_proxy`, and a legacy mode using per-resource maps (`postgres_by_resource` / `sparql_by_resource`).

Source code in graflo/hq/connection_provider.py
class InMemoryConnectionProvider(BaseModel):
    """Dict-backed provider for proxy-based generalized connection configs.

    Two wiring modes are supported:
    - New: ``proxy_by_connector_hash`` + ``configs_by_proxy``
    - Legacy: per-resource maps (``postgres_by_resource`` / ``sparql_by_resource``)
    """

    # New wiring.
    configs_by_proxy: dict[str, GeneralizedConnConfig] = Field(default_factory=dict)
    proxy_by_connector_hash: dict[str, str] = Field(default_factory=dict)

    # Legacy wiring (kept to avoid breaking existing providers).
    postgres_by_resource: dict[str, PostgresConfig] = Field(default_factory=dict)
    sparql_by_resource: dict[str, SparqlEndpointConfig] = Field(default_factory=dict)
    sparql_by_endpoint: dict[str, SparqlEndpointConfig] = Field(default_factory=dict)
    default_sparql: SparqlEndpointConfig | None = None

    # ------------------------------------------------------------------
    # New API
    # ------------------------------------------------------------------
    def register_generalized_config(
        self, *, conn_proxy: str, config: GeneralizedConnConfig
    ) -> None:
        """Register *config* under the proxy label *conn_proxy*."""
        self.configs_by_proxy[conn_proxy] = config

    def bind_connector_to_conn_proxy(
        self, *, connector: ResourceConnector, conn_proxy: str
    ) -> None:
        """Associate a connector (keyed by hash) with a proxy label."""
        self.proxy_by_connector_hash[connector.hash] = conn_proxy

    def bind_from_bindings(self, *, bindings: Bindings) -> None:
        """Populate ``proxy_by_connector_hash`` from the contract bindings."""
        for conn in bindings.connectors:
            label = bindings.get_conn_proxy_for_connector(conn)
            if label is None:
                continue
            self.proxy_by_connector_hash[conn.hash] = label

    def bind_single_config_for_bindings(
        self,
        *,
        bindings: Bindings,
        conn_proxy: str,
        config: GeneralizedConnConfig,
    ) -> None:
        """Bind one generalized config to every connector in *bindings*.

        Covers the common case where a single source DB (or a single
        generalized API endpoint) supplies all SQL/SPARQL connectors in
        the manifest.

        Raises:
            ValueError: if bindings use multiple different ``conn_proxy`` labels.
        """
        used_proxies = {
            label
            for label in (
                bindings.get_conn_proxy_for_connector(conn)
                for conn in bindings.connectors
            )
            if label is not None
        }

        if not used_proxies:
            raise ValueError(
                "No connector_connection mappings found in bindings; "
                "expected connector -> conn_proxy rows."
            )

        if used_proxies != {conn_proxy}:
            used = ", ".join(sorted(used_proxies))
            raise ValueError(
                f"Expected all connector_connection mappings to use conn_proxy='{conn_proxy}', "
                f"but found proxies: {used}. For multi-proxy setups, bind explicitly "
                "with register_generalized_config(...) and bind_from_bindings(...)."
            )

        self.register_generalized_config(conn_proxy=conn_proxy, config=config)
        self.bind_from_bindings(bindings=bindings)

    def get_generalized_conn_config(
        self, connector: ResourceConnector
    ) -> GeneralizedConnConfig | None:
        """Resolve the generalized config wired to *connector*, if any."""
        label = self.proxy_by_connector_hash.get(connector.hash)
        return None if label is None else self.configs_by_proxy.get(label)

    def get_generalized_config_by_proxy(
        self, conn_proxy: str
    ) -> GeneralizedConnConfig | None:
        """Look up a generalized config directly by proxy label."""
        return self.configs_by_proxy.get(conn_proxy)

    def register_s3_config(
        self, *, conn_proxy: str, config: S3GeneralizedConnConfig
    ) -> None:
        """Store S3 staging credentials/config under *conn_proxy*."""
        self.configs_by_proxy[conn_proxy] = config

    # ------------------------------------------------------------------
    # Legacy API
    # ------------------------------------------------------------------
    def get_postgres_config(
        self, resource_name: str, connector: TableConnector
    ) -> PostgresConfig | None:
        """Return a Postgres config, preferring the proxy-based wiring."""
        generalized = self.get_generalized_conn_config(connector)
        if isinstance(generalized, PostgresGeneralizedConnConfig):
            return generalized.config
        # Legacy fallback keyed by resource name.
        return self.postgres_by_resource.get(resource_name)

    def get_sparql_auth(
        self, resource_name: str, connector: SparqlConnector
    ) -> SparqlAuth | None:
        """Return SPARQL credentials, preferring the proxy-based wiring."""
        generalized = self.get_generalized_conn_config(connector)
        if isinstance(generalized, SparqlGeneralizedConnConfig):
            endpoint_cfg = generalized.config
            return SparqlAuth(
                username=endpoint_cfg.username, password=endpoint_cfg.password
            )

        # Legacy resolution order: resource name -> endpoint URL -> default.
        endpoint_cfg = self.sparql_by_resource.get(resource_name)
        if endpoint_cfg is None and connector.endpoint_url:
            endpoint_cfg = self.sparql_by_endpoint.get(connector.endpoint_url)
        if endpoint_cfg is None:
            endpoint_cfg = self.default_sparql
        if endpoint_cfg is None:
            return None
        return SparqlAuth(
            username=endpoint_cfg.username, password=endpoint_cfg.password
        )

bind_from_bindings(*, bindings)

Populate proxy_by_connector_hash from the contract bindings.

Source code in graflo/hq/connection_provider.py
def bind_from_bindings(self, *, bindings: Bindings) -> None:
    """Fill ``proxy_by_connector_hash`` from the contract bindings."""
    for conn in bindings.connectors:
        mapped_proxy = bindings.get_conn_proxy_for_connector(conn)
        if mapped_proxy is None:
            continue
        self.proxy_by_connector_hash[conn.hash] = mapped_proxy

bind_single_config_for_bindings(*, bindings, conn_proxy, config)

Bind one generalized config to all connectors in bindings.

This is intended for the common case where a single source DB (or single generalized API endpoint) supplies all SQL/SPARQL connectors in the manifest.

Raises:

Type Description
ValueError

if bindings use multiple different conn_proxy labels.

Source code in graflo/hq/connection_provider.py
def bind_single_config_for_bindings(
    self,
    *,
    bindings: Bindings,
    conn_proxy: str,
    config: GeneralizedConnConfig,
) -> None:
    """Wire one generalized config to all connectors in *bindings*.

    Intended for the common case where a single source DB (or a single
    generalized API endpoint) supplies every SQL/SPARQL connector in the
    manifest.

    Raises:
        ValueError: if bindings use multiple different ``conn_proxy`` labels.
    """
    proxies_in_use: set[str] = set()
    for conn in bindings.connectors:
        label = bindings.get_conn_proxy_for_connector(conn)
        if label is not None:
            proxies_in_use.add(label)

    if not proxies_in_use:
        raise ValueError(
            "No connector_connection mappings found in bindings; "
            "expected connector -> conn_proxy rows."
        )

    # Every mapping must agree on the single expected proxy label.
    if proxies_in_use != {conn_proxy}:
        used = ", ".join(sorted(proxies_in_use))
        raise ValueError(
            f"Expected all connector_connection mappings to use conn_proxy='{conn_proxy}', "
            f"but found proxies: {used}. For multi-proxy setups, bind explicitly "
            "with register_generalized_config(...) and bind_from_bindings(...)."
        )

    self.register_generalized_config(conn_proxy=conn_proxy, config=config)
    self.bind_from_bindings(bindings=bindings)

register_s3_config(*, conn_proxy, config)

Store S3 staging credentials/config under conn_proxy.

Source code in graflo/hq/connection_provider.py
def register_s3_config(
    self, *, conn_proxy: str, config: S3GeneralizedConnConfig
) -> None:
    """Store S3 staging credentials/config under *conn_proxy*."""
    # Shares the same proxy map used by register_generalized_config.
    self.configs_by_proxy[conn_proxy] = config

IngestionParams

Bases: BaseModel

Parameters for controlling the ingestion process.

max_items caps how many source items (rows, JSON objects, grouped RDF subjects, …) are read per resource run. It maps to AbstractDataSource.iter_batches(..., limit=...). batch_size is only the maximum number of items per yielded batch, not a cap on total volume.

Source code in graflo/hq/ingestion_parameters.py
class IngestionParams(BaseModel):
    """Parameters for controlling the ingestion process.

    ``max_items`` caps how many **source items** (rows, JSON objects, grouped
    RDF subjects, …) are read per resource run. It maps to
    ``AbstractDataSource.iter_batches(..., limit=...)``. ``batch_size`` is only
    the maximum number of items per yielded batch, not a cap on total volume.
    """

    # Remove all existing data before ingestion (schema untouched).
    clear_data: bool = False
    # Worker parallelism for ingestion — presumably process/thread count;
    # TODO confirm against Caster.
    n_cores: int = 1
    max_items: int | None = Field(
        default=None,
        ge=1,
        description=(
            "Maximum number of source items (rows / JSON objects / grouped "
            "RDF subjects) to ingest for each resource. Not a batch count."
        ),
    )
    batch_size: int = Field(
        default=10000,
        ge=1,
        description="Number of source items to group per batch for casting and writes.",
    )
    batch_prefetch: int = Field(
        default=2,
        ge=1,
        description=(
            "How many batches to prefetch ahead while processing current batch. "
            "Keeps ingestion lazy with bounded memory."
        ),
    )
    # NOTE(review): dry/init_only semantics inferred from names (dry run /
    # initialize without ingesting) — confirm against Caster.
    dry: bool = False
    init_only: bool = False
    # Cap on number of input files, when the source is file-based.
    limit_files: int | None = None
    # Optional name filters — presumably restrict ingestion to the listed
    # resources/vertices; verify against Caster.
    resources: list[str] | None = None
    vertices: list[str] | None = None
    # Upper bound on concurrent database operations (None = unbounded).
    max_concurrent_db_ops: int | None = None
    # Date-range filtering; pairs with connector date_field (see
    # GraphEngine.create_bindings datetime_columns).
    datetime_after: str | None = None
    datetime_before: str | None = None
    datetime_column: str | None = None

    # Strict contract checks for major-release style validation workflows.
    strict_references: bool = True
    strict_registry: bool = True
    # Allow dynamically discovered edges — TODO confirm semantics.
    dynamic_edges: bool = False
    # Per-document cast-failure policy: skip the document or fail the run.
    on_doc_error: Literal["skip", "fail"] = "skip"
    doc_error_sink_path: Path | None = Field(
        default=None,
        description=(
            "Append gzip-compressed JSONL cast-failure records (typical suffix .jsonl.gz)."
        ),
    )
    # Stop after this many document errors (None = unlimited).
    max_doc_errors: int | None = None
    # Size cap for raw-document previews in failure records.
    doc_error_preview_max_bytes: int = 4096
    # Restrict the preview to these document keys, when set.
    doc_error_preview_keys: tuple[str, ...] | None = None

JsonlGzDocErrorSink

Append gzip-compressed JSON lines (one member per write batch).

Source code in graflo/hq/doc_error_sink.py
class JsonlGzDocErrorSink:
    """Persist cast failures as gzip-compressed JSON lines.

    Each call to :meth:`write_failures` appends one gzip member to the
    target file; concatenated gzip members decompress as a single stream,
    which is the standard convention for append-only log files.
    """

    def __init__(self, path: Path) -> None:
        # Destination file; parent directories are created lazily on first write.
        self._path = path

    async def write_failures(self, failures: list[DocCastFailure]) -> None:
        """Append *failures* to the sink; a no-op for an empty list."""
        if not failures:
            return

        target = self._path
        # Serialize up front so the worker thread does pure I/O.
        payload = b"".join(
            (fail.model_dump_json() + "\n").encode("utf-8") for fail in failures
        )

        def _append() -> None:
            target.parent.mkdir(parents=True, exist_ok=True)
            # "ab" starts a fresh gzip member on every call.
            with gzip.open(target, "ab") as handle:
                handle.write(payload)

        # Off-load the blocking filesystem work to a worker thread.
        await asyncio.to_thread(_append)

PostgresGeneralizedConnConfig

Bases: BaseModel

Generalized runtime config variant for SQL/Postgres connections.

Source code in graflo/hq/connection_provider.py
class PostgresGeneralizedConnConfig(BaseModel):
    """Generalized runtime config variant for SQL/Postgres connections.

    One variant of a tagged union of connection configs: the literal
    ``kind`` field marks this as the "postgres" variant so consumers can
    dispatch on it via ``isinstance`` / discriminated-union checks.
    """

    # Discriminator tag; always "postgres" for this variant.
    kind: Literal["postgres"] = "postgres"
    # Wrapped Postgres connection parameters (used e.g. to build the
    # SQLAlchemy connection string and resolve the schema name).
    config: PostgresConfig

RegistryBuilder

Create a `DataSourceRegistry` from `Bindings`.

Attributes:

Name Type Description
schema

Schema providing the resource definitions and vertex/edge config.

Source code in graflo/hq/registry_builder.py
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
class RegistryBuilder:
    """Create a :class:`DataSourceRegistry` from :class:`Bindings`.

    Attributes:
        schema: Schema providing the resource definitions and vertex/edge config.
        ingestion_model: Ingestion model whose ``resources`` drive registration.
    """

    def __init__(self, schema: Schema, ingestion_model: IngestionModel):
        """Store the schema and ingestion model consulted during :meth:`build`."""
        self.schema = schema
        self.ingestion_model = ingestion_model

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def build(
        self,
        bindings: Bindings,
        ingestion_params: IngestionParams,
        connection_provider: ConnectionProvider | None = None,
        *,
        strict: bool = False,
    ) -> DataSourceRegistry:
        """Return a populated :class:`DataSourceRegistry`.

        For each ingestion resource, registers every bound connector (same
        resource may have multiple physical sources).

        Args:
            bindings: Resource-to-connector bindings to materialize.
            ingestion_params: Runtime options; ``resources`` restricts which
                resources are considered, other fields are forwarded to the
                per-kind registration helpers.
            connection_provider: Source of connection configs for SQL/SPARQL
                connectors; defaults to :class:`EmptyConnectionProvider`.
            strict: When True, raise ``ValueError`` listing every recorded
                failure once the full scan completes; when False, failures
                are only logged and the offending connector is skipped.

        Raises:
            ValueError: In strict mode, if any failure was recorded.
        """
        registry = DataSourceRegistry()
        provider = connection_provider or EmptyConnectionProvider()
        # Human-readable failure messages; only fatal in strict mode.
        failures: list[str] = []

        resources_filter: set[str] | None = None
        if ingestion_params.resources is not None:
            resources_filter = set(ingestion_params.resources)

        for resource in self.ingestion_model.resources:
            resource_name = resource.name
            if resources_filter is not None and resource_name not in resources_filter:
                continue
            connectors = bindings.get_connectors_for_resource(resource_name)
            if not connectors:
                msg = f"No connectors bound for resource '{resource_name}'"
                logger.warning("%s, skipping", msg)
                failures.append(msg)
                continue

            for connector in connectors:
                # Prefer the human-assigned name; fall back to the content hash.
                cref = connector.name or connector.hash
                kind = connector.bound_source_kind()

                if kind == BoundSourceKind.FILE:
                    if not isinstance(connector, FileConnector):
                        msg = (
                            f"Connector '{cref}' for resource '{resource_name}' "
                            f"is not a FileConnector"
                        )
                        logger.warning("%s, skipping", msg)
                        failures.append(msg)
                        continue
                    try:
                        self._register_file_sources(
                            registry, resource_name, connector, ingestion_params
                        )
                    except Exception as e:
                        msg = (
                            f"Failed to register FILE source for resource "
                            f"'{resource_name}' (connector '{cref}'): {e}"
                        )
                        failures.append(msg)
                        # NOTE(review): this `continue` is redundant — the
                        # except block is the last statement of the loop body
                        # for this branch (same for the branches below).
                        if strict:
                            continue

                elif kind == BoundSourceKind.SQL_TABLE:
                    if not isinstance(connector, TableConnector):
                        msg = (
                            f"Connector '{cref}' for resource '{resource_name}' "
                            f"is not a TableConnector"
                        )
                        logger.warning("%s, skipping", msg)
                        failures.append(msg)
                        continue
                    try:
                        self._register_sql_table_sources(
                            registry,
                            resource_name,
                            connector,
                            bindings,
                            ingestion_params,
                            provider,
                        )
                    except Exception as e:
                        msg = (
                            f"Failed to register SQL source for resource "
                            f"'{resource_name}' (connector '{cref}'): {e}"
                        )
                        failures.append(msg)
                        if strict:
                            continue

                elif kind == BoundSourceKind.SPARQL:
                    if not isinstance(connector, SparqlConnector):
                        msg = (
                            f"Connector '{cref}' for resource '{resource_name}' "
                            f"is not a SparqlConnector"
                        )
                        logger.warning("%s, skipping", msg)
                        failures.append(msg)
                        continue
                    try:
                        self._register_sparql_sources(
                            registry,
                            resource_name,
                            connector,
                            bindings,
                            ingestion_params,
                            provider,
                        )
                    except Exception as e:
                        msg = (
                            f"Failed to register SPARQL source for resource "
                            f"'{resource_name}' (connector '{cref}'): {e}"
                        )
                        failures.append(msg)
                        if strict:
                            continue

                else:
                    msg = (
                        f"Unsupported bound source kind '{kind}' "
                        f"for resource '{resource_name}' (connector '{cref}')"
                    )
                    logger.warning("%s, skipping", msg)
                    failures.append(msg)

        if strict and failures:
            details = "\n".join(f"- {item}" for item in failures)
            raise ValueError(f"Registry build failed in strict mode:\n{details}")

        return registry

    # ------------------------------------------------------------------
    # File sources
    # ------------------------------------------------------------------

    @staticmethod
    def discover_files(
        fpath: Path | str, connector: FileConnector, limit_files: int | None = None
    ) -> list[Path]:
        """Discover files matching *connector* in a directory.

        Non-recursive: only direct children of *fpath* are considered
        (``Path.iterdir``), and ``connector.regex`` — when set — is matched
        against the file *name* only, via ``re.search``.

        Args:
            fpath: Directory to search in.
            connector: Connector used to match files.
            limit_files: Optional cap on the number of files returned.

        Returns:
            Matching file paths.
        """
        # NOTE(review): sub_path is only validated here, not used to locate
        # files; the caller (_register_file_sources) passes it in as *fpath*.
        if connector.sub_path is None:
            raise ValueError("connector.sub_path is required")
        path = Path(fpath) if isinstance(fpath, str) else fpath

        files = [
            f
            for f in path.iterdir()
            if f.is_file()
            and (
                True
                if connector.regex is None
                else re.search(connector.regex, f.name) is not None
            )
        ]

        # iterdir order is filesystem-dependent and the list is not sorted,
        # so which files survive the limit_files slice is not deterministic.
        if limit_files is not None:
            files = files[:limit_files]

        return files

    def _register_file_sources(
        self,
        registry: DataSourceRegistry,
        resource_name: str,
        connector: FileConnector,
        ingestion_params: IngestionParams,
    ) -> None:
        """Register one file data source per file matched under the connector's sub_path.

        Raises:
            ValueError: If the connector has no ``sub_path`` directory to search.
        """
        if connector.sub_path is None:
            raise ValueError(
                f"FileConnector for resource '{resource_name}' has no sub_path"
            )

        path_obj = connector.sub_path.expanduser()
        files = self.discover_files(
            path_obj, limit_files=ingestion_params.limit_files, connector=connector
        )
        logger.info(f"For resource name {resource_name} {len(files)} files were found")

        for file_path in files:
            file_source = DataSourceFactory.create_file_data_source(path=file_path)
            registry.register(file_source, resource_name=resource_name)

    # ------------------------------------------------------------------
    # SQL / table sources
    # ------------------------------------------------------------------

    def _register_sql_table_sources(
        self,
        registry: DataSourceRegistry,
        resource_name: str,
        connector: TableConnector,
        bindings: Bindings,
        ingestion_params: IngestionParams,
        connection_provider: ConnectionProvider,
    ) -> None:
        """Register SQL table data sources for a resource.

        Uses SQLDataSource with batch processing (cursors) instead of loading
        all data into memory.

        When the matching Resource has edge actors with ``match_source`` /
        ``match_target`` and the source/target vertex types have known
        table connectors, JoinClauses and IS_NOT_NULL filters are auto-generated
        on the connector before building the SQL query.
        """
        from graflo.hq.auto_join import enrich_edge_connector_with_joins

        # Preferred path: generalized config API; older providers may not have it.
        generalized = (
            connection_provider.get_generalized_conn_config(connector)
            if hasattr(connection_provider, "get_generalized_conn_config")
            else None
        )
        postgres_config = (
            generalized.config
            if isinstance(generalized, PostgresGeneralizedConnConfig)
            else None
        )
        if postgres_config is None:
            # Legacy fallback: allow older ConnectionProvider implementations.
            postgres_config = connection_provider.get_postgres_config(
                resource_name, connector
            )
        if postgres_config is None:
            logger.warning(
                f"PostgreSQL table '{resource_name}' has no connection config, skipping"
            )
            return

        table_name = connector.table_name
        schema_name = connector.schema_name
        # Connector schema wins over the connection's; "public" is the last resort.
        effective_schema = schema_name or postgres_config.schema_name or "public"

        try:
            resource = self.ingestion_model.fetch_resource(resource_name)
            # Only auto-generate joins when the connector is a plain table
            # (no custom view and no joins already configured).
            if connector.view is None and not connector.joins:
                enrich_edge_connector_with_joins(
                    resource=resource,
                    connector=connector,
                    bindings=bindings,
                    vertex_config=self.schema.core_schema.vertex_config,
                )

            # Connector-level date_field takes precedence over the global param.
            date_column = connector.date_field or ingestion_params.datetime_column
            if (
                ingestion_params.datetime_after or ingestion_params.datetime_before
            ) and date_column:
                # Handled below via build_query + appended WHERE.
                pass
            elif ingestion_params.datetime_after or ingestion_params.datetime_before:
                logger.warning(
                    "datetime_after/datetime_before set but no date column: "
                    "set TableConnector.date_field or IngestionParams.datetime_column for resource %s",
                    resource_name,
                )

            query = connector.build_query(effective_schema)

            # Only append the range predicate when the column did NOT come from
            # the connector — presumably build_query already applies
            # connector.date_field itself; confirm against build_query.
            if date_column and date_column != connector.date_field:
                dt_where = datetime_range_where_sql(
                    ingestion_params.datetime_after,
                    ingestion_params.datetime_before,
                    date_column,
                )
                if dt_where:
                    # NOTE(review): substring test — a " WHERE " inside a
                    # subquery or string literal would also match; fragile.
                    if " WHERE " in query:
                        query += f" AND {dt_where}"
                    else:
                        query += f" WHERE {dt_where}"

            connection_string = postgres_config.to_sqlalchemy_connection_string()

            sql_config = SQLConfig(
                connection_string=connection_string,
                query=query,
            )
            sql_source = SQLDataSource(config=sql_config)

            registry.register(sql_source, resource_name=resource_name)

            logger.info(
                f"Created SQL data source for table '{effective_schema}.{table_name}' "
                f"mapped to resource '{resource_name}' "
                f"(will process in batches of {ingestion_params.batch_size})"
            )
        except Exception as e:
            logger.error(
                f"Failed to create data source for PostgreSQL table '{resource_name}': {e}",
                exc_info=True,
            )
            raise

    # ------------------------------------------------------------------
    # SPARQL / RDF sources
    # ------------------------------------------------------------------

    def _register_sparql_sources(
        self,
        registry: DataSourceRegistry,
        resource_name: str,
        connector: SparqlConnector,
        bindings: "Bindings",
        ingestion_params: "IngestionParams",
        connection_provider: ConnectionProvider,
    ) -> None:
        """Register SPARQL data sources for a resource.

        Handles two modes:

        * **Endpoint mode** (``connector.endpoint_url`` is set): creates a
          :class:`SparqlEndpointDataSource` that queries the remote SPARQL
          endpoint.
        * **File mode** (``connector.rdf_file`` is set): creates an
          :class:`RdfFileDataSource` that parses a local RDF file.

        Endpoint mode takes precedence when both are set; with neither set,
        the connector is skipped with a warning.
        """
        try:
            if connector.endpoint_url:
                from graflo.data_source.rdf import (
                    SparqlEndpointDataSource,
                    SparqlSourceConfig,
                )

                # Preferred path: generalized config; legacy auth API otherwise.
                generalized = (
                    connection_provider.get_generalized_conn_config(connector)
                    if hasattr(connection_provider, "get_generalized_conn_config")
                    else None
                )
                if isinstance(generalized, SparqlGeneralizedConnConfig):
                    cfg = generalized.config
                    username = cfg.username
                    password = cfg.password
                else:
                    # Legacy fallback: allow older ConnectionProvider implementations.
                    sparql_auth = connection_provider.get_sparql_auth(
                        resource_name, connector
                    )
                    username = sparql_auth.username if sparql_auth else None
                    password = sparql_auth.password if sparql_auth else None

                source_config = SparqlSourceConfig(
                    endpoint_url=connector.endpoint_url,
                    rdf_class=connector.rdf_class,
                    graph_uri=connector.graph_uri,
                    sparql_query=connector.sparql_query,
                    username=username,
                    password=password,
                    # Endpoint pagination follows the ingestion batch size.
                    page_size=ingestion_params.batch_size,
                )
                sparql_source = SparqlEndpointDataSource(config=source_config)
                registry.register(sparql_source, resource_name=resource_name)

                logger.info(
                    "Created SPARQL endpoint data source for class <%s> at '%s' "
                    "mapped to resource '%s'",
                    connector.rdf_class,
                    connector.endpoint_url,
                    resource_name,
                )

            elif connector.rdf_file:
                from graflo.data_source.rdf import RdfFileDataSource

                rdf_source = RdfFileDataSource(
                    path=connector.rdf_file,
                    rdf_class=connector.rdf_class,
                )
                registry.register(rdf_source, resource_name=resource_name)

                logger.info(
                    "Created RDF file data source for class <%s> from '%s' "
                    "mapped to resource '%s'",
                    connector.rdf_class,
                    connector.rdf_file,
                    resource_name,
                )

            else:
                logger.warning(
                    "SparqlConnector for resource '%s' has neither endpoint_url nor "
                    "rdf_file set, skipping",
                    resource_name,
                )

        except Exception as e:
            logger.error(
                "Failed to create data source for SPARQL resource '%s': %s",
                resource_name,
                e,
                exc_info=True,
            )
            raise

build(bindings, ingestion_params, connection_provider=None, *, strict=False)

Return a populated `DataSourceRegistry`.

For each ingestion resource, registers every bound connector (same resource may have multiple physical sources).

Source code in graflo/hq/registry_builder.py
def build(
    self,
    bindings: Bindings,
    ingestion_params: IngestionParams,
    connection_provider: ConnectionProvider | None = None,
    *,
    strict: bool = False,
) -> DataSourceRegistry:
    """Return a populated :class:`DataSourceRegistry`.

    For each ingestion resource, registers every bound connector (same
    resource may have multiple physical sources).

    Args:
        bindings: Resource-to-connector bindings to materialize.
        ingestion_params: Runtime options; ``resources`` restricts which
            resources are considered.
        connection_provider: Source of connection configs for SQL/SPARQL
            connectors; defaults to an :class:`EmptyConnectionProvider`.
        strict: When True, raise ``ValueError`` listing every recorded
            failure after the whole scan; when False, failures are only
            logged and the offending connector is skipped.

    Raises:
        ValueError: In strict mode, if any failure was recorded.
    """
    registry = DataSourceRegistry()
    provider = connection_provider or EmptyConnectionProvider()
    # Accumulated failure messages; reported together in strict mode.
    failures: list[str] = []

    resources_filter: set[str] | None = None
    if ingestion_params.resources is not None:
        resources_filter = set(ingestion_params.resources)

    for resource in self.ingestion_model.resources:
        resource_name = resource.name
        if resources_filter is not None and resource_name not in resources_filter:
            continue
        connectors = bindings.get_connectors_for_resource(resource_name)
        if not connectors:
            msg = f"No connectors bound for resource '{resource_name}'"
            logger.warning("%s, skipping", msg)
            failures.append(msg)
            continue

        for connector in connectors:
            # Prefer the human-assigned name; fall back to the content hash.
            cref = connector.name or connector.hash
            kind = connector.bound_source_kind()

            if kind == BoundSourceKind.FILE:
                if not isinstance(connector, FileConnector):
                    msg = (
                        f"Connector '{cref}' for resource '{resource_name}' "
                        f"is not a FileConnector"
                    )
                    logger.warning("%s, skipping", msg)
                    failures.append(msg)
                    continue
                try:
                    self._register_file_sources(
                        registry, resource_name, connector, ingestion_params
                    )
                except Exception as e:
                    msg = (
                        f"Failed to register FILE source for resource "
                        f"'{resource_name}' (connector '{cref}'): {e}"
                    )
                    failures.append(msg)
                    # NOTE(review): `continue` here is a no-op — it is already
                    # the last statement of this branch (also in those below).
                    if strict:
                        continue

            elif kind == BoundSourceKind.SQL_TABLE:
                if not isinstance(connector, TableConnector):
                    msg = (
                        f"Connector '{cref}' for resource '{resource_name}' "
                        f"is not a TableConnector"
                    )
                    logger.warning("%s, skipping", msg)
                    failures.append(msg)
                    continue
                try:
                    self._register_sql_table_sources(
                        registry,
                        resource_name,
                        connector,
                        bindings,
                        ingestion_params,
                        provider,
                    )
                except Exception as e:
                    msg = (
                        f"Failed to register SQL source for resource "
                        f"'{resource_name}' (connector '{cref}'): {e}"
                    )
                    failures.append(msg)
                    if strict:
                        continue

            elif kind == BoundSourceKind.SPARQL:
                if not isinstance(connector, SparqlConnector):
                    msg = (
                        f"Connector '{cref}' for resource '{resource_name}' "
                        f"is not a SparqlConnector"
                    )
                    logger.warning("%s, skipping", msg)
                    failures.append(msg)
                    continue
                try:
                    self._register_sparql_sources(
                        registry,
                        resource_name,
                        connector,
                        bindings,
                        ingestion_params,
                        provider,
                    )
                except Exception as e:
                    msg = (
                        f"Failed to register SPARQL source for resource "
                        f"'{resource_name}' (connector '{cref}'): {e}"
                    )
                    failures.append(msg)
                    if strict:
                        continue

            else:
                msg = (
                    f"Unsupported bound source kind '{kind}' "
                    f"for resource '{resource_name}' (connector '{cref}')"
                )
                logger.warning("%s, skipping", msg)
                failures.append(msg)

    if strict and failures:
        details = "\n".join(f"- {item}" for item in failures)
        raise ValueError(f"Registry build failed in strict mode:\n{details}")

    return registry

discover_files(fpath, connector, limit_files=None) staticmethod

Discover files matching connector in a directory.

Parameters:

Name Type Description Default
fpath Path | str

Directory to search in.

required
connector FileConnector

Connector used to match files.

required
limit_files int | None

Optional cap on the number of files returned.

None

Returns:

Type Description
list[Path]

Matching file paths.

Source code in graflo/hq/registry_builder.py
@staticmethod
def discover_files(
    fpath: Path | str, connector: FileConnector, limit_files: int | None = None
) -> list[Path]:
    """Discover files matching *connector* in a directory.

    Args:
        fpath: Directory to search in.
        connector: Connector used to match files.
        limit_files: Optional cap on the number of files returned.

    Returns:
        Matching file paths.
    """
    if connector.sub_path is None:
        raise ValueError("connector.sub_path is required")
    path = Path(fpath) if isinstance(fpath, str) else fpath

    files = [
        f
        for f in path.iterdir()
        if f.is_file()
        and (
            True
            if connector.regex is None
            else re.search(connector.regex, f.name) is not None
        )
    ]

    if limit_files is not None:
        files = files[:limit_files]

    return files

ResourceMapper

Maps different data sources to Bindings for graph ingestion.

This class provides methods to create Bindings from various data sources, enabling a unified interface for connector creation regardless of the source type.

Source code in graflo/hq/resource_mapper.py
class ResourceMapper:
    """Maps different data sources to Bindings for graph ingestion.

    Offers a single entry point for deriving :class:`Bindings` (and,
    optionally, an :class:`InMemoryConnectionProvider`) from external
    sources, so callers get a uniform connector-creation interface
    regardless of where the data lives.
    """

    def create_bindings_from_postgres(
        self,
        conn: PostgresConnection,
        schema_name: str | None = None,
        datetime_columns: dict[str, str] | None = None,
        type_lookup_overrides: dict[str, dict] | None = None,
        include_raw_tables: bool = False,
    ) -> Bindings:
        """Convenience wrapper returning only the Bindings (provider discarded)."""
        result = self.create_bindings_with_provider_from_postgres(
            conn=conn,
            schema_name=schema_name,
            datetime_columns=datetime_columns,
            type_lookup_overrides=type_lookup_overrides,
            include_raw_tables=include_raw_tables,
        )
        return result[0]

    def create_bindings_with_provider_from_postgres(
        self,
        conn: PostgresConnection,
        schema_name: str | None = None,
        datetime_columns: dict[str, str] | None = None,
        type_lookup_overrides: dict[str, dict] | None = None,
        include_raw_tables: bool = False,
    ) -> tuple[Bindings, InMemoryConnectionProvider]:
        """Create Bindings from PostgreSQL tables.

        Introspects the database schema through *conn* and delegates to
        :meth:`create_bindings_with_provider_from_introspection`.

        Args:
            conn: PostgresConnection instance.
            schema_name: Schema name to introspect.
            datetime_columns: Optional mapping of resource/table name to
                datetime column name for date-range filtering (sets
                date_field on each TableConnector). Used with
                IngestionParams.datetime_after / datetime_before.
            type_lookup_overrides: Optional mapping of table name to
                type_lookup spec for edge tables where source/target types
                come from a lookup table.
            include_raw_tables: Forwarded to the introspection call.

        Returns:
            Tuple of the Bindings and an InMemoryConnectionProvider holding
            connector -> PostgresConfig mappings.
        """
        introspected = conn.introspect_schema(
            schema_name=schema_name,
            include_raw_tables=include_raw_tables,
        )
        return self.create_bindings_with_provider_from_introspection(
            introspection_result=introspected,
            conn=conn,
            schema_name=schema_name,
            datetime_columns=datetime_columns,
            type_lookup_overrides=type_lookup_overrides,
        )

    def create_bindings_with_provider_from_introspection(
        self,
        introspection_result: SchemaIntrospectionResult,
        conn: PostgresConnection,
        schema_name: str | None = None,
        datetime_columns: dict[str, str] | None = None,
        type_lookup_overrides: dict[str, dict] | None = None,
    ) -> tuple[Bindings, InMemoryConnectionProvider]:
        """Create bindings/provider from a precomputed introspection result."""

        bindings = Bindings()
        # Explicit schema_name wins over what introspection reported.
        target_schema = schema_name or introspection_result.schema_name

        provider = InMemoryConnectionProvider()
        conn_proxy = "postgres_source"
        provider.register_generalized_config(
            conn_proxy=conn_proxy,
            config=PostgresGeneralizedConnConfig(config=conn.config),
        )

        datetime_by_table = datetime_columns or {}
        lookup_by_table = type_lookup_overrides or {}

        def _bind(table_name: str, table_connector: TableConnector) -> None:
            # Common wiring for every table: register the connector, bind it
            # to its resource and connection proxy, and record the config.
            bindings.add_connector(table_connector)
            bindings.bind_resource(table_name, table_connector)
            bindings.bind_connector_to_conn_proxy(table_connector, conn_proxy)
            provider.bind_connector_to_conn_proxy(
                connector=table_connector, conn_proxy=conn_proxy
            )
            provider.postgres_by_resource[table_name] = conn.config

        # Vertex tables: plain connectors.
        for info in introspection_result.vertex_tables:
            _bind(
                info.name,
                TableConnector(
                    table_name=info.name,
                    schema_name=target_schema,
                    date_field=datetime_by_table.get(info.name),
                ),
            )

        # Edge tables: optionally wrap a type_lookup view when an override exists.
        for info in introspection_result.edge_tables:
            spec = lookup_by_table.get(info.name)
            view = (
                SelectSpec.from_dict({"kind": "type_lookup", **spec})
                if spec
                else None
            )
            _bind(
                info.name,
                TableConnector(
                    table_name=info.name,
                    schema_name=target_schema,
                    date_field=datetime_by_table.get(info.name),
                    view=view,
                ),
            )

        return bindings, provider

create_bindings_with_provider_from_introspection(introspection_result, conn, schema_name=None, datetime_columns=None, type_lookup_overrides=None)

Create bindings/provider from a precomputed introspection result.

Source code in graflo/hq/resource_mapper.py
def create_bindings_with_provider_from_introspection(
    self,
    introspection_result: SchemaIntrospectionResult,
    conn: PostgresConnection,
    schema_name: str | None = None,
    datetime_columns: dict[str, str] | None = None,
    type_lookup_overrides: dict[str, dict] | None = None,
) -> tuple[Bindings, InMemoryConnectionProvider]:
    """Create bindings/provider from a precomputed introspection result.

    Args:
        introspection_result: Precomputed schema introspection result.
        conn: PostgresConnection whose config is attached to every resource.
        schema_name: Optional schema override; falls back to the schema name
            recorded on the introspection result.
        datetime_columns: Optional mapping of table name -> datetime column
            name, used as ``date_field`` on each TableConnector.
        type_lookup_overrides: Optional mapping of edge-table name ->
            type_lookup spec, wrapped into a ``SelectSpec`` view.

    Returns:
        Tuple of:
            - Bindings with a TableConnector per vertex/edge table
            - InMemoryConnectionProvider with connector->PostgresConfig mappings
    """

    bindings = Bindings()
    effective_schema = schema_name or introspection_result.schema_name

    provider = InMemoryConnectionProvider()
    # All connectors share a single generalized connection proxy.
    conn_proxy = "postgres_source"
    provider.register_generalized_config(
        conn_proxy=conn_proxy,
        config=PostgresGeneralizedConnConfig(config=conn.config),
    )

    date_cols = datetime_columns or {}
    type_lookup = type_lookup_overrides or {}

    def _register(table_name: str, view=None) -> None:
        # Shared registration path for vertex and edge tables: build the
        # connector, wire it into bindings and provider, record the config.
        table_connector = TableConnector(
            table_name=table_name,
            schema_name=effective_schema,
            date_field=date_cols.get(table_name),
            view=view,
        )
        bindings.add_connector(table_connector)
        bindings.bind_resource(table_name, table_connector)
        bindings.bind_connector_to_conn_proxy(table_connector, conn_proxy)
        provider.bind_connector_to_conn_proxy(
            connector=table_connector, conn_proxy=conn_proxy
        )
        provider.postgres_by_resource[table_name] = conn.config

    # Vertex tables never carry a view.
    for table_info in introspection_result.vertex_tables:
        _register(table_info.name)

    # Edge tables may resolve source/target types via a lookup-table view.
    for table_info in introspection_result.edge_tables:
        tl_spec = type_lookup.get(table_info.name)
        view = (
            SelectSpec.from_dict({"kind": "type_lookup", **tl_spec})
            if tl_spec
            else None
        )
        _register(table_info.name, view=view)

    return bindings, provider

create_bindings_with_provider_from_postgres(conn, schema_name=None, datetime_columns=None, type_lookup_overrides=None, include_raw_tables=False)

Create Bindings from PostgreSQL tables.

Parameters:

Name Type Description Default
conn PostgresConnection

PostgresConnection instance

required
schema_name str | None

Schema name to introspect

None
datetime_columns dict[str, str] | None

Optional mapping of resource/table name to datetime column name for date-range filtering (sets date_field on each TableConnector). Used with IngestionParams.datetime_after / datetime_before.

None
type_lookup_overrides dict[str, dict] | None

Optional mapping of table name to type_lookup spec for edge tables where source/target types come from a lookup table. Each value is a dict with: table, identity, type_column, source, target, relation (optional).

None

Returns:

Type Description
tuple[Bindings, InMemoryConnectionProvider]

Tuple of: - Bindings object with TableConnector instances for all tables - InMemoryConnectionProvider containing connector->PostgresConfig mappings

Source code in graflo/hq/resource_mapper.py
def create_bindings_with_provider_from_postgres(
    self,
    conn: PostgresConnection,
    schema_name: str | None = None,
    datetime_columns: dict[str, str] | None = None,
    type_lookup_overrides: dict[str, dict] | None = None,
    include_raw_tables: bool = False,
) -> tuple[Bindings, InMemoryConnectionProvider]:
    """Introspect PostgreSQL and build Bindings for its tables.

    Args:
        conn: PostgresConnection instance to introspect.
        schema_name: Schema name to introspect.
        datetime_columns: Optional resource/table name -> datetime column
            mapping for date-range filtering (sets ``date_field`` on each
            TableConnector); pairs with IngestionParams.datetime_after /
            datetime_before.
        type_lookup_overrides: Optional table name -> type_lookup spec for
            edge tables whose source/target types come from a lookup table
            (keys: table, identity, type_column, source, target, relation
            (optional)).
        include_raw_tables: Whether to include sampled raw-table metadata
            in the introspection pass.

    Returns:
        Tuple of:
            - Bindings with TableConnector instances for all tables
            - InMemoryConnectionProvider with connector->PostgresConfig mappings
    """
    result = conn.introspect_schema(
        schema_name=schema_name,
        include_raw_tables=include_raw_tables,
    )
    return self.create_bindings_with_provider_from_introspection(
        introspection_result=result,
        conn=conn,
        schema_name=schema_name,
        datetime_columns=datetime_columns,
        type_lookup_overrides=type_lookup_overrides,
    )

SQLInferenceManager

Inference manager for PostgreSQL sources.

This class only performs introspection / schema inference / resource creation. Sanitization for a target DB flavor (reserved words, TigerGraph identity normalization, etc.) is the caller's responsibility and should be applied a posteriori via :class:graflo.hq.sanitizer.Sanitizer.

Source code in graflo/hq/sql_inferencer.py
class SQLInferenceManager:
    """Inference manager for PostgreSQL sources.

    Responsibilities are limited to introspection, schema inference and
    resource creation. Sanitizing the result for a target DB flavor
    (reserved words, TigerGraph identity normalization, etc.) is the
    caller's job and should be applied a posteriori via
    :class:`graflo.hq.sanitizer.Sanitizer`.
    """

    def __init__(
        self,
        conn: PostgresConnection,
        target_db_flavor: DBType = DBType.ARANGO,
        fuzzy_threshold: float = 0.8,
    ):
        """Set up inference over *conn* for the given target flavor.

        Args:
            conn: PostgresConnection instance.
            target_db_flavor: Flavor used for type mapping during inference;
                it never triggers sanitization.
            fuzzy_threshold: Similarity threshold for fuzzy matching
                (0.0 to 1.0, default 0.8)
        """
        self.conn = conn
        self.target_db_flavor = target_db_flavor
        self.mapper = PostgresResourceMapper(fuzzy_threshold=fuzzy_threshold)
        self.inferencer = PostgresSchemaInferencer(
            db_flavor=target_db_flavor, conn=conn
        )

    def introspect(
        self,
        schema_name: str | None = None,
        include_raw_tables: bool = False,
    ) -> SchemaIntrospectionResult:
        """Introspect the PostgreSQL schema.

        Args:
            schema_name: Schema name to introspect.
            include_raw_tables: Build sampled per-column raw table metadata.
                Off by default for performance; binding/schema inference does
                not require it.

        Returns:
            SchemaIntrospectionResult: PostgreSQL schema introspection result.
        """
        return self.conn.introspect_schema(
            schema_name=schema_name, include_raw_tables=include_raw_tables
        )

    def infer_schema(
        self, introspection_result, schema_name: str | None = None
    ) -> Schema:
        """Infer a graflo Schema from a PostgreSQL introspection result.

        Args:
            introspection_result: SchemaIntrospectionResult from PostgreSQL.
            schema_name: Schema name (optional, may be inferred from result).

        Returns:
            Schema: Inferred schema with vertices and edges.
        """
        return self.inferencer.infer_schema(
            introspection_result, schema_name=schema_name
        )

    def create_resources(
        self, introspection_result, schema: Schema
    ) -> list["Resource"]:
        """Create Resources from a PostgreSQL introspection result.

        Args:
            introspection_result: SchemaIntrospectionResult from PostgreSQL.
            schema: Existing Schema object.

        Returns:
            list[Resource]: List of Resources for PostgreSQL tables.
        """
        core = schema.core_schema
        return self.mapper.create_resources_from_tables(
            introspection_result,
            core.vertex_config,
            core.edge_config,
            fuzzy_threshold=self.mapper.fuzzy_threshold,
        )

    def create_resources_for_schema(
        self, schema: Schema, schema_name: str | None = None
    ) -> list["Resource"]:
        """Introspect the source and build Resources for an existing schema.

        Args:
            schema: Existing Schema object.
            schema_name: Schema name to introspect (source-specific).

        Returns:
            list[Resource]: List of Resources for the source.
        """
        return self.create_resources(
            self.introspect(schema_name=schema_name), schema
        )

    def infer_artifacts(self, schema_name: str | None = None) -> SQLInferenceArtifacts:
        """Derive all inference artifacts from a single introspection pass.

        Returns:
            SQLInferenceArtifacts: introspection + schema + ingestion model
                tuple. The output is NOT sanitized for the target DB flavor.
        """
        result = self.introspect(schema_name=schema_name)
        inferred = self.infer_schema(result, schema_name=schema_name)
        model = IngestionModel(resources=self.create_resources(result, inferred))
        model.finish_init(inferred.core_schema)
        return SQLInferenceArtifacts(
            introspection_result=result,
            schema=inferred,
            ingestion_model=model,
        )

    def infer_complete_schema(
        self, schema_name: str | None = None
    ) -> tuple[Schema, IngestionModel]:
        """Infer a complete schema plus ingestion model from the source.

        Convenience wrapper that (1) introspects the source schema,
        (2) infers the graflo Schema, and (3) creates resources and finishes
        ingestion initialization. No sanitization is performed; apply
        :class:`graflo.hq.sanitizer.Sanitizer` a posteriori to the returned
        manifest if you need it.

        Args:
            schema_name: Schema name to introspect (source-specific).

        Returns:
            tuple[Schema, IngestionModel]: Complete schema and ingestion model.
        """
        bundle = self.infer_artifacts(schema_name=schema_name)
        return bundle.schema, bundle.ingestion_model

__init__(conn, target_db_flavor=DBType.ARANGO, fuzzy_threshold=0.8)

Initialize the PostgreSQL inference manager.

Parameters:

Name Type Description Default
conn PostgresConnection

PostgresConnection instance

required
target_db_flavor DBType

Target database flavor (used for type mapping during inference; does NOT trigger sanitization).

ARANGO
fuzzy_threshold float

Similarity threshold for fuzzy matching (0.0 to 1.0, default 0.8)

0.8
Source code in graflo/hq/sql_inferencer.py
def __init__(
    self,
    conn: PostgresConnection,
    target_db_flavor: DBType = DBType.ARANGO,
    fuzzy_threshold: float = 0.8,
):
    """Set up inference over *conn* for the given target flavor.

    Args:
        conn: PostgresConnection instance.
        target_db_flavor: Flavor used for type mapping during inference;
            it never triggers sanitization.
        fuzzy_threshold: Similarity threshold for fuzzy matching
            (0.0 to 1.0, default 0.8)
    """
    self.conn = conn
    self.target_db_flavor = target_db_flavor
    self.mapper = PostgresResourceMapper(fuzzy_threshold=fuzzy_threshold)
    self.inferencer = PostgresSchemaInferencer(
        db_flavor=target_db_flavor, conn=conn
    )

create_resources(introspection_result, schema)

Create Resources from PostgreSQL introspection result.

Parameters:

Name Type Description Default
introspection_result

SchemaIntrospectionResult from PostgreSQL

required
schema Schema

Existing Schema object

required

Returns:

Type Description
list[Resource]

list[Resource]: List of Resources for PostgreSQL tables

Source code in graflo/hq/sql_inferencer.py
def create_resources(
    self, introspection_result, schema: Schema
) -> list["Resource"]:
    """Create Resources from a PostgreSQL introspection result.

    Args:
        introspection_result: SchemaIntrospectionResult from PostgreSQL.
        schema: Existing Schema object.

    Returns:
        list[Resource]: List of Resources for PostgreSQL tables.
    """
    core = schema.core_schema
    return self.mapper.create_resources_from_tables(
        introspection_result,
        core.vertex_config,
        core.edge_config,
        fuzzy_threshold=self.mapper.fuzzy_threshold,
    )

create_resources_for_schema(schema, schema_name=None)

Create Resources from source for an existing schema.

Parameters:

Name Type Description Default
schema Schema

Existing Schema object

required
schema_name str | None

Schema name to introspect (source-specific)

None

Returns:

Type Description
list[Resource]

list[Resource]: List of Resources for the source

Source code in graflo/hq/sql_inferencer.py
def create_resources_for_schema(
    self, schema: Schema, schema_name: str | None = None
) -> list["Resource"]:
    """Introspect the source and build Resources for an existing schema.

    Args:
        schema: Existing Schema object.
        schema_name: Schema name to introspect (source-specific).

    Returns:
        list[Resource]: List of Resources for the source.
    """
    return self.create_resources(
        self.introspect(schema_name=schema_name), schema
    )

infer_artifacts(schema_name=None)

Infer schema/resources from a single introspection pass.

Returns:

Name Type Description
SQLInferenceArtifacts SQLInferenceArtifacts

introspection + schema + ingestion model tuple. The output is NOT sanitized for the target DB flavor.

Source code in graflo/hq/sql_inferencer.py
def infer_artifacts(self, schema_name: str | None = None) -> SQLInferenceArtifacts:
    """Derive all inference artifacts from a single introspection pass.

    Returns:
        SQLInferenceArtifacts: introspection + schema + ingestion model tuple.
            The output is NOT sanitized for the target DB flavor.
    """
    result = self.introspect(schema_name=schema_name)
    inferred = self.infer_schema(result, schema_name=schema_name)
    model = IngestionModel(resources=self.create_resources(result, inferred))
    model.finish_init(inferred.core_schema)
    return SQLInferenceArtifacts(
        introspection_result=result,
        schema=inferred,
        ingestion_model=model,
    )

infer_complete_schema(schema_name=None)

Infer a complete schema and ingestion model from source.

This is a convenience method that (1) introspects the source schema, (2) infers the graflo Schema, and (3) creates resources and finishes ingestion initialization.

No sanitization is performed; apply :class:graflo.hq.sanitizer.Sanitizer a posteriori to the returned manifest if you need it.

Parameters:

Name Type Description Default
schema_name str | None

Schema name to introspect (source-specific)

None

Returns:

Type Description
tuple[Schema, IngestionModel]

tuple[Schema, IngestionModel]: Complete schema and ingestion model

Source code in graflo/hq/sql_inferencer.py
def infer_complete_schema(
    self, schema_name: str | None = None
) -> tuple[Schema, IngestionModel]:
    """Infer a complete schema plus ingestion model from the source.

    Convenience wrapper that (1) introspects the source schema,
    (2) infers the graflo Schema, and (3) creates resources and finishes
    ingestion initialization. No sanitization is performed; apply
    :class:`graflo.hq.sanitizer.Sanitizer` a posteriori to the returned
    manifest if you need it.

    Args:
        schema_name: Schema name to introspect (source-specific).

    Returns:
        tuple[Schema, IngestionModel]: Complete schema and ingestion model.
    """
    bundle = self.infer_artifacts(schema_name=schema_name)
    return bundle.schema, bundle.ingestion_model

infer_schema(introspection_result, schema_name=None)

Infer graflo Schema from PostgreSQL introspection result.

Parameters:

Name Type Description Default
introspection_result

SchemaIntrospectionResult from PostgreSQL

required
schema_name str | None

Schema name (optional, may be inferred from result)

None

Returns:

Name Type Description
Schema Schema

Inferred schema with vertices and edges

Source code in graflo/hq/sql_inferencer.py
def infer_schema(
    self, introspection_result, schema_name: str | None = None
) -> Schema:
    """Infer a graflo Schema from a PostgreSQL introspection result.

    Args:
        introspection_result: SchemaIntrospectionResult from PostgreSQL.
        schema_name: Schema name (optional, may be inferred from result).

    Returns:
        Schema: Inferred schema with vertices and edges.
    """
    return self.inferencer.infer_schema(
        introspection_result, schema_name=schema_name
    )

introspect(schema_name=None, include_raw_tables=False)

Introspect PostgreSQL schema.

Parameters:

Name Type Description Default
schema_name str | None

Schema name to introspect

None
include_raw_tables bool

Whether to build sampled per-column raw table metadata. Defaults to False for performance (binding/schema inference does not require it).

False

Returns:

Name Type Description
SchemaIntrospectionResult SchemaIntrospectionResult

PostgreSQL schema introspection result

Source code in graflo/hq/sql_inferencer.py
def introspect(
    self,
    schema_name: str | None = None,
    include_raw_tables: bool = False,
) -> SchemaIntrospectionResult:
    """Introspect the PostgreSQL schema.

    Args:
        schema_name: Schema name to introspect.
        include_raw_tables: Build sampled per-column raw table metadata.
            Off by default for performance; binding/schema inference does
            not require it.

    Returns:
        SchemaIntrospectionResult: PostgreSQL schema introspection result.
    """
    return self.conn.introspect_schema(
        schema_name=schema_name, include_raw_tables=include_raw_tables
    )

Sanitizer

DB-flavor-aware orchestrator for manifest sanitization.

The class encodes the per-flavor policy ("which evolution ops sanitize a manifest for db_flavor") and applies them in place. Callers that want a different sanitization recipe can either subclass and override :meth:build_ops or build ops directly via :mod:graflo.architecture.evolution.

Source code in graflo/hq/sanitizer.py
class Sanitizer:
    """DB-flavor-aware orchestrator for manifest sanitization.

    Encodes the per-flavor policy ("which evolution ops sanitize a manifest
    for *db_flavor*") and applies the ops in place. For a different recipe,
    subclass and override :meth:`build_ops`, or assemble ops directly from
    :mod:`graflo.architecture.evolution`.
    """

    def __init__(self, db_flavor: DBType):
        """Store the target DB flavor that drives sanitization policy."""
        self.db_flavor = db_flavor

    def build_ops(
        self,
        manifest: GraphManifest,
        *,
        reserved_words: Iterable[str] | None = None,
    ) -> list[ManifestOp]:
        """Return the ordered list of evolution ops that sanitize *manifest*.

        Today the list collapses to ``[SanitizeOp(db_flavor=...)]``; keeping
        a list leaves room for future per-flavor composition (flavor-specific
        identity-normalization variants, future rename-relation ops).
        """
        # Policy is currently a pure function of db_flavor; manifest unused.
        del manifest
        words = None if reserved_words is None else list(reserved_words)
        return [SanitizeOp(db_flavor=self.db_flavor, reserved_words=words)]

    def sanitize_manifest(self, manifest: GraphManifest) -> GraphManifest:
        """Mutate *manifest* in place per :meth:`build_ops` and return it.

        The same manifest object is returned so callers can chain or assert
        the in-place result is the original input.
        """
        # Nothing to sanitize without a graph schema.
        if manifest.graph_schema is None:
            return manifest

        ops = self.build_ops(manifest)
        apply_manifest_ops_inplace(manifest, ops)
        manifest.finish_init()
        return manifest

__init__(db_flavor)

Initialize the sanitizer for a given target DB flavor.

Source code in graflo/hq/sanitizer.py
def __init__(self, db_flavor: DBType):
    """Store the target DB flavor that drives sanitization policy."""
    self.db_flavor = db_flavor

build_ops(manifest, *, reserved_words=None)

Return the ordered list of evolution ops that sanitize manifest.

Today the list collapses to [SanitizeOp(db_flavor=...)]; exposing it as a list keeps the door open for future per-flavor composition (e.g. flavor-specific identity-normalization variants, future rename-relation ops).

Source code in graflo/hq/sanitizer.py
def build_ops(
    self,
    manifest: GraphManifest,
    *,
    reserved_words: Iterable[str] | None = None,
) -> list[ManifestOp]:
    """Return the ordered list of evolution ops that sanitize *manifest*.

    Today the list collapses to ``[SanitizeOp(db_flavor=...)]``; keeping a
    list leaves room for future per-flavor composition (flavor-specific
    identity-normalization variants, future rename-relation ops).
    """
    # Policy is currently a pure function of db_flavor; manifest unused.
    del manifest
    words = None if reserved_words is None else list(reserved_words)
    return [SanitizeOp(db_flavor=self.db_flavor, reserved_words=words)]

sanitize_manifest(manifest)

Mutate manifest in place per :meth:build_ops and return it.

Returns the same manifest object so callers can chain or simply assert that the in-place result is the original input.

Source code in graflo/hq/sanitizer.py
def sanitize_manifest(self, manifest: GraphManifest) -> GraphManifest:
    """Mutate *manifest* in place per :meth:`build_ops` and return it.

    The same manifest object is returned so callers can chain or assert
    the in-place result is the original input.
    """
    # Nothing to sanitize without a graph schema.
    if manifest.graph_schema is None:
        return manifest

    ops = self.build_ops(manifest)
    apply_manifest_ops_inplace(manifest, ops)
    manifest.finish_init()
    return manifest

SparqlAuth

Bases: BaseModel

Authentication payload for SPARQL endpoint access.

Source code in graflo/hq/connection_provider.py
class SparqlAuth(BaseModel):
    """Authentication payload for SPARQL endpoint access."""

    # Optional credentials; both None means unauthenticated access.
    # NOTE(review): presumably used for HTTP basic auth on the endpoint —
    # confirm against the SPARQL connection code.
    username: str | None = None
    password: str | None = None

SparqlGeneralizedConnConfig

Bases: BaseModel

Generalized runtime config variant for SPARQL endpoint connections.

Source code in graflo/hq/connection_provider.py
class SparqlGeneralizedConnConfig(BaseModel):
    """Generalized runtime config variant for SPARQL endpoint connections."""

    # Fixed literal tag; likely the discriminator for a tagged union of
    # generalized connection configs — TODO confirm where the union is declared.
    kind: Literal["sparql"] = "sparql"
    # Endpoint-specific connection settings.
    config: SparqlEndpointConfig

failure_sinks_from_ingestion_params(params)

Build file sinks from :class:~graflo.hq.ingestion_parameters.IngestionParams.

Source code in graflo/hq/doc_error_sink.py
def failure_sinks_from_ingestion_params(params: IngestionParams) -> list[DocErrorSink]:
    """Build file sinks from :class:`~graflo.hq.ingestion_parameters.IngestionParams`."""

    path = params.doc_error_sink_path
    if path is None:
        # No sink configured: document errors are not persisted.
        return []
    return [JsonlGzDocErrorSink(path)]