Skip to content

graflo.hq.caster

Data casting and ingestion system for graph databases.

This module provides functionality for casting and ingesting data into graph databases. It handles batch processing, file discovery, and database operations for both ArangoDB and Neo4j.

Key Components
  • Caster: Main class for data casting and ingestion
  • FileConnector: Connector matching for file discovery
  • Connectors: Collection of file connectors for different resources
Example

caster = Caster(schema=schema, ingestion_model=ingestion_model)
caster.ingest(target_db_config=db_config)

CastBatchResult

Bases: BaseModel

Outcome of casting a batch through a resource (possibly with skipped rows).

Source code in graflo/hq/caster.py
class CastBatchResult(BaseModel):
    """Outcome of casting a batch through a resource (possibly with skipped rows)."""

    # GraphContainer is not a pydantic type, hence arbitrary_types_allowed.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Graph built from the rows that cast successfully.
    graph: GraphContainer
    # Per-row failures collected for rows that raised during casting;
    # empty when the batch succeeds in fail-fast mode.
    failures: list[RowCastFailure] = Field(default_factory=list)

Caster

Main class for data casting and ingestion.

This class handles the process of casting data into graph structures and ingesting them into the database. It supports batch processing, parallel execution, and various data formats.

Attributes:

Name Type Description
schema

Schema configuration for the graph

ingestion_params

IngestionParams instance controlling ingestion behavior

Source code in graflo/hq/caster.py
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
class Caster:
    """Main class for data casting and ingestion.

    This class handles the process of casting data into graph structures and
    ingesting them into the database. It supports batch processing, parallel
    execution, and various data formats.

    Attributes:
        schema: Schema configuration for the graph
        ingestion_model: IngestionModel describing the resources used for casting
        ingestion_params: IngestionParams instance controlling ingestion behavior
    """

    def __init__(
        self,
        schema: Schema,
        ingestion_model: IngestionModel,
        ingestion_params: IngestionParams | None = None,
        **kwargs,
    ):
        """Initialize the caster with schema and configuration.

        Args:
            schema: Schema configuration for the graph
            ingestion_model: IngestionModel describing the resources used for casting
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, creates IngestionParams from kwargs or uses defaults
            **kwargs: Additional configuration options (for backward compatibility):
                - clear_data: Whether to clear existing data before ingestion
                - n_cores: Number of CPU cores/threads to use for parallel processing
                - max_items: Maximum number of items to process
                - batch_size: Size of batches for processing
                - dry: Whether to perform a dry run
        """
        if ingestion_params is None:
            ingestion_params = IngestionParams(**kwargs)
        self.ingestion_params = ingestion_params
        self.schema = schema
        self.ingestion_model = ingestion_model
        # Cumulative row-cast failure count across batches; updated only
        # under _row_error_io_lock so concurrent batches stay consistent.
        self._row_error_total: int = 0
        self._row_error_io_lock = asyncio.Lock()

    # ------------------------------------------------------------------
    # Casting
    # ------------------------------------------------------------------

    async def _persist_row_failures(self, failures: list[RowCastFailure]) -> None:
        """Record row cast failures and enforce the row-error budget.

        Appends each failure as a JSON line to ``row_error_dead_letter_path``
        when configured; otherwise logs each failure. The cumulative failure
        total is updated under the I/O lock.

        Args:
            failures: Failures collected for one batch; no-op when empty.

        Raises:
            RowErrorBudgetExceeded: If the cumulative failure count exceeds
                ``max_row_errors`` (when that limit is set).
        """
        if not failures:
            return
        params = self.ingestion_params
        path = params.row_error_dead_letter_path

        # Serialize both the dead-letter file appends and the counter update
        # in case several batches finish concurrently.
        async with self._row_error_io_lock:
            if path is not None:
                path.parent.mkdir(parents=True, exist_ok=True)
                with path.open("a", encoding="utf-8") as f:
                    for fail in failures:
                        f.write(fail.model_dump_json() + "\n")

            self._row_error_total += len(failures)
            if params.max_row_errors is not None:
                if self._row_error_total > params.max_row_errors:
                    # Budget exceeded: abort the ingestion with context.
                    raise RowErrorBudgetExceeded(
                        total_failures=self._row_error_total,
                        limit=params.max_row_errors,
                        dead_letter_path=path,
                    )

        # No dead-letter file configured: surface each failure in the log.
        if path is None:
            for fail in failures:
                logger.error(
                    "Row cast failure resource=%s row_index=%s %s: %s",
                    fail.resource_name,
                    fail.row_index,
                    fail.exception_type,
                    fail.message,
                    extra={"row_cast_failure": fail.model_dump(mode="json")},
                )

    async def cast_normal_resource(
        self, data, resource_name: str | None = None
    ) -> CastBatchResult:
        """Cast data into a graph container using a resource.

        Args:
            data: Iterable of documents to cast
            resource_name: Optional name of the resource to use

        Returns:
            CastBatchResult with graph and any per-row failures (empty when
            ``on_row_error`` is ``fail`` and the batch succeeds).
        """
        rr = self.ingestion_model.fetch_resource(resource_name)
        resolved_name = rr.name
        params = self.ingestion_params
        doc_list = list(data)

        # Bound the number of rows cast concurrently; each row runs the
        # (synchronous) resource callable in a worker thread.
        semaphore = asyncio.Semaphore(params.n_cores)

        async def process_doc(doc: dict[str, Any]) -> Any:
            async with semaphore:
                return await asyncio.to_thread(rr, doc)

        if params.on_row_error == "fail":
            # Fail-fast mode: the first row exception propagates out of gather.
            coros = [process_doc(doc) for doc in doc_list]
            docs = await asyncio.gather(*coros)
            graph = GraphContainer.from_docs_list(docs)
            return CastBatchResult(graph=graph, failures=[])

        # Tolerant mode: capture per-row exceptions instead of raising.
        raw = await asyncio.gather(
            *[process_doc(doc) for doc in doc_list],
            return_exceptions=True,
        )
        docs: list[Any] = []
        failures: list[RowCastFailure] = []
        for i, item in enumerate(raw):
            doc_raw = doc_list[i]
            # Non-dict rows are previewed via repr so the failure record
            # stays serializable.
            doc = doc_raw if isinstance(doc_raw, dict) else {"_row": repr(doc_raw)}

            # Cancellation and interpreter-exit signals are never treated
            # as row failures; re-raise them immediately.
            if isinstance(item, asyncio.CancelledError):
                raise item
            if isinstance(item, (KeyboardInterrupt, SystemExit)):
                raise item
            if isinstance(item, BaseException):
                failures.append(
                    _row_failure_from_exception(
                        resource_name=resolved_name,
                        row_index=i,
                        doc=doc,
                        exc=item,
                        doc_keys=params.row_error_doc_keys,
                        doc_preview_max_bytes=params.row_error_doc_preview_max_bytes,
                    )
                )
                continue
            docs.append(item)

        # May raise RowErrorBudgetExceeded when the configured limit is hit.
        await self._persist_row_failures(failures)

        graph = GraphContainer.from_docs_list(docs)
        return CastBatchResult(graph=graph, failures=failures)

    # ------------------------------------------------------------------
    # Processing pipeline
    # ------------------------------------------------------------------

    async def process_batch(
        self,
        batch,
        resource_name: str | None,
        conn_conf: None | DBConfig = None,
    ):
        """Process a batch of data.

        Casts the batch and, when a connection configuration is given,
        writes the resulting graph to the database.

        Args:
            batch: Batch of data to process
            resource_name: Optional name of the resource to use
            conn_conf: Optional database connection configuration
        """
        result = await self.cast_normal_resource(batch, resource_name=resource_name)
        if result.failures:
            # Summarize once per batch; per-row details were already persisted.
            logger.warning(
                "Resource %r batch had %d row cast failure(s); first: %s: %s",
                result.failures[0].resource_name,
                len(result.failures),
                result.failures[0].exception_type,
                result.failures[0].message,
            )
        gc = result.graph

        if conn_conf is not None:
            writer = self._make_db_writer()
            await writer.write(gc=gc, conn_conf=conn_conf, resource_name=resource_name)

    async def process_data_source(
        self,
        data_source: AbstractDataSource,
        resource_name: str | None = None,
        conn_conf: None | DBConfig = None,
    ):
        """Process a data source.

        Args:
            data_source: Data source to process
            resource_name: Optional name of the resource (overrides data_source.resource_name)
            conn_conf: Optional database connection configuration
        """
        actual_resource_name = resource_name or data_source.resource_name

        # A per-source limit takes precedence over the global max_items.
        # NOTE(review): _pattern_limit is a private attr read via getattr --
        # presumably set by pattern-based file discovery; confirm.
        limit = getattr(data_source, "_pattern_limit", None)
        if limit is None:
            limit = self.ingestion_params.max_items

        for batch in data_source.iter_batches(
            batch_size=self.ingestion_params.batch_size, limit=limit
        ):
            await self.process_batch(
                batch, resource_name=actual_resource_name, conn_conf=conn_conf
            )

    async def process_resource(
        self,
        resource_instance: (
            Path | str | list[dict] | list[list] | pd.DataFrame | dict[str, Any]
        ),
        resource_name: str | None,
        conn_conf: None | DBConfig = None,
        **kwargs,
    ):
        """Process a resource instance from configuration or direct data.

        This method accepts either:
        1. A configuration dictionary with 'source_type' and data source parameters
        2. A file path (Path or str) - creates FileDataSource
        3. In-memory data (list[dict], list[list], or pd.DataFrame) - creates InMemoryDataSource

        Args:
            resource_instance: Configuration dict, file path, or in-memory data.
                Configuration dict format:
                - {"source_type": "file", "path": "data.json"}
                - {"source_type": "api", "config": {"url": "https://..."}}
                - {"source_type": "sql", "config": {"connection_string": "...", "query": "..."}}
                - {"source_type": "in_memory", "data": [...]}
            resource_name: Optional name of the resource
            conn_conf: Optional database connection configuration
            **kwargs: Additional arguments passed to data source creation
                (e.g., columns for list[list], encoding for files)
        """
        if isinstance(resource_instance, dict):
            # Explicit kwargs override keys already present in the config dict.
            config = resource_instance.copy()
            config.update(kwargs)
            data_source = DataSourceFactory.create_data_source_from_config(config)
        elif isinstance(resource_instance, (Path, str)):
            file_type: str | ChunkerType | None = cast(
                str | ChunkerType | None, kwargs.get("file_type", None)
            )
            encoding: EncodingType = cast(
                EncodingType, kwargs.get("encoding", EncodingType.UTF_8)
            )
            sep: str | None = cast(str | None, kwargs.get("sep", None))
            data_source = DataSourceFactory.create_file_data_source(
                path=resource_instance,
                file_type=file_type,
                encoding=encoding,
                sep=sep,
            )
        else:
            # Remaining accepted shapes are in-memory data.
            columns: list[str] | None = cast(
                list[str] | None, kwargs.get("columns", None)
            )
            data_source = DataSourceFactory.create_in_memory_data_source(
                data=resource_instance,
                columns=columns,
            )

        data_source.resource_name = resource_name

        await self.process_data_source(
            data_source=data_source,
            resource_name=resource_name,
            conn_conf=conn_conf,
        )

    # ------------------------------------------------------------------
    # Queue-based processing
    # ------------------------------------------------------------------

    async def process_with_queue(
        self, tasks: asyncio.Queue, conn_conf: DBConfig | None = None
    ):
        """Process tasks from a queue.

        Runs until a sentinel (None) is received. Tasks may be
        ``(filepath, resource_name)`` tuples or AbstractDataSource instances;
        anything else is acknowledged and skipped.

        Args:
            tasks: Async queue of tasks to process
            conn_conf: Optional database connection configuration
        """
        # None is the shutdown sentinel (one is enqueued per worker).
        SENTINEL = None

        while True:
            try:
                task = await tasks.get()

                if task is SENTINEL:
                    tasks.task_done()
                    break

                if isinstance(task, tuple) and len(task) == 2:
                    filepath, resource_name = task
                    await self.process_resource(
                        resource_instance=filepath,
                        resource_name=resource_name,
                        conn_conf=conn_conf,
                    )
                elif isinstance(task, AbstractDataSource):
                    await self.process_data_source(
                        data_source=task, conn_conf=conn_conf
                    )
                tasks.task_done()
            except Exception as e:
                # NOTE(review): the break stops this worker entirely after a
                # single failed task (fail-fast per worker) -- confirm intended.
                logger.error(f"Error processing task: {e}", exc_info=True)
                tasks.task_done()
                break

    # ------------------------------------------------------------------
    # Normalization utility
    # ------------------------------------------------------------------

    @staticmethod
    def normalize_resource(
        data: pd.DataFrame | list[list] | list[dict], columns: list[str] | None = None
    ) -> list[dict]:
        """Normalize resource data into a list of dictionaries.

        Args:
            data: Data to normalize (DataFrame, list of lists, or list of dicts)
            columns: Optional column names for list data

        Returns:
            list[dict]: Normalized data as list of dictionaries

        Raises:
            ValueError: If columns is not provided for list data
        """
        if isinstance(data, pd.DataFrame):
            # DataFrames carry their own labels; a passed-in `columns`
            # argument is ignored in this branch.
            columns = data.columns.tolist()
            _data = data.values.tolist()
        elif data and isinstance(data[0], list):
            _data = cast(list[list], data)
            if columns is None:
                raise ValueError("columns should be set")
        else:
            # Already a list of dicts (or empty): pass through unchanged.
            return cast(list[dict], data)
        rows_dressed = [{k: v for k, v in zip(columns, item)} for item in _data]
        return rows_dressed

    async def ingest_data_sources(
        self,
        data_source_registry: DataSourceRegistry,
        conn_conf: DBConfig,
        ingestion_params: IngestionParams | None = None,
    ):
        """Ingest data from data sources in a registry.

        Note: Schema definition should be handled separately via GraphEngine.define_schema()
        before calling this method.

        Args:
            data_source_registry: Registry containing data sources mapped to resources
            conn_conf: Database connection configuration
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, uses default IngestionParams()
        """
        if ingestion_params is None:
            ingestion_params = IngestionParams()

        self.ingestion_params = ingestion_params
        # Fresh run: reset the cumulative row-error counter.
        self._row_error_total = 0
        init_only = ingestion_params.init_only

        if init_only:
            # init-only runs terminate the whole process here by design.
            logger.info("ingest execution bound to init")
            sys.exit(0)

        tasks: list[AbstractDataSource] = []
        for resource_name in self.ingestion_model._resources.keys():
            data_sources = data_source_registry.get_data_sources(resource_name)
            if data_sources:
                logger.info(
                    f"For resource name {resource_name} {len(data_sources)} data sources were found"
                )
                tasks.extend(data_sources)

        with Timer() as klepsidra:
            if self.ingestion_params.n_cores > 1:
                queue_tasks: asyncio.Queue = asyncio.Queue()
                for item in tasks:
                    await queue_tasks.put(item)

                # One None sentinel per worker so every worker shuts down.
                for _ in range(self.ingestion_params.n_cores):
                    await queue_tasks.put(None)

                worker_tasks = [
                    self.process_with_queue(queue_tasks, conn_conf=conn_conf)
                    for _ in range(self.ingestion_params.n_cores)
                ]

                await asyncio.gather(*worker_tasks)
            else:
                # Single-core path: process sources sequentially, no queue.
                for data_source in tasks:
                    await self.process_data_source(
                        data_source=data_source, conn_conf=conn_conf
                    )
        logger.info(f"Processing took {klepsidra.elapsed:.1f} sec")

    def ingest(
        self,
        target_db_config: DBConfig,
        bindings: Bindings | None = None,
        ingestion_params: IngestionParams | None = None,
        connection_provider: ConnectionProvider | None = None,
    ):
        """Ingest data into the graph database.

        This is the main ingestion method that takes:
        - Schema: Graph structure (already set in Caster)
        - OutputConfig: Target graph database configuration
        - Bindings: Mapping of resources to physical data sources
        - IngestionParams: Parameters controlling the ingestion process

        Args:
            target_db_config: Target database connection configuration (for writing graph)
            bindings: Bindings instance mapping resources to data sources
                If None, defaults to empty Bindings()
            ingestion_params: IngestionParams instance with ingestion configuration.
                If None, uses default IngestionParams()
            connection_provider: ConnectionProvider used when building the data
                source registry. If None, an EmptyConnectionProvider is used
        """
        bindings = bindings or Bindings()
        ingestion_params = ingestion_params or IngestionParams()

        # Finalize schema and model for the target DB flavor before building
        # the data source registry.
        db_flavor = target_db_config.connection_type
        self.schema.db_profile.db_flavor = db_flavor
        self.schema.finish_init()
        self.ingestion_model.finish_init(
            self.schema.core_schema,
            strict_references=ingestion_params.strict_references,
            dynamic_edge_feedback=ingestion_params.dynamic_edges,
        )

        registry = RegistryBuilder(self.schema, self.ingestion_model).build(
            bindings,
            ingestion_params,
            connection_provider=connection_provider or EmptyConnectionProvider(),
            strict=ingestion_params.strict_registry,
        )

        # Synchronous entry point: drive the async pipeline to completion.
        asyncio.run(
            self.ingest_data_sources(
                data_source_registry=registry,
                conn_conf=target_db_config,
                ingestion_params=ingestion_params,
            )
        )

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _make_db_writer(self) -> DBWriter:
        """Create a :class:`DBWriter` from the current ingestion params."""
        # Fall back to n_cores when no explicit DB concurrency cap is set.
        max_concurrent = (
            self.ingestion_params.max_concurrent_db_ops
            if self.ingestion_params.max_concurrent_db_ops is not None
            else self.ingestion_params.n_cores
        )
        return DBWriter(
            schema=self.schema,
            ingestion_model=self.ingestion_model,
            dry=self.ingestion_params.dry,
            max_concurrent=max_concurrent,
            dynamic_edges=self.ingestion_params.dynamic_edges,
        )

__init__(schema, ingestion_model, ingestion_params=None, **kwargs)

Initialize the caster with schema and configuration.

Parameters:

Name Type Description Default
schema Schema

Schema configuration for the graph

required
ingestion_model IngestionModel

IngestionModel describing the resources used for casting

required
ingestion_params IngestionParams | None

IngestionParams instance with ingestion configuration. If None, creates IngestionParams from kwargs or uses defaults

None
**kwargs

Additional configuration options (for backward compatibility): - clear_data: Whether to clear existing data before ingestion - n_cores: Number of CPU cores/threads to use for parallel processing - max_items: Maximum number of items to process - batch_size: Size of batches for processing - dry: Whether to perform a dry run

{}
Source code in graflo/hq/caster.py
def __init__(
    self,
    schema: Schema,
    ingestion_model: IngestionModel,
    ingestion_params: IngestionParams | None = None,
    **kwargs,
):
    """Set up the caster from a schema, an ingestion model and parameters.

    Args:
        schema: Schema configuration for the graph
        ingestion_model: IngestionModel describing the resources used for casting
        ingestion_params: IngestionParams instance with ingestion configuration.
            When omitted, one is built from ``kwargs`` (backward-compatible
            options: clear_data, n_cores, max_items, batch_size, dry).
        **kwargs: Legacy keyword options forwarded to IngestionParams when
            ``ingestion_params`` is None.
    """
    self.schema = schema
    self.ingestion_model = ingestion_model
    self.ingestion_params = (
        IngestionParams(**kwargs) if ingestion_params is None else ingestion_params
    )
    # Running count of row-level cast failures, guarded by the lock below.
    self._row_error_total = 0
    self._row_error_io_lock = asyncio.Lock()

cast_normal_resource(data, resource_name=None) async

Cast data into a graph container using a resource.

Parameters:

Name Type Description Default
data

Iterable of documents to cast

required
resource_name str | None

Optional name of the resource to use

None

Returns:

Type Description
CastBatchResult

CastBatchResult with graph and any per-row failures (empty when

CastBatchResult

on_row_error is fail and the batch succeeds).

Source code in graflo/hq/caster.py
async def cast_normal_resource(
    self, data, resource_name: str | None = None
) -> CastBatchResult:
    """Cast data into a graph container using a resource.

    Args:
        data: Iterable of documents to cast
        resource_name: Optional name of the resource to use

    Returns:
        CastBatchResult with graph and any per-row failures (empty when
        ``on_row_error`` is ``fail`` and the batch succeeds).
    """
    rr = self.ingestion_model.fetch_resource(resource_name)
    resolved_name = rr.name
    params = self.ingestion_params
    doc_list = list(data)

    # Bound the number of rows cast concurrently; each row runs the
    # (synchronous) resource callable in a worker thread.
    semaphore = asyncio.Semaphore(params.n_cores)

    async def process_doc(doc: dict[str, Any]) -> Any:
        async with semaphore:
            return await asyncio.to_thread(rr, doc)

    if params.on_row_error == "fail":
        # Fail-fast mode: the first row exception propagates out of gather.
        coros = [process_doc(doc) for doc in doc_list]
        docs = await asyncio.gather(*coros)
        graph = GraphContainer.from_docs_list(docs)
        return CastBatchResult(graph=graph, failures=[])

    # Tolerant mode: capture per-row exceptions instead of raising.
    raw = await asyncio.gather(
        *[process_doc(doc) for doc in doc_list],
        return_exceptions=True,
    )
    docs: list[Any] = []
    failures: list[RowCastFailure] = []
    for i, item in enumerate(raw):
        doc_raw = doc_list[i]
        # Non-dict rows are previewed via repr so the failure record
        # stays serializable.
        doc = doc_raw if isinstance(doc_raw, dict) else {"_row": repr(doc_raw)}

        # Cancellation and interpreter-exit signals are never treated as
        # row failures; re-raise them immediately.
        if isinstance(item, asyncio.CancelledError):
            raise item
        if isinstance(item, (KeyboardInterrupt, SystemExit)):
            raise item
        if isinstance(item, BaseException):
            failures.append(
                _row_failure_from_exception(
                    resource_name=resolved_name,
                    row_index=i,
                    doc=doc,
                    exc=item,
                    doc_keys=params.row_error_doc_keys,
                    doc_preview_max_bytes=params.row_error_doc_preview_max_bytes,
                )
            )
            continue
        docs.append(item)

    # May raise RowErrorBudgetExceeded when the configured limit is hit.
    await self._persist_row_failures(failures)

    graph = GraphContainer.from_docs_list(docs)
    return CastBatchResult(graph=graph, failures=failures)

ingest(target_db_config, bindings=None, ingestion_params=None, connection_provider=None)

Ingest data into the graph database.

This is the main ingestion method that takes: - Schema: Graph structure (already set in Caster) - OutputConfig: Target graph database configuration - Bindings: Mapping of resources to physical data sources - IngestionParams: Parameters controlling the ingestion process

Parameters:

Name Type Description Default
target_db_config DBConfig

Target database connection configuration (for writing graph)

required
bindings Bindings | None

Bindings instance mapping resources to data sources If None, defaults to empty Bindings()

None
ingestion_params IngestionParams | None

IngestionParams instance with ingestion configuration. If None, uses default IngestionParams()

None
connection_provider ConnectionProvider | None

ConnectionProvider used when building the data source registry. If None, an EmptyConnectionProvider is used

None
Source code in graflo/hq/caster.py
def ingest(
    self,
    target_db_config: DBConfig,
    bindings: Bindings | None = None,
    ingestion_params: IngestionParams | None = None,
    connection_provider: ConnectionProvider | None = None,
):
    """Ingest data into the graph database.

    This is the main ingestion method that takes:
    - Schema: Graph structure (already set in Caster)
    - OutputConfig: Target graph database configuration
    - Bindings: Mapping of resources to physical data sources
    - IngestionParams: Parameters controlling the ingestion process

    Args:
        target_db_config: Target database connection configuration (for writing graph)
        bindings: Bindings instance mapping resources to data sources
            If None, defaults to empty Bindings()
        ingestion_params: IngestionParams instance with ingestion configuration.
            If None, uses default IngestionParams()
        connection_provider: ConnectionProvider used when building the data
            source registry. If None, an EmptyConnectionProvider is used
    """
    bindings = bindings or Bindings()
    ingestion_params = ingestion_params or IngestionParams()

    # Finalize schema and model for the target DB flavor before building
    # the data source registry.
    db_flavor = target_db_config.connection_type
    self.schema.db_profile.db_flavor = db_flavor
    self.schema.finish_init()
    self.ingestion_model.finish_init(
        self.schema.core_schema,
        strict_references=ingestion_params.strict_references,
        dynamic_edge_feedback=ingestion_params.dynamic_edges,
    )

    registry = RegistryBuilder(self.schema, self.ingestion_model).build(
        bindings,
        ingestion_params,
        connection_provider=connection_provider or EmptyConnectionProvider(),
        strict=ingestion_params.strict_registry,
    )

    # Synchronous entry point: drive the async pipeline to completion.
    asyncio.run(
        self.ingest_data_sources(
            data_source_registry=registry,
            conn_conf=target_db_config,
            ingestion_params=ingestion_params,
        )
    )

ingest_data_sources(data_source_registry, conn_conf, ingestion_params=None) async

Ingest data from data sources in a registry.

Note: Schema definition should be handled separately via GraphEngine.define_schema() before calling this method.

Parameters:

Name Type Description Default
data_source_registry DataSourceRegistry

Registry containing data sources mapped to resources

required
conn_conf DBConfig

Database connection configuration

required
ingestion_params IngestionParams | None

IngestionParams instance with ingestion configuration. If None, uses default IngestionParams()

None
Source code in graflo/hq/caster.py
async def ingest_data_sources(
    self,
    data_source_registry: DataSourceRegistry,
    conn_conf: DBConfig,
    ingestion_params: IngestionParams | None = None,
):
    """Ingest data from data sources in a registry.

    Note: Schema definition should be handled separately via GraphEngine.define_schema()
    before calling this method.

    Args:
        data_source_registry: Registry containing data sources mapped to resources
        conn_conf: Database connection configuration
        ingestion_params: IngestionParams instance with ingestion configuration.
            If None, uses default IngestionParams()
    """
    if ingestion_params is None:
        ingestion_params = IngestionParams()

    self.ingestion_params = ingestion_params
    # Fresh run: reset the cumulative row-error counter.
    self._row_error_total = 0
    init_only = ingestion_params.init_only

    if init_only:
        # init-only runs terminate the whole process here by design.
        logger.info("ingest execution bound to init")
        sys.exit(0)

    tasks: list[AbstractDataSource] = []
    for resource_name in self.ingestion_model._resources.keys():
        data_sources = data_source_registry.get_data_sources(resource_name)
        if data_sources:
            logger.info(
                f"For resource name {resource_name} {len(data_sources)} data sources were found"
            )
            tasks.extend(data_sources)

    with Timer() as klepsidra:
        if self.ingestion_params.n_cores > 1:
            queue_tasks: asyncio.Queue = asyncio.Queue()
            for item in tasks:
                await queue_tasks.put(item)

            # One None sentinel per worker so every worker shuts down.
            for _ in range(self.ingestion_params.n_cores):
                await queue_tasks.put(None)

            worker_tasks = [
                self.process_with_queue(queue_tasks, conn_conf=conn_conf)
                for _ in range(self.ingestion_params.n_cores)
            ]

            await asyncio.gather(*worker_tasks)
        else:
            # Single-core path: process sources sequentially, no queue.
            for data_source in tasks:
                await self.process_data_source(
                    data_source=data_source, conn_conf=conn_conf
                )
    logger.info(f"Processing took {klepsidra.elapsed:.1f} sec")

normalize_resource(data, columns=None) staticmethod

Normalize resource data into a list of dictionaries.

Parameters:

Name Type Description Default
data DataFrame | list[list] | list[dict]

Data to normalize (DataFrame, list of lists, or list of dicts)

required
columns list[str] | None

Optional column names for list data

None

Returns:

Type Description
list[dict]

list[dict]: Normalized data as list of dictionaries

Raises:

Type Description
ValueError

If columns is not provided for list data

Source code in graflo/hq/caster.py
@staticmethod
def normalize_resource(
    data: pd.DataFrame | list[list] | list[dict], columns: list[str] | None = None
) -> list[dict]:
    """Normalize resource data into a list of dictionaries.

    Args:
        data: Data to normalize (DataFrame, list of lists, or list of dicts)
        columns: Optional column names for list data

    Returns:
        list[dict]: Normalized data as list of dictionaries

    Raises:
        ValueError: If columns is not provided for list data
    """
    if isinstance(data, pd.DataFrame):
        columns = data.columns.tolist()
        _data = data.values.tolist()
    elif data and isinstance(data[0], list):
        _data = cast(list[list], data)
        if columns is None:
            raise ValueError("columns should be set")
    else:
        return cast(list[dict], data)
    rows_dressed = [{k: v for k, v in zip(columns, item)} for item in _data]
    return rows_dressed

process_batch(batch, resource_name, conn_conf=None) async

Process a batch of data.

Parameters:

Name Type Description Default
batch

Batch of data to process

required
resource_name str | None

Optional name of the resource to use

required
conn_conf None | DBConfig

Optional database connection configuration

None
Source code in graflo/hq/caster.py
async def process_batch(
    self,
    batch,
    resource_name: str | None,
    conn_conf: None | DBConfig = None,
):
    """Cast a batch of rows into a graph and optionally persist it.

    Args:
        batch: Batch of data to process
        resource_name: Optional name of the resource to use
        conn_conf: Optional database connection configuration
    """
    result = await self.cast_normal_resource(batch, resource_name=resource_name)

    failures = result.failures
    if failures:
        # Summarize skipped rows: total count plus details of the first failure.
        first = failures[0]
        logger.warning(
            "Resource %r batch had %d row cast failure(s); first: %s: %s",
            first.resource_name,
            len(failures),
            first.exception_type,
            first.message,
        )

    # No connection config means a dry cast: nothing is written to the database.
    if conn_conf is None:
        return
    writer = self._make_db_writer()
    await writer.write(
        gc=result.graph, conn_conf=conn_conf, resource_name=resource_name
    )

process_data_source(data_source, resource_name=None, conn_conf=None) async

Process a data source.

Parameters:

Name Type Description Default
data_source AbstractDataSource

Data source to process

required
resource_name str | None

Optional name of the resource (overrides data_source.resource_name)

None
conn_conf None | DBConfig

Optional database connection configuration

None
Source code in graflo/hq/caster.py
async def process_data_source(
    self,
    data_source: AbstractDataSource,
    resource_name: str | None = None,
    conn_conf: None | DBConfig = None,
):
    """Stream a data source through the casting pipeline batch by batch.

    Args:
        data_source: Data source to process
        resource_name: Optional name of the resource (overrides data_source.resource_name)
        conn_conf: Optional database connection configuration
    """
    # An explicit resource_name wins; otherwise use the source's own name.
    effective_name = resource_name or data_source.resource_name

    # A per-pattern limit attached to the source takes precedence over the
    # global max_items setting.
    item_limit = getattr(data_source, "_pattern_limit", None)
    if item_limit is None:
        item_limit = self.ingestion_params.max_items

    batches = data_source.iter_batches(
        batch_size=self.ingestion_params.batch_size, limit=item_limit
    )
    for chunk in batches:
        await self.process_batch(
            chunk, resource_name=effective_name, conn_conf=conn_conf
        )

process_resource(resource_instance, resource_name, conn_conf=None, **kwargs) async

Process a resource instance from configuration or direct data.

This method accepts one of: 1. A configuration dictionary with 'source_type' and data source parameters 2. A file path (Path or str) - creates FileDataSource 3. In-memory data (list[dict], list[list], or pd.DataFrame) - creates InMemoryDataSource

Parameters:

Name Type Description Default
resource_instance Path | str | list[dict] | list[list] | DataFrame | dict[str, Any]

Configuration dict, file path, or in-memory data. Configuration dict format: - {"source_type": "file", "path": "data.json"} - {"source_type": "api", "config": {"url": "https://..."}} - {"source_type": "sql", "config": {"connection_string": "...", "query": "..."}} - {"source_type": "in_memory", "data": [...]}

required
resource_name str | None

Optional name of the resource

required
conn_conf None | DBConfig

Optional database connection configuration

None
**kwargs

Additional arguments passed to data source creation (e.g., columns for list[list], encoding for files)

{}
Source code in graflo/hq/caster.py
async def process_resource(
    self,
    resource_instance: (
        Path | str | list[dict] | list[list] | pd.DataFrame | dict[str, Any]
    ),
    resource_name: str | None,
    conn_conf: None | DBConfig = None,
    **kwargs,
):
    """Process a resource instance from configuration or direct data.

    Accepts one of:
    1. A configuration dictionary with 'source_type' and data source parameters
    2. A file path (Path or str) - creates FileDataSource
    3. In-memory data (list[dict], list[list], or pd.DataFrame) - creates InMemoryDataSource

    Args:
        resource_instance: Configuration dict, file path, or in-memory data.
            Configuration dict format:
            - {"source_type": "file", "path": "data.json"}
            - {"source_type": "api", "config": {"url": "https://..."}}
            - {"source_type": "sql", "config": {"connection_string": "...", "query": "..."}}
            - {"source_type": "in_memory", "data": [...]}
        resource_name: Optional name of the resource
        conn_conf: Optional database connection configuration
        **kwargs: Additional arguments passed to data source creation
            (e.g., columns for list[list], encoding for files)
    """
    if isinstance(resource_instance, dict):
        # Config dict: merge kwargs on top and let the factory dispatch on source_type.
        merged = dict(resource_instance)
        merged.update(kwargs)
        data_source = DataSourceFactory.create_data_source_from_config(merged)
    elif isinstance(resource_instance, (Path, str)):
        # Path-like input: build a file data source, honoring optional overrides.
        data_source = DataSourceFactory.create_file_data_source(
            path=resource_instance,
            file_type=cast(
                str | ChunkerType | None, kwargs.get("file_type", None)
            ),
            encoding=cast(
                EncodingType, kwargs.get("encoding", EncodingType.UTF_8)
            ),
            sep=cast(str | None, kwargs.get("sep", None)),
        )
    else:
        # Anything else is in-memory data (list[dict], list[list], DataFrame).
        data_source = DataSourceFactory.create_in_memory_data_source(
            data=resource_instance,
            columns=cast(list[str] | None, kwargs.get("columns", None)),
        )

    data_source.resource_name = resource_name

    await self.process_data_source(
        data_source=data_source,
        resource_name=resource_name,
        conn_conf=conn_conf,
    )

process_with_queue(tasks, conn_conf=None) async

Process tasks from a queue.

Parameters:

Name Type Description Default
tasks Queue

Async queue of tasks to process

required
conn_conf DBConfig | None

Optional database connection configuration

None
Source code in graflo/hq/caster.py
async def process_with_queue(
    self, tasks: asyncio.Queue, conn_conf: DBConfig | None = None
):
    """Worker loop: drain tasks from an async queue until a sentinel arrives.

    Args:
        tasks: Async queue of tasks to process
        conn_conf: Optional database connection configuration
    """
    SENTINEL = None

    while True:
        try:
            item = await tasks.get()

            # A sentinel value tells this worker to shut down.
            if item is SENTINEL:
                tasks.task_done()
                return

            if isinstance(item, tuple) and len(item) == 2:
                # Legacy (filepath, resource_name) pair.
                filepath, resource_name = item
                await self.process_resource(
                    resource_instance=filepath,
                    resource_name=resource_name,
                    conn_conf=conn_conf,
                )
            elif isinstance(item, AbstractDataSource):
                await self.process_data_source(
                    data_source=item, conn_conf=conn_conf
                )
            tasks.task_done()
        except Exception as e:
            # NOTE(review): a failure terminates this worker entirely rather
            # than continuing with remaining queued tasks — confirm intended.
            logger.error(f"Error processing task: {e}", exc_info=True)
            tasks.task_done()
            return

IngestionParams

Bases: BaseModel

Parameters for controlling the ingestion process.

Attributes:

Name Type Description
clear_data bool

If True, remove all existing graph data before ingestion without changing the schema.

n_cores int

Number of CPU cores/threads to use for parallel processing

max_items int | None

Maximum number of items to process per resource (applies to all data sources)

batch_size int

Size of batches for processing

dry bool

Whether to perform a dry run (no database changes)

init_only bool

Whether to only initialize the database without ingestion

limit_files int | None

Optional limit on number of files to process

max_concurrent_db_ops int | None

Maximum number of concurrent database operations (for vertices/edges). If None, uses n_cores. Set to 1 to prevent deadlocks in databases that don't handle concurrent transactions well (e.g., Neo4j). Database-independent setting.

datetime_after str | None

Inclusive lower bound for datetime filtering (ISO format). Rows with date_column >= datetime_after are included. Used with SQL/table sources.

datetime_before str | None

Exclusive upper bound for datetime filtering (ISO format). Rows with date_column < datetime_before are included. Range is [datetime_after, datetime_before).

datetime_column str | None

Default column name for datetime filtering when the connector does not specify date_field. Per-table override: set date_field on TableConnector (or FileConnector).

strict_references bool

If True, fail fast during model/resource initialization when named references cannot be resolved (for example, a transform.call.use value that does not exist in ingestion_model.transforms). If False, unresolved references may be tolerated by legacy paths.

strict_registry bool

If True, fail registry build when resources cannot be wired to concrete sources/connectors (missing connector/type/mismatch/source build errors). If False, those issues are logged and skipped, allowing partial ingestion.

dynamic_edges bool

If True, feed back edge declarations discovered during resource runtime initialization (e.g. edge actors) into the shared schema edge config. Keep False to preserve pure logical-schema immutability.

on_row_error Literal['skip', 'fail']

skip continues the batch on per-row cast errors (default); fail fails the batch on the first error (legacy behavior).

row_error_dead_letter_path Path | None

If set, append one JSON line per failed row (JSONL) for debugging.

max_row_errors int | None

If set, total failed rows across the ingest run must not exceed this value or :class:RowErrorBudgetExceeded is raised.

row_error_doc_preview_max_bytes int

Max UTF-8 size for serialized doc_preview.

row_error_doc_keys tuple[str, ...] | None

If set, only these keys from the source doc appear in doc_preview (recommended when documents may contain sensitive fields).

Source code in graflo/hq/caster.py
class IngestionParams(BaseModel):
    """Parameters for controlling the ingestion process.

    Attributes:
        clear_data: If True, remove all existing graph data before ingestion without
            changing the schema.
        n_cores: Number of CPU cores/threads to use for parallel processing
        max_items: Maximum number of items to process per resource (applies to all data sources)
        batch_size: Size of batches for processing
        dry: Whether to perform a dry run (no database changes)
        init_only: Whether to only initialize the database without ingestion
        limit_files: Optional limit on number of files to process
        max_concurrent_db_ops: Maximum number of concurrent database operations (for vertices/edges).
            If None, uses n_cores. Set to 1 to prevent deadlocks in databases that don't handle
            concurrent transactions well (e.g., Neo4j). Database-independent setting.
        datetime_after: Inclusive lower bound for datetime filtering (ISO format).
            Rows with date_column >= datetime_after are included. Used with SQL/table sources.
        datetime_before: Exclusive upper bound for datetime filtering (ISO format).
            Rows with date_column < datetime_before are included. Range is [datetime_after, datetime_before).
        datetime_column: Default column name for datetime filtering when the connector does not
            specify date_field. Per-table override: set date_field on TableConnector (or FileConnector).
        strict_references: If True, fail fast during model/resource initialization when
            named references cannot be resolved (for example, a
            ``transform.call.use`` value that does not exist in
            ``ingestion_model.transforms``). If False, unresolved references may be
            tolerated by legacy paths.
        strict_registry: If True, fail registry build when resources cannot be wired to
            concrete sources/connectors (missing connector/type/mismatch/source build
            errors). If False, those issues are logged and skipped, allowing partial
            ingestion.
        dynamic_edges: If True, feed back edge declarations discovered during resource
            runtime initialization (e.g. edge actors) into the shared schema edge
            config. Keep False to preserve pure logical-schema immutability.
        on_row_error: ``skip`` continues the batch on per-row cast errors (default);
            ``fail`` fails the batch on the first error (legacy behavior).
        row_error_dead_letter_path: If set, append one JSON line per failed row
            (JSONL) for debugging.
        max_row_errors: If set, total failed rows across the ingest run must not
            exceed this value or :class:`RowErrorBudgetExceeded` is raised.
        row_error_doc_preview_max_bytes: Max UTF-8 size for serialized ``doc_preview``.
        row_error_doc_keys: If set, only these keys from the source doc appear in
            ``doc_preview`` (recommended when documents may contain sensitive fields).
    """

    # Core execution controls.
    clear_data: bool = False
    n_cores: int = 1
    max_items: int | None = None
    batch_size: int = 10000
    dry: bool = False
    init_only: bool = False
    limit_files: int | None = None
    max_concurrent_db_ops: int | None = None
    # Optional datetime window for SQL/table sources: [datetime_after, datetime_before).
    datetime_after: str | None = None
    datetime_before: str | None = None
    datetime_column: str | None = None
    # Strict contract checks for major-release style validation workflows.
    strict_references: bool = True
    strict_registry: bool = True
    dynamic_edges: bool = False
    # Per-row error policy and dead-letter/budget settings.
    on_row_error: Literal["skip", "fail"] = "skip"
    row_error_dead_letter_path: Path | None = None
    max_row_errors: int | None = None
    row_error_doc_preview_max_bytes: int = 4096
    row_error_doc_keys: tuple[str, ...] | None = None

RowCastFailure

Bases: BaseModel

Structured record for a single row that failed during resource casting.

Source code in graflo/hq/caster.py
class RowCastFailure(BaseModel):
    """Structured record for a single row that failed during resource casting."""

    # Name of the resource whose casting produced the failure.
    resource_name: str
    # Index of the failing row (indexing origin/scope set by the producer — not visible here).
    row_index: int
    # Exception type identifier (presumably the exception class name — set by the caster).
    exception_type: str
    # Human-readable error message for the failure.
    message: str
    traceback: str = Field(
        default="",
        description="Formatted traceback, truncated to the configured max length.",
    )
    doc_preview: Any = Field(
        default=None,
        description="Subset or truncated JSON of the source document for debugging.",
    )

RowErrorBudgetExceeded

Bases: RuntimeError

Raised when total row cast failures exceed IngestionParams.max_row_errors.

Source code in graflo/hq/caster.py
class RowErrorBudgetExceeded(RuntimeError):
    """Raised when total row cast failures exceed ``IngestionParams.max_row_errors``."""

    def __init__(
        self,
        *,
        total_failures: int,
        limit: int,
        dead_letter_path: Path | None,
    ) -> None:
        self.total_failures = total_failures
        self.limit = limit
        self.dead_letter_path = dead_letter_path
        dl = str(dead_letter_path) if dead_letter_path else "(not configured)"
        super().__init__(
            f"Row error budget exceeded: {total_failures} total failures "
            f"(limit {limit}). Dead letter: {dl}"
        )