Skip to content

analysis

analysis

Analysis

High-level orchestration of analysis tasks for a Project.

This class wires calculators and minimizers, exposes a compact interface for parameters, constraints and results, and coordinates computations across the project's structures and experiments.

Source code in src/easydiffraction/analysis/analysis.py
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
class Analysis:
    """
    High-level orchestration of analysis tasks for a Project.

    This class wires calculators and minimizers, exposes a compact
    interface for parameters, constraints and results, and coordinates
    computations across the project's structures and experiments.
    """

    def __init__(self, project: object) -> None:
        """
        Create a new Analysis instance bound to a project.

        Parameters
        ----------
        project : object
            The project that owns models and experiments.
        """
        self.project = project
        self._aliases_type: str = AliasesFactory.default_tag()
        self.aliases = AliasesFactory.create(self._aliases_type)
        self._constraints_type: str = ConstraintsFactory.default_tag()
        self.constraints = ConstraintsFactory.create(self._constraints_type)
        self.constraints_handler = ConstraintsHandler.get()
        self._fit_mode_type: str = FitModeFactory.default_tag()
        self._fit_mode = FitModeFactory.create(self._fit_mode_type)
        self._joint_fit_experiments = JointFitExperiments()
        self.fitter = Fitter()
        self.fit_results = None
        # Per-experiment snapshots of fitted parameter values, keyed by
        # experiment name (populated during single-mode fitting).
        self._parameter_snapshots: dict[str, dict[str, dict]] = {}
        self._display = AnalysisDisplay(self)

    @property
    def display(self) -> AnalysisDisplay:
        """Display helper for parameter tables, CIF, and fit results."""
        return self._display

    def help(self) -> None:
        """Print a summary of analysis properties and methods."""
        console.paragraph("Help for 'Analysis'")

        cls = type(self)

        prop_rows = _discover_property_rows(cls)
        if prop_rows:
            console.paragraph('Properties')
            render_table(
                columns_headers=['#', 'Name', 'Writable', 'Description'],
                columns_alignment=['right', 'left', 'center', 'left'],
                columns_data=prop_rows,
            )

        method_rows = _discover_method_rows(cls)
        if method_rows:
            console.paragraph('Methods')
            render_table(
                columns_headers=['#', 'Name', 'Description'],
                columns_alignment=['right', 'left', 'left'],
                columns_data=method_rows,
            )

    # ------------------------------------------------------------------
    #  Parameter helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _get_params_as_dataframe(
        params: list[NumericDescriptor | Parameter],
    ) -> pd.DataFrame:
        """
        Convert a list of parameters to a DataFrame.

        Parameters
        ----------
        params : list[NumericDescriptor | Parameter]
            List of DescriptorFloat or Parameter objects.

        Returns
        -------
        pd.DataFrame
            A pandas DataFrame containing parameter information. Empty
            when *params* is empty or contains no recognized objects.
        """
        records = []
        for param in params:
            record = {}
            # TODO: Merge into one. Add field if attr exists
            # TODO: f'{param.value!r}' for StringDescriptor?
            if isinstance(param, (StringDescriptor, NumericDescriptor, Parameter)):
                record = {
                    ('fittable', 'left'): False,
                    ('datablock', 'left'): param._identity.datablock_entry_name,
                    ('category', 'left'): param._identity.category_code,
                    ('entry', 'left'): param._identity.category_entry_name or '',
                    ('parameter', 'left'): param.name,
                    ('value', 'right'): param.value,
                }
            if isinstance(param, (NumericDescriptor, Parameter)):
                record |= {
                    ('units', 'left'): param.units,
                }
            if isinstance(param, Parameter):
                record |= {
                    ('fittable', 'left'): True,
                    ('free', 'left'): param.free,
                    ('min', 'right'): param.fit_min,
                    ('max', 'right'): param.fit_max,
                    ('uncertainty', 'right'): param.uncertainty or '',
                }
            records.append(record)

        df = pd.DataFrame.from_records(records)
        # Guard: pd.MultiIndex.from_tuples raises on an empty sequence,
        # which happens when `params` is empty (or none of its entries
        # matched a known descriptor type). Only promote the columns to
        # a MultiIndex when at least one column exists.
        if len(df.columns):
            df.columns = pd.MultiIndex.from_tuples(df.columns)
        return df

    def show_current_minimizer(self) -> None:
        """Print the name of the currently selected minimizer."""
        console.paragraph('Current minimizer')
        console.print(self.current_minimizer)

    @staticmethod
    def show_available_minimizers() -> None:
        """Print available minimizer drivers on this system."""
        MinimizerFactory.show_supported()

    @property
    def current_minimizer(self) -> str | None:
        """The identifier of the active minimizer, if any."""
        return self.fitter.selection if self.fitter else None

    @current_minimizer.setter
    def current_minimizer(self, selection: str) -> None:
        """
        Switch to a different minimizer implementation.

        Parameters
        ----------
        selection : str
            Minimizer selection string, e.g. 'lmfit'.
        """
        self.fitter = Fitter(selection)
        console.paragraph('Current minimizer changed to')
        console.print(self.current_minimizer)

    # ------------------------------------------------------------------
    #  Fit mode (single type, with show methods)
    # ------------------------------------------------------------------

    @property
    def fit_mode(self) -> object:
        """Fit-mode category item holding the active strategy."""
        return self._fit_mode

    def show_supported_fit_mode_types(self) -> None:
        """Print a table of supported fit modes for this project."""
        num_expts = len(self.project.experiments) if self.project.experiments else 0
        # Joint/sequential modes only make sense with 2+ experiments.
        if num_expts <= 1:
            modes = [FitModeEnum.SINGLE]
        else:
            modes = [FitModeEnum.SINGLE, FitModeEnum.JOINT, FitModeEnum.SEQUENTIAL]
        columns_data = [[mode.value, mode.description()] for mode in modes]
        console.paragraph('Supported fit modes')
        render_table(
            columns_headers=['Mode', 'Description'],
            columns_alignment=['left', 'left'],
            columns_data=columns_data,
        )

    def show_current_fit_mode_type(self) -> None:
        """Print the currently selected fit mode."""
        console.paragraph('Current fit mode')
        console.print(self._fit_mode.mode.value)

    # ------------------------------------------------------------------
    #  Joint-fit experiments (category)
    # ------------------------------------------------------------------

    @property
    def joint_fit_experiments(self) -> object:
        """Per-experiment weight collection for joint fitting."""
        return self._joint_fit_experiments

    def fit(self, verbosity: str | None = None, *, use_physical_limits: bool = False) -> None:
        """
        Execute fitting for all experiments.

        This method performs the optimization but does not display
        results automatically. Call :meth:`display.fit_results` after
        fitting to see a summary of the fit quality and parameter
        values.

        In 'single' mode, fits each experiment independently. In 'joint'
        mode, performs a simultaneous fit across experiments with
        weights. If mode is 'sequential', logs an error directing the
        user to :meth:`fit_sequential` instead.

        Sets :attr:`fit_results` on success, which can be accessed
        programmatically (e.g.,
        ``analysis.fit_results.reduced_chi_square``).

        Parameters
        ----------
        verbosity : str | None, default=None
            Console output verbosity: ``'full'`` for detailed per-
            experiment progress, ``'short'`` for a
            one-row-per-experiment summary table, or ``'silent'`` for no
            output. When ``None``, uses ``project.verbosity``.
        use_physical_limits : bool, default=False
            When ``True``, fall back to physical limits from the value
            spec for parameters whose ``fit_min``/``fit_max`` are
            unbounded.
        """
        verb = VerbosityEnum(verbosity if verbosity is not None else self.project.verbosity)

        structures = self.project.structures
        if not structures:
            log.warning('No structures found in the project. Cannot run fit.')
            return

        experiments = self.project.experiments
        if not experiments:
            log.warning('No experiments found in the project. Cannot run fit.')
            return

        # Apply constraints before fitting so that constrained
        # parameters are marked and excluded from the free parameter
        # list built by the fitter.
        self._update_categories()

        # Run the fitting process
        mode = FitModeEnum(self._fit_mode.mode.value)
        if mode is FitModeEnum.JOINT:
            self._fit_joint(verb, structures, experiments, use_physical_limits=use_physical_limits)
        elif mode is FitModeEnum.SINGLE:
            self._fit_single(
                verb, structures, experiments, use_physical_limits=use_physical_limits
            )
        elif mode is FitModeEnum.SEQUENTIAL:
            log.error(
                "fit_mode is 'sequential'. Use fit_sequential(data_dir=...) instead of fit()."
            )
            return

        # After fitting, save the project
        if self.project.info.path is not None:
            self.project.save()

    def _fit_joint(
        self,
        verb: VerbosityEnum,
        structures: object,
        experiments: object,
        *,
        use_physical_limits: bool,
    ) -> None:
        """
        Run joint fitting across all experiments with weights.

        Parameters
        ----------
        verb : VerbosityEnum
            Output verbosity.
        structures : object
            Project structures collection.
        experiments : object
            Project experiments collection.
        use_physical_limits : bool
            Whether to use physical limits as fit bounds.
        """
        mode = FitModeEnum.JOINT
        # Auto-populate joint_fit_experiments if empty
        # (loop variable renamed from `id` to avoid shadowing the builtin;
        #  the `id=` keyword is part of the external `create` API and stays)
        if not len(self._joint_fit_experiments):
            for expt_name in experiments.names:
                self._joint_fit_experiments.create(id=expt_name, weight=0.5)
        if verb is not VerbosityEnum.SILENT:
            console.paragraph(
                f"Using all experiments 🔬 {experiments.names} for '{mode.value}' fitting"
            )
        # Resolve weights to a plain numpy array
        experiments_list = list(experiments.values())
        weights_list = [
            self._joint_fit_experiments[name].weight.value for name in experiments.names
        ]
        weights_array = np.array(weights_list, dtype=np.float64)
        self.fitter.fit(
            structures,
            experiments_list,
            weights=weights_array,
            analysis=self,
            verbosity=verb,
            use_physical_limits=use_physical_limits,
        )

        # After fitting, get the results
        self.fit_results = self.fitter.results

    def _fit_single(
        self,
        verb: VerbosityEnum,
        structures: object,
        experiments: object,
        *,
        use_physical_limits: bool,
    ) -> None:
        """
        Run single-mode fitting for each experiment independently.

        Parameters
        ----------
        verb : VerbosityEnum
            Output verbosity.
        structures : object
            Project structures collection.
        experiments : object
            Project experiments collection.
        use_physical_limits : bool
            Whether to use physical limits as fit bounds.
        """
        mode = FitModeEnum.SINGLE
        expt_names = experiments.names

        short_display_handle = self._fit_single_print_header(verb, expt_names, mode)
        short_rows: list[list[str]] = []

        for expt_name in expt_names:
            if verb is VerbosityEnum.FULL:
                console.print(f"📋 Using experiment 🔬 '{expt_name}' for '{mode.value}' fitting")

            experiment = experiments[expt_name]
            self.fitter.fit(
                structures,
                [experiment],
                analysis=self,
                verbosity=verb,
                use_physical_limits=use_physical_limits,
            )

            # After fitting, snapshot parameter values before
            # they get overwritten by the next experiment's fit
            results = self.fitter.results
            self._snapshot_params(expt_name, results)
            self.fit_results = results

            # Short mode: append one summary row and update in-place
            if verb is VerbosityEnum.SHORT:
                self._fit_single_update_short_table(
                    short_rows, expt_name, results, short_display_handle
                )

        # Short mode: close the display handle
        if short_display_handle is not None and hasattr(short_display_handle, 'close'):
            with suppress(Exception):
                short_display_handle.close()

    @staticmethod
    def _fit_single_print_header(
        verb: VerbosityEnum,
        expt_names: list[str],
        mode: FitModeEnum,
    ) -> object | None:
        """
        Print the header for single-mode fitting.

        Parameters
        ----------
        verb : VerbosityEnum
            Output verbosity.
        expt_names : list[str]
            Experiment names. Expected non-empty (callers return early
            when the project has no experiments).
        mode : FitModeEnum
            The fit mode enum.

        Returns
        -------
        object | None
            Display handle for short mode, or ``None``.
        """
        if verb is not VerbosityEnum.SILENT:
            console.paragraph('Standard fitting')
        if verb is not VerbosityEnum.SHORT:
            return None
        num_expts = len(expt_names)
        console.print(
            f"📋 Using {num_expts} experiments 🔬 from '{expt_names[0]}' to "
            f"'{expt_names[-1]}' for '{mode.value}' fitting"
        )
        console.print("🚀 Starting fit process with 'lmfit'...")
        console.print('📈 Goodness-of-fit (reduced χ²) per experiment:')
        return _make_display_handle()

    def _snapshot_params(self, expt_name: str, results: object) -> None:
        """
        Snapshot parameter values for a single experiment.

        Parameters
        ----------
        expt_name : str
            Experiment name key for the snapshot dict.
        results : object
            Fit results with ``.parameters`` list.
        """
        snapshot: dict[str, dict] = {}
        for param in results.parameters:
            snapshot[param.unique_name] = {
                'value': param.value,
                'uncertainty': param.uncertainty,
                'units': param.units,
            }
        self._parameter_snapshots[expt_name] = snapshot

    def _fit_single_update_short_table(
        self,
        short_rows: list[list[str]],
        expt_name: str,
        results: object,
        display_handle: object | None,
    ) -> None:
        """
        Append a summary row for short-mode display.

        Parameters
        ----------
        short_rows : list[list[str]]
            Accumulated rows (mutated in place).
        expt_name : str
            Experiment name.
        results : object
            Fit results.
        display_handle : object | None
            Display handle for in-place table update.
        """
        chi2_str = (
            f'{results.reduced_chi_square:.2f}' if results.reduced_chi_square is not None else '—'
        )
        iters = str(self.fitter.minimizer.tracker.best_iteration or 0)
        status = '✅' if results.success else '❌'
        short_rows.append([expt_name, chi2_str, iters, status])
        render_table(
            columns_headers=['experiment', 'χ²', 'iterations', 'status'],
            columns_alignment=['left', 'right', 'right', 'center'],
            columns_data=short_rows,
            display_handle=display_handle,
        )

    def fit_sequential(
        self,
        data_dir: str,
        max_workers: int | str = 1,
        chunk_size: int | None = None,
        file_pattern: str = '*',
        extract_diffrn: object = None,
        verbosity: str | None = None,
        *,
        reverse: bool = False,
    ) -> None:
        """
        Run sequential fitting over all data files in a directory.

        Fits each dataset independently using the current structure and
        experiment as a template.  Results are written incrementally to
        ``analysis/results.csv`` in the project directory.

        The project must contain exactly one structure and one
        experiment (the template), and must have been saved
        (``save_as()``) before calling this method.

        Parameters
        ----------
        data_dir : str
            Path to directory containing data files.
        max_workers : int | str, default=1
            Number of parallel worker processes. ``1`` = sequential.
            ``'auto'`` = physical CPU count. Uses
            ``ProcessPoolExecutor`` with ``spawn`` context when > 1.
        chunk_size : int | None, default=None
            Files per chunk. Default ``None`` uses *max_workers*.
        file_pattern : str, default='*'
            Glob pattern to filter files in *data_dir*.
        extract_diffrn : object, default=None
            User callback ``f(file_path) → {diffrn_field: value}``.
            Called per file after fitting. ``None`` = no diffrn
            metadata.
        verbosity : str | None, default=None
            ``'full'``, ``'short'``, or ``'silent'``. Default: project
            verbosity.
        reverse : bool, default=False
            When ``True``, process data files in reverse order.  Useful
            when starting values are better matched to the last file
            (e.g. highest-temperature dataset in a cooling scan).
        """
        from easydiffraction.analysis.sequential import fit_sequential as _fit_seq  # noqa: PLC0415

        # Record the fit mode for CIF serialization
        self._fit_mode.mode = FitModeEnum.SEQUENTIAL.value

        # Apply constraints before building the template
        self._update_categories()

        # Temporarily override project verbosity if caller provided one
        original_verbosity = None
        if verbosity is not None:
            original_verbosity = self.project.verbosity
            self.project.verbosity = verbosity
        try:
            _fit_seq(
                analysis=self,
                data_dir=data_dir,
                max_workers=max_workers,
                chunk_size=chunk_size,
                file_pattern=file_pattern,
                extract_diffrn=extract_diffrn,
                reverse=reverse,
            )
        finally:
            # Restore even if the sequential fit raised
            if original_verbosity is not None:
                self.project.verbosity = original_verbosity

    def _update_categories(
        self,
        *,
        called_by_minimizer: bool = False,
    ) -> None:
        """
        Update all categories owned by Analysis.

        This ensures aliases and constraints are up-to-date before
        serialization or after parameter changes.

        Parameters
        ----------
        called_by_minimizer : bool, default=False
            Whether this is called during fitting.
        """
        del called_by_minimizer

        # Apply constraints to sync dependent parameters
        if self.constraints.enabled and self.constraints._items:
            self.constraints_handler.set_aliases(self.aliases)
            self.constraints_handler.set_constraints(self.constraints)
            self.constraints_handler.apply()

    def as_cif(self) -> str:
        """
        Serialize the analysis section to a CIF string.

        Returns
        -------
        str
            The analysis section represented as a CIF document string.
        """
        self._update_categories()
        return analysis_to_cif(self)

__init__(project)

Create a new Analysis instance bound to a project.

Parameters:

Name Type Description Default
project object

The project that owns models and experiments.

required
Source code in src/easydiffraction/analysis/analysis.py
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
def __init__(self, project: object) -> None:
    """
    Create a new Analysis instance bound to a project.

    Parameters
    ----------
    project : object
        The project that owns models and experiments.
    """
    self.project = project
    self._aliases_type: str = AliasesFactory.default_tag()
    self.aliases = AliasesFactory.create(self._aliases_type)
    self._constraints_type: str = ConstraintsFactory.default_tag()
    self.constraints = ConstraintsFactory.create(self._constraints_type)
    self.constraints_handler = ConstraintsHandler.get()
    self._fit_mode_type: str = FitModeFactory.default_tag()
    self._fit_mode = FitModeFactory.create(self._fit_mode_type)
    self._joint_fit_experiments = JointFitExperiments()
    self.fitter = Fitter()
    self.fit_results = None
    self._parameter_snapshots: dict[str, dict[str, dict]] = {}
    self._display = AnalysisDisplay(self)

as_cif()

Serialize the analysis section to a CIF string.

Returns:

Type Description
str

The analysis section represented as a CIF document string.

Source code in src/easydiffraction/analysis/analysis.py
916
917
918
919
920
921
922
923
924
925
926
def as_cif(self) -> str:
    """
    Serialize the analysis section to a CIF string.

    Returns
    -------
    str
        The analysis section represented as a CIF document string.
    """
    self._update_categories()
    return analysis_to_cif(self)

current_minimizer property writable

The identifier of the active minimizer, if any.

display property

Display helper for parameter tables, CIF, and fit results.

fit(verbosity=None, *, use_physical_limits=False)

Execute fitting for all experiments.

This method performs the optimization but does not display results automatically. Call display.fit_results() after fitting to see a summary of the fit quality and parameter values.

In 'single' mode, fits each experiment independently. In 'joint' mode, performs a simultaneous fit across experiments with weights. If mode is 'sequential', logs an error directing the user to fit_sequential() instead.

Sets fit_results on success, which can be accessed programmatically (e.g., analysis.fit_results.reduced_chi_square).

Parameters:

Name Type Description Default
verbosity str | None

Console output verbosity: 'full' for detailed per- experiment progress, 'short' for a one-row-per-experiment summary table, or 'silent' for no output. When None, uses project.verbosity.

None
use_physical_limits bool

When True, fall back to physical limits from the value spec for parameters whose fit_min/fit_max are unbounded.

False
Source code in src/easydiffraction/analysis/analysis.py
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
def fit(self, verbosity: str | None = None, *, use_physical_limits: bool = False) -> None:
    """
    Execute fitting for all experiments.

    This method performs the optimization but does not display
    results automatically. Call :meth:`display.fit_results` after
    fitting to see a summary of the fit quality and parameter
    values.

    In 'single' mode, fits each experiment independently. In 'joint'
    mode, performs a simultaneous fit across experiments with
    weights. If mode is 'sequential', logs an error directing the
    user to :meth:`fit_sequential` instead.

    Sets :attr:`fit_results` on success, which can be accessed
    programmatically (e.g.,
    ``analysis.fit_results.reduced_chi_square``).

    Parameters
    ----------
    verbosity : str | None, default=None
        Console output verbosity: ``'full'`` for detailed per-
        experiment progress, ``'short'`` for a
        one-row-per-experiment summary table, or ``'silent'`` for no
        output. When ``None``, uses ``project.verbosity``.
    use_physical_limits : bool, default=False
        When ``True``, fall back to physical limits from the value
        spec for parameters whose ``fit_min``/``fit_max`` are
        unbounded.
    """
    verb = VerbosityEnum(verbosity if verbosity is not None else self.project.verbosity)

    structures = self.project.structures
    if not structures:
        log.warning('No structures found in the project. Cannot run fit.')
        return

    experiments = self.project.experiments
    if not experiments:
        log.warning('No experiments found in the project. Cannot run fit.')
        return

    # Apply constraints before fitting so that constrained
    # parameters are marked and excluded from the free parameter
    # list built by the fitter.
    self._update_categories()

    # Run the fitting process
    mode = FitModeEnum(self._fit_mode.mode.value)
    if mode is FitModeEnum.JOINT:
        self._fit_joint(verb, structures, experiments, use_physical_limits=use_physical_limits)
    elif mode is FitModeEnum.SINGLE:
        self._fit_single(
            verb, structures, experiments, use_physical_limits=use_physical_limits
        )
    elif mode is FitModeEnum.SEQUENTIAL:
        log.error(
            "fit_mode is 'sequential'. Use fit_sequential(data_dir=...) instead of fit()."
        )
        return

    # After fitting, save the project
    if self.project.info.path is not None:
        self.project.save()

fit_mode property

Fit-mode category item holding the active strategy.

fit_sequential(data_dir, max_workers=1, chunk_size=None, file_pattern='*', extract_diffrn=None, verbosity=None, *, reverse=False)

Run sequential fitting over all data files in a directory.

Fits each dataset independently using the current structure and experiment as a template. Results are written incrementally to analysis/results.csv in the project directory.

The project must contain exactly one structure and one experiment (the template), and must have been saved (save_as()) before calling this method.

Parameters:

Name Type Description Default
data_dir str

Path to directory containing data files.

required
max_workers int | str

Number of parallel worker processes. 1 = sequential. 'auto' = physical CPU count. Uses ProcessPoolExecutor with spawn context when > 1.

1
chunk_size int | None

Files per chunk. Default None uses max_workers.

None
file_pattern str

Glob pattern to filter files in data_dir.

'*'
extract_diffrn object

User callback f(file_path) → {diffrn_field: value}. Called per file after fitting. None = no diffrn metadata.

None
verbosity str | None

'full', 'short', or 'silent'. Default: project verbosity.

None
reverse bool

When True, process data files in reverse order. Useful when starting values are better matched to the last file (e.g. highest-temperature dataset in a cooling scan).

False
Source code in src/easydiffraction/analysis/analysis.py
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
def fit_sequential(
    self,
    data_dir: str,
    max_workers: int | str = 1,
    chunk_size: int | None = None,
    file_pattern: str = '*',
    extract_diffrn: object = None,
    verbosity: str | None = None,
    *,
    reverse: bool = False,
) -> None:
    """
    Fit every data file in a directory, one dataset at a time.

    Each file is fitted independently, using the project's single
    structure and single experiment as a template.  Results are
    appended incrementally to ``analysis/results.csv`` inside the
    project directory, so the project must already have been saved
    with ``save_as()``.

    Parameters
    ----------
    data_dir : str
        Directory holding the data files to fit.
    max_workers : int | str, default=1
        Worker-process count. ``1`` runs sequentially; ``'auto'``
        uses the physical CPU count. Values above 1 use a
        ``ProcessPoolExecutor`` with the ``spawn`` start method.
    chunk_size : int | None, default=None
        Number of files per chunk; ``None`` falls back to
        *max_workers*.
    file_pattern : str, default='*'
        Glob pattern selecting files inside *data_dir*.
    extract_diffrn : object, default=None
        Optional callback ``f(file_path) -> {diffrn_field: value}``
        invoked per file after fitting; ``None`` disables diffrn
        metadata extraction.
    verbosity : str | None, default=None
        ``'full'``, ``'short'``, or ``'silent'``; ``None`` keeps the
        project's current verbosity.
    reverse : bool, default=False
        Process files in reverse order — handy when the starting
        parameter values best match the last file (e.g. the
        highest-temperature dataset of a cooling scan).
    """
    from easydiffraction.analysis.sequential import fit_sequential as _fit_seq  # noqa: PLC0415

    # Remember the active fit mode so CIF serialization reflects it.
    self._fit_mode.mode = FitModeEnum.SEQUENTIAL.value

    # Apply constraints before the template is built.
    self._update_categories()

    # Optionally override project verbosity for the duration of the run.
    saved_verbosity = self.project.verbosity if verbosity is not None else None
    if verbosity is not None:
        self.project.verbosity = verbosity
    try:
        _fit_seq(
            analysis=self,
            data_dir=data_dir,
            max_workers=max_workers,
            chunk_size=chunk_size,
            file_pattern=file_pattern,
            extract_diffrn=extract_diffrn,
            reverse=reverse,
        )
    finally:
        # Restore the caller's verbosity even if fitting raised.
        if saved_verbosity is not None:
            self.project.verbosity = saved_verbosity

help()

Print a summary of analysis properties and methods.

Source code in src/easydiffraction/analysis/analysis.py
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
def help(self) -> None:
    """Print a summary of analysis properties and methods."""
    console.paragraph("Help for 'Analysis'")

    klass = type(self)

    # Render each section (properties, then methods) with its own
    # header layout; skip a section when its discovery yields nothing.
    sections = (
        (
            'Properties',
            _discover_property_rows,
            ['#', 'Name', 'Writable', 'Description'],
            ['right', 'left', 'center', 'left'],
        ),
        (
            'Methods',
            _discover_method_rows,
            ['#', 'Name', 'Description'],
            ['right', 'left', 'left'],
        ),
    )
    for title, discover, headers, alignment in sections:
        rows = discover(klass)
        if not rows:
            continue
        console.paragraph(title)
        render_table(
            columns_headers=headers,
            columns_alignment=alignment,
            columns_data=rows,
        )

joint_fit_experiments property

Per-experiment weight collection for joint fitting.

show_available_minimizers() staticmethod

Print available minimizer drivers on this system.

Source code in src/easydiffraction/analysis/analysis.py
495
496
497
498
@staticmethod
def show_available_minimizers() -> None:
    """Print available minimizer drivers on this system."""
    # Delegates entirely to the factory, which knows which backends
    # are importable in the current environment.
    MinimizerFactory.show_supported()

show_current_fit_mode_type()

Print the currently selected fit mode.

Source code in src/easydiffraction/analysis/analysis.py
543
544
545
546
def show_current_fit_mode_type(self) -> None:
    """Print the currently selected fit mode."""
    active_mode = self._fit_mode.mode.value
    console.paragraph('Current fit mode')
    console.print(active_mode)

show_current_minimizer()

Print the name of the currently selected minimizer.

Source code in src/easydiffraction/analysis/analysis.py
490
491
492
493
def show_current_minimizer(self) -> None:
    """Print the name of the currently selected minimizer."""
    minimizer_name = self.current_minimizer
    console.paragraph('Current minimizer')
    console.print(minimizer_name)

show_supported_fit_mode_types()

Print a table of supported fit modes for this project.

Source code in src/easydiffraction/analysis/analysis.py
528
529
530
531
532
533
534
535
536
537
538
539
540
541
def show_supported_fit_mode_types(self) -> None:
    """Print a table of supported fit modes for this project."""
    experiments = self.project.experiments
    experiment_count = len(experiments) if experiments else 0

    # Joint and sequential modes only make sense with several experiments.
    if experiment_count > 1:
        supported = [FitModeEnum.SINGLE, FitModeEnum.JOINT, FitModeEnum.SEQUENTIAL]
    else:
        supported = [FitModeEnum.SINGLE]

    rows = [[mode.value, mode.description()] for mode in supported]
    console.paragraph('Supported fit modes')
    render_table(
        columns_headers=['Mode', 'Description'],
        columns_alignment=['left', 'left'],
        columns_data=rows,
    )

AnalysisDisplay

Display helper - parameter tables, CIF, and fit results.

Accessed via analysis.display.

Source code in src/easydiffraction/analysis/analysis.py
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
class AnalysisDisplay:
    """
    Display helper - parameter tables, CIF, and fit results.

    Accessed via ``analysis.display``.
    """

    def __init__(self, analysis: 'Analysis') -> None:
        # Parent Analysis object; its project supplies all parameters.
        self._analysis = analysis

    def _render_params_table(
        self,
        params,
        title: str,
        headers: list[str],
    ) -> None:
        """Render *params* as a table under *title*, keeping only *headers*."""
        console.paragraph(title)
        df = Analysis._get_params_as_dataframe(params)
        TableRenderer.get().render(df[headers])

    def all_params(self) -> None:
        """Print all parameters for structures and experiments."""
        project = self._analysis.project
        structures_params = project.structures.parameters
        experiments_params = project.experiments.parameters

        if not structures_params and not experiments_params:
            log.warning('No parameters found.')
            return

        headers = [
            'datablock',
            'category',
            'entry',
            'parameter',
            'value',
            'fittable',
        ]
        self._render_params_table(
            structures_params,
            'All parameters for all structures (🧩 data blocks)',
            headers,
        )
        self._render_params_table(
            experiments_params,
            'All parameters for all experiments (🔬 data blocks)',
            headers,
        )

    def fittable_params(self) -> None:
        """Print all fittable parameters."""
        project = self._analysis.project
        structures_params = project.structures.fittable_parameters
        experiments_params = project.experiments.fittable_parameters

        if not structures_params and not experiments_params:
            log.warning('No fittable parameters found.')
            return

        headers = [
            'datablock',
            'category',
            'entry',
            'parameter',
            'value',
            'uncertainty',
            'units',
            'free',
        ]
        self._render_params_table(
            structures_params,
            'Fittable parameters for all structures (🧩 data blocks)',
            headers,
        )
        self._render_params_table(
            experiments_params,
            'Fittable parameters for all experiments (🔬 data blocks)',
            headers,
        )

    def free_params(self) -> None:
        """Print only currently free (varying) parameters."""
        project = self._analysis.project
        free_params = (
            project.structures.free_parameters + project.experiments.free_parameters
        )

        if not free_params:
            log.warning('No free parameters found.')
            return

        headers = [
            'datablock',
            'category',
            'entry',
            'parameter',
            'value',
            'uncertainty',
            'min',
            'max',
            'units',
        ]
        self._render_params_table(
            free_params,
            'Free parameters for both structures (🧩 data blocks) and experiments (🔬 data blocks)',
            headers,
        )

    def how_to_access_parameters(self) -> None:
        """
        Show Python access paths for all parameters.

        The output explains how to reference specific parameters in
        code.
        """
        project = self._analysis.project
        structures_params = project.structures.parameters
        experiments_params = project.experiments.parameters
        all_params = {
            'structures': structures_params,
            'experiments': experiments_params,
        }

        # Bug fix: the dict above always holds two keys, so it is always
        # truthy — check the underlying parameter lists instead so the
        # warning can actually fire.
        if not structures_params and not experiments_params:
            log.warning('No parameters found.')
            return

        columns_headers = [
            'datablock',
            'category',
            'entry',
            'parameter',
            'How to Access in Python Code',
        ]

        columns_alignment = ['left'] * len(columns_headers)

        columns_data = []
        project_varname = project._varname
        for datablock_code, params in all_params.items():
            for param in params:
                # Only descriptor/parameter objects carry the identity
                # metadata needed to build an access path.
                if not isinstance(param, (StringDescriptor, NumericDescriptor, Parameter)):
                    continue
                identity = param._identity
                datablock_entry_name = identity.datablock_entry_name
                category_code = identity.category_code
                category_entry_name = identity.category_entry_name or ''
                param_key = param.name
                # Build e.g. "<project>.structures['<name>'].<category>[...].<param>"
                code_variable = (
                    f'{project_varname}.{datablock_code}'
                    f"['{datablock_entry_name}'].{category_code}"
                )
                if category_entry_name:
                    code_variable += f"['{category_entry_name}']"
                code_variable += f'.{param_key}'
                columns_data.append([
                    datablock_entry_name,
                    category_code,
                    category_entry_name,
                    param_key,
                    code_variable,
                ])

        console.paragraph('How to access parameters')
        render_table(
            columns_headers=columns_headers,
            columns_alignment=columns_alignment,
            columns_data=columns_data,
        )

    def parameter_cif_uids(self) -> None:
        """
        Show CIF unique IDs for all parameters.

        The output explains which unique identifiers are used when
        creating CIF-based constraints.
        """
        project = self._analysis.project
        structures_params = project.structures.parameters
        experiments_params = project.experiments.parameters
        all_params = {
            'structures': structures_params,
            'experiments': experiments_params,
        }

        # Bug fix: the dict above is always truthy (two fixed keys);
        # check the parameter lists so the warning can actually fire.
        if not structures_params and not experiments_params:
            log.warning('No parameters found.')
            return

        columns_headers = [
            'datablock',
            'category',
            'entry',
            'parameter',
            'Unique Identifier for CIF Constraints',
        ]

        columns_alignment = ['left'] * len(columns_headers)

        columns_data = []
        for params in all_params.values():
            for param in params:
                # Only descriptor/parameter objects expose a CIF uid.
                if not isinstance(param, (StringDescriptor, NumericDescriptor, Parameter)):
                    continue
                identity = param._identity
                columns_data.append([
                    identity.datablock_entry_name,
                    identity.category_code,
                    identity.category_entry_name or '',
                    param.name,
                    param._cif_handler.uid,
                ])

        console.paragraph('Show parameter CIF unique identifiers')
        render_table(
            columns_headers=columns_headers,
            columns_alignment=columns_alignment,
            columns_data=columns_data,
        )

    def constraints(self) -> None:
        """Print a table of all user-defined symbolic constraints."""
        analysis = self._analysis
        if not analysis.constraints._items:
            log.warning('No constraints defined.')
            return

        rows = [[constraint.expression.value] for constraint in analysis.constraints]

        console.paragraph('User defined constraints')
        render_table(
            columns_headers=['expression'],
            columns_alignment=['left'],
            columns_data=rows,
        )
        console.print(f'Constraints enabled: {analysis.constraints.enabled}')

    def fit_results(self) -> None:
        """
        Display a summary of the fit results.

        Renders the fit quality metrics (reduced χ², R-factors) and a
        table of fitted parameters with their starting values, final
        values, and uncertainties.

        This method should be called after :meth:`Analysis.fit`
        completes. If no fit has been performed yet, a warning is
        logged.
        """
        analysis = self._analysis
        if analysis.fit_results is None:
            log.warning('No fit results available. Run fit() first.')
            return

        structures = analysis.project.structures
        experiments = list(analysis.project.experiments.values())

        # Reuses the fitter's own post-fit reporting to render metrics.
        analysis.fitter._process_fit_results(structures, experiments)

    def as_cif(self) -> None:
        """Render the analysis section as CIF in console."""
        cif_text: str = self._analysis.as_cif()
        paragraph_title: str = 'Analysis 🧮 info as cif'
        console.paragraph(paragraph_title)
        render_cif(cif_text)

all_params()

Print all parameters for structures and experiments.

Source code in src/easydiffraction/analysis/analysis.py
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
def all_params(self) -> None:
    """Print all parameters for structures and experiments."""
    project = self._analysis.project
    structures_params = project.structures.parameters
    experiments_params = project.experiments.parameters

    if not structures_params and not experiments_params:
        log.warning('No parameters found.')
        return

    renderer = TableRenderer.get()
    columns = [
        'datablock',
        'category',
        'entry',
        'parameter',
        'value',
        'fittable',
    ]

    # Render one table per data-block family, same columns for both.
    for title, params in (
        ('All parameters for all structures (🧩 data blocks)', structures_params),
        ('All parameters for all experiments (🔬 data blocks)', experiments_params),
    ):
        console.paragraph(title)
        frame = Analysis._get_params_as_dataframe(params)
        renderer.render(frame[columns])

as_cif()

Render the analysis section as CIF in console.

Source code in src/easydiffraction/analysis/analysis.py
368
369
370
371
372
373
def as_cif(self) -> None:
    """Render the analysis section as CIF in console."""
    console.paragraph('Analysis 🧮 info as cif')
    render_cif(self._analysis.as_cif())

constraints()

Print a table of all user-defined symbolic constraints.

Source code in src/easydiffraction/analysis/analysis.py
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
def constraints(self) -> None:
    """Print a table of all user-defined symbolic constraints."""
    analysis = self._analysis

    # Nothing to show when no constraints have been registered.
    if not analysis.constraints._items:
        log.warning('No constraints defined.')
        return

    expression_rows = [[item.expression.value] for item in analysis.constraints]

    console.paragraph('User defined constraints')
    render_table(
        columns_headers=['expression'],
        columns_alignment=['left'],
        columns_data=expression_rows,
    )
    console.print(f'Constraints enabled: {analysis.constraints.enabled}')

fit_results()

Display a summary of the fit results.

Renders the fit quality metrics (reduced χ², R-factors) and a table of fitted parameters with their starting values, final values, and uncertainties.

This method should be called after :meth:Analysis.fit completes. If no fit has been performed yet, a warning is logged.

Source code in src/easydiffraction/analysis/analysis.py
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
def fit_results(self) -> None:
    """
    Display a summary of the fit results.

    Renders the fit quality metrics (reduced χ², R-factors) and a
    table of fitted parameters with their starting values, final
    values, and uncertainties.

    This method should be called after :meth:`Analysis.fit`
    completes. If no fit has been performed yet, a warning is
    logged.
    """
    analysis = self._analysis
    if analysis.fit_results is None:
        log.warning('No fit results available. Run fit() first.')
        return

    project = analysis.project
    # Delegate rendering to the fitter's own post-fit reporting.
    analysis.fitter._process_fit_results(
        project.structures,
        list(project.experiments.values()),
    )

fittable_params()

Print all fittable parameters.

Source code in src/easydiffraction/analysis/analysis.py
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
def fittable_params(self) -> None:
    """Print all fittable parameters."""
    project = self._analysis.project
    structures_params = project.structures.fittable_parameters
    experiments_params = project.experiments.fittable_parameters

    if not structures_params and not experiments_params:
        log.warning('No fittable parameters found.')
        return

    renderer = TableRenderer.get()
    columns = [
        'datablock',
        'category',
        'entry',
        'parameter',
        'value',
        'uncertainty',
        'units',
        'free',
    ]

    # One table per data-block family, same column layout for both.
    for title, params in (
        ('Fittable parameters for all structures (🧩 data blocks)', structures_params),
        ('Fittable parameters for all experiments (🔬 data blocks)', experiments_params),
    ):
        console.paragraph(title)
        frame = Analysis._get_params_as_dataframe(params)
        renderer.render(frame[columns])

free_params()

Print only currently free (varying) parameters.

Source code in src/easydiffraction/analysis/analysis.py
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
def free_params(self) -> None:
    """Print only currently free (varying) parameters."""
    project = self._analysis.project
    combined = (
        project.structures.free_parameters + project.experiments.free_parameters
    )

    if not combined:
        log.warning('No free parameters found.')
        return

    columns = [
        'datablock',
        'category',
        'entry',
        'parameter',
        'value',
        'uncertainty',
        'min',
        'max',
        'units',
    ]

    console.paragraph(
        'Free parameters for both structures (🧩 data blocks) and experiments (🔬 data blocks)'
    )
    frame = Analysis._get_params_as_dataframe(combined)
    TableRenderer.get().render(frame[columns])

how_to_access_parameters()

Show Python access paths for all parameters.

The output explains how to reference specific parameters in code.

Source code in src/easydiffraction/analysis/analysis.py
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
def how_to_access_parameters(self) -> None:
    """
    Show Python access paths for all parameters.

    The output explains how to reference specific parameters in
    code.
    """
    project = self._analysis.project
    structures_params = project.structures.parameters
    experiments_params = project.experiments.parameters
    all_params = {
        'structures': structures_params,
        'experiments': experiments_params,
    }

    # Bug fix: ``all_params`` is a dict that always has two keys, so
    # ``not all_params`` was always False and the warning could never
    # fire. Check the underlying parameter lists instead.
    if not structures_params and not experiments_params:
        log.warning('No parameters found.')
        return

    columns_headers = [
        'datablock',
        'category',
        'entry',
        'parameter',
        'How to Access in Python Code',
    ]

    columns_alignment = ['left'] * len(columns_headers)

    columns_data = []
    project_varname = project._varname
    for datablock_code, params in all_params.items():
        for param in params:
            # Only descriptor/parameter objects carry the identity
            # metadata needed to build an access path.
            if not isinstance(param, (StringDescriptor, NumericDescriptor, Parameter)):
                continue
            identity = param._identity
            datablock_entry_name = identity.datablock_entry_name
            category_code = identity.category_code
            category_entry_name = identity.category_entry_name or ''
            param_key = param.name
            # Build "<project>.<family>['<block>'].<category>[...].<param>".
            code_variable = (
                f'{project_varname}.{datablock_code}'
                f"['{datablock_entry_name}'].{category_code}"
            )
            if category_entry_name:
                code_variable += f"['{category_entry_name}']"
            code_variable += f'.{param_key}'
            columns_data.append([
                datablock_entry_name,
                category_code,
                category_entry_name,
                param_key,
                code_variable,
            ])

    console.paragraph('How to access parameters')
    render_table(
        columns_headers=columns_headers,
        columns_alignment=columns_alignment,
        columns_data=columns_data,
    )

parameter_cif_uids()

Show CIF unique IDs for all parameters.

The output explains which unique identifiers are used when creating CIF-based constraints.

Source code in src/easydiffraction/analysis/analysis.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
def parameter_cif_uids(self) -> None:
    """
    Show CIF unique IDs for all parameters.

    The output explains which unique identifiers are used when
    creating CIF-based constraints.
    """
    project = self._analysis.project
    structures_params = project.structures.parameters
    experiments_params = project.experiments.parameters
    all_params = {
        'structures': structures_params,
        'experiments': experiments_params,
    }

    # Bug fix: ``all_params`` is a dict with two fixed keys, hence
    # always truthy — the warning could never fire. Check the
    # underlying parameter lists instead.
    if not structures_params and not experiments_params:
        log.warning('No parameters found.')
        return

    columns_headers = [
        'datablock',
        'category',
        'entry',
        'parameter',
        'Unique Identifier for CIF Constraints',
    ]

    columns_alignment = ['left'] * len(columns_headers)

    columns_data = []
    for params in all_params.values():
        for param in params:
            # Only descriptor/parameter objects expose a CIF uid.
            if not isinstance(param, (StringDescriptor, NumericDescriptor, Parameter)):
                continue
            identity = param._identity
            columns_data.append([
                identity.datablock_entry_name,
                identity.category_code,
                identity.category_entry_name or '',
                param.name,
                param._cif_handler.uid,
            ])

    console.paragraph('Show parameter CIF unique identifiers')
    render_table(
        columns_headers=columns_headers,
        columns_alignment=columns_alignment,
        columns_data=columns_data,
    )

calculators

base

CalculatorBase

Bases: ABC

Base API for diffraction calculation engines.

Source code in src/easydiffraction/analysis/calculators/base.py
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
class CalculatorBase(ABC):
    """Base API for diffraction calculation engines.

    Concrete engines (e.g. CrysFML wrappers) implement this interface
    so the analysis layer can drive them interchangeably.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Short identifier of the calculation engine."""

    @property
    @abstractmethod
    def engine_imported(self) -> bool:
        """True if the underlying calculation library is available."""

    @abstractmethod
    def calculate_structure_factors(
        self,
        # NOTE(review): annotated ``Structure`` here but ``Structures``
        # in calculate_pattern below — confirm the intended type.
        structure: Structure,
        experiment: ExperimentBase,
        *,
        called_by_minimizer: bool,
    ) -> None:
        """Calculate structure factors for one experiment."""

    @abstractmethod
    def calculate_pattern(
        self,
        structure: Structures,  # TODO: Structure?
        experiment: ExperimentBase,
        *,
        called_by_minimizer: bool,
    ) -> np.ndarray:
        """
        Calculate diffraction pattern for one structure-experiment pair.

        Parameters
        ----------
        structure : Structures
            The structure object.
        experiment : ExperimentBase
            The experiment object.
        called_by_minimizer : bool
            Whether the calculation is called by a minimizer. Default is
            False.

        Returns
        -------
        np.ndarray
            The calculated diffraction pattern as a NumPy array.
        """
calculate_pattern(structure, experiment, *, called_by_minimizer) abstractmethod

Calculate diffraction pattern for one structure-experiment pair.

Parameters:

Name Type Description Default
structure Structures

The structure object.

required
experiment ExperimentBase

The experiment object.

required
called_by_minimizer bool

Whether the calculation is called by a minimizer. Default is False.

required

Returns:

Type Description
ndarray

The calculated diffraction pattern as a NumPy array.

Source code in src/easydiffraction/analysis/calculators/base.py
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
@abstractmethod
def calculate_pattern(
    self,
    structure: Structures,  # TODO: Structure?
    experiment: ExperimentBase,
    *,
    # Keyword-only so call sites always spell out the intent.
    called_by_minimizer: bool,
) -> np.ndarray:
    """
    Calculate diffraction pattern for one structure-experiment pair.

    Parameters
    ----------
    structure : Structures
        The structure object.
    experiment : ExperimentBase
        The experiment object.
    called_by_minimizer : bool
        Whether the calculation is called by a minimizer. Default is
        False.

    Returns
    -------
    np.ndarray
        The calculated diffraction pattern as a NumPy array.
    """
calculate_structure_factors(structure, experiment, *, called_by_minimizer) abstractmethod

Calculate structure factors for one experiment.

Source code in src/easydiffraction/analysis/calculators/base.py
27
28
29
30
31
32
33
34
35
@abstractmethod
def calculate_structure_factors(
    self,
    structure: Structure,
    experiment: ExperimentBase,
    *,
    # Keyword-only so call sites always spell out the intent.
    called_by_minimizer: bool,
) -> None:
    """Calculate structure factors for one experiment.

    Parameters
    ----------
    structure : Structure
        The structure object.
    experiment : ExperimentBase
        The experiment object.
    called_by_minimizer : bool
        Whether the calculation is called by a minimizer.
    """
engine_imported abstractmethod property

True if the underlying calculation library is available.

name abstractmethod property

Short identifier of the calculation engine.

crysfml

CrysfmlCalculator

Bases: CalculatorBase

Wrapper for Crysfml library.

Source code in src/easydiffraction/analysis/calculators/crysfml.py
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
@CalculatorFactory.register
class CrysfmlCalculator(CalculatorBase):
    """Wrapper for Crysfml library."""

    type_info = TypeInfo(
        tag='crysfml',
        description='CrysFML library for crystallographic calculations',
    )
    # True only when the optional pycrysfml bindings could be imported.
    engine_imported: bool = cfml_py_utilities is not None

    @property
    def name(self) -> str:
        """Short identifier of this calculator engine."""
        return 'crysfml'

    def calculate_structure_factors(
        self,
        structures: Structures,
        experiments: Experiments,
    ) -> None:
        """
        Call Crysfml to calculate structure factors.

        Parameters
        ----------
        structures : Structures
            The structures to calculate structure factors for.
        experiments : Experiments
            The experiments associated with the sample models.

        Raises
        ------
        NotImplementedError
            HKL calculation is not implemented for CrysfmlCalculator.
        """
        msg = 'HKL calculation is not implemented for CrysfmlCalculator.'
        raise NotImplementedError(msg)

    def calculate_pattern(
        self,
        structure: Structures,
        experiment: ExperimentBase,
        *,
        called_by_minimizer: bool = False,
    ) -> np.ndarray | list[float]:
        """
        Calculate the diffraction pattern using Crysfml.

        Parameters
        ----------
        structure : Structures
            The structure to calculate the pattern for.
        experiment : ExperimentBase
            The experiment associated with the structure.
        called_by_minimizer : bool, default=False
            Whether the calculation is called by a minimizer.

        Returns
        -------
        np.ndarray | list[float]
            The calculated diffraction pattern as a NumPy array or a
            list of floats.
        """
        # Intentionally unused, required by public API/signature
        del called_by_minimizer

        crysfml_dict = self._crysfml_dict(structure, experiment)
        try:
            _, y = cfml_py_utilities.cw_powder_pattern_from_dict(crysfml_dict)
            y = self._adjust_pattern_length(y, len(experiment.data.x))
        except KeyError:
            # Best-effort: report and return an empty pattern rather
            # than aborting the whole analysis run.
            print('[CrysfmlCalculator] Error: No calculated data')
            y = []
        return np.asarray(y)

    def _adjust_pattern_length(  # noqa: PLR6301
        self,
        pattern: list[float],
        target_length: int,
    ) -> list[float]:
        """
        Adjust the pattern length to match the target length.

        Parameters
        ----------
        pattern : list[float]
            The pattern to adjust.
        target_length : int
            The desired length of the pattern.

        Returns
        -------
        list[float]
            The adjusted pattern.
        """
        # TODO: Check the origin of this discrepancy coming from
        #  PyCrysFML
        # Safety guard: with the correct step formula (max-min)/(N-1+ε),
        # pycrysfml should return exactly target_length points. Truncate
        # if over-length; pad with the last value if under-length.
        # An empty pattern has no last value to repeat, so it is
        # returned as-is instead of raising IndexError on pattern[-1].
        if not pattern:
            return pattern
        if len(pattern) > target_length:
            return pattern[:target_length]
        if len(pattern) < target_length:
            pad = target_length - len(pattern)
            return list(pattern) + [pattern[-1]] * pad
        return pattern

    def _crysfml_dict(
        self,
        structure: Structures,
        experiment: ExperimentBase,
    ) -> dict[str, ExperimentBase | Structure]:
        """
        Convert structure and experiment into a Crysfml dictionary.

        Parameters
        ----------
        structure : Structures
            The structure to convert.
        experiment : ExperimentBase
            The experiment to convert.

        Returns
        -------
        dict[str, ExperimentBase | Structure]
            A dictionary representation of the structure and experiment.
        """
        structure_dict = self._convert_structure_to_dict(structure)
        experiment_dict = self._convert_experiment_to_dict(experiment)
        return {
            'phases': [structure_dict],
            'experiments': [experiment_dict],
        }

    def _convert_structure_to_dict(  # noqa: PLR6301
        self,
        structure: Structure,
    ) -> dict[str, Any]:
        """
        Convert a structure into a dictionary format.

        Parameters
        ----------
        structure : Structure
            The structure to convert.

        Returns
        -------
        dict[str, Any]
            A dictionary representation of the structure.
        """
        structure_dict = {
            structure.name: {
                '_space_group_name_H-M_alt': structure.space_group.name_h_m.value,
                '_cell_length_a': structure.cell.length_a.value,
                '_cell_length_b': structure.cell.length_b.value,
                '_cell_length_c': structure.cell.length_c.value,
                '_cell_angle_alpha': structure.cell.angle_alpha.value,
                '_cell_angle_beta': structure.cell.angle_beta.value,
                '_cell_angle_gamma': structure.cell.angle_gamma.value,
                '_atom_site': [],
            }
        }

        for atom in structure.atom_sites:
            atom_site = {
                '_label': atom.label.value,
                '_type_symbol': atom.type_symbol.value,
                '_fract_x': atom.fract_x.value,
                '_fract_y': atom.fract_y.value,
                '_fract_z': atom.fract_z.value,
                '_occupancy': atom.occupancy.value,
                '_adp_type': atom.adp_type.value,
                '_B_iso_or_equiv': atom.adp_iso_as_b,
            }
            structure_dict[structure.name]['_atom_site'].append(atom_site)

        return structure_dict

    def _convert_experiment_to_dict(  # noqa: PLR6301
        self,
        experiment: ExperimentBase,
    ) -> dict[str, Any]:
        """
        Convert an experiment into a dictionary format.

        Parameters
        ----------
        experiment : ExperimentBase
            The experiment to convert.

        Returns
        -------
        dict[str, Any]
            A dictionary representation of the experiment.
        """
        # Optional categories: fall back to neutral defaults below when
        # the experiment type does not expose them.
        attrs = type(experiment)._public_attrs()
        expt_type = experiment.type if 'type' in attrs else None
        instrument = experiment.instrument if 'instrument' in attrs else None
        peak = experiment.peak if 'peak' in attrs else None

        x_data = experiment.data.x
        twotheta_min = float(x_data.min())
        twotheta_max = float(x_data.max())

        # TODO: Process default values on the experiment creation
        #  instead of here
        return {
            'NPD': {
                '_diffrn_radiation_probe': expt_type.radiation_probe.value
                if expt_type
                else 'neutron',
                '_diffrn_radiation_wavelength': instrument.setup_wavelength.value
                if instrument
                else 1.0,
                '_pd_instr_resolution_u': peak.broad_gauss_u.value if peak else 0.0,
                '_pd_instr_resolution_v': peak.broad_gauss_v.value if peak else 0.0,
                '_pd_instr_resolution_w': peak.broad_gauss_w.value if peak else 0.0,
                '_pd_instr_resolution_x': peak.broad_lorentz_x.value if peak else 0.0,
                '_pd_instr_resolution_y': peak.broad_lorentz_y.value if peak else 0.0,
                '_pd_meas_2theta_offset': instrument.calib_twotheta_offset.value
                if instrument
                else 0.0,
                '_pd_meas_2theta_range_min': twotheta_min,
                '_pd_meas_2theta_range_max': twotheta_max,
                # TODO: Check the origin of this discrepancy coming from
                #  PyCrysFML
                # Divide by (N-1+ε) instead of (N-1) so that pycrysfml's
                # internal floor((max-min)/step) is robustly N-1 despite
                # floating-point rounding, producing exactly N points.
                '_pd_meas_2theta_range_inc': (twotheta_max - twotheta_min)
                / (len(x_data) - 1 + 1e-9),
            }
        }
calculate_pattern(structure, experiment, *, called_by_minimizer=False)

Calculate the diffraction pattern using Crysfml.

Parameters:

Name Type Description Default
structure Structures

The structure to calculate the pattern for.

required
experiment ExperimentBase

The experiment associated with the structure.

required
called_by_minimizer bool

Whether the calculation is called by a minimizer.

False

Returns:

Type Description
ndarray | list[float]

The calculated diffraction pattern as a NumPy array or a list of floats.

Source code in src/easydiffraction/analysis/calculators/crysfml.py
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
def calculate_pattern(
    self,
    structure: Structures,
    experiment: ExperimentBase,
    *,
    called_by_minimizer: bool = False,
) -> np.ndarray | list[float]:
    """
    Compute the powder diffraction pattern via the Crysfml backend.

    Parameters
    ----------
    structure : Structures
        The structure to calculate the pattern for.
    experiment : ExperimentBase
        The experiment associated with the structure.
    called_by_minimizer : bool, default=False
        Whether the calculation is called by a minimizer.

    Returns
    -------
    np.ndarray | list[float]
        The calculated diffraction pattern as a NumPy array or a
        list of floats.
    """
    # Part of the public signature, but this backend has no
    # minimizer-specific fast path, so the flag is discarded.
    del called_by_minimizer

    payload = self._crysfml_dict(structure, experiment)
    try:
        _, intensities = cfml_py_utilities.cw_powder_pattern_from_dict(payload)
    except KeyError:
        print('[CrysfmlCalculator] Error: No calculated data')
        return np.asarray([])
    intensities = self._adjust_pattern_length(intensities, len(experiment.data.x))
    return np.asarray(intensities)
calculate_structure_factors(structures, experiments)

Call Crysfml to calculate structure factors.

Parameters:

Name Type Description Default
structures Structures

The structures to calculate structure factors for.

required
experiments Experiments

The experiments associated with the sample models.

required

Raises:

Type Description
NotImplementedError

HKL calculation is not implemented for CrysfmlCalculator.

Source code in src/easydiffraction/analysis/calculators/crysfml.py
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
def calculate_structure_factors(
    self,
    structures: Structures,
    experiments: Experiments,
) -> None:
    """
    Call Crysfml to calculate structure factors.

    Parameters
    ----------
    structures : Structures
        The structures to calculate structure factors for.
    experiments : Experiments
        The experiments associated with the sample models.

    Raises
    ------
    NotImplementedError
        HKL calculation is not implemented for CrysfmlCalculator.
    """
    # The Crysfml backend has no HKL pathway yet; fail loudly.
    raise NotImplementedError(
        'HKL calculation is not implemented for CrysfmlCalculator.'
    )
name property

Short identifier of this calculator engine.

cryspy

CryspyCalculator

Bases: CalculatorBase

Cryspy-based diffraction calculator.

Converts EasyDiffraction models into Cryspy objects and computes patterns.

Source code in src/easydiffraction/analysis/calculators/cryspy.py
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
@CalculatorFactory.register
class CryspyCalculator(CalculatorBase):
    """
    Cryspy-based diffraction calculator.

    Converts EasyDiffraction models into Cryspy objects and computes
    patterns.
    """

    type_info = TypeInfo(
        tag='cryspy',
        description='CrysPy library for crystallographic calculations',
    )
    # True only when the optional cryspy package could be imported.
    engine_imported: bool = cryspy is not None

    @property
    def name(self) -> str:
        """Short identifier of this calculator engine."""
        return 'cryspy'

    def __init__(self) -> None:
        super().__init__()
        # Cached cryspy dictionaries keyed by '<structure>_<experiment>'.
        self._cryspy_dicts: dict[str, dict[str, Any]] = {}
        # Last-seen peak profile tag per cache key (for invalidation).
        self._cached_peak_types: dict[str, str] = {}
        # Last-seen per-atom ADP types per cache key (for invalidation).
        self._cached_adp_types: dict[str, tuple[str, ...]] = {}

    def _invalidate_stale_cache(
        self,
        combined_name: str,
        experiment: ExperimentBase,
        structure: Structure | None = None,
    ) -> None:
        """
        Drop the cached dict when experiment or structure config changed.

        Compares the current peak profile tag and the per-atom ADP types
        against the last-seen values; on any mismatch the cached cryspy
        dictionary is discarded so it gets rebuilt from a fresh object.
        """
        if 'peak' in type(experiment)._public_attrs():
            peak_tag = experiment.peak.type_info.tag
            if peak_tag != self._cached_peak_types.get(combined_name):
                self._cryspy_dicts.pop(combined_name, None)
            self._cached_peak_types[combined_name] = peak_tag

        if structure is None:
            return

        adp_signature = tuple(site.adp_type.value for site in structure.atom_sites)
        if adp_signature != self._cached_adp_types.get(combined_name):
            self._cryspy_dicts.pop(combined_name, None)
        self._cached_adp_types[combined_name] = adp_signature

    def calculate_structure_factors(
        self,
        structure: Structure,
        experiment: ExperimentBase,
        *,
        called_by_minimizer: bool = False,
    ) -> tuple[Any, Any]:
        """
        Calculate structure factors for one experiment using Cryspy.

        The cryspy object is only rebuilt when the call does not come
        from the minimizer or when no cached dictionary exists yet;
        otherwise the cached dictionary is updated in place, which is
        significantly faster during fitting.

        Parameters
        ----------
        structure : Structure
            The structure to calculate structure factors for.
        experiment : ExperimentBase
            The experiment associated with the sample models.
        called_by_minimizer : bool, default=False
            Whether the calculation is called by a minimizer.

        Returns
        -------
        tuple[Any, Any]
            The ``sin(theta)/lambda`` values and the calculated
            intensities, or two empty lists when Cryspy produced no
            data for the experiment.
        """
        combined_name = f'{structure.name}_{experiment.name}'
        self._invalidate_stale_cache(combined_name, experiment, structure)

        # Fast path only when a minimizer drives the call and a cached
        # dictionary already exists for this structure/experiment pair.
        if called_by_minimizer and combined_name in self._cryspy_dicts:
            cryspy_dict = self._recreate_cryspy_dict(structure, experiment)
        else:
            cryspy_obj = self._recreate_cryspy_obj(structure, experiment)
            cryspy_dict = cryspy_obj.get_dictionary()

        # Keep a pristine copy for the next in-place update cycle.
        self._cryspy_dicts[combined_name] = copy.deepcopy(cryspy_dict)

        cryspy_in_out_dict: dict[str, Any] = {}

        # Calculate the pattern using Cryspy
        # TODO: Redirect stderr to suppress Cryspy warnings.
        #  This is a temporary solution to avoid cluttering the output.
        #  E.g. cryspy/A_functions_base/powder_diffraction_tof.py:106:
        #  RuntimeWarning: overflow encountered in exp
        #  Remove this when Cryspy is updated to handle warnings better.
        with contextlib.redirect_stderr(io.StringIO()):
            rhochi_calc_chi_sq_by_dictionary(
                cryspy_dict,
                dict_in_out=cryspy_in_out_dict,
                flag_use_precalculated_data=False,
                flag_calc_analytical_derivatives=False,
            )

        cryspy_block_name = f'diffrn_{experiment.name}'

        try:
            y_calc = cryspy_in_out_dict[cryspy_block_name]['intensity_calc']
            stol = cryspy_in_out_dict[cryspy_block_name]['sthovl']
        except KeyError:
            print(f'[CryspyCalculator] Error: No calculated data for {cryspy_block_name}')
            return [], []

        return stol, y_calc

    def calculate_pattern(
        self,
        structure: Structure,
        experiment: ExperimentBase,
        *,
        called_by_minimizer: bool = False,
    ) -> np.ndarray | list[float]:
        """
        Calculate the diffraction pattern using Cryspy.

        The cryspy object is only recreated when this method is NOT
        called by the minimizer, or when the cryspy dictionary has not
        yet been created. In all other cases the existing cryspy
        dictionary is modified in place, which significantly speeds up
        the calculation.

        Parameters
        ----------
        structure : Structure
            The structure to calculate the pattern for.
        experiment : ExperimentBase
            The experiment associated with the structure.
        called_by_minimizer : bool, default=False
            Whether the calculation is called by a minimizer.

        Returns
        -------
        np.ndarray | list[float]
            The calculated diffraction pattern as a NumPy array or a
            list of floats (empty on failure).
        """
        combined_name = f'{structure.name}_{experiment.name}'
        # Drop the cached dict if the peak profile or ADP types changed.
        self._invalidate_stale_cache(combined_name, experiment, structure)

        if called_by_minimizer:
            if self._cryspy_dicts and combined_name in self._cryspy_dicts:
                # Fast path: update the cached dictionary in place.
                cryspy_dict = self._recreate_cryspy_dict(structure, experiment)
            else:
                cryspy_obj = self._recreate_cryspy_obj(structure, experiment)
                cryspy_dict = cryspy_obj.get_dictionary()
        else:
            cryspy_obj = self._recreate_cryspy_obj(structure, experiment)
            cryspy_dict = cryspy_obj.get_dictionary()

        # Keep a pristine copy for the next in-place update cycle.
        self._cryspy_dicts[combined_name] = copy.deepcopy(cryspy_dict)

        cryspy_in_out_dict: dict[str, Any] = {}

        # Calculate the pattern using Cryspy
        # TODO: Redirect stderr to suppress Cryspy warnings.
        #  This is a temporary solution to avoid cluttering the output.
        #  E.g. cryspy/A_functions_base/powder_diffraction_tof.py:106:
        #  RuntimeWarning: overflow encountered in exp
        #  Remove this when Cryspy is updated to handle warnings better.
        with contextlib.redirect_stderr(io.StringIO()):
            rhochi_calc_chi_sq_by_dictionary(
                cryspy_dict,
                dict_in_out=cryspy_in_out_dict,
                flag_use_precalculated_data=False,
                flag_calc_analytical_derivatives=False,
            )

        # Cryspy names its output blocks by beam mode: 'pd_<name>' for
        # constant wavelength, 'tof_<name>' for time-of-flight.
        prefixes = {
            BeamModeEnum.CONSTANT_WAVELENGTH: 'pd',
            BeamModeEnum.TIME_OF_FLIGHT: 'tof',
        }
        beam_mode = experiment.type.beam_mode.value
        if beam_mode in prefixes:
            cryspy_block_name = f'{prefixes[beam_mode]}_{experiment.name}'
        else:
            print(f'[CryspyCalculator] Error: Unknown beam mode {experiment.type.beam_mode.value}')
            return []

        try:
            signal_plus = cryspy_in_out_dict[cryspy_block_name]['signal_plus']
            signal_minus = cryspy_in_out_dict[cryspy_block_name]['signal_minus']
            y_calc = signal_plus + signal_minus
        except KeyError:
            print(f'[CryspyCalculator] Error: No calculated data for {cryspy_block_name}')
            return []

        return y_calc

    def _recreate_cryspy_dict(
        self,
        structure: Structure,
        experiment: ExperimentBase,
    ) -> dict[str, Any]:
        """
        Rebuild an up-to-date Cryspy dictionary from the cached copy.

        Parameters
        ----------
        structure : Structure
            The structure to update.
        experiment : ExperimentBase
            The experiment to update.

        Returns
        -------
        dict[str, Any]
            The updated Cryspy dictionary.
        """
        cache_key = f'{structure.name}_{experiment.name}'
        # Deep-copy so the cached pristine dictionary stays untouched.
        updated = copy.deepcopy(self._cryspy_dicts[cache_key])

        model_key = f'crystal_{structure.name}'
        self._update_structure_in_cryspy_dict(updated[model_key], structure)
        self._update_experiment_in_cryspy_dict(updated, experiment)

        return updated

    @staticmethod
    def _update_structure_in_cryspy_dict(
        cryspy_model_dict: dict[str, Any],
        structure: Structure,
    ) -> None:
        """
        Push current structure parameter values into a cryspy model dict.

        Updates the unit cell, fractional coordinates, occupancies and
        atomic displacement parameters in place.

        Parameters
        ----------
        cryspy_model_dict : dict[str, Any]
            The ``crystal_<name>`` sub-dict.
        structure : Structure
            The source structure.
        """
        from easydiffraction.datablocks.structure.categories.atom_sites.enums import (  # noqa: PLC0415
            AdpTypeEnum,
        )

        # Unit cell: cryspy stores the three angles in radians.
        cell = structure.cell
        unit_cell = cryspy_model_dict['unit_cell_parameters']
        unit_cell[0] = cell.length_a.value
        unit_cell[1] = cell.length_b.value
        unit_cell[2] = cell.length_c.value
        unit_cell[3] = np.deg2rad(cell.angle_alpha.value)
        unit_cell[4] = np.deg2rad(cell.angle_beta.value)
        unit_cell[5] = np.deg2rad(cell.angle_gamma.value)

        fract_xyz = cryspy_model_dict['atom_fract_xyz']
        occupancies = cryspy_model_dict['atom_occupancy']
        b_iso_values = cryspy_model_dict['atom_b_iso']

        # For anisotropic atoms the full ADP lives in the β tensor;
        # setting b_iso to zero avoids double-counting in cryspy's DWF
        # which sums both the isotropic and anisotropic contributions.
        aniso_tags = {AdpTypeEnum.BANI.value, AdpTypeEnum.UANI.value}

        for idx, site in enumerate(structure.atom_sites):
            fract_xyz[0][idx] = site.fract_x.value
            fract_xyz[1][idx] = site.fract_y.value
            fract_xyz[2][idx] = site.fract_z.value
            occupancies[idx] = site.occupancy.value
            if site.adp_type.value in aniso_tags:
                b_iso_values[idx] = 0.0
            else:
                b_iso_values[idx] = site.adp_iso_as_b

        # Anisotropic ADPs: update the β tensor when cryspy carries one.
        if 'atom_beta' in cryspy_model_dict:
            CryspyCalculator._update_aniso_beta(
                cryspy_model_dict,
                structure,
            )

    @staticmethod
    def _update_aniso_beta(
        cryspy_model_dict: dict[str, Any],
        structure: Structure,
    ) -> None:
        """
        Update cryspy ``atom_beta`` from anisotropic ADP values.

        Converts B or U tensor components to cryspy's internal β
        representation using β_ij = 2π²·U_ij·a*_i·a*_j.

        Parameters
        ----------
        cryspy_model_dict : dict[str, Any]
            The ``crystal_<name>`` sub-dict.
        structure : Structure
            The source structure.
        """
        from cryspy.A_functions_base.function_1_atomic_vibrations import (  # noqa: PLC0415
            calc_beta_by_u,
        )
        from cryspy.A_functions_base.unit_cell import (  # noqa: PLC0415
            calc_reciprocal_by_unit_cell_parameters,
        )

        from easydiffraction.datablocks.structure.categories.atom_sites.enums import (  # noqa: PLC0415
            AdpTypeEnum,
        )

        # Column -> atom index mapping for the aniso block; absent when
        # the model has no anisotropic atoms.
        aniso_index = cryspy_model_dict.get('atom_site_aniso_index')
        if aniso_index is None:
            return

        cryspy_beta = cryspy_model_dict['atom_beta']
        cell_params = cryspy_model_dict['unit_cell_parameters']

        # Compute reciprocal lengths from cell parameters (with angles
        # already in radians as stored by cryspy).
        recip_params, _ = calc_reciprocal_by_unit_cell_parameters(cell_params)

        # Minimal duck-typed stand-in for the cell object expected by
        # calc_beta_by_u: only the reciprocal lengths are read.
        class _CellLike:
            reciprocal_length_a = recip_params[0]
            reciprocal_length_b = recip_params[1]
            reciprocal_length_c = recip_params[2]

        cell_like = _CellLike()
        # B = 8π²·U, used below to rescale B-convention values to U.
        factor = 8.0 * np.pi**2

        for col, atom_idx in enumerate(aniso_index):
            atom = list(structure.atom_sites)[atom_idx]
            adp_enum = AdpTypeEnum(atom.adp_type.value)
            # Only anisotropic conventions contribute to the β tensor.
            if adp_enum not in {AdpTypeEnum.BANI, AdpTypeEnum.UANI}:
                continue

            aniso = structure.atom_site_aniso[atom.label.value]
            u_vals = [
                aniso.adp_11.value,
                aniso.adp_22.value,
                aniso.adp_33.value,
                aniso.adp_12.value,
                aniso.adp_13.value,
                aniso.adp_23.value,
            ]

            # Convert to U if stored as B
            if adp_enum == AdpTypeEnum.BANI:
                u_vals = [v / factor for v in u_vals]

            betas = calc_beta_by_u(u_vals, cell_like)
            for k in range(6):
                cryspy_beta[k][col] = betas[k]

    @staticmethod
    def _update_experiment_in_cryspy_dict(
        cryspy_dict: dict[str, Any],
        experiment: ExperimentBase,
    ) -> None:
        """
        Update experiment parameters in the Cryspy dictionary.

        Writes instrument, peak-profile and (for single crystals)
        extinction values in place, selecting the cryspy block by
        sample form and beam mode.

        Parameters
        ----------
        cryspy_dict : dict[str, Any]
            The full Cryspy dictionary.
        experiment : ExperimentBase
            The source experiment.
        """
        if experiment.type.sample_form.value == SampleFormEnum.POWDER:
            if experiment.type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH:
                # Powder, constant-wavelength: 'pd_<name>' block.
                cryspy_expt_name = f'pd_{experiment.name}'
                cryspy_expt_dict = cryspy_dict[cryspy_expt_name]

                # Instrument (cryspy stores the offset in radians)
                cryspy_expt_dict['offset_ttheta'][0] = np.deg2rad(
                    experiment.instrument.calib_twotheta_offset.value
                )
                cryspy_expt_dict['wavelength'][0] = experiment.instrument.setup_wavelength.value

                # Peak (Caglioti U, V, W then Lorentz X, Y)
                cryspy_resolution = cryspy_expt_dict['resolution_parameters']
                cryspy_resolution[0] = experiment.peak.broad_gauss_u.value
                cryspy_resolution[1] = experiment.peak.broad_gauss_v.value
                cryspy_resolution[2] = experiment.peak.broad_gauss_w.value
                cryspy_resolution[3] = experiment.peak.broad_lorentz_x.value
                cryspy_resolution[4] = experiment.peak.broad_lorentz_y.value

                # Empirical asymmetry, only when the block carries it.
                if 'asymmetry_parameters' in cryspy_expt_dict:
                    cryspy_asymmetry = cryspy_expt_dict['asymmetry_parameters']
                    cryspy_asymmetry[0] = experiment.peak.asym_empir_1.value
                    cryspy_asymmetry[1] = experiment.peak.asym_empir_2.value
                    cryspy_asymmetry[2] = experiment.peak.asym_empir_3.value
                    cryspy_asymmetry[3] = experiment.peak.asym_empir_4.value

            elif experiment.type.beam_mode.value == BeamModeEnum.TIME_OF_FLIGHT:
                # Powder, time-of-flight: 'tof_<name>' block.
                cryspy_expt_name = f'tof_{experiment.name}'
                cryspy_expt_dict = cryspy_dict[cryspy_expt_name]

                # Instrument (d -> TOF calibration; bank angle in radians)
                cryspy_expt_dict['zero'][0] = experiment.instrument.calib_d_to_tof_offset.value
                cryspy_expt_dict['dtt1'][0] = experiment.instrument.calib_d_to_tof_linear.value
                cryspy_expt_dict['dtt2'][0] = experiment.instrument.calib_d_to_tof_quad.value
                cryspy_expt_dict['ttheta_bank'] = np.deg2rad(
                    experiment.instrument.setup_twotheta_bank.value
                )

                # Peak - sigma (common to all TOF profiles)
                cryspy_sigma = cryspy_expt_dict['profile_sigmas']
                cryspy_sigma[0] = experiment.peak.broad_gauss_sigma_0.value
                cryspy_sigma[1] = experiment.peak.broad_gauss_sigma_1.value
                cryspy_sigma[2] = experiment.peak.broad_gauss_sigma_2.value

                # Profile-specific TOF parameters handled by the helper.
                _update_tof_peak_in_cryspy_dict(cryspy_expt_dict, experiment.peak)

        if experiment.type.sample_form.value == SampleFormEnum.SINGLE_CRYSTAL:
            # Single crystal: 'diffrn_<name>' block.
            cryspy_expt_name = f'diffrn_{experiment.name}'
            cryspy_expt_dict = cryspy_dict[cryspy_expt_name]

            # Instrument
            if experiment.type.beam_mode.value == BeamModeEnum.CONSTANT_WAVELENGTH:
                cryspy_expt_dict['wavelength'][0] = experiment.instrument.setup_wavelength.value

            # Extinction
            cryspy_expt_dict['extinction_radius'][0] = experiment.extinction.radius.value
            cryspy_expt_dict['extinction_mosaicity'][0] = experiment.extinction.mosaicity.value

    def _recreate_cryspy_obj(
        self,
        structure: Structure,
        experiment: ExperimentBase,
    ) -> object:
        """
        Build a fresh cryspy global object from CIF representations.

        Parameters
        ----------
        structure : Structure
            The structure to recreate.
        experiment : ExperimentBase
            The experiment to recreate.

        Returns
        -------
        object
            The assembled cryspy global object.
        """
        combined = str_to_globaln('')

        # Add the structure first, then the experiment that links to it.
        structure_cif = self._convert_structure_to_cryspy_cif(structure)
        combined.add_items(str_to_globaln(structure_cif).items)

        experiment_cif = self._convert_experiment_to_cryspy_cif(
            experiment,
            linked_structure=structure,
        )
        combined.add_items(str_to_globaln(experiment_cif).items)

        return combined

    def _convert_structure_to_cryspy_cif(self, structure: Structure) -> str:
        """
        Render a structure as a Cryspy-compatible CIF string.

        CrysPy uses attribute names that match the CIF convention:
        ``u_11`` for ``U_11``, ``b_11`` for ``B_11``, etc.  Its
        ``apply_space_group_constraint`` always accesses ``u_11``
        regardless of ``adp_type``.  To avoid mismatches, the CIF sent
        to cryspy always uses **U** notation: ``Biso`` ⟶ ``Uiso``,
        ``Bani`` ⟶ ``Uani``, values divided by 8π².

        Parameters
        ----------
        structure : Structure
            The structure to convert.

        Returns
        -------
        str
            The Cryspy CIF string representation of the structure.
        """
        # Flip B-convention atoms to U notation just long enough to
        # serialize; the saved state is restored even if export raises.
        saved_state = self._temporarily_convert_to_u_notation(structure)
        try:
            return structure.as_cif
        finally:
            self._restore_from_u_notation(structure, saved_state)

    @staticmethod
    def _temporarily_convert_to_u_notation(
        structure: Structure,
    ) -> list[tuple]:
        """
        Temporarily convert all B-convention atoms to U notation.

        Mutates the private state of each ``Biso``/``Bani`` atom so that
        the generated CIF carries ``U`` values (``U = B / (8 * pi**2)``)
        and ``U``-style CIF tag names.

        Returns saved state for later restoration by
        ``_restore_from_u_notation``.  Each saved entry is the 7-tuple
        ``(atom, aniso, orig_vals, orig_names, orig_adp_type,
        orig_iso_names, orig_iso_val)``; ``aniso``, ``orig_vals`` and
        ``orig_names`` are ``None`` for ``Biso`` atoms or when no
        matching anisotropic entry exists.
        """
        from easydiffraction.datablocks.structure.categories.atom_sites.enums import (  # noqa: PLC0415
            AdpTypeEnum,
        )

        # Conversion factor between ADP conventions: B = 8 * pi^2 * U.
        factor = 8.0 * np.pi**2
        suffixes = ('11', '22', '33', '12', '13', '23')
        saved: list[tuple] = []

        for atom in structure.atom_sites:
            adp_enum = AdpTypeEnum(atom.adp_type.value)
            is_b = adp_enum in {AdpTypeEnum.BISO, AdpTypeEnum.BANI}
            if not is_b:
                # Atom already uses U notation; nothing to convert.
                continue

            # Snapshot the original private state before mutating it.
            orig_adp_type = atom._adp_type._value
            orig_iso_val = atom._adp_iso._value
            orig_iso_names = list(atom._adp_iso._cif_handler._names)

            # Rescale the isotropic value and list the U tag first
            # (presumably the first name wins on CIF export — confirm
            # against CifHandler).
            atom._adp_iso._value = orig_iso_val / factor
            atom._adp_iso._cif_handler._names = [
                '_atom_site.U_iso_or_equiv',
                '_atom_site.B_iso_or_equiv',
            ]

            if adp_enum == AdpTypeEnum.BISO:
                atom._adp_type._value = AdpTypeEnum.UISO.value
                saved.append((atom, None, None, None, orig_adp_type, orig_iso_names, orig_iso_val))
            else:
                atom._adp_type._value = AdpTypeEnum.UANI.value
                lbl = atom.label.value
                if lbl in structure.atom_site_aniso:
                    aniso = structure.atom_site_aniso[lbl]
                else:
                    aniso = None
                if aniso is not None:
                    # Convert each anisotropic component in place while
                    # remembering original values and tag names.
                    orig_vals = []
                    orig_names = []
                    for s in suffixes:
                        param = getattr(aniso, f'_adp_{s}')
                        orig_vals.append(param._value)
                        orig_names.append(list(param._cif_handler._names))
                        param._value /= factor
                        param._cif_handler._names = [
                            f'_atom_site_aniso.U_{s}',
                            f'_atom_site_aniso.B_{s}',
                        ]
                    saved.append((
                        atom,
                        aniso,
                        orig_vals,
                        orig_names,
                        orig_adp_type,
                        orig_iso_names,
                        orig_iso_val,
                    ))
                else:
                    # Bani atom without an anisotropic entry: only the
                    # isotropic state will need restoring.
                    saved.append((
                        atom,
                        None,
                        None,
                        None,
                        orig_adp_type,
                        orig_iso_names,
                        orig_iso_val,
                    ))

        return saved

    @staticmethod
    def _restore_from_u_notation(
        structure: Structure,  # noqa: ARG004
        saved: list[tuple],
    ) -> None:
        """Restore original B-convention state after CIF generation."""
        suffixes = ('11', '22', '33', '12', '13', '23')

        for (
            atom,
            aniso,
            orig_vals,
            orig_names,
            orig_adp_type,
            orig_iso_names,
            orig_iso_val,
        ) in saved:
            atom._adp_type._value = orig_adp_type
            atom._adp_iso._value = orig_iso_val
            atom._adp_iso._cif_handler._names = orig_iso_names
            if aniso is not None and orig_vals is not None:
                for s, val, names in zip(suffixes, orig_vals, orig_names, strict=False):
                    param = getattr(aniso, f'_adp_{s}')
                    param._value = val
                    param._cif_handler._names = names

    def _convert_experiment_to_cryspy_cif(  # noqa: PLR6301
        self,
        experiment: ExperimentBase,
        linked_structure: object,
    ) -> str:
        """
        Render an experiment as a Cryspy-compatible CIF string.

        Parameters
        ----------
        experiment : ExperimentBase
            The experiment to convert.
        linked_structure : object
            The structure linked to the experiment.

        Returns
        -------
        str
            The Cryspy CIF string representation of the experiment.
        """
        public_attrs = type(experiment)._public_attrs()

        def _optional(attr: str) -> object | None:
            # Components absent from this experiment type become None.
            return getattr(experiment, attr) if attr in public_attrs else None

        expt_type = _optional('type')
        instrument = _optional('instrument')
        peak = _optional('peak')
        extinction = _optional('extinction')

        lines = [f'data_{experiment.name}']

        # Experiment metadata sections.
        _cif_radiation_probe(lines, expt_type)
        _cif_instrument_section(lines, expt_type, instrument)
        _cif_peak_section(lines, expt_type, peak)
        _cif_extinction_section(lines, expt_type, extinction)

        # Powder range data; the bounds also seed the background section.
        twotheta_min, twotheta_max = _cif_range_section(lines, expt_type, experiment)

        # Structure-related sections.
        _cif_orient_matrix_section(lines, expt_type)
        _cif_phase_section(lines, expt_type, linked_structure)
        _cif_background_section(lines, expt_type, twotheta_min, twotheta_max)

        # Measured data points.
        _cif_measured_data_section(lines, expt_type, experiment)

        return '\n'.join(lines)
calculate_pattern(structure, experiment, *, called_by_minimizer=False)

Calculate the diffraction pattern using Cryspy.

We only recreate the cryspy_obj if this method is NOT called by the minimizer, or if the cryspy_dict has NOT yet been created. In all other cases, we modify the existing cryspy_dict instead, which significantly speeds up the calculation.

Parameters:

Name Type Description Default
structure Structure

The structure to calculate the pattern for.

required
experiment ExperimentBase

The experiment associated with the structure.

required
called_by_minimizer bool

Whether the calculation is called by a minimizer.

False

Returns:

Type Description
ndarray | list[float]

The calculated diffraction pattern as a NumPy array or a list of floats.

Source code in src/easydiffraction/analysis/calculators/cryspy.py
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
def calculate_pattern(
    self,
    structure: Structure,
    experiment: ExperimentBase,
    *,
    called_by_minimizer: bool = False,
) -> np.ndarray | list[float]:
    """
    Calculate the diffraction pattern using Cryspy.

    The cryspy object is rebuilt from scratch unless the call comes
    from the minimizer AND a cached cryspy dict already exists for
    this structure/experiment pair; in that case only the cached
    dict is refreshed, which significantly speeds up repeated
    evaluations.

    Parameters
    ----------
    structure : Structure
        The structure to calculate the pattern for.
    experiment : ExperimentBase
        The experiment associated with the structure.
    called_by_minimizer : bool, default=False
        Whether the calculation is called by a minimizer.

    Returns
    -------
    np.ndarray | list[float]
        The calculated diffraction pattern, or an empty list when the
        beam mode is unknown or Cryspy produced no data.
    """
    combined_name = f'{structure.name}_{experiment.name}'
    self._invalidate_stale_cache(combined_name, experiment, structure)

    # Fast path: refresh the cached dict in place when the minimizer is
    # driving and a cache entry exists; otherwise rebuild fully.
    if called_by_minimizer and self._cryspy_dicts and combined_name in self._cryspy_dicts:
        cryspy_dict = self._recreate_cryspy_dict(structure, experiment)
    else:
        cryspy_dict = self._recreate_cryspy_obj(structure, experiment).get_dictionary()

    self._cryspy_dicts[combined_name] = copy.deepcopy(cryspy_dict)

    cryspy_in_out_dict: dict[str, Any] = {}

    # TODO: Redirect stderr to suppress Cryspy warnings.
    #  This is a temporary solution to avoid cluttering the output.
    #  E.g. cryspy/A_functions_base/powder_diffraction_tof.py:106:
    #  RuntimeWarning: overflow encountered in exp
    #  Remove this when Cryspy is updated to handle warnings better.
    with contextlib.redirect_stderr(io.StringIO()):
        rhochi_calc_chi_sq_by_dictionary(
            cryspy_dict,
            dict_in_out=cryspy_in_out_dict,
            flag_use_precalculated_data=False,
            flag_calc_analytical_derivatives=False,
        )

    # Cryspy prefixes its output block name by the beam mode.
    beam_mode = experiment.type.beam_mode.value
    prefix_by_mode = {
        BeamModeEnum.CONSTANT_WAVELENGTH: 'pd',
        BeamModeEnum.TIME_OF_FLIGHT: 'tof',
    }
    prefix = prefix_by_mode.get(beam_mode)
    if prefix is None:
        print(f'[CryspyCalculator] Error: Unknown beam mode {experiment.type.beam_mode.value}')
        return []
    cryspy_block_name = f'{prefix}_{experiment.name}'

    try:
        result_block = cryspy_in_out_dict[cryspy_block_name]
        y_calc = result_block['signal_plus'] + result_block['signal_minus']
    except KeyError:
        print(f'[CryspyCalculator] Error: No calculated data for {cryspy_block_name}')
        return []

    return y_calc
calculate_structure_factors(structure, experiment, *, called_by_minimizer=False)

Calculate single-crystal structure factors using Cryspy, returning the sin(θ)/λ values together with the calculated intensities.

Parameters:

Name Type Description Default
structure Structure

The structure to calculate structure factors for.

required
experiment ExperimentBase

The experiment associated with the sample models.

required
called_by_minimizer bool

Whether the calculation is called by a minimizer.

False
Source code in src/easydiffraction/analysis/calculators/cryspy.py
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
def calculate_structure_factors(
    self,
    structure: Structure,
    experiment: ExperimentBase,
    *,
    called_by_minimizer: bool = False,
) -> tuple:
    """
    Calculate single-crystal structure factors using Cryspy.

    Follows the same caching strategy as ``calculate_pattern``: the
    cryspy object is only rebuilt when the call does not come from the
    minimizer or no cached cryspy dict exists yet for this
    structure/experiment pair.

    Parameters
    ----------
    structure : Structure
        The structure to calculate structure factors for.
    experiment : ExperimentBase
        The experiment associated with the sample models.
    called_by_minimizer : bool, default=False
        Whether the calculation is called by a minimizer.

    Returns
    -------
    tuple
        ``(sthovl, intensity_calc)`` from Cryspy, or ``([], [])`` when
        no calculated data is available for the experiment block.
    """
    combined_name = f'{structure.name}_{experiment.name}'
    self._invalidate_stale_cache(combined_name, experiment, structure)

    # Reuse the cached cryspy dict when the minimizer is driving.
    if called_by_minimizer:
        if self._cryspy_dicts and combined_name in self._cryspy_dicts:
            cryspy_dict = self._recreate_cryspy_dict(structure, experiment)
        else:
            cryspy_obj = self._recreate_cryspy_obj(structure, experiment)
            cryspy_dict = cryspy_obj.get_dictionary()
    else:
        cryspy_obj = self._recreate_cryspy_obj(structure, experiment)
        cryspy_dict = cryspy_obj.get_dictionary()

    self._cryspy_dicts[combined_name] = copy.deepcopy(cryspy_dict)

    cryspy_in_out_dict: dict[str, Any] = {}

    # Calculate via Cryspy.
    # TODO: Redirect stderr to suppress Cryspy warnings.
    #  This is a temporary solution to avoid cluttering the output.
    #  E.g. cryspy/A_functions_base/powder_diffraction_tof.py:106:
    #  RuntimeWarning: overflow encountered in exp
    #  Remove this when Cryspy is updated to handle warnings better.
    with contextlib.redirect_stderr(io.StringIO()):
        rhochi_calc_chi_sq_by_dictionary(
            cryspy_dict,
            dict_in_out=cryspy_in_out_dict,
            flag_use_precalculated_data=False,
            flag_calc_analytical_derivatives=False,
        )

    # Single-crystal results live in the 'diffrn_' block.
    cryspy_block_name = f'diffrn_{experiment.name}'

    try:
        y_calc = cryspy_in_out_dict[cryspy_block_name]['intensity_calc']
        stol = cryspy_in_out_dict[cryspy_block_name]['sthovl']
    except KeyError:
        print(f'[CryspyCalculator] Error: No calculated data for {cryspy_block_name}')
        return [], []

    return stol, y_calc
name property

Short identifier of this calculator engine.

factory

Calculator factory — delegates to FactoryBase.

Overrides _supported_map to filter out calculators whose engines are not importable in the current environment.

CalculatorFactory

Bases: FactoryBase

Factory for creating calculation engine instances.

Only calculators whose engine_imported flag is True are available for creation.

Source code in src/easydiffraction/analysis/calculators/factory.py
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
class CalculatorFactory(FactoryBase):
    """
    Factory for creating calculation engine instances.

    Only calculators whose ``engine_imported`` flag is ``True`` are
    available for creation.
    """

    # Default engine per scattering type: Bragg -> CrysPy, total -> PDFfit.
    _default_rules: ClassVar[dict] = {
        frozenset({
            ('scattering_type', ScatteringTypeEnum.BRAGG),
        }): CalculatorEnum.CRYSPY,
        frozenset({
            ('scattering_type', ScatteringTypeEnum.TOTAL),
        }): CalculatorEnum.PDFFIT,
    }

    @classmethod
    def _supported_map(cls) -> dict[str, type]:
        """Only include calculators whose engines are importable."""
        supported: dict[str, type] = {}
        for calculator_cls in cls._registry:
            if calculator_cls.engine_imported:
                supported[calculator_cls.type_info.tag] = calculator_cls
        return supported

pdffit

PDF calculation backend using diffpy.pdffit2 if available.

The class adapts the engine to EasyDiffraction calculator interface and silences stdio on import to avoid noisy output in notebooks and logs.

PdffitCalculator

Bases: CalculatorBase

Wrapper for Pdffit library.

Source code in src/easydiffraction/analysis/calculators/pdffit.py
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
@CalculatorFactory.register
class PdffitCalculator(CalculatorBase):
    """Wrapper for Pdffit library."""

    type_info = TypeInfo(
        tag='pdffit',
        description='PDFfit2 for pair distribution function calculations',
    )
    # True only when diffpy.pdffit2 could actually be imported.
    engine_imported: bool = PdfFit is not None

    @property
    def name(self) -> str:
        """Short identifier of this calculator engine."""
        return 'pdffit'

    def calculate_structure_factors(  # noqa: PLR6301
        self,
        structures: object,
        experiments: object,
    ) -> list:
        """
        Return an empty list; PDF does not compute structure factors.

        Parameters
        ----------
        structures : object
            Unused; kept for interface consistency.
        experiments : object
            Unused; kept for interface consistency.

        Returns
        -------
        list
            An empty list.
        """
        # PDF doesn't compute HKL but we keep interface consistent
        # Intentionally unused, required by public API/signature
        del structures, experiments
        print('[pdffit] Calculating HKLs (not applicable)...')
        return []

    def calculate_pattern(  # noqa: PLR6301
        self,
        structure: Structure,
        experiment: ExperimentBase,
        *,
        called_by_minimizer: bool = False,
    ) -> np.ndarray:
        """
        Calculate the PDF pattern using PDFfit2.

        Parameters
        ----------
        structure : Structure
            The structure object supplying atom sites and cell
            parameters.
        experiment : ExperimentBase
            The experiment object supplying instrument and peak
            parameters.
        called_by_minimizer : bool, default=False
            Unused; kept for interface consistency.

        Returns
        -------
        np.ndarray
            The calculated PDF pattern on the experiment's r-grid.
        """
        # Intentionally unused, required by public API/signature
        del called_by_minimizer

        # Create PDF calculator object
        calculator = PdfFit()

        # ---------------------------
        # Set structure parameters
        # ---------------------------

        # TODO: move CIF v2 -> CIF v1 conversion to a separate module
        # Convert the structure to CIF supported by PDFfit
        cif_string_v2 = structure.as_cif
        # Convert to version 1 of the CIF format: replace dots with
        # underscores wherever the dot has letters on both sides.
        dot_between_letters = r'(?<=[a-zA-Z])\.(?=[a-zA-Z])'
        cif_string_v1 = re.sub(dot_between_letters, '_', cif_string_v2)

        # Create the PDFfit structure
        pdffit_structure = pdffit_cif_parser().parse(cif_string_v1)

        # Set all model parameters:
        # space group, cell parameters, and atom sites (including ADPs)
        calculator.add_structure(pdffit_structure)

        # -------------------------
        # Set experiment parameters
        # -------------------------

        # Set some peak-related parameters
        calculator.setvar('pscale', experiment.linked_phases[structure.name].scale.value)
        calculator.setvar('delta1', experiment.peak.sharp_delta_1.value)
        calculator.setvar('delta2', experiment.peak.sharp_delta_2.value)
        calculator.setvar('spdiameter', experiment.peak.damp_particle_diameter.value)

        # Data grid; zero placeholder for G(r) since we only calculate
        x = list(experiment.data.x)
        y_noise = list(np.zeros_like(x))

        # Assign the data to the PDFfit calculator
        calculator.read_data_lists(
            stype=experiment.type.radiation_probe.value[0].upper(),
            qmax=experiment.peak.cutoff_q.value,
            qdamp=experiment.peak.damp_q.value,
            r_data=x,
            Gr_data=y_noise,
        )

        # qbroad must be set after read_data_lists
        calculator.setvar('qbroad', experiment.peak.broad_q.value)

        # -----------------
        # Calculate pattern
        # -----------------

        # Calculate the PDF pattern
        calculator.calc()

        # Fetch the calculated PDF (renamed to avoid shadowing the
        # regex variable above)
        calculated_pdf = calculator.getpdf_fit()
        return np.array(calculated_pdf)
calculate_pattern(structure, experiment, *, called_by_minimizer=False)

Calculate the PDF pattern using PDFfit2.

Parameters:

Name Type Description Default
structure Structure

The structure object supplying atom sites and cell parameters.

required
experiment ExperimentBase

The experiment object supplying instrument and peak parameters.

required
called_by_minimizer bool

Unused; kept for interface consistency.

False
Source code in src/easydiffraction/analysis/calculators/pdffit.py
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
def calculate_pattern(  # noqa: PLR6301
    self,
    structure: Structure,
    experiment: ExperimentBase,
    *,
    called_by_minimizer: bool = False,
) -> np.ndarray:
    """
    Calculate the PDF pattern using PDFfit2.

    Parameters
    ----------
    structure : Structure
        The structure object supplying atom sites and cell
        parameters.
    experiment : ExperimentBase
        The experiment object supplying instrument and peak
        parameters.
    called_by_minimizer : bool, default=False
        Unused; kept for interface consistency.

    Returns
    -------
    np.ndarray
        The calculated PDF pattern on the experiment's r-grid.
    """
    # Intentionally unused, required by public API/signature
    del called_by_minimizer

    # Create PDF calculator object
    calculator = PdfFit()

    # ---------------------------
    # Set structure parameters
    # ---------------------------

    # TODO: move CIF v2 -> CIF v1 conversion to a separate module
    # Convert the structure to CIF supported by PDFfit
    cif_string_v2 = structure.as_cif
    # Convert to version 1 of the CIF format: replace dots with
    # underscores wherever the dot has letters on both sides.
    dot_between_letters = r'(?<=[a-zA-Z])\.(?=[a-zA-Z])'
    cif_string_v1 = re.sub(dot_between_letters, '_', cif_string_v2)

    # Create the PDFfit structure
    pdffit_structure = pdffit_cif_parser().parse(cif_string_v1)

    # Set all model parameters:
    # space group, cell parameters, and atom sites (including ADPs)
    calculator.add_structure(pdffit_structure)

    # -------------------------
    # Set experiment parameters
    # -------------------------

    # Set some peak-related parameters
    calculator.setvar('pscale', experiment.linked_phases[structure.name].scale.value)
    calculator.setvar('delta1', experiment.peak.sharp_delta_1.value)
    calculator.setvar('delta2', experiment.peak.sharp_delta_2.value)
    calculator.setvar('spdiameter', experiment.peak.damp_particle_diameter.value)

    # Data grid; zero placeholder for G(r) since we only calculate
    x = list(experiment.data.x)
    y_noise = list(np.zeros_like(x))

    # Assign the data to the PDFfit calculator
    calculator.read_data_lists(
        stype=experiment.type.radiation_probe.value[0].upper(),
        qmax=experiment.peak.cutoff_q.value,
        qdamp=experiment.peak.damp_q.value,
        r_data=x,
        Gr_data=y_noise,
    )

    # qbroad must be set after read_data_lists
    calculator.setvar('qbroad', experiment.peak.broad_q.value)

    # -----------------
    # Calculate pattern
    # -----------------

    # Calculate the PDF pattern
    calculator.calc()

    # Fetch the calculated PDF (renamed to avoid shadowing the regex
    # variable above)
    calculated_pdf = calculator.getpdf_fit()
    return np.array(calculated_pdf)
calculate_structure_factors(structures, experiments)

Return an empty list; PDF does not compute structure factors.

Parameters:

Name Type Description Default
structures object

Unused; kept for interface consistency.

required
experiments object

Unused; kept for interface consistency.

required

Returns:

Type Description
list

An empty list.

Source code in src/easydiffraction/analysis/calculators/pdffit.py
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
def calculate_structure_factors(  # noqa: PLR6301
    self,
    structures: object,
    experiments: object,
) -> list:
    """
    Return an empty list; PDF does not compute structure factors.

    Parameters
    ----------
    structures : object
        Unused; kept for interface consistency.
    experiments : object
        Unused; kept for interface consistency.

    Returns
    -------
    list
        An empty list.
    """
    # PDF has no notion of HKL reflections; the method exists only to
    # satisfy the common calculator interface, so both arguments are
    # intentionally discarded.
    del structures, experiments
    print('[pdffit] Calculating HKLs (not applicable)...')
    return []
name property

Short identifier of this calculator engine.

categories

aliases

default

Alias category for mapping friendly names to parameters.

Defines a small record type used by analysis configuration to refer to parameters via readable labels instead of opaque identifiers. At runtime each alias holds a direct object reference to the parameter; for CIF serialization the parameter's unique_name is stored.

Alias

Bases: CategoryItem

Single alias entry.

Maps a human-readable label to a parameter object. The param_unique_name descriptor stores the parameter's unique_name for CIF serialization.

Source code in src/easydiffraction/analysis/categories/aliases/default.py
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
class Alias(CategoryItem):
    """
    Single alias entry.

    Maps a human-readable ``label`` to a parameter object. The
    ``param_unique_name`` descriptor stores the parameter's
    ``unique_name`` for CIF serialization.
    """

    def __init__(self) -> None:
        super().__init__()

        # Identifier-style alias name; validated as a Python-like
        # identifier (letters/digits/underscore, no leading digit).
        self._label = StringDescriptor(
            name='label',
            description='Human-readable alias for a parameter.',
            value_spec=AttributeSpec(
                default='_',  # TODO, Maybe None?
                validator=RegexValidator(pattern=r'^[A-Za-z_][A-Za-z0-9_]*$'),
            ),
            cif_handler=CifHandler(names=['_alias.label']),
        )
        # Dotted unique name of the referenced parameter; this is what
        # gets round-tripped via CIF (the pattern additionally allows
        # '.' separators).
        self._param_unique_name = StringDescriptor(
            name='param_unique_name',
            description='Unique name of the referenced parameter.',
            value_spec=AttributeSpec(
                default='_',
                validator=RegexValidator(pattern=r'^[A-Za-z_][A-Za-z0-9_.]*$'),
            ),
            cif_handler=CifHandler(names=['_alias.param_unique_name']),
        )

        # Direct reference to the Parameter object (runtime only).
        # Stored via object.__setattr__ to avoid parent-chain mutation.
        object.__setattr__(self, '_param_ref', None)

        # CIF category code and per-entry name (resolved lazily from
        # the current label value).
        self._identity.category_code = 'alias'
        self._identity.category_entry_name = lambda: str(self.label.value)

    # ------------------------------------------------------------------
    #  Public properties
    # ------------------------------------------------------------------

    @property
    def label(self) -> StringDescriptor:
        """
        Human-readable alias label (e.g. ``'biso_La'``).

        Reading this property returns the underlying
        ``StringDescriptor`` object. Assigning to it updates the
        parameter value.
        """
        return self._label

    @label.setter
    def label(self, value: str) -> None:
        self._label.value = value

    @property
    def param(self) -> object | None:
        """
        The referenced parameter object, or None before resolution.
        """
        return self._param_ref

    @property
    def param_unique_name(self) -> StringDescriptor:
        """
        Unique name of the referenced parameter (for CIF).

        Reading this property returns the underlying
        ``StringDescriptor`` object.
        """
        return self._param_unique_name

    def _set_param(self, param: object) -> None:
        """
        Store a direct reference to the parameter.

        Also updates ``param_unique_name`` from the parameter's
        ``unique_name`` for CIF round-tripping.
        """
        object.__setattr__(self, '_param_ref', param)  # noqa: PLC2801
        self._param_unique_name.value = param.unique_name

    @property
    def parameters(self) -> list:
        """
        Descriptors owned by this alias (excludes the param reference).
        """
        return [self._label, self._param_unique_name]
label property writable

Human-readable alias label (e.g. 'biso_La').

Reading this property returns the underlying StringDescriptor object. Assigning to it updates the parameter value.

param property

The referenced parameter object, or None before resolution.

param_unique_name property

Unique name of the referenced parameter (for CIF).

Reading this property returns the underlying StringDescriptor object.

parameters property

Descriptors owned by this alias (excludes the param reference).

Aliases

Bases: CategoryCollection

Collection of :class:Alias items.

Source code in src/easydiffraction/analysis/categories/aliases/default.py
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
@AliasesFactory.register
class Aliases(CategoryCollection):
    """Collection of :class:`Alias` items."""

    type_info = TypeInfo(
        tag='default',
        description='Parameter alias mappings',
    )

    def __init__(self) -> None:
        """Create an empty collection of aliases."""
        # The collection only accepts Alias items.
        super().__init__(item_type=Alias)

    def create(self, *, label: str, param: object) -> None:
        """
        Create a new alias mapping a label to a parameter.

        Parameters
        ----------
        label : str
            Human-readable alias name (e.g. ``'biso_La'``).
        param : object
            The parameter object to reference.
        """
        # Build the entry, wire up the parameter reference, then store.
        new_alias = Alias()
        new_alias.label = label
        new_alias._set_param(param)
        self.add(new_alias)
__init__()

Create an empty collection of aliases.

Source code in src/easydiffraction/analysis/categories/aliases/default.py
125
126
127
def __init__(self) -> None:
    """Create an empty collection of aliases."""
    # Restrict the collection to Alias items only.
    super().__init__(item_type=Alias)
create(*, label, param)

Create a new alias mapping a label to a parameter.

Parameters:

Name Type Description Default
label str

Human-readable alias name (e.g. 'biso_La').

required
param object

The parameter object to reference.

required
Source code in src/easydiffraction/analysis/categories/aliases/default.py
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
def create(self, *, label: str, param: object) -> None:
    """
    Create a new alias mapping a label to a parameter.

    Parameters
    ----------
    label : str
        Human-readable alias name (e.g. ``'biso_La'``).
    param : object
        The parameter object to reference.
    """
    # Build the entry, wire up the parameter reference, then store it.
    new_alias = Alias()
    new_alias.label = label
    new_alias._set_param(param)
    self.add(new_alias)

factory

Aliases factory — delegates entirely to FactoryBase.

AliasesFactory

Bases: FactoryBase

Create alias collections by tag.

Source code in src/easydiffraction/analysis/categories/aliases/factory.py
12
13
14
15
16
17
class AliasesFactory(FactoryBase):
    """Create alias collections by tag."""

    # With no selection criteria, fall back to the 'default' collection.
    _default_rules: ClassVar[dict] = {
        frozenset(): 'default',
    }

constraints

default

Simple symbolic constraint between parameters.

Represents an equation of the form lhs_alias = rhs_expr stored as a single expression string. The left- and right-hand sides are derived by splitting the expression at the = sign.

Constraint

Bases: CategoryItem

Single constraint item stored as lhs = rhs expression.

Source code in src/easydiffraction/analysis/categories/constraints/default.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
class Constraint(CategoryItem):
    """One symbolic constraint, stored as a single ``lhs = rhs`` string."""

    def __init__(self) -> None:
        super().__init__()

        # The whole equation lives in a single string descriptor; the two
        # sides are recovered on demand by _split_expression().
        self._expression = StringDescriptor(
            name='expression',
            description='Constraint equation, e.g. "occ_Ba = 1 - occ_La".',
            value_spec=AttributeSpec(
                default='_',  # TODO, Maybe None?
                validator=RegexValidator(pattern=r'.*'),
            ),
            cif_handler=CifHandler(names=['_constraint.expression']),
        )

        self._identity.category_code = 'constraint'
        self._identity.category_entry_name = lambda: self.lhs_alias

    # ------------------------------------------------------------------
    #  Public properties
    # ------------------------------------------------------------------

    @property
    def expression(self) -> StringDescriptor:
        """
        Full constraint equation (e.g. ``'occ_Ba = 1 - occ_La'``).

        Reading returns the underlying ``StringDescriptor`` object;
        assigning writes through to its value.
        """
        return self._expression

    @expression.setter
    def expression(self, value: str) -> None:
        self._expression.value = value

    @property
    def lhs_alias(self) -> str:
        """Alias on the left-hand side of the ``=`` sign."""
        return self._split_expression()[0]

    @property
    def rhs_expr(self) -> str:
        """Expression on the right-hand side of the ``=`` sign."""
        return self._split_expression()[1]

    # ------------------------------------------------------------------
    #  Internal helpers
    # ------------------------------------------------------------------

    def _split_expression(self) -> tuple[str, str]:
        """
        Split the stored expression at the first ``=`` sign.

        Returns
        -------
        tuple[str, str]
            ``(lhs_alias, rhs_expr)`` with surrounding whitespace
            stripped; ``rhs_expr`` is empty when there is no ``=``.
        """
        text = self._expression.value or ''
        lhs, sep, rhs = text.partition('=')
        if not sep:
            return (text.strip(), '')
        return (lhs.strip(), rhs.strip())
expression property writable

Full constraint equation (e.g. 'occ_Ba = 1 - occ_La').

Reading this property returns the underlying StringDescriptor object. Assigning to it updates the value.

lhs_alias property

Left-hand side alias derived from the expression.

rhs_expr property

Right-hand side expression derived from the expression.

Constraints

Bases: CategoryCollection

Collection of :class:Constraint items.

Source code in src/easydiffraction/analysis/categories/constraints/default.py
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
@ConstraintsFactory.register
class Constraints(CategoryCollection):
    """Collection of :class:`Constraint` items."""

    type_info = TypeInfo(
        tag='default',
        description='Symbolic parameter constraints',
    )

    _update_priority = 90  # After most others, but before data categories

    def __init__(self) -> None:
        """Create an empty constraints collection."""
        super().__init__(item_type=Constraint)
        # Constraints start out inactive until one is created or
        # enable() is called explicitly.
        self._enabled: bool = False

    @property
    def enabled(self) -> bool:
        """Whether constraints are currently active."""
        return self._enabled

    def enable(self) -> None:
        """Activate constraints so they are applied during fitting."""
        self._enabled = True

    def disable(self) -> None:
        """Deactivate constraints without deleting them."""
        self._enabled = False

    def create(self, *, expression: str) -> None:
        """
        Add a constraint given as an expression string.

        Creating a constraint also (re-)enables the whole collection.

        Parameters
        ----------
        expression : str
            Constraint equation, e.g. ``'biso_Co2 = biso_Co1'`` or
            ``'occ_Ba = 1 - occ_La'``.
        """
        constraint = Constraint()
        constraint.expression = expression
        self.add(constraint)
        self._enabled = True
__init__()

Create an empty constraints collection.

Source code in src/easydiffraction/analysis/categories/constraints/default.py
101
102
103
104
def __init__(self) -> None:
    """Create an empty constraints collection."""
    super().__init__(item_type=Constraint)
    self._enabled: bool = False
create(*, expression)

Create a constraint from an expression string.

Automatically enables the constraints collection whenever a constraint is created (not only on the first call).

Parameters:

Name Type Description Default
expression str

Constraint equation, e.g. 'biso_Co2 = biso_Co1' or 'occ_Ba = 1 - occ_La'.

required
Source code in src/easydiffraction/analysis/categories/constraints/default.py
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
def create(self, *, expression: str) -> None:
    """
    Create a constraint from an expression string.

    Automatically enables constraints on the first call.

    Parameters
    ----------
    expression : str
        Constraint equation, e.g. ``'biso_Co2 = biso_Co1'`` or
        ``'occ_Ba = 1 - occ_La'``.
    """
    item = Constraint()
    item.expression = expression
    self.add(item)
    self._enabled = True
disable()

Deactivate constraints without deleting them.

Source code in src/easydiffraction/analysis/categories/constraints/default.py
115
116
117
def disable(self) -> None:
    """Deactivate constraints without deleting them."""
    self._enabled = False
enable()

Activate constraints so they are applied during fitting.

Source code in src/easydiffraction/analysis/categories/constraints/default.py
111
112
113
def enable(self) -> None:
    """Activate constraints so they are applied during fitting."""
    self._enabled = True
enabled property

Whether constraints are currently active.

factory

Constraints factory — delegates entirely to FactoryBase.

ConstraintsFactory

Bases: FactoryBase

Create constraint collections by tag.

Source code in src/easydiffraction/analysis/categories/constraints/factory.py
12
13
14
15
16
17
class ConstraintsFactory(FactoryBase):
    """Create constraint collections by tag."""

    # Dispatch rules: an empty feature set maps to the 'default' variant.
    _default_rules: ClassVar[dict] = {
        frozenset(): 'default',
    }

fit_mode

enums

Enumeration for fit-mode values.

FitModeEnum

Bases: StrEnum

Fitting strategy for the analysis.

Source code in src/easydiffraction/analysis/categories/fit_mode/enums.py
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
class FitModeEnum(StrEnum):
    """Fitting strategy for the analysis."""

    SINGLE = 'single'
    JOINT = 'joint'
    SEQUENTIAL = 'sequential'

    @classmethod
    def default(cls) -> FitModeEnum:
        """Return the default fit mode (SINGLE)."""
        return cls.SINGLE

    def description(self) -> str:
        """Return a human-readable description of this fit mode."""
        if self is FitModeEnum.SINGLE:
            return 'Independent fitting of each experiment'
        if self is FitModeEnum.JOINT:
            return 'Simultaneous fitting of all experiments with weights'
        if self is FitModeEnum.SEQUENTIAL:
            return 'Sequential fitting over data files in a directory'
        return None
default() classmethod

Return the default fit mode (SINGLE).

Source code in src/easydiffraction/analysis/categories/fit_mode/enums.py
17
18
19
20
@classmethod
def default(cls) -> FitModeEnum:
    """Return the default fit mode (SINGLE)."""
    return cls.SINGLE
description()

Return a human-readable description of this fit mode.

Source code in src/easydiffraction/analysis/categories/fit_mode/enums.py
22
23
24
25
26
27
28
29
30
def description(self) -> str:
    """Return a human-readable description of this fit mode."""
    if self is FitModeEnum.SINGLE:
        return 'Independent fitting of each experiment'
    if self is FitModeEnum.JOINT:
        return 'Simultaneous fitting of all experiments with weights'
    if self is FitModeEnum.SEQUENTIAL:
        return 'Sequential fitting over data files in a directory'
    return None

factory

Fit-mode factory — delegates entirely to FactoryBase.

FitModeFactory

Bases: FactoryBase

Create fit-mode category items by tag.

Source code in src/easydiffraction/analysis/categories/fit_mode/factory.py
12
13
14
15
16
17
class FitModeFactory(FactoryBase):
    """Create fit-mode category items by tag."""

    # Dispatch rules: an empty feature set maps to the 'default' variant.
    _default_rules: ClassVar[dict] = {
        frozenset(): 'default',
    }

fit_mode

Fit-mode category item.

Stores the active fitting strategy as a CIF-serializable descriptor validated by FitModeEnum.

FitMode

Bases: CategoryItem

Fitting strategy selector.

Holds a single mode descriptor whose value is one of FitModeEnum members ('single', 'joint', or 'sequential').

Source code in src/easydiffraction/analysis/categories/fit_mode/fit_mode.py
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
@FitModeFactory.register
class FitMode(CategoryItem):
    """
    Fitting strategy selector.

    Holds a single ``mode`` descriptor whose value is one of
    ``FitModeEnum`` members (``'single'``, ``'joint'``, or
    ``'sequential'``).
    """

    type_info = TypeInfo(
        tag='default',
        description='Fit-mode category',
    )

    def __init__(self) -> None:
        super().__init__()

        # The mode is stored as a plain string; validity is enforced by
        # restricting the value to the FitModeEnum member values.
        self._mode: StringDescriptor = StringDescriptor(
            name='mode',
            description='Fitting strategy',
            value_spec=AttributeSpec(
                default=FitModeEnum.default().value,
                validator=MembershipValidator(allowed=[member.value for member in FitModeEnum]),
            ),
            cif_handler=CifHandler(names=['_analysis.fit_mode']),
        )

        self._identity.category_code = 'fit_mode'

    @property
    def mode(self) -> StringDescriptor:
        """
        Fitting strategy.

        Reading this property returns the underlying
        ``StringDescriptor`` object. Assigning to it updates the
        parameter value.
        """
        return self._mode

    @mode.setter
    def mode(self, value: str) -> None:
        self._mode.value = value
mode property writable

Fitting strategy.

Reading this property returns the underlying StringDescriptor object. Assigning to it updates the parameter value.

joint_fit_experiments

default

Joint-fit experiment weighting configuration.

Stores per-experiment weights to be used when multiple experiments are fitted simultaneously.

JointFitExperiment

Bases: CategoryItem

A single joint-fit entry.

Source code in src/easydiffraction/analysis/categories/joint_fit_experiments/default.py
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
class JointFitExperiment(CategoryItem):
    """A single joint-fit entry."""

    def __init__(self) -> None:
        super().__init__()

        # Experiment identifier; the regex requires a Python-style name.
        self._id: StringDescriptor = StringDescriptor(
            name='id',  # TODO: need new name instead of id
            description='Experiment identifier',  # TODO
            value_spec=AttributeSpec(
                default='_',
                validator=RegexValidator(pattern=r'^[A-Za-z_][A-Za-z0-9_]*$'),
            ),
            cif_handler=CifHandler(names=['_joint_fit_experiment.id']),
        )
        # Relative weight of this experiment in a joint fit.
        self._weight: NumericDescriptor = NumericDescriptor(
            name='weight',
            description='Weight factor',  # TODO
            value_spec=AttributeSpec(
                default=0.0,
                validator=RangeValidator(),
            ),
            cif_handler=CifHandler(names=['_joint_fit_experiment.weight']),
        )

        self._identity.category_code = 'joint_fit_experiment'
        # Collection entries are keyed by the experiment id value.
        self._identity.category_entry_name = lambda: str(self.id.value)

    # ------------------------------------------------------------------
    #  Public properties
    # ------------------------------------------------------------------

    @property
    def id(self) -> StringDescriptor:
        """
        Experiment identifier.

        Reading this property returns the underlying
        ``StringDescriptor`` object. Assigning to it updates the
        parameter value.
        """
        return self._id

    @id.setter
    def id(self, value: str) -> None:
        self._id.value = value

    @property
    def weight(self) -> NumericDescriptor:
        """
        Weight factor.

        Reading this property returns the underlying
        ``NumericDescriptor`` object. Assigning to it updates the
        parameter value.
        """
        return self._weight

    @weight.setter
    def weight(self, value: float) -> None:
        self._weight.value = value
id property writable

Experiment identifier.

Reading this property returns the underlying StringDescriptor object. Assigning to it updates the parameter value.

weight property writable

Weight factor.

Reading this property returns the underlying NumericDescriptor object. Assigning to it updates the parameter value.

JointFitExperiments

Bases: CategoryCollection

Collection of :class:JointFitExperiment items.

Source code in src/easydiffraction/analysis/categories/joint_fit_experiments/default.py
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
@JointFitExperimentsFactory.register
class JointFitExperiments(CategoryCollection):
    """Collection of :class:`JointFitExperiment` items."""

    type_info = TypeInfo(
        tag='default',
        description='Joint-fit experiment weights',
    )

    def __init__(self) -> None:
        """Create an empty joint-fit experiments collection."""
        # Typed collection: only JointFitExperiment items may be added.
        super().__init__(item_type=JointFitExperiment)
__init__()

Create an empty joint-fit experiments collection.

Source code in src/easydiffraction/analysis/categories/joint_fit_experiments/default.py
 98
 99
100
def __init__(self) -> None:
    """Create an empty joint-fit experiments collection."""
    super().__init__(item_type=JointFitExperiment)

factory

Joint-fit-experiments factory — delegates to FactoryBase.

JointFitExperimentsFactory

Bases: FactoryBase

Create joint-fit experiment collections by tag.

Source code in src/easydiffraction/analysis/categories/joint_fit_experiments/factory.py
12
13
14
15
16
17
class JointFitExperimentsFactory(FactoryBase):
    """Create joint-fit experiment collections by tag."""

    # Dispatch rules: an empty feature set maps to the 'default' variant.
    _default_rules: ClassVar[dict] = {
        frozenset(): 'default',
    }

fit_helpers

metrics

calculate_r_factor(y_obs, y_calc)

Calculate the R-factor between observed and calculated data.

Parameters:

Name Type Description Default
y_obs ndarray

Observed data points.

required
y_calc ndarray

Calculated data points.

required

Returns:

Type Description
float

R-factor value.

Source code in src/easydiffraction/analysis/fit_helpers/metrics.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
def calculate_r_factor(
    y_obs: np.ndarray,
    y_calc: np.ndarray,
) -> float:
    """
    Calculate the R-factor between observed and calculated data.

    Parameters
    ----------
    y_obs : np.ndarray
        Observed data points.
    y_calc : np.ndarray
        Calculated data points.

    Returns
    -------
    float
        R-factor value, or NaN when the observed data sum to zero.
    """
    obs = np.asarray(y_obs)
    calc = np.asarray(y_calc)
    total_abs_obs = np.sum(np.abs(obs))
    if total_abs_obs == 0:
        return np.nan
    return np.sum(np.abs(obs - calc)) / total_abs_obs

calculate_r_factor_squared(y_obs, y_calc)

Calculate the R-factor squared between observed and calculated data.

Parameters:

Name Type Description Default
y_obs ndarray

Observed data points.

required
y_calc ndarray

Calculated data points.

required

Returns:

Type Description
float

R-factor squared value.

Source code in src/easydiffraction/analysis/fit_helpers/metrics.py
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
def calculate_r_factor_squared(
    y_obs: np.ndarray,
    y_calc: np.ndarray,
) -> float:
    """
    Calculate the R-factor squared between observed and calculated data.

    Parameters
    ----------
    y_obs : np.ndarray
        Observed data points.
    y_calc : np.ndarray
        Calculated data points.

    Returns
    -------
    float
        R-factor squared value, or NaN when the observed data are all
        zero.
    """
    obs = np.asarray(y_obs)
    calc = np.asarray(y_calc)
    denom = np.sum(obs**2)
    if denom == 0:
        return np.nan
    return np.sqrt(np.sum((obs - calc) ** 2) / denom)

calculate_rb_factor(y_obs, y_calc)

Calculate the Bragg R-factor between observed and calculated data.

Parameters:

Name Type Description Default
y_obs ndarray

Observed data points.

required
y_calc ndarray

Calculated data points.

required

Returns:

Type Description
float

Bragg R-factor value.

Source code in src/easydiffraction/analysis/fit_helpers/metrics.py
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
def calculate_rb_factor(
    y_obs: np.ndarray,
    y_calc: np.ndarray,
) -> float:
    """
    Calculate the Bragg R-factor between observed and calculated data.

    Parameters
    ----------
    y_obs : np.ndarray
        Observed data points.
    y_calc : np.ndarray
        Calculated data points.

    Returns
    -------
    float
        Bragg R-factor value, or NaN when the observed data sum to
        zero.
    """
    obs = np.asarray(y_obs)
    calc = np.asarray(y_calc)
    total_obs = np.sum(obs)
    if total_obs == 0:
        return np.nan
    return np.sum(np.abs(obs - calc)) / total_obs

calculate_reduced_chi_square(residuals, num_parameters)

Calculate the reduced chi-square statistic.

Parameters:

Name Type Description Default
residuals ndarray

Residuals between observed and calculated data.

required
num_parameters int

Number of free parameters used in the model.

required

Returns:

Type Description
float

Reduced chi-square value.

Source code in src/easydiffraction/analysis/fit_helpers/metrics.py
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
def calculate_reduced_chi_square(
    residuals: np.ndarray,
    num_parameters: int,
) -> float:
    """
    Calculate the reduced chi-square statistic.

    Parameters
    ----------
    residuals : np.ndarray
        Residuals between observed and calculated data.
    num_parameters : int
        Number of free parameters used in the model.

    Returns
    -------
    float
        Reduced chi-square value, or NaN when the degrees of freedom
        are not positive.
    """
    res = np.asarray(residuals)
    # Degrees of freedom: data points minus fitted parameters.
    dof = len(res) - num_parameters
    if dof <= 0:
        return np.nan
    return np.sum(res**2) / dof

calculate_weighted_r_factor(y_obs, y_calc, weights)

Calculate weighted R-factor between observed and calculated data.

Parameters:

Name Type Description Default
y_obs ndarray

Observed data points.

required
y_calc ndarray

Calculated data points.

required
weights ndarray

Weights for each data point.

required

Returns:

Type Description
float

Weighted R-factor value.

Source code in src/easydiffraction/analysis/fit_helpers/metrics.py
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
def calculate_weighted_r_factor(
    y_obs: np.ndarray,
    y_calc: np.ndarray,
    weights: np.ndarray,
) -> float:
    """
    Calculate weighted R-factor between observed and calculated data.

    Parameters
    ----------
    y_obs : np.ndarray
        Observed data points.
    y_calc : np.ndarray
        Calculated data points.
    weights : np.ndarray
        Weights for each data point.

    Returns
    -------
    float
        Weighted R-factor value, or NaN when the weighted observed
        data are all zero.
    """
    obs = np.asarray(y_obs)
    calc = np.asarray(y_calc)
    w = np.asarray(weights)
    denom = np.sum(w * obs**2)
    if denom == 0:
        return np.nan
    return np.sqrt(np.sum(w * (obs - calc) ** 2) / denom)

get_reliability_inputs(structures, experiments)

Collect observed and calculated data for reliability calculations.

Parameters:

Name Type Description Default
structures Structures

Collection of structures.

required
experiments list[ExperimentBase]

List of experiments.

required

Returns:

Type Description
ndarray

Observed values.

ndarray

Calculated values.

ndarray | None

Error values, or None if not available.

Source code in src/easydiffraction/analysis/fit_helpers/metrics.py
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
def get_reliability_inputs(
    structures: Structures,
    experiments: list[ExperimentBase],
) -> tuple[np.ndarray, np.ndarray, np.ndarray | None]:
    """
    Collect observed and calculated data for reliability calculations.

    Parameters
    ----------
    structures : Structures
        Collection of structures.
    experiments : list[ExperimentBase]
        List of experiments.

    Returns
    -------
    np.ndarray
        Observed values.
    np.ndarray
        Calculated values.
    np.ndarray | None
        Error values, or None if not available.
    """
    observed: list = []
    calculated: list = []
    errors: list = []

    for experiment in experiments:
        # Refresh structure and experiment categories before reading
        # the (possibly derived) intensity arrays.
        for structure in structures:
            structure._update_categories()
        experiment._update_categories()

        y_calc = experiment.data.intensity_calc
        y_meas = experiment.data.intensity_meas
        y_meas_su = experiment.data.intensity_meas_su

        if y_meas is None or y_calc is None:
            continue

        if y_meas_su is None:
            # No standard uncertainties given: fall back to unit errors.
            y_meas_su = np.ones_like(y_meas)

        observed.extend(y_meas)
        calculated.extend(y_calc)
        errors.extend(y_meas_su)

    return (
        np.array(observed),
        np.array(calculated),
        np.array(errors) if errors else None,
    )

reporting

FitResults

Container for results of a single optimization run.

Holds success flag, chi-square metrics, iteration counts, timing, and parameter objects. Provides a printer to summarize key indicators and a table of fitted parameters.

Source code in src/easydiffraction/analysis/fit_helpers/reporting.py
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
class FitResults:
    """
    Container for results of a single optimization run.

    Holds success flag, chi-square metrics, iteration counts, timing,
    and parameter objects. Provides a printer to summarize key
    indicators and a table of fitted parameters.
    """

    def __init__(
        self,
        *,
        success: bool = False,
        parameters: list[object] | None = None,
        reduced_chi_square: float | None = None,
        engine_result: object | None = None,
        starting_parameters: list[object] | None = None,
        fitting_time: float | None = None,
        **kwargs: object,
    ) -> None:
        """
        Initialize FitResults with the given parameters.

        Parameters
        ----------
        success : bool, default=False
            Indicates if the fit was successful.
        parameters : list[object] | None, default=None
            List of parameters used in the fit.
        reduced_chi_square : float | None, default=None
            Reduced chi-square value of the fit.
        engine_result : object | None, default=None
            Result from the fitting engine.
        starting_parameters : list[object] | None, default=None
            Initial parameters for the fit.
        fitting_time : float | None, default=None
            Time taken for the fitting process.
        **kwargs : object
            Additional engine-specific fields. If ``redchi`` is provided
            and ``reduced_chi_square`` is not set, it is used as the
            reduced chi-square value.
        """
        self.success: bool = success
        self.parameters: list[object] = parameters if parameters is not None else []
        self.chi_square: float | None = None
        self.reduced_chi_square: float | None = reduced_chi_square
        self.message: str = ''
        self.iterations: int = 0
        self.engine_result: object | None = engine_result
        self.result: object | None = None
        self.starting_parameters: list[object] = (
            starting_parameters if starting_parameters is not None else []
        )
        self.fitting_time: float | None = fitting_time

        # 'redchi' is the engine-style field name; use it only as a
        # fallback when the canonical value was not given explicitly.
        if 'redchi' in kwargs and self.reduced_chi_square is None:
            self.reduced_chi_square = kwargs.get('redchi')

        # Attach any remaining engine-specific fields as attributes.
        for key, value in kwargs.items():
            setattr(self, key, value)

    def display_results(
        self,
        y_obs: list[float] | None = None,
        y_calc: list[float] | None = None,
        y_err: list[float] | None = None,
        f_obs: list[float] | None = None,
        f_calc: list[float] | None = None,
    ) -> None:
        """
        Render a human-readable summary of the fit.

        Parameters
        ----------
        y_obs : list[float] | None, default=None
            Observed intensities for pattern R-factor metrics.
        y_calc : list[float] | None, default=None
            Calculated intensities for pattern R-factor metrics.
        y_err : list[float] | None, default=None
            Standard deviations of observed intensities for wR.
        f_obs : list[float] | None, default=None
            Observed structure-factor magnitudes for Bragg R.
        f_calc : list[float] | None, default=None
            Calculated structure-factor magnitudes for Bragg R.
        """
        status_icon = '✅' if self.success else '❌'
        rf = rf2 = wr = br = None
        if y_obs is not None and y_calc is not None:
            rf = calculate_r_factor(y_obs, y_calc) * 100
            rf2 = calculate_r_factor_squared(y_obs, y_calc) * 100
        if y_obs is not None and y_calc is not None and y_err is not None:
            wr = calculate_weighted_r_factor(y_obs, y_calc, y_err) * 100
        if f_obs is not None and f_calc is not None:
            br = calculate_rb_factor(f_obs, f_calc) * 100

        console.paragraph('Fit results')
        console.print(f'{status_icon} Success: {self.success}')
        # Both fields default to None; formatting None with ':.2f'
        # raises TypeError, so print them only when available.
        if self.fitting_time is not None:
            console.print(f'⏱️ Fitting time: {self.fitting_time:.2f} seconds')
        if self.reduced_chi_square is not None:
            console.print(f'📏 Goodness-of-fit (reduced χ²): {self.reduced_chi_square:.2f}')
        if rf is not None:
            console.print(f'📏 R-factor (Rf): {rf:.2f}%')
        if rf2 is not None:
            console.print(f'📏 R-factor squared (Rf²): {rf2:.2f}%')
        if wr is not None:
            console.print(f'📏 Weighted R-factor (wR): {wr:.2f}%')
        if br is not None:
            console.print(f'📏 Bragg R-factor (BR): {br:.2f}%')
        console.print('📈 Fitted parameters:')

        headers = [
            'datablock',
            'category',
            'entry',
            'parameter',
            'start',
            'fitted',
            'uncertainty',
            'units',
            'change',
        ]
        alignments = [
            'left',
            'left',
            'left',
            'left',
            'right',
            'right',
            'right',
            'left',
            'right',
        ]

        rows = [_build_parameter_row(p) for p in self.parameters]

        render_table(
            columns_headers=headers,
            columns_alignment=alignments,
            columns_data=rows,
        )

        self._print_table_notes()

    def _print_table_notes(self) -> None:
        """Print color-coded notes below the fitted parameters table."""
        notes: list[str] = []
        if any(getattr(p, '_outside_physical_limits', False) for p in self.parameters):
            notes.append(
                '[red]Red fitted value:[/red] outside expected physical limits (consider '
                'adding constraints)'
            )
        if any(_is_uncertainty_large(p) for p in self.parameters):
            notes.append(
                '[red]Red uncertainty:[/red] exceeds the fitted value (consider adding '
                'constraints)'
            )
        for note in notes:
            log.warning(note)
__init__(*, success=False, parameters=None, reduced_chi_square=None, engine_result=None, starting_parameters=None, fitting_time=None, **kwargs)

Initialize FitResults with the given parameters.

Parameters:

Name Type Description Default
success bool

Indicates if the fit was successful.

False
parameters list[object] | None

List of parameters used in the fit.

None
reduced_chi_square float | None

Reduced chi-square value of the fit.

None
engine_result object | None

Result from the fitting engine.

None
starting_parameters list[object] | None

Initial parameters for the fit.

None
fitting_time float | None

Time taken for the fitting process.

None
**kwargs object

Additional engine-specific fields. If redchi is provided and reduced_chi_square is not set, it is used as the reduced chi-square value.

{}
Source code in src/easydiffraction/analysis/fit_helpers/reporting.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
def __init__(
    self,
    *,
    success: bool = False,
    parameters: list[object] | None = None,
    reduced_chi_square: float | None = None,
    engine_result: object | None = None,
    starting_parameters: list[object] | None = None,
    fitting_time: float | None = None,
    **kwargs: object,
) -> None:
    """
    Initialize FitResults with the given parameters.

    Parameters
    ----------
    success : bool, default=False
        Indicates if the fit was successful.
    parameters : list[object] | None, default=None
        List of parameters used in the fit.
    reduced_chi_square : float | None, default=None
        Reduced chi-square value of the fit.
    engine_result : object | None, default=None
        Result from the fitting engine.
    starting_parameters : list[object] | None, default=None
        Initial parameters for the fit.
    fitting_time : float | None, default=None
        Time taken for the fitting process.
    **kwargs : object
        Additional engine-specific fields. If ``redchi`` is provided
        and ``reduced_chi_square`` is not set, it is used as the
        reduced chi-square value.
    """
    self.success: bool = success
    self.parameters: list[object] = [] if parameters is None else parameters
    self.chi_square: float | None = None
    self.reduced_chi_square: float | None = reduced_chi_square
    self.message: str = ''
    self.iterations: int = 0
    self.engine_result: object | None = engine_result
    self.result: object | None = None
    self.starting_parameters: list[object] = (
        [] if starting_parameters is None else starting_parameters
    )
    self.fitting_time: float | None = fitting_time

    # Engine-specific fallback: lmfit-style engines report the reduced
    # chi-square under the 'redchi' key.
    if self.reduced_chi_square is None and 'redchi' in kwargs:
        self.reduced_chi_square = kwargs['redchi']

    # Attach all remaining engine-specific fields verbatim.
    for field_name, field_value in kwargs.items():
        setattr(self, field_name, field_value)
display_results(y_obs=None, y_calc=None, y_err=None, f_obs=None, f_calc=None)

Render a human-readable summary of the fit.

Parameters:

Name Type Description Default
y_obs list[float] | None

Observed intensities for pattern R-factor metrics.

None
y_calc list[float] | None

Calculated intensities for pattern R-factor metrics.

None
y_err list[float] | None

Standard deviations of observed intensities for wR.

None
f_obs list[float] | None

Observed structure-factor magnitudes for Bragg R.

None
f_calc list[float] | None

Calculated structure-factor magnitudes for Bragg R.

None
Source code in src/easydiffraction/analysis/fit_helpers/reporting.py
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
def display_results(
    self,
    y_obs: list[float] | None = None,
    y_calc: list[float] | None = None,
    y_err: list[float] | None = None,
    f_obs: list[float] | None = None,
    f_calc: list[float] | None = None,
) -> None:
    """
    Render a human-readable summary of the fit.

    Parameters
    ----------
    y_obs : list[float] | None, default=None
        Observed intensities for pattern R-factor metrics.
    y_calc : list[float] | None, default=None
        Calculated intensities for pattern R-factor metrics.
    y_err : list[float] | None, default=None
        Standard deviations of observed intensities for wR.
    f_obs : list[float] | None, default=None
        Observed structure-factor magnitudes for Bragg R.
    f_calc : list[float] | None, default=None
        Calculated structure-factor magnitudes for Bragg R.
    """
    status_icon = '✅' if self.success else '❌'
    rf = rf2 = wr = br = None
    if y_obs is not None and y_calc is not None:
        rf = calculate_r_factor(y_obs, y_calc) * 100
        rf2 = calculate_r_factor_squared(y_obs, y_calc) * 100
    if y_obs is not None and y_calc is not None and y_err is not None:
        wr = calculate_weighted_r_factor(y_obs, y_calc, y_err) * 100
    if f_obs is not None and f_calc is not None:
        br = calculate_rb_factor(f_obs, f_calc) * 100

    console.paragraph('Fit results')
    console.print(f'{status_icon} Success: {self.success}')
    # Both fields default to None in __init__; formatting None with the
    # ':.2f' spec raises TypeError, so skip the lines when unavailable.
    if self.fitting_time is not None:
        console.print(f'⏱️ Fitting time: {self.fitting_time:.2f} seconds')
    if self.reduced_chi_square is not None:
        console.print(f'📏 Goodness-of-fit (reduced χ²): {self.reduced_chi_square:.2f}')
    if rf is not None:
        console.print(f'📏 R-factor (Rf): {rf:.2f}%')
    if rf2 is not None:
        console.print(f'📏 R-factor squared (Rf²): {rf2:.2f}%')
    if wr is not None:
        console.print(f'📏 Weighted R-factor (wR): {wr:.2f}%')
    if br is not None:
        console.print(f'📏 Bragg R-factor (BR): {br:.2f}%')
    console.print('📈 Fitted parameters:')

    headers = [
        'datablock',
        'category',
        'entry',
        'parameter',
        'start',
        'fitted',
        'uncertainty',
        'units',
        'change',
    ]
    alignments = [
        'left',
        'left',
        'left',
        'left',
        'right',
        'right',
        'right',
        'left',
        'right',
    ]

    rows = [_build_parameter_row(p) for p in self.parameters]

    render_table(
        columns_headers=headers,
        columns_alignment=alignments,
        columns_data=rows,
    )

    self._print_table_notes()

tracking

FitProgressTracker

Track and report reduced chi-square during optimization.

The tracker keeps iteration counters, remembers the best observed reduced chi-square and when it occurred, and can display progress as a table in notebooks or a text UI in terminals.

Source code in src/easydiffraction/analysis/fit_helpers/tracking.py
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
class FitProgressTracker:
    """
    Track and report reduced chi-square during optimization.

    The tracker keeps iteration counters, remembers the best observed
    reduced chi-square and when it occurred, and can display progress as
    a table in notebooks or a text UI in terminals.
    """

    def __init__(self) -> None:
        """Initialize counters, chi-square bookkeeping, and display state."""
        # Iteration / chi-square bookkeeping.
        self._iteration: int = 0
        self._previous_chi2: float | None = None  # baseline for "significant change" checks
        self._last_chi2: float | None = None
        self._last_iteration: int | None = None
        self._best_chi2: float | None = None
        self._best_iteration: int | None = None
        self._fitting_time: float | None = None
        self._verbosity: VerbosityEnum = VerbosityEnum.FULL

        # Display state: accumulated table rows and the output handle.
        # NOTE: _start_time/_end_time are created lazily by
        # start_timer()/stop_timer(), not here.
        self._df_rows: list[list[str]] = []
        self._display_handle: object | None = None
        self._live: object | None = None

    def reset(self) -> None:
        """Reset internal state before a new optimization run."""
        self._iteration = 0
        self._previous_chi2 = None
        self._last_chi2 = None
        self._last_iteration = None
        self._best_chi2 = None
        self._best_iteration = None
        self._fitting_time = None

    def track(
        self,
        residuals: np.ndarray,
        parameters: list[float],
    ) -> np.ndarray:
        """
        Update progress with current residuals and parameters.

        Parameters
        ----------
        residuals : np.ndarray
            Residuals between measured and calculated data.
        parameters : list[float]
            Current free parameters being fitted.

        Returns
        -------
        np.ndarray
            Residuals unchanged, for optimizer consumption.
        """
        self._iteration += 1

        reduced_chi2 = calculate_reduced_chi_square(residuals, len(parameters))

        row: list[str] = []

        # First iteration, initialize tracking
        if self._previous_chi2 is None:
            self._previous_chi2 = reduced_chi2
            self._best_chi2 = reduced_chi2
            self._best_iteration = self._iteration

            row = [
                str(self._iteration),
                f'{reduced_chi2:.2f}',
                '',
            ]

        # Subsequent iterations, check for significant changes
        else:
            change = (self._previous_chi2 - reduced_chi2) / self._previous_chi2

            # Improvement check
            if change > SIGNIFICANT_CHANGE_THRESHOLD:
                change_in_percent = change * 100

                row = [
                    str(self._iteration),
                    f'{reduced_chi2:.2f}',
                    f'{change_in_percent:.1f}% ↓',
                ]

                # Only significant improvements move the comparison baseline.
                self._previous_chi2 = reduced_chi2

        # Output if there is something new to display
        if row:
            self.add_tracking_info(row)

        # Update best chi-square if better
        # (safe: the first-iteration branch above sets _best_chi2)
        if reduced_chi2 < self._best_chi2:
            self._best_chi2 = reduced_chi2
            self._best_iteration = self._iteration

        # Store last chi-square and iteration
        self._last_chi2 = reduced_chi2
        self._last_iteration = self._iteration

        return residuals

    @property
    def best_chi2(self) -> float | None:
        """Best recorded reduced chi-square value or None."""
        return self._best_chi2

    @property
    def best_iteration(self) -> int | None:
        """Iteration index at which the best chi-square was observed."""
        return self._best_iteration

    @property
    def iteration(self) -> int:
        """Current iteration counter."""
        return self._iteration

    @property
    def fitting_time(self) -> float | None:
        """Elapsed time of the last run in seconds, if available."""
        return self._fitting_time

    def start_timer(self) -> None:
        """Begin timing of a fit run."""
        # Monotonic clock; immune to wall-clock adjustments.
        self._start_time: float = time.perf_counter()

    def stop_timer(self) -> None:
        """Stop timing and store elapsed time for the run."""
        # NOTE(review): assumes start_timer() was called first; otherwise
        # _start_time does not exist and this raises AttributeError.
        self._end_time: float = time.perf_counter()
        self._fitting_time = self._end_time - self._start_time

    def start_tracking(self, minimizer_name: str) -> None:
        """
        Initialize display and headers and announce the minimizer.

        Parameters
        ----------
        minimizer_name : str
            Name of the minimizer used for the run.
        """
        # No progress output at reduced verbosity levels.
        if self._verbosity is VerbosityEnum.SILENT:
            return
        if self._verbosity is VerbosityEnum.SHORT:
            return

        console.print(f"🚀 Starting fit process with '{minimizer_name}'...")
        console.print('📈 Goodness-of-fit (reduced χ²) change:')

        # Reset rows and create an environment-appropriate handle
        self._df_rows = []
        self._display_handle = _make_display_handle()

        # Initial empty table; subsequent updates will reuse the handle
        render_table(
            columns_headers=DEFAULT_HEADERS,
            columns_alignment=DEFAULT_ALIGNMENTS,
            columns_data=self._df_rows,
            display_handle=self._display_handle,
        )

    def add_tracking_info(self, row: list[str]) -> None:
        """
        Append a formatted row to the progress display.

        Parameters
        ----------
        row : list[str]
            Columns corresponding to DEFAULT_HEADERS.
        """
        # The row is recorded even when not rendered, so the table is
        # complete if verbosity changes later.
        self._df_rows.append(row)
        if self._verbosity is not VerbosityEnum.FULL:
            return
        # Append and update via the active handle (Jupyter or
        # terminal live)
        render_table(
            columns_headers=DEFAULT_HEADERS,
            columns_alignment=DEFAULT_ALIGNMENTS,
            columns_data=self._df_rows,
            display_handle=self._display_handle,
        )

    def finish_tracking(self) -> None:
        """Finalize progress display and print best result summary."""
        # Add last iteration as last row
        row: list[str] = [
            str(self._last_iteration),
            f'{self._last_chi2:.2f}' if self._last_chi2 is not None else '',
            '',
        ]
        self.add_tracking_info(row)

        if self._verbosity is not VerbosityEnum.FULL:
            return

        # Close terminal live if used
        if self._display_handle is not None and hasattr(self._display_handle, 'close'):
            with suppress(Exception):
                self._display_handle.close()

        # Print best result
        # NOTE(review): assumes track() ran at least once; _best_chi2 is
        # None otherwise and the ':.2f' format would raise TypeError.
        console.print(
            f'🏆 Best goodness-of-fit (reduced χ²) is {self._best_chi2:.2f} '
            f'at iteration {self._best_iteration}'
        )
        console.print('✅ Fitting complete.')
add_tracking_info(row)

Append a formatted row to the progress display.

Parameters:

Name Type Description Default
row list[str]

Columns corresponding to DEFAULT_HEADERS.

required
Source code in src/easydiffraction/analysis/fit_helpers/tracking.py
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
def add_tracking_info(self, row: list[str]) -> None:
    """
    Append a formatted row to the progress display.

    Parameters
    ----------
    row : list[str]
        Columns corresponding to DEFAULT_HEADERS.
    """
    self._df_rows.append(row)
    if self._verbosity is VerbosityEnum.FULL:
        # Re-render the full table through the active handle
        # (Jupyter display handle or terminal live view).
        render_table(
            columns_headers=DEFAULT_HEADERS,
            columns_alignment=DEFAULT_ALIGNMENTS,
            columns_data=self._df_rows,
            display_handle=self._display_handle,
        )
best_chi2 property

Best recorded reduced chi-square value or None.

best_iteration property

Iteration index at which the best chi-square was observed.

finish_tracking()

Finalize progress display and print best result summary.

Source code in src/easydiffraction/analysis/fit_helpers/tracking.py
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
def finish_tracking(self) -> None:
    """Finalize progress display and print best result summary."""
    # Record the final iteration as the closing table row.
    last_chi2_text = '' if self._last_chi2 is None else f'{self._last_chi2:.2f}'
    self.add_tracking_info([str(self._last_iteration), last_chi2_text, ''])

    if self._verbosity is not VerbosityEnum.FULL:
        return

    # Shut down the terminal live view when one was used.
    handle = self._display_handle
    if handle is not None and hasattr(handle, 'close'):
        with suppress(Exception):
            handle.close()

    # Summarize the best result observed during the run.
    console.print(
        f'🏆 Best goodness-of-fit (reduced χ²) is {self._best_chi2:.2f} '
        f'at iteration {self._best_iteration}'
    )
    console.print('✅ Fitting complete.')
fitting_time property

Elapsed time of the last run in seconds, if available.

iteration property

Current iteration counter.

reset()

Reset internal state before a new optimization run.

Source code in src/easydiffraction/analysis/fit_helpers/tracking.py
109
110
111
112
113
114
115
116
117
def reset(self) -> None:
    """Reset internal state before a new optimization run."""
    self._iteration = 0
    # All chi-square history and timing bookkeeping starts empty.
    for attr in (
        '_previous_chi2',
        '_last_chi2',
        '_last_iteration',
        '_best_chi2',
        '_best_iteration',
        '_fitting_time',
    ):
        setattr(self, attr, None)
start_timer()

Begin timing of a fit run.

Source code in src/easydiffraction/analysis/fit_helpers/tracking.py
208
209
210
def start_timer(self) -> None:
    """Begin timing of a fit run."""
    # perf_counter is monotonic, so the later elapsed-time subtraction
    # is immune to wall-clock adjustments.
    self._start_time = time.perf_counter()
start_tracking(minimizer_name)

Initialize display and headers and announce the minimizer.

Parameters:

Name Type Description Default
minimizer_name str

Name of the minimizer used for the run.

required
Source code in src/easydiffraction/analysis/fit_helpers/tracking.py
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
def start_tracking(self, minimizer_name: str) -> None:
    """
    Initialize display and headers and announce the minimizer.

    Parameters
    ----------
    minimizer_name : str
        Name of the minimizer used for the run.
    """
    # Progress output is only produced at full verbosity.
    if self._verbosity in (VerbosityEnum.SILENT, VerbosityEnum.SHORT):
        return

    console.print(f"🚀 Starting fit process with '{minimizer_name}'...")
    console.print('📈 Goodness-of-fit (reduced χ²) change:')

    # Fresh row buffer plus a display handle suited to the environment
    # (Jupyter vs terminal); subsequent updates reuse the same handle.
    self._df_rows = []
    self._display_handle = _make_display_handle()

    # Render an initial empty table so later rows update it in place.
    render_table(
        columns_headers=DEFAULT_HEADERS,
        columns_alignment=DEFAULT_ALIGNMENTS,
        columns_data=self._df_rows,
        display_handle=self._display_handle,
    )
stop_timer()

Stop timing and store elapsed time for the run.

Source code in src/easydiffraction/analysis/fit_helpers/tracking.py
212
213
214
215
def stop_timer(self) -> None:
    """Stop timing and store elapsed time for the run."""
    # NOTE(review): assumes start_timer() was called first, which sets
    # _start_time — confirm callers always pair the two.
    now = time.perf_counter()
    self._end_time = now
    self._fitting_time = now - self._start_time
track(residuals, parameters)

Update progress with current residuals and parameters.

Parameters:

Name Type Description Default
residuals ndarray

Residuals between measured and calculated data.

required
parameters list[float]

Current free parameters being fitted.

required

Returns:

Type Description
ndarray

Residuals unchanged, for optimizer consumption.

Source code in src/easydiffraction/analysis/fit_helpers/tracking.py
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
def track(
    self,
    residuals: np.ndarray,
    parameters: list[float],
) -> np.ndarray:
    """
    Update progress with current residuals and parameters.

    Parameters
    ----------
    residuals : np.ndarray
        Residuals between measured and calculated data.
    parameters : list[float]
        Current free parameters being fitted.

    Returns
    -------
    np.ndarray
        Residuals unchanged, for optimizer consumption.
    """
    self._iteration += 1
    reduced_chi2 = calculate_reduced_chi_square(residuals, len(parameters))

    display_row: list[str] | None = None

    if self._previous_chi2 is None:
        # First evaluation: seed baseline, best value, and table row.
        self._previous_chi2 = reduced_chi2
        self._best_chi2 = reduced_chi2
        self._best_iteration = self._iteration
        display_row = [str(self._iteration), f'{reduced_chi2:.2f}', '']
    else:
        # Relative improvement compared to the last reported value.
        relative_drop = (self._previous_chi2 - reduced_chi2) / self._previous_chi2
        if relative_drop > SIGNIFICANT_CHANGE_THRESHOLD:
            display_row = [
                str(self._iteration),
                f'{reduced_chi2:.2f}',
                f'{relative_drop * 100:.1f}% ↓',
            ]
            # Only significant improvements move the baseline.
            self._previous_chi2 = reduced_chi2

    # Only render when something new was produced above.
    if display_row:
        self.add_tracking_info(display_row)

    # Keep the best-so-far record current.
    if reduced_chi2 < self._best_chi2:
        self._best_chi2 = reduced_chi2
        self._best_iteration = self._iteration

    # Remember the most recent evaluation.
    self._last_chi2 = reduced_chi2
    self._last_iteration = self._iteration

    return residuals

fitting

Fitter

Handles the fitting workflow using a pluggable minimizer.

Source code in src/easydiffraction/analysis/fitting.py
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
class Fitter:
    """Handles the fitting workflow using a pluggable minimizer."""

    def __init__(self, selection: str = MinimizerTypeEnum.default()) -> None:
        self.selection: str = selection
        self.engine: str = selection
        self.minimizer = MinimizerFactory.create(selection)
        self.results: FitResults | None = None

    def fit(
        self,
        structures: Structures,
        experiments: list[ExperimentBase],
        weights: np.ndarray | None = None,
        analysis: object = None,
        verbosity: VerbosityEnum = VerbosityEnum.FULL,
        *,
        use_physical_limits: bool = False,
    ) -> None:
        """
        Run the fitting process.

        This method performs the optimization but does not display
        results. Use :meth:`show_fit_results` on the Analysis object to
        display the fit results after fitting is complete.

        Parameters
        ----------
        structures : Structures
            Collection of structures.
        experiments : list[ExperimentBase]
            List of experiments to fit.
        weights : np.ndarray | None, default=None
            Per-experiment weights as a 1-D array (length must match
            *experiments*). When ``None``, equal weights are used.
        analysis : object, default=None
            Optional Analysis object to update its categories during
            fitting.
        verbosity : VerbosityEnum, default=VerbosityEnum.FULL
            Console output verbosity.
        use_physical_limits : bool, default=False
            When ``True``, fall back to physical limits from the value
            spec for parameters whose ``fit_min``/``fit_max`` are
            unbounded.
        """
        # Enforce symmetry constraints (e.g. ADP) before collecting
        # free parameters so that components fixed by site symmetry are
        # excluded from the minimizer's parameter set.
        for structure in structures:
            structure._need_categories_update = True
            structure._update_categories()

        expt_free_params: list[Parameter] = []
        for expt in experiments:
            expt_free_params.extend(
                p
                for p in expt.parameters
                if isinstance(p, Parameter) and not p.constrained and p.free
            )
        params = structures.free_parameters + expt_free_params

        if not params:
            print('⚠️ No parameters selected for fitting.')
            return

        # Remember starting values so results can report changes.
        for param in params:
            param._fit_start_value = param.value

        def objective_function(engine_params: dict[str, Any]) -> np.ndarray:
            """
            Evaluate the residual for the current minimizer parameters.

            Parameters
            ----------
            engine_params : dict[str, Any]
                Parameter values provided by the minimizer engine.

            Returns
            -------
            np.ndarray
                Residual array passed back to the minimizer.
            """
            return self._residual_function(
                engine_params=engine_params,
                parameters=params,
                structures=structures,
                experiments=experiments,
                weights=weights,
                analysis=analysis,
            )

        # Perform fitting
        self.results = self.minimizer.fit(
            params,
            objective_function,
            verbosity=verbosity,
            use_physical_limits=use_physical_limits,
        )

    def _process_fit_results(
        self,
        structures: Structures,
        experiments: list[ExperimentBase],
    ) -> None:
        """
        Collect reliability inputs and display fit results.

        This method is typically called by
        :meth:`Analysis.show_fit_results` rather than directly. It
        calculates R-factors and other metrics, then renders them to the
        console.

        Parameters
        ----------
        structures : Structures
            Collection of structures.
        experiments : list[ExperimentBase]
            List of experiments.
        """
        y_obs, y_calc, y_err = get_reliability_inputs(
            structures,
            experiments,
        )

        # Placeholder for future f_obs / f_calc retrieval
        f_obs, f_calc = None, None

        if self.results:
            self.results.display_results(
                y_obs=y_obs,
                y_calc=y_calc,
                y_err=y_err,
                f_obs=f_obs,
                f_calc=f_calc,
            )

    def _residual_function(
        self,
        engine_params: dict[str, Any],
        parameters: list[Parameter],
        structures: Structures,
        experiments: list[ExperimentBase],
        weights: np.ndarray | None = None,
        analysis: object = None,
    ) -> np.ndarray:
        """
        Compute residuals between measured and calculated patterns.

        It updates the parameter values according to the
        optimizer-provided engine_params.

        Parameters
        ----------
        engine_params : dict[str, Any]
            Engine-specific parameter dict.
        parameters : list[Parameter]
            List of parameters being optimized.
        structures : Structures
            Collection of structures.
        experiments : list[ExperimentBase]
            List of experiments.
        weights : np.ndarray | None, default=None
            Per-experiment weights as a 1-D array. When ``None``, equal
            weights are used.
        analysis : object, default=None
            Optional Analysis object to update its categories during
            fitting.

        Returns
        -------
        np.ndarray
            Array of weighted residuals.
        """
        # Sync parameters back to objects
        self.minimizer._sync_result_to_parameters(parameters, engine_params)

        # Update categories to reflect new parameter values
        # Order matters: structures first (symmetry, structure),
        # then analysis (constraints), then experiments (calculations)
        for structure in structures:
            structure._update_categories()

        if analysis is not None:
            analysis._update_categories(called_by_minimizer=True)

        # Prepare weights for joint fitting.
        # np.array (not asarray) forces a copy: asarray can alias the
        # caller's float64 array, and the in-place renormalization below
        # would then silently mutate the caller's weights.
        num_expts: int = len(experiments)
        norm_weights = (
            np.ones(num_expts) if weights is None else np.array(weights, dtype=np.float64)
        )

        # Normalize weights so they sum to num_expts
        # We should obtain the same reduced chi_squared when a single
        # dataset is split into two parts and fit together. If weights
        # sum to one, then reduced chi_squared will be half as large as
        # expected.
        norm_weights *= num_expts / np.sum(norm_weights)
        residuals: list[float] = []

        for experiment, weight in zip(experiments, norm_weights, strict=True):
            # Update experiment-specific calculations
            experiment._update_categories(called_by_minimizer=True)

            # Calculate the difference between measured and calculated
            # patterns
            y_calc = experiment.data.intensity_calc
            y_meas = experiment.data.intensity_meas
            y_meas_su = experiment.data.intensity_meas_su
            diff = (y_meas - y_calc) / y_meas_su

            # Residuals are squared before going into reduced
            # chi-squared
            diff *= np.sqrt(weight)

            # Append the residuals for this experiment
            residuals.extend(diff)

        return self.minimizer.tracker.track(np.array(residuals), parameters)

fit(structures, experiments, weights=None, analysis=None, verbosity=VerbosityEnum.FULL, *, use_physical_limits=False)

Run the fitting process.

This method performs the optimization but does not display results. Use :meth:show_fit_results on the Analysis object to display the fit results after fitting is complete.

Parameters:

Name Type Description Default
structures Structures

Collection of structures.

required
experiments list[ExperimentBase]

List of experiments to fit.

required
weights ndarray | None

Per-experiment weights as a 1-D array (length must match experiments). When None, equal weights are used.

None
analysis object

Optional Analysis object to update its categories during fitting.

None
verbosity VerbosityEnum

Console output verbosity.

VerbosityEnum.FULL
use_physical_limits bool

When True, fall back to physical limits from the value spec for parameters whose fit_min/fit_max are unbounded.

False
Source code in src/easydiffraction/analysis/fitting.py
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
def fit(
    self,
    structures: Structures,
    experiments: list[ExperimentBase],
    weights: np.ndarray | None = None,
    analysis: object = None,
    verbosity: VerbosityEnum = VerbosityEnum.FULL,
    *,
    use_physical_limits: bool = False,
) -> None:
    """
    Run the fitting process.

    Optimization only: nothing is displayed here. Call
    :meth:`show_fit_results` on the Analysis object afterwards to
    inspect the outcome.

    Parameters
    ----------
    structures : Structures
        Collection of structures.
    experiments : list[ExperimentBase]
        List of experiments to fit.
    weights : np.ndarray | None, default=None
        Per-experiment weights as a 1-D array (length must match
        *experiments*). When ``None``, equal weights are used.
    analysis : object, default=None
        Optional Analysis object whose categories are updated during
        fitting.
    verbosity : VerbosityEnum, default=VerbosityEnum.FULL
        Console output verbosity.
    use_physical_limits : bool, default=False
        When ``True``, fall back to physical limits from the value
        spec for parameters whose ``fit_min``/``fit_max`` are
        unbounded.
    """
    # Apply symmetry constraints (e.g. ADP) before collecting free
    # parameters so components fixed by site symmetry never reach the
    # minimizer's parameter set.
    for structure in structures:
        structure._need_categories_update = True
        structure._update_categories()

    # Gather unconstrained, free experiment parameters.
    experiment_params: list[Parameter] = []
    for experiment in experiments:
        for candidate in experiment.parameters:
            if isinstance(candidate, Parameter) and not candidate.constrained and candidate.free:
                experiment_params.append(candidate)
    free_params = structures.free_parameters + experiment_params

    if not free_params:
        print('⚠️ No parameters selected for fitting.')
        return

    # Remember where each parameter started from.
    for free_param in free_params:
        free_param._fit_start_value = free_param.value

    def objective_function(engine_params: dict[str, Any]) -> np.ndarray:
        """
        Evaluate the residual for the current minimizer parameters.

        Parameters
        ----------
        engine_params : dict[str, Any]
            Parameter values provided by the minimizer engine.

        Returns
        -------
        np.ndarray
            Residual array passed back to the minimizer.
        """
        return self._residual_function(
            engine_params=engine_params,
            parameters=free_params,
            structures=structures,
            experiments=experiments,
            weights=weights,
            analysis=analysis,
        )

    # Run the minimization and keep the aggregated results.
    self.results = self.minimizer.fit(
        free_params,
        objective_function,
        verbosity=verbosity,
        use_physical_limits=use_physical_limits,
    )

minimizers

base

MinimizerBase

Bases: ABC

Abstract base for concrete minimizers.

Contract: subclasses must implement `_prepare_solver_args`, `_run_solver`, `_sync_result_to_parameters` and `_check_success`. The `fit` method orchestrates the full workflow and returns `FitResults`.

Source code in src/easydiffraction/analysis/minimizers/base.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
class MinimizerBase(ABC):
    """
    Abstract base for concrete minimizers.

    Contract: subclasses must implement ``_prepare_solver_args``,
    ``_run_solver``, ``_sync_result_to_parameters`` and
    ``_check_success``. The ``fit`` method orchestrates the full
    workflow and returns :class:`FitResults`.
    """

    def __init__(
        self,
        name: str | None = None,
        method: str | None = None,
        max_iterations: int | None = None,
    ) -> None:
        self.name: str | None = name
        self.method: str | None = method
        self.max_iterations: int | None = max_iterations
        self.result: FitResults | None = None
        # Internal bookkeeping for chi-squared progress across iterations.
        self._previous_chi2: float | None = None
        self._iteration: int | None = None
        self._best_chi2: float | None = None
        self._best_iteration: int | None = None
        self._fitting_time: float | None = None
        self.tracker: FitProgressTracker = FitProgressTracker()

    def _start_tracking(
        self,
        minimizer_name: str,
        verbosity: VerbosityEnum = VerbosityEnum.FULL,
    ) -> None:
        """
        Initialize progress tracking and timer.

        Parameters
        ----------
        minimizer_name : str
            Human-readable name shown in progress.
        verbosity : VerbosityEnum, default=VerbosityEnum.FULL
            Console output verbosity.
        """
        self.tracker.reset()
        self.tracker._verbosity = verbosity
        self.tracker.start_tracking(minimizer_name)
        self.tracker.start_timer()

    def _stop_tracking(self) -> None:
        """Stop timer and finalize tracking."""
        self.tracker.stop_timer()
        self.tracker.finish_tracking()

    @abstractmethod
    def _prepare_solver_args(self, parameters: list[Any]) -> dict[str, Any]:
        """
        Prepare keyword-arguments for the underlying solver.

        Parameters
        ----------
        parameters : list[Any]
            List of free parameters to be fitted.

        Returns
        -------
        dict[str, Any]
            Mapping of keyword arguments to pass into ``_run_solver``.
        """

    @abstractmethod
    def _run_solver(
        self,
        objective_function: Callable[..., object],
        **kwargs: object,
    ) -> object:
        """
        Execute the concrete solver and return its raw result.

        ``fit`` forwards the mapping produced by
        ``_prepare_solver_args`` as keyword arguments, so concrete
        implementations must accept ``**kwargs``.
        """

    @abstractmethod
    def _sync_result_to_parameters(
        self,
        parameters: list[object],
        raw_result: object,
    ) -> None:
        """
        Copy raw_result values back to parameters in-place.

        The argument order is ``(parameters, raw_result)``, matching
        the call in ``_finalize_fit`` and the concrete subclass
        implementations.
        """

    def _finalize_fit(
        self,
        parameters: list[object],
        raw_result: object,
    ) -> FitResults:
        """
        Build :class:`FitResults` and store it on ``self.result``.

        Parameters
        ----------
        parameters : list[object]
            Parameters after the solver finished.
        raw_result : object
            Backend-specific solver output object.

        Returns
        -------
        FitResults
            Aggregated outcome of the fit.
        """
        self._sync_result_to_parameters(parameters, raw_result)
        self._warn_boundary_parameters(parameters)
        self._warn_physical_limit_violations(parameters)
        success = self._check_success(raw_result)
        # NOTE(review): `starting_parameters` receives the post-fit
        # parameter objects; the pre-fit values live in each
        # parameter's `_fit_start_value` — confirm this is intended.
        self.result = FitResults(
            success=success,
            parameters=parameters,
            reduced_chi_square=self.tracker.best_chi2,
            engine_result=raw_result,
            starting_parameters=parameters,
            fitting_time=self.tracker.fitting_time,
        )
        return self.result

    @staticmethod
    def _warn_boundary_parameters(parameters: list[object]) -> None:
        """
        Warn if any parameter is near its fit bounds after fitting.
        """
        for param in parameters:
            v = param.value
            lo, hi = param.fit_min, param.fit_max
            span = hi - lo
            if not np.isfinite(span):
                # One-sided or unbounded — check absolute proximity
                # relative to the magnitude of the finite bound.
                if np.isfinite(lo) and abs(v - lo) < BOUNDARY_PROXIMITY_FRACTION * max(
                    abs(lo), 1.0
                ):
                    log.warning(
                        f"Parameter '{param.unique_name}' ({v}) is at its lower "
                        f'bound ({lo}). Consider widening fit_min.'
                    )
                if np.isfinite(hi) and abs(v - hi) < BOUNDARY_PROXIMITY_FRACTION * max(
                    abs(hi), 1.0
                ):
                    log.warning(
                        f"Parameter '{param.unique_name}' ({v}) is at its upper "
                        f'bound ({hi}). Consider widening fit_max.'
                    )
            elif span > 0:
                # Two-sided finite bounds — compare against a fraction
                # of the full span.
                tol = BOUNDARY_PROXIMITY_FRACTION * span
                if (v - lo) < tol:
                    log.warning(
                        f"Parameter '{param.unique_name}' ({v}) is at its lower "
                        f'bound ({lo}). Consider widening fit_min.'
                    )
                if (hi - v) < tol:
                    log.warning(
                        f"Parameter '{param.unique_name}' ({v}) is at its upper "
                        f'bound ({hi}). Consider widening fit_max.'
                    )

    @staticmethod
    def _apply_physical_limits(parameters: list[object]) -> None:
        """
        Set fit bounds from physical limits for unbounded parameters.

        For each parameter whose ``fit_min`` is ``-inf``, replace it
        with the lower physical limit from the value spec.  Likewise for
        ``fit_max`` and the upper physical limit.

        Parameters
        ----------
        parameters : list[object]
            Free parameters to adjust.
        """
        for param in parameters:
            if param.fit_min == -np.inf:
                phys_lo = param._physical_lower_bound()
                if np.isfinite(phys_lo):
                    param.fit_min = phys_lo
            if param.fit_max == np.inf:
                phys_hi = param._physical_upper_bound()
                if np.isfinite(phys_hi):
                    param.fit_max = phys_hi

    @staticmethod
    def _warn_physical_limit_violations(parameters: list[object]) -> None:
        """
        Flag parameters outside their physical limits.

        Sets ``param._outside_physical_limits = True`` on any parameter
        whose value falls outside its physical bounds.

        Parameters
        ----------
        parameters : list[object]
            Parameters after fitting.
        """
        for param in parameters:
            lo = param._physical_lower_bound()
            hi = param._physical_upper_bound()
            outside = False
            if np.isfinite(lo) and param.value < lo:
                log.warning(
                    f"Parameter '{param.unique_name}' ({param.value:.8f}) is below "
                    f'its physical lower limit ({lo}).'
                )
                outside = True
            if np.isfinite(hi) and param.value > hi:
                log.warning(
                    f"Parameter '{param.unique_name}' ({param.value:.8f}) is above "
                    f'its physical upper limit ({hi}).'
                )
                outside = True
            param._outside_physical_limits = outside

    @abstractmethod
    def _check_success(self, raw_result: object) -> bool:
        """Determine whether the fit was successful."""

    def fit(
        self,
        parameters: list[object],
        objective_function: Callable[..., object],
        verbosity: VerbosityEnum = VerbosityEnum.FULL,
        *,
        use_physical_limits: bool = False,
    ) -> FitResults:
        """
        Run the full minimization workflow.

        Parameters
        ----------
        parameters : list[object]
            Free parameters to optimize.
        objective_function : Callable[..., object]
            Callable returning residuals for a given set of engine
            arguments.
        verbosity : VerbosityEnum, default=VerbosityEnum.FULL
            Console output verbosity.
        use_physical_limits : bool, default=False
            When ``True``, fall back to physical limits from the value
            spec for parameters whose ``fit_min``/``fit_max`` are
            unbounded.

        Returns
        -------
        FitResults
            FitResults with success flag, best chi2 and timing.
        """
        if use_physical_limits:
            self._apply_physical_limits(parameters)

        # Append the method to the display name unless already present.
        minimizer_name = self.name or 'Unnamed Minimizer'
        if self.method is not None and f'({self.method})' not in minimizer_name:
            minimizer_name += f' ({self.method})'

        self._start_tracking(minimizer_name, verbosity=verbosity)

        solver_args = self._prepare_solver_args(parameters)
        raw_result = self._run_solver(objective_function, **solver_args)

        self._stop_tracking()

        return self._finalize_fit(parameters, raw_result)

    def _objective_function(
        self,
        engine_params: dict[str, object],
        parameters: list[object],
        structures: object,
        experiments: object,
        calculator: object,
    ) -> np.ndarray:
        """Default objective helper computing residuals array."""
        # NOTE(review): `_compute_residuals` is not defined in this
        # class — presumably supplied by a subclass or mixin; confirm.
        return self._compute_residuals(
            engine_params,
            parameters,
            structures,
            experiments,
            calculator,
        )

    def _create_objective_function(
        self,
        parameters: list[object],
        structures: object,
        experiments: object,
        calculator: object,
    ) -> Callable[[dict[str, object]], np.ndarray]:
        """Return a closure capturing problem context for the solver."""
        return lambda engine_params: self._objective_function(
            engine_params,
            parameters,
            structures,
            experiments,
            calculator,
        )
fit(parameters, objective_function, verbosity=VerbosityEnum.FULL, *, use_physical_limits=False)

Run the full minimization workflow.

Parameters:

Name Type Description Default
parameters list[object]

Free parameters to optimize.

required
objective_function Callable[..., object]

Callable returning residuals for a given set of engine arguments.

required
verbosity VerbosityEnum

Console output verbosity.

VerbosityEnum.FULL
use_physical_limits bool

When True, fall back to physical limits from the value spec for parameters whose fit_min/fit_max are unbounded.

False

Returns:

Type Description
FitResults

FitResults with success flag, best chi2 and timing.

Source code in src/easydiffraction/analysis/minimizers/base.py
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
def fit(
    self,
    parameters: list[object],
    objective_function: Callable[..., object],
    verbosity: VerbosityEnum = VerbosityEnum.FULL,
    *,
    use_physical_limits: bool = False,
) -> FitResults:
    """
    Run the full minimization workflow.

    Parameters
    ----------
    parameters : list[object]
        Free parameters to optimize.
    objective_function : Callable[..., object]
        Callable returning residuals for a given set of engine
        arguments.
    verbosity : VerbosityEnum, default=VerbosityEnum.FULL
        Console output verbosity.
    use_physical_limits : bool, default=False
        When ``True``, fall back to physical limits from the value
        spec for parameters whose ``fit_min``/``fit_max`` are
        unbounded.

    Returns
    -------
    FitResults
        FitResults with success flag, best chi2 and timing.
    """
    # Optionally tighten ±inf fit bounds to the physical limits first.
    if use_physical_limits:
        self._apply_physical_limits(parameters)

    # Build the display name; append the method unless already present.
    minimizer_name = self.name or 'Unnamed Minimizer'
    if self.method is not None and f'({self.method})' not in minimizer_name:
        minimizer_name += f' ({self.method})'

    self._start_tracking(minimizer_name, verbosity=verbosity)

    # Solver arguments come from the subclass; they are forwarded as
    # keyword arguments to the subclass's _run_solver.
    solver_args = self._prepare_solver_args(parameters)
    raw_result = self._run_solver(objective_function, **solver_args)

    self._stop_tracking()

    # Sync values back to parameters, emit warnings, build FitResults.
    return self._finalize_fit(parameters, raw_result)

bumps

Minimizer using the bumps package.

BumpsMinimizer

Bases: MinimizerBase

Minimizer using the bumps package.

Source code in src/easydiffraction/analysis/minimizers/bumps.py
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
@MinimizerFactory.register
class BumpsMinimizer(MinimizerBase):
    """Minimizer using the bumps package."""

    type_info = TypeInfo(
        tag=MinimizerTypeEnum.BUMPS,
        description='Bumps library using the default Levenberg-Marquardt method',
    )

    def __init__(
        self,
        name: str = MinimizerTypeEnum.BUMPS,
        method: str = DEFAULT_METHOD,
        max_iterations: int = DEFAULT_MAX_ITERATIONS,
    ) -> None:
        super().__init__(
            name=name,
            method=method,
            max_iterations=max_iterations,
        )

    def _prepare_solver_args(  # noqa: PLR6301
        self,
        parameters: list[object],
    ) -> dict[str, object]:
        """
        Prepare bumps parameters from EasyDiffraction parameters.

        Parameters
        ----------
        parameters : list[object]
            List of parameters to be optimized.

        Returns
        -------
        dict[str, object]
            Dictionary containing the bumps parameter list.
        """
        bumps_params = []
        for param in parameters:
            bp = BumpsParameter(
                value=param.value,
                name=param._minimizer_uid,
            )
            # NOTE(review): fit_min/fit_max may be ±inf here —
            # presumably bumps accepts open-ended ranges; confirm.
            lo = param.fit_min
            hi = param.fit_max
            bp.range(lo, hi)
            bumps_params.append(bp)
        return {'bumps_params': bumps_params}

    def _run_solver(
        self,
        objective_function: object,
        **kwargs: object,
    ) -> object:
        """
        Run the bumps solver.

        Uses FitDriver directly instead of bumps.fitters.fit() to skip
        the expensive post-fit stderr/Jacobian computation that would
        trigger extra objective-function evaluations.

        Parameters
        ----------
        objective_function : object
            The objective function to minimize.
        **kwargs : object
            Additional arguments for the solver.

        Returns
        -------
        object
            A scipy OptimizeResult with the optimized values.
        """
        bumps_params = kwargs.get('bumps_params')
        fitness = _EasyDiffractionFitness(bumps_params, objective_function)
        fitness.nllf()  # pre-compute so numpoints() is valid
        problem = FitProblem(fitness)

        # Select the fitter class matching self.method by its bumps id.
        fitclass = next(cls for cls in FITTERS if cls.id == self.method)
        driver = FitDriver(
            fitclass=fitclass,
            problem=problem,
            monitors=[],
            steps=self.max_iterations,
        )
        # Clamp the start point into the declared ranges before fitting.
        driver.clip()
        x, fx = driver.fit()

        # bumps signals failure by returning None for the solution vector.
        success = x is not None
        if success:
            problem.setp(x)

        # Read values back from bumps Parameters in our original order.
        # FitProblem sorts parameters alphabetically, so x from
        # driver.fit() uses that sorted order — not ours.
        result_x = np.array([p.value for p in bumps_params])

        covar, stderr = (
            self._compute_covariance(bumps_params, fitness) if success else (None, None)
        )
        var_names = [p.name for p in bumps_params]

        return OptimizeResult(
            x=result_x,
            dx=stderr,
            fun=fx,
            success=success,
            status=0 if success else -1,
            message='successful termination' if success else 'fit failed',
            covar=covar,
            var_names=var_names,
        )

    def _compute_covariance(  # noqa: PLR6301
        self,
        bumps_params: list[BumpsParameter],
        fitness: _EasyDiffractionFitness,
    ) -> tuple[np.ndarray | None, np.ndarray | None]:
        """
        Compute covariance matrix and standard errors from a Jacobian.

        Parameters
        ----------
        bumps_params : list[BumpsParameter]
            Bumps parameters at their optimal values.
        fitness : _EasyDiffractionFitness
            Fitness object used to evaluate residuals.

        Returns
        -------
        tuple[np.ndarray | None, np.ndarray | None]
            ``(covariance_matrix, standard_errors)`` or ``(None, None)``
            when the computation fails.
        """
        r0 = fitness.residuals()
        n_points = len(r0)
        n_params = len(bumps_params)
        # Covariance is undefined without excess degrees of freedom.
        if n_points <= n_params:
            return None, None

        # Forward-difference Jacobian of the residuals; each parameter
        # is perturbed in place and restored afterwards.
        step = np.sqrt(np.finfo(float).eps)
        jacobian = np.empty((n_points, n_params))
        for j in range(n_params):
            orig = bumps_params[j].value
            h = step * max(abs(orig), 1.0)
            bumps_params[j].value = orig + h
            jacobian[:, j] = (fitness.residuals() - r0) / h
            bumps_params[j].value = orig

        # Scale (J^T J)^-1 by reduced chi-squared to get the covariance.
        chi2_reduced = np.sum(r0**2) / (n_points - n_params)
        try:
            cov = np.linalg.inv(jacobian.T @ jacobian) * chi2_reduced
        except np.linalg.LinAlgError:
            # Singular normal matrix — no covariance available.
            return None, None

        stderr = np.sqrt(np.abs(np.diag(cov)))
        return cov, stderr

    def _sync_result_to_parameters(  # noqa: PLR6301
        self,
        parameters: list[object],
        raw_result: object,
    ) -> None:
        """
        Synchronize the result from the solver to the parameters.

        Parameters
        ----------
        parameters : list[object]
            List of parameters being optimized.
        raw_result : object
            The result object returned by the solver, or a numpy array
            during optimization.
        """
        if hasattr(raw_result, 'x'):
            values = raw_result.x
            uncertainties = getattr(raw_result, 'dx', None)
        else:
            values = raw_result
            uncertainties = None

        for i, param in enumerate(parameters):
            param._set_value_from_minimizer(float(values[i]))
            if uncertainties is not None and i < len(uncertainties):
                param.uncertainty = float(uncertainties[i])
            else:
                param.uncertainty = None

    def _check_success(self, raw_result: object) -> bool:  # noqa: PLR6301
        """
        Determine success from bumps OptimizeResult.

        Parameters
        ----------
        raw_result : object
            The result object returned by the solver.

        Returns
        -------
        bool
            True if the optimization was successful.
        """
        return getattr(raw_result, 'success', False)

bumps_amoeba

Bumps minimizer variant using the Nelder-Mead simplex method.

BumpsAmoebaMinimizer

Bases: BumpsMinimizer

Bumps minimizer using the Nelder-Mead simplex method.

Source code in src/easydiffraction/analysis/minimizers/bumps_amoeba.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
@MinimizerFactory.register
class BumpsAmoebaMinimizer(BumpsMinimizer):
    """Bumps-backed minimizer running the Nelder-Mead (amoeba) simplex method."""

    type_info = TypeInfo(
        tag=MinimizerTypeEnum.BUMPS_AMOEBA,
        description='Bumps library with Nelder-Mead simplex method',
    )

    def __init__(
        self,
        name: str = MinimizerTypeEnum.BUMPS_AMOEBA,
        method: str = DEFAULT_METHOD,
        max_iterations: int = DEFAULT_MAX_ITERATIONS,
    ) -> None:
        # All fitting behaviour lives in BumpsMinimizer; only the
        # identity (name/type tag) differs for this variant.
        super().__init__(name=name, method=method, max_iterations=max_iterations)

bumps_de

Bumps minimizer variant using the differential evolution method.

BumpsDEMinimizer

Bases: BumpsMinimizer

Bumps minimizer using the differential evolution method.

Source code in src/easydiffraction/analysis/minimizers/bumps_de.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
@MinimizerFactory.register
class BumpsDEMinimizer(BumpsMinimizer):
    """Bumps-backed minimizer running the differential evolution method."""

    type_info = TypeInfo(
        tag=MinimizerTypeEnum.BUMPS_DE,
        description='Bumps library with differential evolution method',
    )

    def __init__(
        self,
        name: str = MinimizerTypeEnum.BUMPS_DE,
        method: str = DEFAULT_METHOD,
        max_iterations: int = DEFAULT_MAX_ITERATIONS,
    ) -> None:
        # All fitting behaviour lives in BumpsMinimizer; only the
        # identity (name/type tag) differs for this variant.
        super().__init__(name=name, method=method, max_iterations=max_iterations)

bumps_lm

Bumps minimizer variant using the Levenberg-Marquardt method.

BumpsLmMinimizer

Bases: BumpsMinimizer

Bumps minimizer explicitly using the Levenberg-Marquardt method.

Source code in src/easydiffraction/analysis/minimizers/bumps_lm.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
@MinimizerFactory.register
class BumpsLmMinimizer(BumpsMinimizer):
    """Bumps-backed minimizer explicitly selecting Levenberg-Marquardt."""

    type_info = TypeInfo(
        tag=MinimizerTypeEnum.BUMPS_LM,
        description='Bumps library with Levenberg-Marquardt method',
    )

    def __init__(
        self,
        name: str = MinimizerTypeEnum.BUMPS_LM,
        method: str = DEFAULT_METHOD,
        max_iterations: int = DEFAULT_MAX_ITERATIONS,
    ) -> None:
        # All fitting behaviour lives in BumpsMinimizer; only the
        # identity (name/type tag) differs for this variant.
        super().__init__(name=name, method=method, max_iterations=max_iterations)

dfols

DfolsMinimizer

Bases: MinimizerBase

Minimizer using DFO-LS (derivative-free least-squares).

Source code in src/easydiffraction/analysis/minimizers/dfols.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
@MinimizerFactory.register
class DfolsMinimizer(MinimizerBase):
    """Minimizer using DFO-LS (derivative-free least-squares)."""

    type_info = TypeInfo(
        tag=MinimizerTypeEnum.DFOLS,
        description='DFO-LS library for derivative-free least-squares optimization',
    )

    def __init__(
        self,
        name: str = MinimizerTypeEnum.DFOLS,
        max_iterations: int = DEFAULT_MAX_ITERATIONS,
        **kwargs: object,
    ) -> None:
        super().__init__(name=name, method=None, max_iterations=max_iterations)
        # Extra keyword arguments are accepted purely for API
        # compatibility and discarded.
        del kwargs

    def _prepare_solver_args(self, parameters: list[object]) -> dict[str, object]:  # noqa: PLR6301
        """Collect start values and bounds in the layout dfols.solve() expects."""
        start_values = np.array([p.value for p in parameters])
        lower = np.array([p.fit_min for p in parameters])
        upper = np.array([p.fit_max for p in parameters])
        return {'x0': start_values, 'bounds': (lower, upper)}

    def _run_solver(self, objective_function: object, **kwargs: object) -> object:
        """Invoke DFO-LS with the prepared start point and bounds."""
        return solve(
            objective_function,
            x0=kwargs.get('x0'),
            bounds=kwargs.get('bounds'),
            maxfun=self.max_iterations,
        )

    def _sync_result_to_parameters(  # noqa: PLR6301
        self,
        parameters: list[object],
        raw_result: object,
    ) -> None:
        """
        Synchronize the solver result back to the parameters.

        Parameters
        ----------
        parameters : list[object]
            List of parameters being optimized.
        raw_result : object
            The result object returned by the solver.
        """
        # dfols.solve() returns an object carrying `.x`; fall back to
        # the raw value for array-like inputs.
        optimized = raw_result.x if hasattr(raw_result, 'x') else raw_result

        for idx, param in enumerate(parameters):
            # Bypass validation but set the dirty flag so
            # _update_categories() knows work is needed.
            param._set_value_from_minimizer(optimized[idx])
            # DFO-LS reports no uncertainties; leave unset (could be
            # computed separately if ever required).
            param.uncertainty = None

    def _check_success(self, raw_result: object) -> bool:  # noqa: PLR6301
        """
        Determine success from the DFO-LS result object.

        Parameters
        ----------
        raw_result : object
            The result object returned by the solver.

        Returns
        -------
        bool
            True if the optimization was successful, False otherwise.
        """
        return raw_result.flag == raw_result.EXIT_SUCCESS

enums

Enumerations for minimizer types.

MinimizerTypeEnum

Bases: StrEnum

Supported minimizer types.

Source code in src/easydiffraction/analysis/minimizers/enums.py
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
class MinimizerTypeEnum(StrEnum):
    """Supported minimizer types."""

    LMFIT = 'lmfit'
    LMFIT_LEASTSQ = 'lmfit (leastsq)'
    LMFIT_LEAST_SQUARES = 'lmfit (least_squares)'
    DFOLS = 'dfols'
    BUMPS = 'bumps'
    BUMPS_LM = 'bumps (lm)'
    BUMPS_AMOEBA = 'bumps (amoeba)'
    BUMPS_DE = 'bumps (de)'

    @classmethod
    def default(cls) -> MinimizerTypeEnum:
        """Return the default minimizer type."""
        return cls.LMFIT_LEASTSQ

    def description(self) -> str:
        """
        Return a human-readable description of this minimizer type.
        """
        descriptions = {
            MinimizerTypeEnum.LMFIT: (
                'LMFIT library using the default Levenberg-Marquardt least squares method'
            ),
            MinimizerTypeEnum.LMFIT_LEASTSQ: (
                'LMFIT library with Levenberg-Marquardt least squares method'
            ),
            MinimizerTypeEnum.LMFIT_LEAST_SQUARES: (
                "LMFIT library with SciPy's trust region reflective algorithm"
            ),
            MinimizerTypeEnum.DFOLS: (
                'DFO-LS library for derivative-free least-squares optimization'
            ),
            MinimizerTypeEnum.BUMPS: (
                'BUMPS library using the default Levenberg-Marquardt method'
            ),
            MinimizerTypeEnum.BUMPS_LM: ('BUMPS library with Levenberg-Marquardt method'),
            MinimizerTypeEnum.BUMPS_AMOEBA: ('BUMPS library with Nelder-Mead simplex method'),
            MinimizerTypeEnum.BUMPS_DE: ('BUMPS library with differential evolution method'),
        }
        return descriptions.get(self, '')
default() classmethod

Return the default minimizer type.

Source code in src/easydiffraction/analysis/minimizers/enums.py
22
23
24
25
@classmethod
def default(cls) -> MinimizerTypeEnum:
    """Return the default minimizer type."""
    # lmfit with the Levenberg-Marquardt least squares method.
    return cls.LMFIT_LEASTSQ
description()

Return a human-readable description of this minimizer type.

Source code in src/easydiffraction/analysis/minimizers/enums.py
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
def description(self) -> str:
    """
    Return a human-readable description of this minimizer type.

    Falls back to an empty string for any member without a label.
    """
    labelled_members = (
        (
            MinimizerTypeEnum.LMFIT,
            'LMFIT library using the default Levenberg-Marquardt least squares method',
        ),
        (
            MinimizerTypeEnum.LMFIT_LEASTSQ,
            'LMFIT library with Levenberg-Marquardt least squares method',
        ),
        (
            MinimizerTypeEnum.LMFIT_LEAST_SQUARES,
            "LMFIT library with SciPy's trust region reflective algorithm",
        ),
        (
            MinimizerTypeEnum.DFOLS,
            'DFO-LS library for derivative-free least-squares optimization',
        ),
        (
            MinimizerTypeEnum.BUMPS,
            'BUMPS library using the default Levenberg-Marquardt method',
        ),
        (MinimizerTypeEnum.BUMPS_LM, 'BUMPS library with Levenberg-Marquardt method'),
        (MinimizerTypeEnum.BUMPS_AMOEBA, 'BUMPS library with Nelder-Mead simplex method'),
        (MinimizerTypeEnum.BUMPS_DE, 'BUMPS library with differential evolution method'),
    )
    for member, text in labelled_members:
        if member is self:
            return text
    return ''

factory

Minimizer factory — delegates to FactoryBase.

MinimizerFactory

Bases: FactoryBase

Factory for creating minimizer instances.

Source code in src/easydiffraction/analysis/minimizers/factory.py
13
14
15
16
17
18
class MinimizerFactory(FactoryBase):
    """Factory for creating minimizer instances."""

    # A single catch-all rule: the empty-frozenset key maps every request
    # to the library-wide default minimizer type.
    _default_rules: ClassVar[dict] = {frozenset(): MinimizerTypeEnum.default()}

lmfit

LmfitMinimizer

Bases: MinimizerBase

Minimizer using the lmfit package.

Source code in src/easydiffraction/analysis/minimizers/lmfit.py
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
@MinimizerFactory.register
class LmfitMinimizer(MinimizerBase):
    """Minimizer using the lmfit package."""

    type_info = TypeInfo(
        tag=MinimizerTypeEnum.LMFIT,
        description='LMFIT library using the default Levenberg-Marquardt least squares method',
    )

    def __init__(
        self,
        name: str = MinimizerTypeEnum.LMFIT,
        method: str = DEFAULT_METHOD,
        max_iterations: int = DEFAULT_MAX_ITERATIONS,
    ) -> None:
        super().__init__(name=name, method=method, max_iterations=max_iterations)

    def _prepare_solver_args(  # noqa: PLR6301
        self,
        parameters: list[object],
    ) -> dict[str, object]:
        """
        Build the ``lmfit.Parameters`` container for the solver.

        Parameters
        ----------
        parameters : list[object]
            List of parameters to be optimized.

        Returns
        -------
        dict[str, object]
            A dictionary holding the prepared ``lmfit.Parameters``
            object under the key ``'engine_parameters'``.
        """
        lm_params = lmfit.Parameters()
        for parameter in parameters:
            # Each parameter is registered under its minimizer-unique id
            # so results can be mapped back in _sync_result_to_parameters.
            lm_params.add(
                name=parameter._minimizer_uid,
                value=parameter.value,
                vary=parameter.free,
                min=parameter.fit_min,
                max=parameter.fit_max,
            )
        return {'engine_parameters': lm_params}

    def _run_solver(self, objective_function: object, **kwargs: object) -> object:
        """
        Run the lmfit solver.

        Parameters
        ----------
        objective_function : object
            The objective function to minimize.
        **kwargs : object
            Additional arguments for the solver; must contain the
            ``'engine_parameters'`` entry produced by
            ``_prepare_solver_args``.

        Returns
        -------
        object
            The result of the lmfit minimization.
        """
        lm_params = kwargs.get('engine_parameters')
        fit_result = lmfit.minimize(
            objective_function,
            params=lm_params,
            method=self.method,
            nan_policy='propagate',
            max_nfev=self.max_iterations,
        )
        return fit_result

    def _sync_result_to_parameters(  # noqa: PLR6301
        self,
        parameters: list[object],
        raw_result: object,
    ) -> None:
        """
        Synchronize the result from the solver to the parameters.

        Parameters
        ----------
        parameters : list[object]
            List of parameters being optimized.
        raw_result : object
            The result object returned by the solver; either an lmfit
            ``MinimizerResult`` (with a ``params`` attribute) or a bare
            ``lmfit.Parameters``-like mapping.
        """
        fitted = getattr(raw_result, 'params', raw_result)

        for parameter in parameters:
            entry = fitted.get(parameter._minimizer_uid)
            if entry is None:
                continue
            # Bypass validation but set the dirty flag so
            # _update_categories() knows work is needed.
            parameter._set_value_from_minimizer(entry.value)
            parameter.uncertainty = getattr(entry, 'stderr', None)

    def _check_success(self, raw_result: object) -> bool:  # noqa: PLR6301
        """
        Determine success from lmfit MinimizerResult.

        Parameters
        ----------
        raw_result : object
            The result object returned by the solver.

        Returns
        -------
        bool
            True if the optimization was successful, False otherwise.
        """
        # Missing 'success' attribute is treated as failure.
        return getattr(raw_result, 'success', False)

    def _iteration_callback(
        self,
        params: lmfit.Parameters,
        iter: int,
        resid: object,
        *args: object,
        **kwargs: object,
    ) -> None:
        """
        Handle each iteration callback of the minimizer.

        Parameters
        ----------
        params : lmfit.Parameters
            The current parameters.
        iter : int
            The current iteration number. (Name shadows the builtin but
            is kept to match lmfit's callback signature.)
        resid : object
            The residuals.
        *args : object
            Additional positional arguments.
        **kwargs : object
            Additional keyword arguments.
        """
        # Intentionally unused, required by callback signature
        del params, resid, args, kwargs
        self._iteration = iter

lmfit_least_squares

LMFIT minimizer variant using trust region reflective method.

LmfitLeastSquaresMinimizer

Bases: LmfitMinimizer

LMFIT minimizer using SciPy's trust region reflective algorithm.

Source code in src/easydiffraction/analysis/minimizers/lmfit_least_squares.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
@MinimizerFactory.register
class LmfitLeastSquaresMinimizer(LmfitMinimizer):
    """
    LMFIT minimizer using SciPy's trust region reflective algorithm.
    """

    type_info = TypeInfo(
        tag=MinimizerTypeEnum.LMFIT_LEAST_SQUARES,
        description="LMFIT library with SciPy's trust region reflective algorithm",
    )

    def __init__(
        self,
        name: str = MinimizerTypeEnum.LMFIT_LEAST_SQUARES,
        method: str = DEFAULT_METHOD,
        max_iterations: int = DEFAULT_MAX_ITERATIONS,
    ) -> None:
        # Delegate everything to the shared lmfit base implementation.
        super().__init__(name, method, max_iterations)

lmfit_leastsq

LMFIT minimizer variant using the Levenberg-Marquardt (leastsq) method.

LmfitLeastsqMinimizer

Bases: LmfitMinimizer

LMFIT minimizer explicitly using the Levenberg-Marquardt method.

Source code in src/easydiffraction/analysis/minimizers/lmfit_leastsq.py
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
@MinimizerFactory.register
class LmfitLeastsqMinimizer(LmfitMinimizer):
    """
    LMFIT minimizer explicitly using the Levenberg-Marquardt method.
    """

    type_info = TypeInfo(
        tag=MinimizerTypeEnum.LMFIT_LEASTSQ,
        description='LMFIT library with Levenberg-Marquardt least squares method',
    )

    def __init__(
        self,
        name: str = MinimizerTypeEnum.LMFIT_LEASTSQ,
        method: str = DEFAULT_METHOD,
        max_iterations: int = DEFAULT_MAX_ITERATIONS,
    ) -> None:
        # Delegate everything to the shared lmfit base implementation.
        super().__init__(name, method, max_iterations)

sequential

Sequential fitting infrastructure: template, worker, CSV, recovery.

SequentialFitTemplate dataclass

Snapshot of everything a worker needs to recreate and fit a project.

All fields are plain Python types (str, dict, list) so that the template can be pickled for ProcessPoolExecutor.

Source code in src/easydiffraction/analysis/sequential.py
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
@dataclass(frozen=True)
class SequentialFitTemplate:
    """
    Snapshot of everything a worker needs to recreate and fit a project.

    All fields are plain Python types (str, dict, list) so that the
    template can be pickled for ``ProcessPoolExecutor``.
    """

    # CIF text describing the sample model (structure) to rebuild in a worker.
    structure_cif: str
    # CIF text describing the experiment to rebuild in a worker.
    experiment_cif: str
    # Starting value for each parameter, keyed by its unique name.
    initial_params: dict[str, float]
    # Unique names of the parameters that are free (varied) during the fit.
    free_param_unique_names: list[str]
    # Alias definitions as plain str->str dicts; presumably label-to-parameter
    # mappings used by constraints — TODO confirm against the Analysis API.
    alias_defs: list[dict[str, str]]
    # Constraint expressions serialized as plain strings.
    constraint_defs: list[str]
    # Whether the constraints above should be applied during fitting.
    constraints_enabled: bool
    # Tag selecting the minimizer implementation (see MinimizerTypeEnum).
    minimizer_tag: str
    # Tag selecting the calculator backend.
    calculator_tag: str
    # Names of per-file diffrn metadata fields; presumably the keys returned
    # by the user's extract_diffrn callback — TODO confirm.
    diffrn_field_names: list[str]

fit_sequential(analysis, data_dir, max_workers=1, chunk_size=None, file_pattern='*', extract_diffrn=None, *, reverse=False)

Run sequential fitting over all data files in a directory.

Parameters:

Name Type Description Default
analysis object

The Analysis instance (owns project reference).

required
data_dir str

Path to directory containing data files.

required
max_workers int | str

Number of parallel worker processes. 1 = sequential (no subprocess overhead). 'auto' = physical CPU count. Uses ProcessPoolExecutor with spawn context when > 1.

1
chunk_size int | None

Files per chunk. Default None uses max_workers.

None
file_pattern str

Glob pattern to filter files in data_dir.

'*'
extract_diffrn Callable | None

User callback: f(file_path) → {diffrn_field: value}.

None
reverse bool

When True, process data files in reverse order. Useful when starting values are better matched to the last file (e.g. highest-temperature dataset in a cooling scan).

False
Source code in src/easydiffraction/analysis/sequential.py
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
def fit_sequential(
    analysis: object,
    data_dir: str,
    max_workers: int | str = 1,
    chunk_size: int | None = None,
    file_pattern: str = '*',
    extract_diffrn: Callable | None = None,
    *,
    reverse: bool = False,
) -> None:
    """
    Run sequential fitting over all data files in a directory.

    Parameters
    ----------
    analysis : object
        The ``Analysis`` instance (owns project reference).
    data_dir : str
        Path to directory containing data files.
    max_workers : int | str, default=1
        Number of parallel worker processes. ``1`` = sequential (no
        subprocess overhead). ``'auto'`` = physical CPU count. Uses
        ``ProcessPoolExecutor`` with ``spawn`` context when > 1.
    chunk_size : int | None, default=None
        Files per chunk. Default ``None`` uses ``max_workers``.
    file_pattern : str, default='*'
        Glob pattern to filter files in *data_dir*.
    extract_diffrn : Callable | None, default=None
        User callback: ``f(file_path) → {diffrn_field: value}``.
    reverse : bool, default=False
        When ``True``, process data files in reverse order. Useful when
        starting values are better matched to the last file (e.g.
        highest-temperature dataset in a cooling scan).
    """
    # Re-entry guard: worker subprocesses import this module and must
    # never start their own fitting loop (or a nested pool).
    if mp.parent_process() is not None:
        return

    project = analysis.project
    verb = VerbosityEnum(project.verbosity)

    _check_seq_preconditions(project)

    data_paths = extract_data_paths_from_dir(data_dir, file_pattern=file_pattern)
    template = _build_template(project)

    # Recovery: reuse any existing results CSV so files fitted in a
    # previous (possibly interrupted) run are skipped.
    csv_path, header, already_fitted, template = _setup_csv_and_recovery(
        project,
        template,
        verb,
    )

    remaining = [p for p in data_paths if p not in already_fitted]
    if reverse:
        remaining.reverse()
    if not remaining:
        if verb is not VerbosityEnum.SILENT:
            # Consistency fix: route through console.print like the rest
            # of the user-facing output in this function (was bare print).
            console.print('✅ All files already fitted. Nothing to do.')
        return

    max_workers, chunk_size = _resolve_workers(max_workers, chunk_size)
    chunks = [remaining[i : i + chunk_size] for i in range(0, len(remaining), chunk_size)]

    if verb is not VerbosityEnum.SILENT:
        console.paragraph('Sequential fitting')
        console.print(f"🚀 Starting fit process with '{analysis.fitter.selection}'...")
        console.print(
            f'📋 {len(remaining)} files in {len(chunks)} chunks (max_workers={max_workers})'
        )
        console.print('📈 Goodness-of-fit (reduced χ²):')

    # NOTE(review): the pool context appears to patch __main__ module
    # state (file/spec) so spawn-based workers can unpickle the template;
    # always restore it, even if the fit loop raises.
    pool_cm, main_mod, main_file_bak, main_spec_bak = _create_pool_context(max_workers)
    try:
        _run_fit_loop(pool_cm, chunks, template, (csv_path, header), extract_diffrn, verb)
    finally:
        _restore_main_state(main_mod, main_file_bak, main_spec_bak)

    if verb is not VerbosityEnum.SILENT:
        # Consistency fix: bare print → console.print (see above).
        console.print(
            f'✅ Sequential fitting complete: '
            f'{len(already_fitted) + len(remaining)} files processed.'
        )
        console.print(f'📄 Results saved to: {csv_path}')