diff --git a/docs/user-guide/dream/dream-advanced-powder-reduction.ipynb b/docs/user-guide/dream/dream-advanced-powder-reduction.ipynb index bb3ca987..f02589fe 100644 --- a/docs/user-guide/dream/dream-advanced-powder-reduction.ipynb +++ b/docs/user-guide/dream/dream-advanced-powder-reduction.ipynb @@ -628,7 +628,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.12.12" } }, "nbformat": 4, diff --git a/docs/user-guide/dream/dream-make-tof-lookup-table.ipynb b/docs/user-guide/dream/dream-make-wavelength-lookup-table.ipynb similarity index 68% rename from docs/user-guide/dream/dream-make-tof-lookup-table.ipynb rename to docs/user-guide/dream/dream-make-wavelength-lookup-table.ipynb index 3a1515d8..05047add 100644 --- a/docs/user-guide/dream/dream-make-tof-lookup-table.ipynb +++ b/docs/user-guide/dream/dream-make-wavelength-lookup-table.ipynb @@ -5,9 +5,9 @@ "id": "0", "metadata": {}, "source": [ - "# Create a time-of-flight lookup table for DREAM\n", + "# Create a wavelength lookup table for DREAM\n", "\n", - "This notebook shows how to create a time-of-flight lookup table for the DREAM instrument." + "This notebook shows how to create a wavelength lookup table for the DREAM instrument." 
] }, { @@ -18,7 +18,7 @@ "outputs": [], "source": [ "import scipp as sc\n", - "from ess.reduce import time_of_flight\n", + "from ess.reduce import unwrap\n", "from ess.reduce.nexus.types import AnyRun\n", "from ess.dream.beamline import InstrumentConfiguration, choppers" ] @@ -40,7 +40,7 @@ "metadata": {}, "outputs": [], "source": [ - "disk_choppers = choppers(InstrumentConfiguration.high_flux_BC215)" + "disk_choppers = choppers(InstrumentConfiguration.high_flux_BC240)" ] }, { @@ -60,17 +60,17 @@ "metadata": {}, "outputs": [], "source": [ - "wf = time_of_flight.TofLookupTableWorkflow()\n", + "wf = unwrap.LookupTableWorkflow()\n", "\n", - "wf[time_of_flight.LtotalRange] = sc.scalar(5.0, unit=\"m\"), sc.scalar(80.0, unit=\"m\")\n", - "wf[time_of_flight.NumberOfSimulatedNeutrons] = 200_000 # Increase this number for more reliable results\n", - "wf[time_of_flight.SourcePosition] = sc.vector([0, 0, 0], unit='m')\n", - "wf[time_of_flight.DiskChoppers[AnyRun]] = disk_choppers\n", - "wf[time_of_flight.DistanceResolution] = sc.scalar(0.1, unit=\"m\")\n", - "wf[time_of_flight.TimeResolution] = sc.scalar(250.0, unit='us')\n", - "wf[time_of_flight.PulsePeriod] = 1.0 / sc.scalar(14.0, unit=\"Hz\")\n", - "wf[time_of_flight.PulseStride] = 1\n", - "wf[time_of_flight.PulseStrideOffset] = None" + "wf[unwrap.LtotalRange] = sc.scalar(5.0, unit=\"m\"), sc.scalar(80.0, unit=\"m\")\n", + "wf[unwrap.NumberOfSimulatedNeutrons] = 200_000 # Increase this number for more reliable results\n", + "wf[unwrap.SourcePosition] = sc.vector([0, 0, 0], unit='m')\n", + "wf[unwrap.DiskChoppers[AnyRun]] = disk_choppers\n", + "wf[unwrap.DistanceResolution] = sc.scalar(0.1, unit=\"m\")\n", + "wf[unwrap.TimeResolution] = sc.scalar(250.0, unit='us')\n", + "wf[unwrap.PulsePeriod] = 1.0 / sc.scalar(14.0, unit=\"Hz\")\n", + "wf[unwrap.PulseStride] = 1\n", + "wf[unwrap.PulseStrideOffset] = None" ] }, { @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - "table = 
wf.compute(time_of_flight.TimeOfFlightLookupTable)\n", + "table = wf.compute(unwrap.LookupTable)\n", "table.array" ] }, @@ -117,7 +117,7 @@ "metadata": {}, "outputs": [], "source": [ - "table.save_hdf5('DREAM-high-flux-tof-lut-5m-80m.h5')" + "table.save_hdf5('DREAM-high-flux-wavelength-lut-5m-80m-bc240.h5')" ] } ], diff --git a/docs/user-guide/dream/dream-powder-reduction.ipynb b/docs/user-guide/dream/dream-powder-reduction.ipynb index b067bca1..5a3425f9 100644 --- a/docs/user-guide/dream/dream-powder-reduction.ipynb +++ b/docs/user-guide/dream/dream-powder-reduction.ipynb @@ -56,7 +56,7 @@ "metadata": {}, "outputs": [], "source": [ - "workflow = dream.DreamGeant4Workflow(\n", + "wf = dream.DreamGeant4Workflow(\n", " run_norm=powder.RunNormalization.monitor_histogram,\n", ")" ] @@ -77,26 +77,26 @@ "metadata": {}, "outputs": [], "source": [ - "workflow[Filename[SampleRun]] = dream.data.simulated_diamond_sample()\n", - "workflow[Filename[VanadiumRun]] = dream.data.simulated_vanadium_sample()\n", - "workflow[Filename[EmptyCanRun]] = dream.data.simulated_empty_can()\n", - "workflow[CalibrationFilename] = None\n", + "wf[Filename[SampleRun]] = dream.data.simulated_diamond_sample()\n", + "wf[Filename[VanadiumRun]] = dream.data.simulated_vanadium_sample()\n", + "wf[Filename[EmptyCanRun]] = dream.data.simulated_empty_can()\n", + "wf[CalibrationFilename] = None\n", "\n", - "workflow[MonitorFilename[SampleRun]] = dream.data.simulated_monitor_diamond_sample()\n", - "workflow[MonitorFilename[VanadiumRun]] = dream.data.simulated_monitor_vanadium_sample()\n", - "workflow[MonitorFilename[EmptyCanRun]] = dream.data.simulated_monitor_empty_can()\n", - "workflow[CaveMonitorPosition] = sc.vector([0.0, 0.0, -4220.0], unit=\"mm\")\n", + "wf[MonitorFilename[SampleRun]] = dream.data.simulated_monitor_diamond_sample()\n", + "wf[MonitorFilename[VanadiumRun]] = dream.data.simulated_monitor_vanadium_sample()\n", + "wf[MonitorFilename[EmptyCanRun]] = 
dream.data.simulated_monitor_empty_can()\n", + "wf[CaveMonitorPosition] = sc.vector([0.0, 0.0, -4220.0], unit=\"mm\")\n", "\n", - "workflow[dream.InstrumentConfiguration] = dream.InstrumentConfiguration.high_flux_BC215\n", + "wf[dream.InstrumentConfiguration] = dream.InstrumentConfiguration.high_flux_BC215\n", "# Select a detector bank:\n", - "workflow[NeXusDetectorName] = \"mantle\"\n", + "wf[NeXusDetectorName] = \"mantle\"\n", "# We drop uncertainties where they would otherwise lead to correlations:\n", - "workflow[UncertaintyBroadcastMode] = UncertaintyBroadcastMode.drop\n", + "wf[UncertaintyBroadcastMode] = UncertaintyBroadcastMode.drop\n", "# Edges for binning in d-spacing:\n", - "workflow[DspacingBins] = sc.linspace(\"dspacing\", 0.3, 2.3434, 201, unit=\"angstrom\")\n", + "wf[DspacingBins] = sc.linspace(\"dspacing\", 0.3, 2.3434, 201, unit=\"angstrom\")\n", "\n", "# Do not mask any pixels / voxels:\n", - "workflow = powder.with_pixel_mask_filenames(workflow, [])" + "wf = powder.with_pixel_mask_filenames(wf, [])" ] }, { @@ -121,7 +121,7 @@ "metadata": {}, "outputs": [], "source": [ - "results = workflow.compute([\n", + "results = wf.compute([\n", " EmptyCanSubtractedIofDspacing,\n", " ReducedEmptyCanSubtractedTofCIF\n", "])\n", @@ -145,9 +145,7 @@ "outputs": [], "source": [ "histogram = intensity.hist()\n", - "fig = histogram.plot(title=intensity.coords['detector'].value.capitalize())\n", - "fig.ax.set_ylabel(f\"I(d) [{histogram.unit}]\")\n", - "fig" + "histogram.plot(title=intensity.coords['detector'].value.capitalize(), ylabel=f\"I(d) [{histogram.unit}]\")" ] }, { @@ -229,7 +227,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.10" + "version": "3.12.12" } }, "nbformat": 4, diff --git a/src/ess/beer/types.py b/src/ess/beer/types.py index c4655bf5..cd29ca54 100644 --- a/src/ess/beer/types.py +++ b/src/ess/beer/types.py @@ -12,9 +12,7 @@ import sciline import scipp as sc - from ess.reduce.nexus.types import 
Filename, RawDetector, RunType, SampleRun
-from ess.reduce.time_of_flight.types import TofDetector
 
 
 class StreakClusteredData(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
@@ -24,7 +22,6 @@ class StreakClusteredData(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
 RawDetector = RawDetector
 Filename = Filename
 SampleRun = SampleRun
-TofDetector = TofDetector
 
 
 class DetectorBank(Enum):
@@ -58,3 +55,9 @@ class DetectorBank(Enum):
 CIFPeaksMinIntensity = NewType("CIFPeaksMinIntensity", sc.Variable)
 """Minimum peak intensity for peaks from CIF file to be included in :py:`DHKLList`."""
+
+
+class TofDetector(sciline.Scope[RunType, sc.DataArray], sc.DataArray):
+    """
+    Detector with a time-of-flight coordinate
+    """
diff --git a/src/ess/dream/data.py b/src/ess/dream/data.py
index 1e8136eb..33f43908 100644
--- a/src/ess/dream/data.py
+++ b/src/ess/dream/data.py
@@ -40,6 +40,9 @@
         # `shrink_nexus.py` script in the `tools` folder at the top level of the
         # `essdiffraction` repository.
         "TEST_DREAM_nexus_sorted-2023-12-07.nxs": "md5:599b426a93c46a7b4b09a874bf288c53",  # noqa: E501
+        # Wavelength lookup tables
+        "DREAM-high-flux-wavelength-lut-5m-80m-bc215.h5": "md5:10c80c9de311cfa246f7b2c165eb0b49",  # noqa: E501
+        "DREAM-high-flux-wavelength-lut-5m-80m-bc240.h5": "md5:9741176f8da9b34c2a15967a43e21462",  # noqa: E501
     },
 )
@@ -293,3 +296,36 @@ def tof_lookup_table_high_flux(bc: Literal[215, 240] = 215) -> Path:
             return get_path("DREAM-high-flux-tof-lut-5m-80m-bc240.h5")
         case _:
             raise ValueError(f"Unsupported band-control chopper (BC) value: {bc}")
+
+
+def lookup_table_high_flux(bc: Literal[215, 240] = 215) -> Path:
+    """Path to a HDF5 file containing a wavelength lookup table for high-flux mode.
+
+    The table was created using the ``tof`` package and the chopper settings for the
+    DREAM instrument in high-flux mode.
+    Can return tables for two different band-control chopper (BC) settings:
+    - ``bc=215``: corresponds to the settings of the choppers in the tutorial data.
+ - ``bc=240``: a setting with less time overlap between frames. + + Note that the phase of the band-control chopper (BCC) was set to 215 degrees in the + Geant4 simulation which generated the data used in the documentation notebooks. + This has since been found to be non-optimal as it leads to time overlap between the + two frames, and a value of 240 degrees is now recommended. + + This table was computed using `Create a wavelength lookup table for DREAM + <../../user-guide/dream/dream-make-wavelength-lookup-table.rst>`_ + with ``NumberOfSimulatedNeutrons = 5_000_000``. + + Parameters + ---------- + bc: + Band-control chopper (BC) setting. The default is 215, which corresponds to the + settings of the choppers in the tutorial data. + """ + match bc: + case 215: + return get_path("DREAM-high-flux-wavelength-lut-5m-80m-bc215.h5") + case 240: + return get_path("DREAM-high-flux-wavelength-lut-5m-80m-bc240.h5") + case _: + raise ValueError(f"Unsupported band-control chopper (BC) value: {bc}") diff --git a/src/ess/dream/workflows.py b/src/ess/dream/workflows.py index 9c8d9ce6..e8124fdb 100644 --- a/src/ess/dream/workflows.py +++ b/src/ess/dream/workflows.py @@ -6,6 +6,10 @@ import sciline import scipp as sc import scippnexus as snx +from ess.reduce.nexus.types import DetectorBankSizes, NeXusName +from ess.reduce.parameter import parameter_mappers +from ess.reduce.unwrap import GenericUnwrapWorkflow +from ess.reduce.workflow import register_workflow from scippneutron.metadata import Software from ess.powder import providers as powder_providers @@ -18,21 +22,17 @@ CaveMonitorPosition, # Should this be a DREAM-only parameter? 
     EmptyCanRun,
     KeepEvents,
+    LookupTableFilename,
     LookupTableRelativeErrorThreshold,
     PixelMaskFilename,
     Position,
     ReducerSoftware,
     SampleRun,
-    TimeOfFlightLookupTableFilename,
     TofMask,
     TwoThetaMask,
     VanadiumRun,
     WavelengthMask,
 )
-from ess.reduce.nexus.types import DetectorBankSizes, NeXusName
-from ess.reduce.parameter import parameter_mappers
-from ess.reduce.time_of_flight import GenericTofWorkflow
-from ess.reduce.workflow import register_workflow
 
 from .beamline import InstrumentConfiguration
 from .io.cif import (
@@ -72,24 +72,24 @@
 def _get_lookup_table_filename_from_configuration(
     configuration: InstrumentConfiguration,
-) -> TimeOfFlightLookupTableFilename:
-    from .data import tof_lookup_table_high_flux
+) -> LookupTableFilename:
+    from .data import lookup_table_high_flux
 
     match configuration:
         case InstrumentConfiguration.high_flux_BC215:
-            out = tof_lookup_table_high_flux(bc=215)
+            out = lookup_table_high_flux(bc=215)
         case InstrumentConfiguration.high_flux_BC240:
-            out = tof_lookup_table_high_flux(bc=240)
+            out = lookup_table_high_flux(bc=240)
         case InstrumentConfiguration.high_resolution:
             raise NotImplementedError("High resolution configuration not yet supported")
-    return TimeOfFlightLookupTableFilename(out)
+    return LookupTableFilename(out)
 
 
 def _collect_reducer_software() -> ReducerSoftware:
     return ReducerSoftware(
         [
             Software.from_package_metadata('essdiffraction'),
             Software.from_package_metadata('scippneutron'),
             Software.from_package_metadata('scipp'),
         ]
@@ -99,7 +99,7 @@ def DreamWorkflow(**kwargs) -> sciline.Pipeline:
     """
     Dream generic workflow with default parameters.
-    The workflow is based on the GenericTofWorkflow.
+    The workflow is based on the GenericUnwrapWorkflow.
     It can load data from a NeXus file recorded on the DREAM instrument, and
     can compute time-of-flight for the neutron events.
@@ -110,9 +110,9 @@ def DreamWorkflow(**kwargs) -> sciline.Pipeline: ---------- kwargs: Additional keyword arguments are forwarded to the base - :func:`GenericTofWorkflow`. + :func:`GenericUnwrapWorkflow`. """ - wf = GenericTofWorkflow( + wf = GenericUnwrapWorkflow( run_types=[SampleRun, VanadiumRun, EmptyCanRun], monitor_types=[BunkerMonitor, CaveMonitor], **kwargs, diff --git a/src/ess/powder/conversion.py b/src/ess/powder/conversion.py index 42f21743..826793ef 100644 --- a/src/ess/powder/conversion.py +++ b/src/ess/powder/conversion.py @@ -10,10 +10,10 @@ from .calibration import OutputCalibrationData from .correction import merge_calibration -from .logging import get_logger from .types import ( CalibrationData, CorrectedDetector, + DspacingDetector, ElasticCoordTransformGraph, EmptyCanSubtractedIntensityTof, EmptyCanSubtractedIofDspacing, @@ -21,14 +21,10 @@ IntensityDspacing, IntensityTof, MonitorCoordTransformGraph, - MonitorType, Position, RunType, SampleRun, - TofDetector, - TofMonitor, WavelengthDetector, - WavelengthMonitor, ) @@ -64,6 +60,8 @@ def _dspacing_from_diff_calibration_a0_impl(t, t0, c): def _dspacing_from_diff_calibration( + # TODO: should not be tof here but a time-of-arrival + # See https://github.com/scipp/essdiffraction/issues/255 tof: sc.Variable, tzero: sc.Variable, difa: sc.Variable, @@ -98,31 +96,21 @@ def _consume_positions(position, sample_position, source_position): def to_dspacing_with_calibration( data: sc.DataArray, calibration: sc.Dataset, + graph: dict, ) -> sc.DataArray: """ Transform coordinates to d-spacing from calibration parameters. - - Computes d-spacing from time-of-flight stored in `data`. - - Attention - --------- - `data` may have a wavelength coordinate and dimension, - but those are discarded. - Only the stored time-of-flight is used, that is, any modifications to - the wavelength coordinate after it was computed from time-of-flight are lost. 
- - Raises - ------ - KeyError - If `data` does not contain a 'tof' coordinate. + Computes d-spacing from wavelength stored in `data`. Parameters ---------- data: - Input data in tof or wavelength dimension. - Must have a tof coordinate. + Input data in wavelength dimension. + Must have a wavelength coordinate. calibration: Calibration data. + graph: + Graph for the coordinate transformation, used to restore tof from wavelength. Returns ------- @@ -134,9 +122,14 @@ def to_dspacing_with_calibration( ess.powder.conversions.dspacing_from_diff_calibration """ out = merge_calibration(into=data, calibration=calibration) - out = _restore_tof_if_in_wavelength(out) - graph = {"dspacing": _dspacing_from_diff_calibration} + # TODO: we should not be restoring tof here, as the calibration should be converting + # a time of arrival to d-spacing, and not a tof. + # We defer this to a later step: https://github.com/scipp/essdiffraction/issues/255 + # Restore tof from wavelength + out = out.transform_coords("tof", graph=graph, keep_intermediate=False) + + pos_graph = {"dspacing": _dspacing_from_diff_calibration} # `_dspacing_from_diff_calibration` does not need positions but conceptually, # the conversion maps from positions to d-spacing. 
# The mechanism with `_tag_positions_consumed` is meant to ensure that, @@ -145,10 +138,10 @@ def to_dspacing_with_calibration( if "position" in out.coords or ( out.bins is not None and "position" in out.bins.coords ): - graph["_tag_positions_consumed"] = _consume_positions + pos_graph["_tag_positions_consumed"] = _consume_positions else: - graph["_tag_positions_consumed"] = lambda: sc.scalar(0) - out = out.transform_coords("dspacing", graph=graph, keep_intermediate=False) + pos_graph["_tag_positions_consumed"] = lambda: sc.scalar(0) + out = out.transform_coords("dspacing", graph=pos_graph, keep_intermediate=False) out.coords.pop("_tag_positions_consumed", None) return CorrectedDetector[RunType](out) @@ -178,7 +171,7 @@ def powder_coordinate_transformation_graph( return ElasticCoordTransformGraph( { **scn.conversion.graph.beamline.beamline(scatter=True), - **scn.conversion.graph.tof.elastic("tof"), + **scn.conversion.graph.tof.elastic("wavelength"), 'source_position': lambda: source_position, 'sample_position': lambda: sample_position, 'gravity': lambda: gravity, @@ -186,27 +179,11 @@ def powder_coordinate_transformation_graph( ) -def _restore_tof_if_in_wavelength(data: sc.DataArray) -> sc.DataArray: - out = data.copy(deep=False) - outer = out.coords.get("wavelength", None) - if out.bins is not None: - binned = out.bins.coords.get("wavelength", None) - else: - binned = None - - if outer is not None or binned is not None: - get_logger().info("Discarded coordinate 'wavelength' in favor of 'tof'.") - - if "wavelength" in out.dims: - out = out.rename_dims(wavelength="tof") - return out - - def add_scattering_coordinates_from_positions( - data: TofDetector[RunType], + data: WavelengthDetector[RunType], graph: ElasticCoordTransformGraph[RunType], calibration: CalibrationData, -) -> WavelengthDetector[RunType]: +) -> DspacingDetector[RunType]: """ Add ``wavelength``, ``two_theta`` and ``dspacing`` coordinates to the data. 
The input ``data`` must have a ``tof`` coordinate, as well as the necessary @@ -226,7 +203,7 @@ def add_scattering_coordinates_from_positions( keep_intermediate=False, ) out = convert_to_dspacing(out, graph, calibration) - return WavelengthDetector[RunType](out) + return DspacingDetector[RunType](out) def convert_to_dspacing( @@ -237,7 +214,7 @@ def convert_to_dspacing( if calibration is None: out = data.transform_coords(["dspacing"], graph=graph, keep_intermediate=False) else: - out = to_dspacing_with_calibration(data, calibration=calibration) + out = to_dspacing_with_calibration(data, calibration=calibration, graph=graph) for key in ("wavelength", "two_theta"): if key in out.coords.keys(): out.coords.set_aligned(key, False) @@ -293,7 +270,7 @@ def powder_monitor_coordinate_transformation_graph( return MonitorCoordTransformGraph( { **scn.conversion.graph.beamline.beamline(scatter=False), - **scn.conversion.graph.tof.elastic("tof"), + **scn.conversion.graph.tof.elastic("wavelength"), 'source_position': lambda: source_position, 'sample_position': lambda: sample_position, 'gravity': lambda: gravity, @@ -301,20 +278,10 @@ def powder_monitor_coordinate_transformation_graph( ) -def convert_monitor_to_wavelength( - monitor: TofMonitor[RunType, MonitorType], - graph: MonitorCoordTransformGraph[RunType], -) -> WavelengthMonitor[RunType, MonitorType]: - return WavelengthMonitor[RunType, MonitorType]( - monitor.transform_coords("wavelength", graph=graph, keep_intermediate=False) - ) - - providers = ( add_scattering_coordinates_from_positions, convert_reduced_to_tof, convert_reduced_to_empty_can_subtracted_tof, - convert_monitor_to_wavelength, powder_coordinate_transformation_graph, powder_monitor_coordinate_transformation_graph, ) diff --git a/src/ess/powder/types.py b/src/ess/powder/types.py index db5b8065..d7f4afe9 100644 --- a/src/ess/powder/types.py +++ b/src/ess/powder/types.py @@ -14,12 +14,11 @@ import sciline import scipp as sc -from scippneutron.io import cif -from 
scippneutron.metadata import Person, Software - from ess.reduce.nexus import types as reduce_t -from ess.reduce.time_of_flight import types as tof_t from ess.reduce.uncertainty import UncertaintyBroadcastMode as _UncertaintyBroadcastMode +from ess.reduce.unwrap import types as unwrap_t +from scippneutron.io import cif +from scippneutron.metadata import Person, Software EmptyDetector = reduce_t.EmptyDetector EmptyMonitor = reduce_t.EmptyMonitor @@ -36,12 +35,12 @@ DetectorBankSizes = reduce_t.DetectorBankSizes -TofDetector = tof_t.TofDetector -TofMonitor = tof_t.TofMonitor -PulseStrideOffset = tof_t.PulseStrideOffset -TimeOfFlightLookupTable = tof_t.TimeOfFlightLookupTable -TimeOfFlightLookupTableFilename = tof_t.TimeOfFlightLookupTableFilename -LookupTableRelativeErrorThreshold = tof_t.LookupTableRelativeErrorThreshold +WavelengthDetector = unwrap_t.WavelengthDetector +WavelengthMonitor = unwrap_t.WavelengthMonitor +PulseStrideOffset = unwrap_t.PulseStrideOffset +LookupTable = unwrap_t.LookupTable +LookupTableFilename = unwrap_t.LookupTableFilename +LookupTableRelativeErrorThreshold = unwrap_t.LookupTableRelativeErrorThreshold SampleRun = reduce_t.SampleRun VanadiumRun = reduce_t.VanadiumRun @@ -95,7 +94,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: """Detector calibration data.""" -class WavelengthDetector(sciline.Scope[RunType, sc.DataArray], sc.DataArray): +class DspacingDetector(sciline.Scope[RunType, sc.DataArray], sc.DataArray): """Data with scattering coordinates computed for all events: wavelength, 2theta, d-spacing.""" @@ -170,9 +169,7 @@ class MonitorFilename(sciline.Scope[RunType, Path], Path): """ -class WavelengthMonitor( - sciline.Scope[RunType, MonitorType, sc.DataArray], sc.DataArray -): +class DspacingMonitor(sciline.Scope[RunType, MonitorType, sc.DataArray], sc.DataArray): """Monitor histogram in wavelength.""" @@ -197,7 +194,7 @@ class RawDataAndMetadata(sciline.Scope[RunType, sc.DataGroup], sc.DataGroup): TofMask = 
NewType("TofMask", Callable | None) -"""TofMask is a callable that returns a mask for a given TofData.""" +"""TofMask is a callable that returns a mask for masking time-of-flight regions.""" TwoThetaMask = NewType("TwoThetaMask", Callable | None) diff --git a/src/ess/powder/workflow.py b/src/ess/powder/workflow.py index 8fbe0b3d..a0ca3828 100644 --- a/src/ess/powder/workflow.py +++ b/src/ess/powder/workflow.py @@ -2,17 +2,17 @@ from .masking import apply_masks from .types import ( CorrectedDetector, + DspacingDetector, MaskedDetectorIDs, RunType, TofMask, TwoThetaMask, - WavelengthDetector, WavelengthMask, ) def add_masks_and_corrections( - da: WavelengthDetector[RunType], + da: DspacingDetector[RunType], masked_pixel_ids: MaskedDetectorIDs, tof_mask_func: TofMask, wavelength_mask_func: WavelengthMask, diff --git a/src/ess/snspowder/powgen/data.py b/src/ess/snspowder/powgen/data.py index 140d3a58..8be882b4 100644 --- a/src/ess/snspowder/powgen/data.py +++ b/src/ess/snspowder/powgen/data.py @@ -5,22 +5,38 @@ from pathlib import Path +import sciline as sl import scipp as sc import scippnexus as snx +from ess.reduce.data import Entry, make_registry from ess.powder.types import ( AccumulatedProtonCharge, CalibrationData, CalibrationFilename, DetectorBankSizes, + ElasticCoordTransformGraph, Filename, + MonitorCoordTransformGraph, + MonitorType, Position, ProtonCharge, RawDataAndMetadata, RunType, - TofDetector, + WavelengthDetector, + WavelengthMonitor, ) -from ess.reduce.data import Entry, make_registry + + +class TofDetector(sl.Scope[RunType, sc.DataArray], sc.DataArray): + """ + Detector with a time-of-flight coordinate + """ + + +class TofMonitor(sl.Scope[RunType, MonitorType, sc.DataArray], sc.DataArray): + """Monitor data with time-of-flight coordinate.""" + _registry = make_registry( "ess/powgen", @@ -234,6 +250,24 @@ def sample_position(dg: RawDataAndMetadata[RunType]) -> Position[snx.NXsample, R return Position[snx.NXsample, 
RunType](dg["data"].coords["sample_position"]) +def convert_detector_to_wavelength( + da: TofDetector[RunType], + graph: ElasticCoordTransformGraph[RunType], +) -> WavelengthDetector[RunType]: + return WavelengthDetector[RunType]( + da.transform_coords("wavelength", graph=graph, keep_intermediate=False) + ) + + +def convert_monitor_to_wavelength( + monitor: TofMonitor[RunType, MonitorType], + graph: MonitorCoordTransformGraph[RunType], +) -> WavelengthMonitor[RunType, MonitorType]: + return WavelengthMonitor[RunType, MonitorType]( + monitor.transform_coords("wavelength", graph=graph, keep_intermediate=False) + ) + + providers = ( pooch_load, pooch_load_calibration, @@ -242,5 +276,7 @@ def sample_position(dg: RawDataAndMetadata[RunType]) -> Position[snx.NXsample, R extract_raw_data, sample_position, source_position, + convert_detector_to_wavelength, + convert_monitor_to_wavelength, ) """Sciline Providers for loading POWGEN data.""" diff --git a/tests/beer/mcstas_reduction_test.py b/tests/beer/mcstas_reduction_test.py index 40caec52..32ac0875 100644 --- a/tests/beer/mcstas_reduction_test.py +++ b/tests/beer/mcstas_reduction_test.py @@ -1,6 +1,7 @@ import numpy as np import scipp as sc import scippneutron as scn +from ess.reduce.nexus.types import Filename, SampleRun from scipp.testing import assert_allclose from ess.beer import ( @@ -15,9 +16,7 @@ mcstas_silicon_new_model, ) from ess.beer.io import load_beer_mcstas, load_beer_mcstas_monitor -from ess.beer.types import DetectorBank, DHKLList -from ess.reduce.nexus.types import Filename, SampleRun -from ess.reduce.time_of_flight.types import TofDetector +from ess.beer.types import DetectorBank, DHKLList, TofDetector def test_can_reduce_using_known_peaks_workflow(): diff --git a/tests/dream/geant4_reduction_test.py b/tests/dream/geant4_reduction_test.py index e4a11ddc..a82c5486 100644 --- a/tests/dream/geant4_reduction_test.py +++ b/tests/dream/geant4_reduction_test.py @@ -8,6 +8,9 @@ import sciline import scipp as sc 
import scipp.testing +from ess.reduce import unwrap +from ess.reduce import workflow as reduce_workflow +from ess.reduce.nexus.types import AnyRun from scippneutron import metadata from scippneutron._utils import elem_unit @@ -31,12 +34,12 @@ IntensityDspacingTwoTheta, IntensityTof, KeepEvents, + LookupTable, + LookupTableFilename, MonitorFilename, NeXusDetectorName, ReducedTofCIF, SampleRun, - TimeOfFlightLookupTable, - TimeOfFlightLookupTableFilename, TofMask, TwoThetaBins, TwoThetaMask, @@ -44,9 +47,6 @@ VanadiumRun, WavelengthMask, ) -from ess.reduce import time_of_flight -from ess.reduce import workflow as reduce_workflow -from ess.reduce.nexus.types import AnyRun params = { Filename[SampleRun]: dream.data.simulated_diamond_sample(small=True), @@ -59,8 +59,7 @@ CalibrationFilename: None, UncertaintyBroadcastMode: UncertaintyBroadcastMode.drop, DspacingBins: sc.linspace('dspacing', 0.0, 2.3434, 201, unit='angstrom'), - TofMask: lambda x: (x < sc.scalar(0.0, unit='us').to(unit=elem_unit(x))) - | (x > sc.scalar(86e3, unit='us').to(unit=elem_unit(x))), + TofMask: None, TwoThetaMask: None, WavelengthMask: None, CIFAuthors: CIFAuthors( @@ -112,37 +111,36 @@ def test_pipeline_can_compute_dspacing_result_without_empty_can(workflow): def test_pipeline_can_compute_dspacing_result_using_lookup_table_filename(workflow): workflow = powder.with_pixel_mask_filenames(workflow, []) - workflow[TimeOfFlightLookupTableFilename] = dream.data.tof_lookup_table_high_flux() + workflow[LookupTableFilename] = dream.data.lookup_table_high_flux() result = workflow.compute(EmptyCanSubtractedIofDspacing) assert result.sizes == {'dspacing': len(params[DspacingBins]) - 1} assert sc.identical(result.coords['dspacing'], params[DspacingBins]) @pytest.fixture(scope="module") -def dream_tof_lookup_table(): - lut_wf = time_of_flight.TofLookupTableWorkflow() - lut_wf[time_of_flight.DiskChoppers[AnyRun]] = dream.beamline.choppers( +def dream_lookup_table(): + lut_wf = unwrap.LookupTableWorkflow() + 
lut_wf[unwrap.DiskChoppers[AnyRun]] = dream.beamline.choppers( dream.beamline.InstrumentConfiguration.high_flux_BC215 ) - lut_wf[time_of_flight.SourcePosition] = sc.vector(value=[0, 0, -76.55], unit="m") - lut_wf[time_of_flight.NumberOfSimulatedNeutrons] = 500_000 - lut_wf[time_of_flight.SimulationSeed] = 555 - lut_wf[time_of_flight.PulseStride] = 1 - lut_wf[time_of_flight.LtotalRange] = ( + lut_wf[unwrap.SourcePosition] = sc.vector(value=[0, 0, -76.55], unit="m") + lut_wf[unwrap.NumberOfSimulatedNeutrons] = 500_000 + lut_wf[unwrap.SimulationSeed] = 555 + lut_wf[unwrap.PulseStride] = 1 + lut_wf[unwrap.LtotalRange] = ( sc.scalar(60.0, unit="m"), sc.scalar(80.0, unit="m"), ) - lut_wf[time_of_flight.DistanceResolution] = sc.scalar(0.1, unit="m") - lut_wf[time_of_flight.TimeResolution] = sc.scalar(250.0, unit='us') - lut_wf[time_of_flight.LookupTableRelativeErrorThreshold] = 0.02 - return lut_wf.compute(time_of_flight.TimeOfFlightLookupTable) + lut_wf[unwrap.DistanceResolution] = sc.scalar(0.1, unit="m") + lut_wf[unwrap.TimeResolution] = sc.scalar(250.0, unit='us') + return lut_wf.compute(unwrap.LookupTable) def test_pipeline_can_compute_dspacing_result_using_custom_built_tof_lookup( - workflow, dream_tof_lookup_table + workflow, dream_lookup_table ): workflow = powder.with_pixel_mask_filenames(workflow, []) - workflow[TimeOfFlightLookupTable] = dream_tof_lookup_table + workflow[LookupTable] = dream_lookup_table result = workflow.compute(IntensityDspacing[SampleRun]) assert result.sizes == {'dspacing': len(params[DspacingBins]) - 1}