diff --git a/.github/workflows/build_conda.yaml b/.github/workflows/build_conda.yaml index e2adc41bf..ef8b8b358 100755 --- a/.github/workflows/build_conda.yaml +++ b/.github/workflows/build_conda.yaml @@ -1,35 +1,35 @@ - -name: Conda - -on: - release: - types: ['released', 'prereleased'] - -# workflow_dispatch: # Un comment line if you also want to trigger action manually - -jobs: - conda_deployment_with_new_tag: - name: Conda deployment of package for platform ${{ matrix.os }} with Python ${{ matrix.python-version }} - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 - - name: Conda environment creation and activation - uses: conda-incubator/setup-miniconda@505e6394dae86d6a5c7fbb6e3fb8938e3e863830 - with: - python-version: "3.12" - environment-file: recipe/build-env.yml - activate-environment: build-environment - auto-update-conda: true - auto-activate-base: false - show-channel-urls: true - - name: Build and upload the conda packages - uses: uibcdf/action-build-and-upload-conda-packages@4940704d2be7906d3bda5b00e3c3e4472fd7808f - with: - meta_yaml_dir: recipe - overwrite: true - python-version: "3.12" - user: phygbu - label: main - token: ${{ secrets.ANACONDA }} # Replace with the right name of your secret + +name: Conda + +on: + release: + types: ['released', 'prereleased'] + +# workflow_dispatch: # Un comment line if you also want to trigger action manually + +jobs: + conda_deployment_with_new_tag: + name: Conda deployment of package for platform ${{ matrix.os }} with Python ${{ matrix.python-version }} + runs-on: ubuntu-latest + permissions: + contents: read + + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Conda environment creation and activation + uses: mamba-org/setup-micromamba@add3a49764cedee8ee24e82dfde87f5bc2914462 # v2.0.7 + with: + environment-file: recipe/build-env.yml + environment-name: build-environment + 
create-args: python=3.12 + init-shell: bash + - name: Build and upload the conda packages + uses: uibcdf/action-build-and-upload-conda-packages@4940704d2be7906d3bda5b00e3c3e4472fd7808f + with: + meta_yaml_dir: recipe + overwrite: true + python-version: "3.12" + user: phygbu + label: main + token: ${{ secrets.ANACONDA }} # Replace with the right name of your secret diff --git a/.github/workflows/run-tests-action.yaml b/.github/workflows/run-tests-action.yaml index 5e30a69c5..132d04740 100755 --- a/.github/workflows/run-tests-action.yaml +++ b/.github/workflows/run-tests-action.yaml @@ -1,92 +1,100 @@ -name: pytest -on: push -jobs: - run_pytest: - name: run-tests (${{ matrix.python-version }}, ${{ matrix.os }}) - runs-on: ${{ matrix.os }} - defaults: - run: - shell: bash -l {0} - strategy: - fail-fast: false - matrix: - python-version: ["3.11", "3.12","3.13"] - os: ["ubuntu-latest"] - steps: - - name: Check out repository code - uses: actions/checkout@v4 - - name: Install Conda environment with Micromamba - uses: conda-incubator/setup-miniconda@v3 - with: - environment-file: tests/test-env.yml - python: ${{ matrix.python-version }} - channels: conda-forge,phygbu - channel-priority: flexible - activate-environment: test-environment - auto-activate-base: false - - name: Conda information - run: | - conda info - conda list - conda config --show-sources - conda config --show - - name: install package - run: pip install --no-deps . 
- - name: Install headless server - run: | - sudo apt-get update - sudo apt-get install xvfb - sudo apt-get install qtbase5-dev - - name: Test with xvfb - run: | - xvfb-run --auto-servernum /usr/share/miniconda/envs/test-environment/bin/pytest -n 2 --cov-report= --cov=Stoner --junitxml pytest.xml - coverage xml - env: - TZ: Europe/London - LC_CTYPE: en_GB.UTF-8 - GH_ACTION: True - - name: Cleanup X11 server - uses: bcomnes/cleanup-xvfb@v1 - - name: Coveralls Parallel - uses: coverallsapp/github-action@v2 - with: - flag-name: run-${{ join(matrix.*, '-') }} - format: cobertura - github-token: ${{ secrets.GITHUB_TOKEN }} - - name: Upload Unit Test Results - if: always() - uses: actions/upload-artifact@v4 - with: - name: Unit Test Results (Python ${{ matrix.python-version }}) - path: pytest.xml - - name: Post Coveraage result to Codacy - run: | - export CODACY_PROJECT_TOKEN=${{ secrets.CODACY_PROJECT_TOKEN }} - bash <(curl -Ls https://coverage.codacy.com/get.sh) report -r coverage.xml - - publish-test-results: - name: "Publish Unit Tests Results" - needs: run_pytest - runs-on: ubuntu-latest - if: always() - - steps: - - name: Download Artifacts - uses: actions/download-artifact@v4 - with: - path: artifacts - - - name: Publish Unit Test Results - uses: EnricoMi/publish-unit-test-result-action@v2 - with: - files: artifacts/**/*.xml - - coverage-finish: - needs: run_pytest - runs-on: ubuntu-latest - steps: - - name: Coveralls Finished - uses: coverallsapp/github-action@v2 - with: - parallel-finished: true - carryforward: "run-1,run-2" +name: pytest +on: push +jobs: + run_pytest: + name: run-tests (${{ matrix.python-version }}, ${{ matrix.os }}) + runs-on: ${{ matrix.os }} + permissions: + contents: read + checks: write + defaults: + run: + shell: bash -l {0} + strategy: + fail-fast: false + matrix: + python-version: ["3.11", "3.12","3.13"] + os: ["ubuntu-latest"] + steps: + - name: Check out repository code + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 
# v4.3.1 + - name: Install Mamba environment + uses: mamba-org/setup-micromamba@add3a49764cedee8ee24e82dfde87f5bc2914462 # v2.0.7 + with: + environment-file: tests/test-env.yml + environment-name: test-environment + create-args: >- + python=${{ matrix.python-version }} + --channel-priority flexible + init-shell: bash + cache-downloads: true + cache-environment: true + - name: Mamba information + run: | + micromamba info + micromamba list + - name: install package + run: pip install --no-deps . + - name: Install headless server + run: | + sudo apt-get update + sudo apt-get install xvfb + sudo apt-get install qtbase5-dev + - name: Test with xvfb + run: | + xvfb-run --auto-servernum pytest -n 2 --cov-report= --cov=Stoner --junitxml pytest.xml + coverage xml + env: + TZ: Europe/London + LC_CTYPE: en_GB.UTF-8 + GH_ACTION: True + - name: Cleanup X11 server + uses: bcomnes/cleanup-xvfb@9e016c43bb8d73fe7d5933d2ef00fd770c1a7c50 # v1.0.9 + - name: Coveralls Parallel + uses: coverallsapp/github-action@5cbfd81b66ca5d10c19b062c04de0199c215fb6e # v2.3.7 + with: + flag-name: run-${{ join(matrix.*, '-') }} + format: cobertura + github-token: ${{ secrets.GITHUB_TOKEN }} + - name: Upload Unit Test Results + if: always() + uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 + with: + name: Unit Test Results (Python ${{ matrix.python-version }}) + path: pytest.xml + - name: Post Coveraage result to Codacy + run: | + export CODACY_PROJECT_TOKEN=${{ secrets.CODACY_PROJECT_TOKEN }} + bash <(curl -Ls https://coverage.codacy.com/get.sh) report -r coverage.xml + + publish-test-results: + name: "Publish Unit Tests Results" + needs: run_pytest + runs-on: ubuntu-latest + if: always() + permissions: + checks: write + pull-requests: write + + steps: + - name: Download Artifacts + uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0 + with: + path: artifacts + + - name: Publish Unit Test Results + uses: 
EnricoMi/publish-unit-test-result-action@c950f6fb443cb5af20a377fd0dfaa78838901040 # v2.23.0 + with: + files: artifacts/**/*.xml + + coverage-finish: + needs: run_pytest + runs-on: ubuntu-latest + permissions: + checks: write + steps: + - name: Coveralls Finished + uses: coverallsapp/github-action@5cbfd81b66ca5d10c19b062c04de0199c215fb6e # v2.3.7 + with: + parallel-finished: true + carryforward: "run-1,run-2" diff --git a/README.rst b/README.rst index 2e4989e51..52f61dbfa 100755 --- a/README.rst +++ b/README.rst @@ -199,7 +199,7 @@ making beta packages available. Development Version ------------------- -The current development version is currently on a stable branch and is version 0.11.x. The master branch contains work +The current development version is currently on a stable branch and is version 0.11.x. The main branch contains work in progress to migrate to using Pandas dataframes as the underlying data store - this is however largely broken! Build Status diff --git a/Stoner/analysis/functions.py b/Stoner/analysis/functions.py index 0e500a9c6..cf2682a56 100755 --- a/Stoner/analysis/functions.py +++ b/Stoner/analysis/functions.py @@ -175,7 +175,7 @@ def decompose( ycol = [ycol] if hysteretic: - from .Util import split_up_down # pylint: disable=import-outside-toplevel + from .utils import split_up_down # pylint: disable=import-outside-toplevel fldr = split_up_down(datafile, datafile.xcol) for grp in ["rising", "falling"]: diff --git a/Stoner/core/array.py b/Stoner/core/array.py index 756d2f0f6..f1bc90100 100755 --- a/Stoner/core/array.py +++ b/Stoner/core/array.py @@ -481,7 +481,7 @@ def _col_args( break else: # User didn't set any values, setas will win no_guess = kwargs.get("no_guess", False) - ret = AttributeStore(self.setas._get_cols(no_guess=no_guess)) + ret = AttributeStore(self.setas._get_cols(no_guess=no_guess, startx=kwargs.get("startx", 0))) force_list = kwargs.get("force_list", not scalar) for c in list(cols.keys()): if isnone(cols[c]): # Not defined, 
fallback on setas diff --git a/Stoner/formats/data/zip.py b/Stoner/formats/data/zip.py index 4045cdcb4..8023fbacc 100755 --- a/Stoner/formats/data/zip.py +++ b/Stoner/formats/data/zip.py @@ -1,10 +1,15 @@ # -*- coding: utf-8 -*- """Loader for zip files.""" +import fnmatch +import json import pathlib import zipfile as zf from os import path from traceback import format_exc +import chardet +import pandas as pd + from ...compat import path_types, str2bytes from ...core.data import Data from ...core.exceptions import StonerLoadError @@ -14,6 +19,8 @@ from ..decorators import register_loader, register_saver from ..utils.zip import test_is_zip +from ...tools.json import flatten_json, find_paths, find_parent_dicts + def _split_filename(filename: Filename, **kwargs: Kwargs) -> Filename: """Try to get the member and filename parts.""" @@ -27,7 +34,63 @@ def _split_filename(filename: Filename, **kwargs: Kwargs) -> Filename: return filename -@register_loader(patterns=(".zip", 16), mime_types=("application/zip", 16), name="ZippedFile", what="Data") +@register_loader(patterns=(".mlseq", 16), mime_types=("application/zip", 16), name="MeasureLinkFile", what="Data") +def load_measure_linkfile(new_data: Data, *args: Args, **kwargs: Kwargs) -> Data: + """Load a MeasureLink sequence file and assemble as a data object. + + Args: + new_data (Data): + Data instance into whoch to load the new data. + *args: + Other positional arguments passed to get_filename. + + Keyword Arguments: + **kwargs: + Other keyword arguments passed to get_filename. + + Returns: + (Data): + Loaded Data instance. + + Notes: + `.mlseq` files are actually zip archives containing a collection of json files and a flat list of sub-folders + The subfolders contain json for the node operations and optionally (if the key HasData is True) a csv file. 
+ """ + filename, args, kwargs = get_filename(args, kwargs) + if not test_is_zip(filename): + raise StonerLoadError("Must be a zip file to load as a measurement sequence.") + with zf.ZipFile(filename, "r") as seq: + if "FileInfo.json" not in seq.namelist(): + raise StonerLoadError("Missing the Measurelink Sequence FileInfo.json entry") + with seq.open("FileInfo.json", "r") as fileinfo_json: + fileinfo = fileinfo_json.read() + fileinfo = fileinfo.decode(chardet.detect(fileinfo)["encoding"]) + fileinfo = json.loads(fileinfo) + new_data.metadata.update(flatten_json(fileinfo)) + with seq.open("Model.json", "r") as model_json: + model = model_json.read() + model = model.decode(chardet.detect(model)["encoding"]) + model = json.loads(model) + # new_data.metadata.update(flatten_json(model)) + for ix, pth in enumerate(fnmatch.filter(seq.namelist(), "*.csv")): + with seq.open(pth) as dataframe: + df = pd.read_csv(dataframe) + if ix == 0: + data = df + else: + data = pd.concat([data, df]) + + data = data.select_dtypes(include="number") + new_data.data = data.values + new_data.column_headers = list(data.columns) + + has_data = find_paths(model, "HasData", True) + + new_data.filename = filename + return new_data + + +@register_loader(patterns=(".zip", 24), mime_types=("application/zip", 16), name="ZippedFile", what="Data") def load_zipfile(new_data: Data, *args: Args, **kwargs: Kwargs) -> Data: """Load a file from the zip file, opening it as necessary. 
diff --git a/Stoner/formats/utils/zip.py b/Stoner/formats/utils/zip.py index 06fb1ad90..7fc4ca46b 100755 --- a/Stoner/formats/utils/zip.py +++ b/Stoner/formats/utils/zip.py @@ -23,7 +23,13 @@ def test_is_zip(filename, member=""): """ if not filename or str(filename) == "": return False - if zf.is_zipfile(filename): + if isinstance(filename, (bytes, bytearray)) and b"\x00" in filename: + return False + try: + is_zip = zf.is_zipfile(filename) + except (ValueError, TypeError, OSError): + return False + if is_zip: return filename, member part = path.basename(filename) newfile = path.dirname(filename) diff --git a/Stoner/tools/file.py b/Stoner/tools/file.py index e4e9e864a..f0ae36bca 100755 --- a/Stoner/tools/file.py +++ b/Stoner/tools/file.py @@ -67,7 +67,13 @@ def test_is_zip(filename, member=""): """ if not filename or str(filename) == "": return False - if zipfile.is_zipfile(filename): + if isinstance(filename, (bytes, bytearray)) and b"\x00" in filename: + return False + try: + is_zip = zipfile.is_zipfile(filename) + except (ValueError, TypeError, OSError): + return False + if is_zip: return filename, member part = os.path.basename(filename) newfile = os.path.dirname(filename) diff --git a/Stoner/tools/json.py b/Stoner/tools/json.py new file mode 100755 index 000000000..477b604d9 --- /dev/null +++ b/Stoner/tools/json.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +"""Tools for manipulating json.""" + + +def flatten_json(data, parent_key=""): + """Flatten a nested JSON-like structure into a dotted-key dictionary. + + Args: + data: The JSON-like structure to flatten. May contain dictionaries, + lists, and scalar values. + parent_key: The prefix to prepend to keys in the flattened output. + Used internally during recursion. + + Returns: + dict: A flat dictionary mapping dotted/bracketed key paths to scalar + values. 
+ + This function recursively flattens nested dictionaries and lists into a + single-level dictionary where: + + * Nested dictionary keys are joined using dot notation. + * List indices are represented using bracket notation, e.g. "[0]". + * Scalar values (str, int, float, bool, None) become the final values. + + The function is pure: it does not mutate input data and does not rely on + side effects. Each recursive call returns a new dictionary, and the caller + merges results. + + Examples: + >>> flatten_json({"a": {"b": 1}, "c": [10, 20]}) + {'a.b': 1, 'c[0]': 10, 'c[1]': 20} + + >>> flatten_json({"x": {"y": {"z": True}}}) + {'x.y.z': True} + """ + items = {} + + match data: + case dict(): + for key, value in data.items(): + new_key = f"{parent_key}.{key}" if parent_key else key + items.update(flatten_json(value, new_key)) + + case list(): + for idx, value in enumerate(data): + new_key = f"{parent_key}[{idx}]" + items.update(flatten_json(value, new_key)) + + case _: + items[parent_key] = data + + return items + + +def find_paths(data, target_key, target_value, path=None): + """Yield all paths leading to a key/value pair in a nested structure. + + Args: + data: The JSON-like structure to search. May contain dictionaries, + lists, and scalar values. + target_key: The dictionary key to match. + target_value: The value that must be associated with `target_key` + for a path to be considered a match. + path: Internal recursion parameter. A list representing the path + taken so far. Users should not supply this argument. + + Yields: + list[str]: A list of path components representing the full ancestry + from the root to the matching key/value pair. + + + This function recursively traverses a nested JSON-like structure + (dictionaries, lists, and scalar values) and yields every path where + `target_key` equals `target_value`. 
Paths are returned as lists of + components, where dictionary keys are plain strings and list indices + are represented as bracketed strings (e.g., "[0]"). + + Examples: + >>> data = {"A": {"B": {"HasData": True}}} + >>> list(find_paths(data, "HasData", True)) + [['A', 'B', 'HasData']] + + >>> data = {"items": [{"HasData": True}, {"HasData": False}]} + >>> list(find_paths(data, "HasData", True)) + [['items', '[0]', 'HasData']] + """ + if path is None: + path = [] + + match data: + case dict(): + for key, value in data.items(): + new_path = path + [key] + if key == target_key and value == target_value: + yield new_path + yield from find_paths(value, target_key, target_value, new_path) + + case list(): + for idx, value in enumerate(data): + new_path = path + [idx] + yield from find_paths(value, target_key, target_value, new_path) + + case _: + return + + +def find_parent_dicts(data, target_key, target_value): + """Yield dictionaries that contain a matching key/value pair. + + Args: + data: The JSON-like structure to search. May contain dictionaries, + lists, and scalar values. + target_key: The dictionary key to match. + target_value: The required value associated with `target_key`. + + Yields: + dict: A dictionary that contains the matching key/value pair. + + This function recursively searches a nested JSON-like structure and + yields every dictionary in which `target_key` exists and its value + equals `target_value`. Unlike `find_paths`, this function returns the + dictionary object itself, allowing callers to inspect sibling keys or + modify the parent structure. 
+ + Examples: + >>> data = {"A": {"B": {"HasData": True, "Other": 5}}} + >>> list(find_parent_dicts(data, "HasData", True)) + [{'HasData': True, 'Other': 5}] + + >>> data = {"items": [{"HasData": True}, {"HasData": False}]} + >>> list(find_parent_dicts(data, "HasData", True)) + [{'HasData': True}] + """ + match data: + case dict(): + if target_key in data and data[target_key] == target_value: + yield data + for value in data.values(): + yield from find_parent_dicts(value, target_key, target_value) + + case list(): + for value in data: + yield from find_parent_dicts(value, target_key, target_value) + + case _: + return + + +if __name__ == "__main__": + data = { + "key1": {"subkey1": 1, "subkey2": 2}, + "key2": ["value2.1", {"subkey3": "value2.2.1", "subkey4": "value2.2.2", "HasData": True}], + } + output = flatten_json(data) + output2 = [pth for pth in find_paths(data, "HasData", True)] + output3 = [pth for pth in find_parent_dicts(data, "HasData", True)] diff --git a/doc/readme.rst b/doc/readme.rst index 2e4989e51..52f61dbfa 100755 --- a/doc/readme.rst +++ b/doc/readme.rst @@ -199,7 +199,7 @@ making beta packages available. Development Version ------------------- -The current development version is currently on a stable branch and is version 0.11.x. The master branch contains work +The current development version is currently on a stable branch and is version 0.11.x. The main branch contains work in progress to migrate to using Pandas dataframes as the underlying data store - this is however largely broken! 
Build Status diff --git a/recipe/meta.yaml b/recipe/meta.yaml index ebf6a6ef5..995fa2f32 100755 --- a/recipe/meta.yaml +++ b/recipe/meta.yaml @@ -44,6 +44,7 @@ requirements: - python-dateutil >=2.8 - looseversion >=1.0 - rosettasciio + - chardet diff --git a/requirements.txt b/requirements.txt index 8614098a0..f24731158 100755 --- a/requirements.txt +++ b/requirements.txt @@ -18,4 +18,5 @@ dill>=0.2.8 urllib3>=1.26 dateutil>=2.8 seaborn>=0.13 -looseversion>=1.0 \ No newline at end of file +looseversion>=1.0 +chardet>=5.2.0 diff --git a/sample-data/Rxx_Rxy_v_T.mlseq b/sample-data/Rxx_Rxy_v_T.mlseq new file mode 100755 index 000000000..a4d644733 Binary files /dev/null and b/sample-data/Rxx_Rxy_v_T.mlseq differ diff --git a/sample-data/Sequence~002.mlseq b/sample-data/Sequence~002.mlseq new file mode 100755 index 000000000..acdef7cbc Binary files /dev/null and b/sample-data/Sequence~002.mlseq differ diff --git a/tests/Stoner/folders/test_Folders.py b/tests/Stoner/folders/test_Folders.py new file mode 100755 index 000000000..2358cb869 --- /dev/null +++ b/tests/Stoner/folders/test_Folders.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- +""" +test_Folders.py + +Created on Mon Jul 18 14:13:39 2016 + +@author: phygbu +""" + + +import fnmatch +import os +import os.path as path +import re +import sys +import tempfile +from copy import copy + +import matplotlib.pyplot as plt +import numpy as np +import pytest +from numpy import ceil + +from Stoner import Data, DataFolder, __homepath__ +from Stoner.compat import Hyperspy_ok +from Stoner.core.base import RegexpDict +from Stoner.folders import PlotFolder +from Stoner.folders.core import BaseFolder + +pth = __homepath__ / ".." 
+sys.path.insert(0, pth) + +"""Path to sample Data File""" +datadir = pth / "sample-data" + + +def test_Folders(): + fldr = DataFolder(datadir, debug=False, recursive=False) + if not Hyperspy_ok: + del fldr[".*emd$"] + fl = len(fldr) + skip = 1 if Hyperspy_ok else 2 + datfiles = fnmatch.filter(os.listdir(datadir), "*.dat") + length = ( + len([i for i in os.listdir(datadir) if path.isfile(os.path.join(datadir, i))]) - skip + ) # don't coiunt TDMS index + assert length == fl, "Failed to initialise DataFolder from sample data {} {} {} {}".format( + fl, length, skip, Hyperspy_ok + ) + assert fldr.index(path.basename(fldr[-1].filename)) == fl - 1, "Failed to index back on filename" + assert fldr.count(path.basename(fldr[-1].filename)) == 1, "Failed to count filename with string" + assert fldr.count("*.dat") == len(datfiles), "Count with a glob pattern failed" + assert len(fldr[::2]) == ceil(len(fldr) / 2.0), "Failed to get the correct number of elements in a folder slice" + + +def test_loader_opts(): + fldr7 = DataFolder( + path.join(datadir, "NLIV"), pattern=re.compile(r".*at (?P[0-9\-\.]*)\.txt"), read_means=True + ) + x = fldr7.metadata.slice(["field", "Voltage", "Current"], output="Data") + assert x.span("field") == (-0.05, 0.04), "Extract from name pattern and slice into metadata failed." + assert all(x // "Current" < 0) and all(x // "Current" > -1e-20), "Extract means failed." + assert list(fldr7.not_loaded) == [], "Not loaded attribute failed." + fldr7.unload(0) + assert len(list(fldr7.not_loaded)) == 1, "Unload by index failed." + fldr7.unload() + assert len(list(fldr7.not_loaded)) == len(fldr7), "Unload all failed." + + def add_col(d): + d.add_column(np.ones(len(d)) * d["field"], header="field") + + fldr7.each(add_col) + fldr7.concatenate() + assert fldr7[0].shape == (909, 4), "Concatenate failed." 
+ + +def test_groups_methods(): + fldr = DataFolder(datadir, debug=False, recursive=False) + if not Hyperspy_ok: + del fldr[".*emd$"] + fldr.group("Loaded as") + fldr.groups.keep(["QDFile", "OpenGDAFile"]) + assert fldr.shape == (0, {"OpenGDAFile": (1, {}), "QDFile": (4, {})}), "groups.keep method failed on folder" + + +def test_discard_earlier(): + fldr2 = DataFolder(path.join(pth, "tests/Stoner/folder_data"), pattern="*.dat", discard_earlier=True) + fldr3 = DataFolder(path.join(pth, "tests/Stoner/folder_data"), pattern="*.dat") + assert len(fldr2) == 1, "Folder created with disacrd_earlier has wrong length ({})".format(len(fldr2)) + assert len(fldr3) == 5, "Folder created without disacrd_earlier has wrong length ({})".format(len(fldr3)) + fldr3.keep_latest() + assert list(fldr2.ls) == list( + fldr3.ls + ), "Folder.keep_latest didn't do the same as discard_earliest in constructor." + + +def test_clear_and_attrs(): + fldr = DataFolder(datadir, debug=False, recursive=False) + if not Hyperspy_ok: + del fldr[".*emd$"] + fldr2 = fldr.clone + fldr2.clear() + assert fldr2.shape == (0, {}), "Failed to clear" + fldr2.files = fldr.files + fldr2.groups = fldr.groups + assert fldr2.shape == fldr.shape, "Failed to write to files and groups attri" + fldr.each.debug = True + assert fldr[0].debug, "Setting an attribute on fldr didn't propagate to the contents" + del fldr.each.debug + assert not hasattr(fldr[0], "hello"), "Failed to delete attribute from DataFolder" + with pytest.raises(AttributeError): + del fldr.debug + + +def test_Operators(): + fldr = DataFolder(datadir, debug=False, recursive=False) + if not Hyperspy_ok: + del fldr[".*emd$"] + fl = len(fldr) + d = Data(np.ones((100, 5))) + fldr += d + assert fl + 1 == len(fldr), "Failed += operator on DataFolder" + fldr2 = fldr + fldr + assert (fl + 1) * 2 == len(fldr2), "Failed + operator with DataFolder on DataFolder" + fldr -= "Untitled" + assert len(fldr) == fl, "Failed to remove Untitled-0 from DataFolder by name." 
+ fldr -= "New-XRay-Data.dql" + assert fl - 1 == len(fldr), "Failed to remove NEw Xray data by name." + fldr += "New-XRay-Data.dql" + assert len(fldr) == fl, "Failed += operator with string on DataFolder" + fldr /= "Loaded as" + assert len(fldr["QDFile"]) == 4, "Failoed to group folder by Loaded As metadata with /= operator." + assert isinstance(fldr["QDFile", "Byapp"], Data), "Indexing group and then common metadata failed" + fldr = DataFolder(datadir, debug=False, recursive=False) + fldr2 = DataFolder(path.join(datadir, "NLIV"), pattern="*.txt") + fldr2.group(lambda x: "zero" if x["iterator"] % 2 == 0 else "one") + fldr3 = fldr + fldr2 + assert fldr3.shape == (54, {"one": (9, {}), "zero": (7, {})}), "Adding two DataFolders with groups failed" + fldr4 = fldr3 - fldr2 + fldr4.prune() + assert fldr4.shape == fldr.shape, "Failed to subtract one DataFolder from another :{}".format(fldr4.shape) + del fldr2["one"] + assert fldr2.shape == (0, {"zero": (7, {})}), "Delitem with group failed" + fldr2.key = path.basename(fldr2.key) + assert repr(fldr2) == ( + "DataFolder(NLIV) with pattern ('*.txt',) has 0 files and 1 groups\n\tDataFolder(zero) with pattern " + + "['*.txt'] has 7 files and 0 groups" + ), "Representation methods failed" + fldr = DataFolder(datadir, debug=False, recursive=False) + names = list(fldr.ls)[::2] + fldr -= names + assert len(fldr) == 27, "Failed to delete from a sequence" + with pytest.raises(TypeError): + fldr - 0.34 + with pytest.raises(RuntimeError): + fldr - Data() + with pytest.raises(RuntimeError): + fldr - "Wiggle" + + +def test_Base_Operators(): + fldr = DataFolder(datadir, debug=False, recursive=False) + if not Hyperspy_ok: + del fldr[".*emd$"] + for d in fldr: + _ = d["Loaded as"] + fldr = BaseFolder(fldr) + fl = len(fldr) + d = Data(np.ones((100, 5))) + fldr += d + assert fl + 1 == len(fldr), "Failed += operator on DataFolder" + fldr2 = fldr + fldr + assert (fl + 1) * 2 == len(fldr2), "Failed + operator with DataFolder on DataFolder" + 
fldr -= "Untitled" + assert len(fldr) == fl, "Failed to remove Untitled-0 from DataFolder by name." + fldr -= "New-XRay-Data.dql" + assert fl - 1 == len(fldr), "Failed to remove NEw Xray data by name." + if Hyperspy_ok: + del fldr["1449 37.0 kx.emd"] + fldr /= "Loaded as" + assert len(fldr["QDFile"]) == 4, "Failoed to group folder by Loaded As metadata with /= operator." + fldr = DataFolder(datadir, debug=False, recursive=False) + for d in fldr: + _ = d["Loaded as"] + fldr = BaseFolder(fldr) + fldr2 = DataFolder(path.join(datadir, "NLIV"), pattern="*.txt") + fldr2.group(lambda x: "zero" if x["iterator"] % 2 == 0 else "one") + fldr3 = fldr + fldr2 + assert fldr3.shape == (54, {"one": (9, {}), "zero": (7, {})}), "Adding two DataFolders with groups failed" + fldr4 = fldr3 - fldr2 + fldr4.prune() + assert fldr4.shape == fldr.shape, "Failed to subtract one DataFolder from another :{}".format(fldr4.shape) + del fldr2["one"] + assert fldr2.shape == (0, {"zero": (7, {})}), "Delitem with group failed" + fldr2.key = path.basename(fldr2.key) + assert repr(fldr2) == ( + "DataFolder(NLIV) with pattern ('*.txt',) has 0 files and 1 groups\n\tDataFolder(zero) with pattern" + + " ['*.txt'] has 7 files and 0 groups" + ), "Representation methods failed" + fldr = DataFolder(datadir, debug=False, recursive=False) + names = list(fldr.ls)[::2] + fldr -= names + assert len(fldr) == 27, "Failed to delete from a sequence" + with pytest.raises(TypeError): + fldr - 0.34 + with pytest.raises(RuntimeError): + fldr - Data() + with pytest.raises(RuntimeError): + fldr - "Wiggle" + + +def test_Properties(): + fldr = DataFolder(datadir, debug=False, recursive=False) + if not Hyperspy_ok: + del fldr[".*emd$"] + assert fldr.mindepth == 0, "Minimum depth of flat group n ot equal to zero." 
+ fldr /= "Loaded as" + grps = list(fldr.lsgrp) + skip = 0 if Hyperspy_ok else 1 + assert len(grps) == 28 - skip, f"Length of lsgrp not as expected: {len(grps)} not {27-skip}" + fldr.debug = True + fldr = fldr + assert fldr["XRDFile"][0].debug, "Setting debug on folder failed!" + fldr.debug = False + fldr["QDFile"].group("Byapp") + assert fldr.trunkdepth == 1, "Trunkdepth failed" + assert fldr.mindepth == 1, "mindepth attribute of folder failed." + assert fldr.depth == 2, "depth attribute failed." + fldr = DataFolder(datadir, debug=False, recursive=False) + fldr += Data() + skip = 1 if Hyperspy_ok else 2 + assert len(list(fldr.loaded)) == 1, "loaded attribute failed {}".format(len(list(fldr.loaded))) + assert len(list(fldr.not_empty)) == len(fldr) - skip, "not_empty attribute failed." + fldr -= "Untitled" + assert not fldr.is_empty, "fldr.is_empty failed" + fldr = DataFolder(datadir, debug=False, recursive=False) + objects = copy(fldr.objects) + fldr.objects = dict(objects) + assert isinstance(fldr.objects, RegexpDict), "Folder objects not reset to regexp dictionary" + fldr.objects = objects + assert isinstance(fldr.objects, RegexpDict), "Setting Folder objects mangled type" + fldr.type = Data() + assert issubclass(fldr.type, Data), "Setting type by instance of class failed" + + +def test_methods(): + sliced = np.array( + [ + "DataFile", + "MDAASCIIFile", + "BNLFile", + "DataFile", + "DataFile", + "DataFile", + "DataFile", + "DataFile", + "MokeFile", + "EasyPlotFile", + "DataFile", + "DataFile", + "DataFile", + ], + dtype=" 0.5 + selflittle.del_rows() + assert selflittle.shape == (30, 3) + + +def test_iterators(): + global selfd, selfd1, selfd2, selfd3, selfd4 + for i, c in enumerate(selfd.columns()): + assert all(selfd.column(i) == c), "Iterating over DataFile.columns not the same as direct indexing column" + for j, r in enumerate(selfd.rows()): + assert all(selfd[j] == r), "Iteratinf over DataFile.rows not the same as indexed access" + for k, r in 
enumerate(selfd): + pass + assert j == k, "Iterating over DataFile not the same as DataFile.rows" + assert selfd.data.shape == (j + 1, i + 1), "Iterating over rows and columns not the same as data.shape" + + +def test_dir(): + global selfd, selfd1, selfd2, selfd3, selfd4 + assert selfd.dir("S") == ["Stoner.class"], f"Dir method failed: dir was {selfd.dir()}" + bad_keys = set( + [ + "__class_getitem__", + "__metaclass__", + "iteritems", + "iterkeys", + "itervalues", + "__ge__", + "__gt__", + "__init_subclass__", + "__le__", + "__lt__", + "__reversed__", + "__slots__", + "_abc_negative_cache", + "_abc_registry", + "_abc_negative_cache_version", + "_abc_cache", + "_abc_impl", + "__annotations__", + "__getstate__", + "__static_attributes__", + "__firstlineno__", + '__annotate_func__', + '__annotations_cache__', + ] + ) + attrs = set(dir(selfd)) - bad_keys + assert len(attrs) == 214, "DataFile.__dir__ failed." + selfd.setas.clear() + attrs = set(dir(selfd)) - bad_keys + assert len(attrs) == 212, "DataFile.__dir__ failed." + + +def test_filter(): + global selfd, selfd1, selfd2, selfd3, selfd4 + selfd._push_mask() + ix = np.argmax(selfd.x) + selfd.filter(lambda r: r.x <= 50) + assert np.max(selfd.x) == 50, "Failure of filter method to set mask" + assert np.isnan(selfd.x[ix]), "Failed to mask maximum value" + selfd._pop_mask() + assert selfd2.select(Temp__not__gt=150).shape == (839, 3), "Seect method failure." + assert selfd.select(lambda r: r.x < 30) == selfd.select(X__lt=30), "Select method as callable failed." + assert selfd.select(__=lambda r: r.x < 30) == selfd.select(X__lt=30), "Select method as callable failed." 
def test_properties():
    """Check the q and p angle properties computed from xyz-tagged columns."""
    global selfd, selfd1, selfd2, selfd3, selfd4
    selflittle = Data()
    p = np.linspace(0, np.pi, 91)
    q = np.linspace(0, 2 * np.pi, 91)
    r = np.cos(p)
    x = r * np.sin(q)
    y = r * np.cos(q)
    selflittle.data = np.column_stack((x, y, r))
    selflittle.setas = "xyz"
    q_ang = np.round(selflittle.q / np.pi, decimals=2)
    p_ang = np.round(selflittle.p / np.pi, decimals=2)
    assert np.max(q_ang) == 1.0, "Data.q failure"
    assert np.max(p_ang) == 0.5, "Data.p failure"
    assert np.min(p_ang) == -0.5, "Data.p failure"


def test_methods():
    """Exercise the bulk of the DataFile manipulation API on clones of selfd."""
    global selfd, selfd1, selfd2, selfd3, selfd4
    d = selfd.clone
    d &= np.where(d.x < 50, 1.0, 0.0)
    d.rename(2, "Z-Data")
    d.setas = "xyz"
    assert all(d.unique(2) == np.array([0, 1])), f"Unique values failed: {d.unique(2)}"
    d = selfd.clone
    d.insert_rows(10, np.zeros((2, 2)))
    assert len(d) == 102, "Failed to insert extra rows"
    assert d[9, 0] == 10 and d[10, 0] == 0 and d[12, 0] == 11, "Failed to insert rows properly."
    d = selfd.clone
    d.add_column(np.ones(len(d)), replace=False, header="added")
    assert d.shape[1] == selfd.shape[1] + 1, "Adding a column with replace=False did not add a column."
    assert np.all(d.data[:, -1] == np.ones(len(d))), "Didn't add the new column to the end of the data."
    assert len(d.column_headers) == len(selfd.column_headers) + 1, "Column headers isn't bigger by one"
    assert d.column_headers == selfd.column_headers + [
        "added",
    ], "Column header not added correctly"
    d = selfd.clone
    d.add_column(selfd.x)
    assert np.all(d.x == d[:, 2]), "Adding a column as a DataArray with column headers didn't work"
    assert (
        d.x.column_headers[0] == d.column_headers[2]
    ), "Adding a column as a DataArray with column headers didn't work"
    e = d.clone
    d.swap_column([(0, 1), (0, 2)])
    assert d.column_headers == [e.column_headers[x] for x in [2, 0, 1]], f"Swap column test failed: {d.column_headers}"
    e = selfd(setas="yx")
    assert e.shape == selfd.shape and e.setas[0] == "y", "Failed on a DataFile.__call__ test"
    spl = len(repr(selfd).split("\n"))
    # NOTE(review): fixed ``assert spl, 105 == f"..."`` — the misplaced comma
    # made the assert only test truthiness of ``spl`` and never compare to 105.
    assert spl == 105, f"Failed to do repr function got {spl} lines"
    e = selfd.clone
    e = e.add_column(e.x, header=e.column_headers[0])
    e.del_column(duplicates=True)
    assert e.shape == (100, 2), "Deleting duplicate columns failed"
    e = selfd2.clone
    e.reorder_columns([2, 0, 1])
    assert e.column_headers == [selfd2.column_headers[x] for x in [2, 0, 1]], "Failed to reorder columns: {}".format(
        e.column_headers
    )
    d = selfd.clone
    d.del_rows(0, 10.0)
    assert d.shape == (99, 2), f"Del Rows with value and column failed - actual shape {d.shape}"
    d = selfd.clone
    d.del_rows(0, (10.0, 20.0))
    assert d.shape == (89, 2), f"Del Rows with tuple and column failed - actual shape {d.shape}"
    d = selfd.clone
    d.mask[::2, 0] = True
    d.del_rows()
    assert d.shape == (50, 2), f"Del Rows with mask set - actual shape {d.shape}"
    d = selfd.clone
    d[::2, 1] = nan
    d.del_nan(1)
    assert d.shape == (50, 2), f"del_nan with explicit column set failed shape was {d.shape}"
    d = selfd.clone
    d[::2, 1] = nan
    d.del_nan(0)
    assert d.shape == (100, 2), f"del_nan with explicit column set and not nans failed shape was {d.shape}"
    d = selfd.clone
    d[::2, 1] = nan
    d.setas = ".y"
    d.del_nan()
    assert d.shape == (50, 2), f"del_nan with columns from setas failed shape was {d.shape}"
    d = selfd.clone
    d2 = selfd.clone
    d2.data = d2.data[::-1, :]
    assert d.sort(reverse=True) == d2, "Sorting reverse not the same as manually reversed data."
    d = selfd.clone
    d.mask[::2] = True
    assert d.count() == 50
    assert d.count(9895) == 1
    assert d.count(100, col="X") == 1
    assert selfd.search("X", [98.0, 100]).shape == (2, 2)
    assert selfd.search("X", 50.1, accuracy=0.2, columns=0) == 50.0
    with pytest.raises(RuntimeError):
        selfd.search("X", "Y")
    assert selfd.section(x=(48, 52), accuracy=0.2).shape == (5, 2)
    assert selfd.section(y=(1000, 2000))[0, 0] == 33.0
    assert selfd.select({"X-Data__lt": 50}).shape == (49, 2)
    assert selfd.select({"X-Data": 50})[0, 0] == 50.0
    dd = selfd.clone
    dd.add_column(dd.x % 2, "Channel")
    fldr = dd.split("Channel")
    assert fldr.shape == (2, {})
    assert dd.split("Channel", lambda d: d.x % 3).shape == (0, {0.0: (3, {}), 1.0: (3, {})})
    dd = selfd.clone
    assert dd.split(lambda d: d.x % 3).shape == (3, {})
    assert len(dd.split(lambda d: 1)) == 1
    shp1 = dd.split(lambda d: np.sqrt(d.x) == np.round(np.sqrt(d.x))).shape
    shp2 = dd.split(lambda d: np.sqrt(d.x) == int(np.sqrt(d.x))).shape
    assert shp1 == shp2


def test_metadata_save():
    """Round-trip mixed-type metadata through save/load and compare with a LabVIEW TDI file."""
    global selfd, selfd1, selfd2, selfd3, selfd4
    local = path.dirname(__file__)
    t = np.arange(12).reshape(3, 4)  # set up a test data file with mixed metadata
    t = Data(t)
    t.column_headers = ["1", "2", "3", "4"]
    metitems = [
        True,
        1,
        0.2,
        {"a": 1, "b": "abc"},
        (1, 2),
        np.arange(3),
        [1, 2, 3],
        "abc",  # all types accepted
        r"\\abc\cde",
        1e-20,  # extra tests
        [1, (1, 2), "abc"],  # list with different types
        [[[1]]],  # nested list
        None,  # None value
    ]
    metnames = ["t" + str(i) for i in range(len(metitems))]
    for k, v in zip(metnames, metitems):
        t[k] = v
    t.save(path.join(local, "mixedmetatest.dat"))
    tl = Data(
        path.join(local, "mixedmetatest.txt")
    )  # will change extension to txt if not txt or tdi, is this what we want?
    t2 = selfd4.clone  # check that python tdi save is the same as labview tdi save
    t2.save(path.join(local, "mixedmetatest2.txt"))
    t2l = Data(path.join(local, "mixedmetatest2.txt"))
    for orig, load in [(t, tl), (t2, t2l)]:
        for k in ["Loaded as", "TDI Format"]:
            orig[k] = load[k]
        assert np.allclose(orig.data, load.data)
        assert orig.column_headers == load.column_headers
        _ = load.metadata ^ orig.metadata
        assert load.metadata == orig.metadata, "Metadata not the same on round tripping to disc"
    # os.remove(path.join(local, "mixedmetatest.txt")) #clear up
    # os.remove(path.join(local, "mixedmetatest2.txt"))


if __name__ == "__main__":  # Run some tests manually to allow debugging
    pytest.main(["--pdb", __file__])


# --- new file: tests/Stoner/test_FileFormats.py -------------------------------
# -*- coding: utf-8 -*-
"""
test_FileFormats.py
Created on Tue Jan 07 22:05:55 2014

@author: phygbu
"""
# NOTE(review): the docstring above originally said "test_Core.py" — a
# copy-paste slip; corrected to match this file's actual name.

import io
import pathlib
import sys
import urllib.request  # NOTE(review): was "import urllib" — urllib.request.urlopen is used below

import pytest

from Stoner import Data, ImageFile, __datapath__, __homepath__
from Stoner.compat import Hyperspy_ok
from Stoner.core.exceptions import StonerLoadError, StonerUnrecognisedFormat
from Stoner.formats.attocube import AttocubeScan
from Stoner.formats.maximus import MaximusStack
from Stoner.tools.file import clear_routine, get_saver

pth = __homepath__ / ".."
datadir = __datapath__


def setup_module():
    """Put the repository root on sys.path so sample plugins import."""
    sys.path.insert(0, str(pth))


def teardown_module():
    """Undo the sys.path change made in setup_module."""
    sys.path.remove(str(pth))


def list_files():
    """Enumerate the sample-data files to parametrize test_one_file with."""
    skip_files = set([])  # HDF5 loader not working Python 3.5
    incfiles = list(set(datadir.glob("*")) - skip_files)
    incfiles = [x for x in incfiles if x.suffix != ".tdms_index"]
    incfiles = [x for x in incfiles if not x.is_dir()]

    if not Hyperspy_ok:
        print("hyperspy too old, skipping emd file for test")
        incfiles = [x for x in incfiles if not x.name.strip().lower().endswith(".emd")]

    return sorted(incfiles)


listed_files = list_files()


@pytest.mark.parametrize("filename", listed_files)
def test_one_file(tmpdir, filename):
    """Load each sample file and round-trip it through its native saver if one exists."""
    loaded = Data(filename, debug=False)
    assert isinstance(loaded, Data), f"Failed to load {filename.name} correctly."
    try:
        # Raises StonerLoadError (caught below) when the format has no saver.
        saver = get_saver(loaded["Loaded as"])
        pth = pathlib.Path(tmpdir) / filename.name
        _, name, ext = pth.parent, pth.stem, pth.suffix
        pth2 = pathlib.Path(tmpdir) / f"{name}-2{ext}"
        loaded.save(pth, as_loaded=True)
        assert pth.exists() or pathlib.Path(loaded.filename).exists(), f"Failed to save as {pth}"
        pathlib.Path(loaded.filename).unlink()
        loaded.save(pth2, as_loaded=loaded["Loaded as"])
        # NOTE(review): message previously reported pth instead of pth2.
        assert pth2.exists() or pathlib.Path(loaded.filename).exists(), f"Failed to save as {pth2}"
        pathlib.Path(loaded.filename).unlink()
    except StonerLoadError:
        pass


def test_csvfile():
    """Load a headerless CSV with an explicit filetype and column mapping."""
    csv = Data(
        datadir / "working" / "CSVFile_test.dat", filetype="JustNumbers", column_headers=["Q", "I", "dI"], setas="xye"
    )
    assert csv.shape == (167, 3), "Failed to load CSVFile from text"


def test_attocube_scan(tmpdir):
    """Round-trip an AttocubeScan through hdf5, tiff and _marshall."""
    tmpdir = pathlib.Path(tmpdir)
    scandir = datadir / "attocube_scan"
    scan1 = AttocubeScan("SC_085", scandir, regrid=False)
    scan2 = AttocubeScan(85, scandir, regrid=False)
    assert scan1 == scan2, "Loading scans by number and string not equal"

    pth = tmpdir / f"SC_{scan1.scan_no:03d}.hdf5"
    scan1.to_hdf5(pth)

    scan3 = AttocubeScan.read_hdf5(pth)

    assert pth.exists(), f"Failed to save scan as {pth}"
    if scan1 != scan3:  # dump diagnostics before the assertion below fires
        print("A" * 80)
        print(scan1.layout, scan3.layout)
        for grp in scan1.groups:
            print(scan1[grp].metadata.all_by_keys ^ scan3[grp].metadata.all_by_keys)
        print(scan1.shape)
    assert scan1.layout == scan3.layout, "Roundtripping scan through hdf5 failed"
    pth.unlink()

    pth = tmpdir / f"SC_{scan1.scan_no:03d}.tiff"
    scan1.to_tiff(pth)
    scan3 = AttocubeScan.from_tiff(pth)
    assert pth.exists(), f"Failed to save scan as {pth}"
    if scan1 != scan3:
        print("B" * 80)
        print(scan1.layout, scan3.layout)
        for grp in scan1.groups:
            print(scan1[grp].metadata.all_by_keys ^ scan3[grp].metadata.all_by_keys)
    assert scan1.layout == scan3.layout, "Roundtripping scan through tiff failed"
    pth.unlink()

    scan3 = AttocubeScan()
    scan3._marshall(layout=scan1.layout, data=scan1._marshall())
    assert scan1 == scan3, "Recreating scan through _marshall failed."

    scan1["fwd"].level_image(method="parabola", signal="Amp")
    scan1["bwd"].regrid()


def test_maximus_image():
    """Load both files of a Maximus scan pair as images."""
    pths = list((datadir / "maximus_scan").glob("MPI_210127019*.*"))
    assert len(pths) == 2
    for pth in pths:
        img = ImageFile.load(pth)
        assert img.shape == (1000, 1000)
        assert len(img.metadata) == 196


def test_maximus_stack(tmpdir):
    """Round-trip a MaximusStack through hdf5."""
    tmpdir = pathlib.Path(tmpdir)
    scandir = datadir / "maximus_scan" / "MPI_210127021"
    stack = MaximusStack(scandir / "MPI_210127021")
    stack.to_hdf5(tmpdir / "MPI_210127021.hdf5")
    stack2 = MaximusStack.read_hdf5(tmpdir / "MPI_210127021.hdf5")
    assert stack2.shape == stack.shape, "Round trip through MaximusStack"


def test_fail_to_load():
    """An unrecognised format must raise StonerUnrecognisedFormat."""
    with pytest.raises(StonerUnrecognisedFormat):
        _ = Data(datadir / "bad_data" / "Origin_Project.opju")


def test_arb_class_load():
    """A loader class can be named by dotted path and cleared again."""
    _ = Data(datadir / "TDI_Format_RT.txt", filetype="dummy.ArbClass")
    clear_routine("dummy.ArbClass")


def test_url_load():
    """Test URL scheme openers."""
    t1 = Data("https://github.com/stonerlab/Stoner-PythonCode/raw/main/sample-data/hairboRaman.spc")
    assert t1 == Data(__datapath__ / "hairboRaman.spc")
    t2 = Data("https://github.com/stonerlab/Stoner-PythonCode/raw/main/sample-data/New-XRay-Data.dql")
    assert t2 == Data(__datapath__ / "New-XRay-Data.dql")
    resp = urllib.request.urlopen(
        "https://github.com/stonerlab/Stoner-PythonCode/raw/main/sample-data/New-XRay-Data.dql"
    )
    t3 = Data(resp)
    assert t3 == t2


def test_from_bytes():
    """Test loading a binary file as bytes."""
    with open(__datapath__ / "harribo.spc", "rb") as data:
        d = Data(data.read())
    assert d == Data(__datapath__ / "harribo.spc")


def test_from_StringIO():
    """Test loading a text file via an io.StringIO buffer."""
    with open(__datapath__ / "RASOR.dat", "r") as data:
        buffer = io.StringIO(data.read())
    assert Data(buffer) == Data(__datapath__ / "RASOR.dat")


def test_ImageAutoLoad():
    """Test ImageFile autoloading"""
    img = ImageFile(__datapath__ / "kermit.png")
    assert img.shape == (479, 359)
    img = ImageFile(__datapath__ / "working" / "hydra_0017.edf")
    assert img.shape == (512, 768)
    img = ImageFile(__datapath__ / "working" / "Sample_Image_2017-06-03_035.hdf5")
    assert img.shape == (80, 300)


def test_FabioImageFile():
    """Load an EDF image through the Fabio bridge."""
    # NOTE(review): renamed from ``test_FabioImageFle`` (typo); pytest discovery
    # still matches the ``test_*`` pattern so no selection config should break.
    loader = ImageFile.load(datadir / "working" / "hydra_0017.edf", filetype="FabioImage")
    assert loader.shape == (512, 768)


if __name__ == "__main__":  # Run some tests manually to allow debugging
    pytest.main(["--pdb", __file__])


# --- new file: tests/Stoner/tools/test_widgets.py -----------------------------
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 12:04:58 2020

@author: phygbu
"""

import sys
import threading
import time

import numpy as np
import pytest
from matplotlib.backend_bases import Event

import Stoner

ret_pth = Stoner.__homepath__ / ".." / "sample-data" / "TDI_Format_RT.txt"

# Horrible hack to patch QFileDialog for testing

import Stoner.tools.widgets as widgets
from Stoner import Data, DataFolder


def test_filedialog():
    """Patch the dialog backend with dummies and check each open_dialog mode."""

    def dummy(mode="getOpenFileName"):
        # Each dummy mimics a Qt dialog static method returning (result, filter).
        modes = {
            "getOpenFileName": ret_pth,
            "getOpenFileNames": [ret_pth],
            "getSaveFileName": None,
            "getExistingDirectory": ret_pth.parent,
        }
        return lambda *args, **kwargs: (modes[mode], None)

    modes = {
        "OpenFile": {
            "method": dummy("getOpenFileName"),
            "caption": "Select file to open...",
            "arg": ["parent", "caption", "directory", "filter", "options"],
        },
        "OpenFiles": {
            "method": dummy("getOpenFileNames"),
            "caption": "Select file(s) to open...",
            "arg": ["parent", "caption", "directory", "filter", "options"],
        },
        "SaveFile": {
            "method": dummy("getSaveFileName"),
            "caption": "Save file as...",
            "arg": ["parent", "caption", "directory", "filter", "options"],
        },
        "SelectDirectory": {
            "method": dummy("getExistingDirectory"),
            "caption": "Select folder...",
            "arg": ["parent", "caption", "directory", "options"],
        },
    }

    widgets = sys.modules["Stoner.tools.widgets"]
    app = getattr(widgets, "App")
    setattr(app, "modes", modes)

    assert widgets.file_dialog.open_dialog() == ret_pth
    assert widgets.file_dialog.open_dialog(title="Test", start=".") == ret_pth
    assert widgets.file_dialog.open_dialog(patterns={"*.bad": "Very bad files"}) == ret_pth
    assert widgets.file_dialog.open_dialog(mode="OpenFiles") == [ret_pth]
    assert widgets.file_dialog.open_dialog(mode="SaveFile") is None
    assert widgets.file_dialog.open_dialog(mode="SelectDirectory") == ret_pth.parent
    with pytest.raises(ValueError):
        widgets.file_dialog.open_dialog(mode="Whateve")


def test_loader():
    """Data/DataFolder constructed with False should fall back to the (patched) dialog."""
    d = Data(False)
    assert d.shape == (1676, 3), "Failed to load data with dialog box"
    with pytest.raises(RuntimeError):
        d.save(False)
    fldr = DataFolder(False)
    del fldr["bad_data"]
    assert fldr.shape == (
        54,
        {
            "attocube_scan": (15, {}),
            "maximus_scan": (2, {"MPI_210127021": (3, {})}),
            "NLIV": (11, {}),
            "recursivefoldertest": (1, {}),
            "working": (4, {}),
        },
    )
    fldr = DataFolder(False, multifile=True)
    assert fldr.shape == (1, {}), "multifile mode failed!"


def _event(data, name, **kwargs):
    """Make a fake event."""
    select = data._select
    event = Event("fake", select.data.fig.canvas)
    for k, v in kwargs.items():
        setattr(event, k, v)
    try:
        getattr(select, name)(event)
    except Exception:
        # NOTE(review): removed a leftover ``breakpoint()`` here — it would hang
        # any unattended test run.  Failures from the synthetic event are
        # deliberately swallowed, matching the original best-effort intent.
        pass


def _trigger0(data):
    """Select a range then press escape (clears the selection)."""
    time.sleep(1)
    select = data._select
    select.onselect(50, 100)
    _event(data, "keypress", key="escape")


def _trigger1(data):
    """Select a range then press enter (accepts the selection)."""
    time.sleep(1)
    select = data._select
    select.onselect(50, 100)
    _event(data, "keypress", key="enter")


def _trigger2(data):
    """Make two selections, invert, delete one, then accept."""
    time.sleep(1)
    select = data._select
    select.onselect(50, 100)
    select.onselect(150, 200)
    _event(data, "keypress", key="i")
    _event(data, "keypress", key="backspace")
    _event(data, "keypress", key="enter")


def test_range_select():
    """Drive the interactive range selector from background threads."""
    data = Stoner.Data(ret_pth, setas="xy")
    thread = threading.Thread(target=_trigger0, args=(data,))
    thread.start()
    result = data.search()
    xmin, xmax = result.x.min(), result.x.max()
    assert xmin < 4.4 and xmax > 291, "Failed to select and clear"
    thread = threading.Thread(target=_trigger1, args=(data,))
    thread.start()
    result = data.search()
    xmin1, xmax1 = result.x.min(), result.x.max()
    # NOTE(review): second isclose previously passed 1 positionally as rtol,
    # making the bound ~100 wide and the check vacuous; atol=1 matches the
    # sibling assertion's intent.
    assert np.isclose(xmin1, 50, atol=1) and np.isclose(xmax1, 100, atol=1), "Single selection failed."
    thread = threading.Thread(target=_trigger2, args=(data,))
    thread.start()
    result = data.search()
    xmin2, xmax2 = result.x.min(), result.x.max()
    assert np.isclose(xmin, xmin2) and np.isclose(xmax, xmax2), "Selection with keypresses failed"


if __name__ == "__main__":
    pytest.main(["--pdb", __file__])

# NOTE(review): the original patch chunk ended with a hunk for tests/test-env.yml
# appending the dependency "- chardet"; that yaml change is not representable in
# this Python module and must be applied to tests/test-env.yml directly.