diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d327d36ba7..f194bac075 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -64,22 +64,18 @@ repos: - --keep-before=#! - --keep-after=.. include - repo: https://github.com/kynan/nbstripout - rev: 0.7.1 + rev: 0.8.1 hooks: - id: nbstripout -- repo: https://github.com/google/yapf - rev: 'v0.40.2' - hooks: - - id: yapf - repo: https://github.com/astral-sh/ruff-pre-commit rev: 'v0.8.0' hooks: - # - id: ruff-format # TODO: enable formatting after all other v5 PRs are merged - # types_or: [python] + - id: ruff-format + types_or: [python] - id: ruff types_or: [python] - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v19.1.1 + rev: v19.1.4 hooks: - id: clang-format types_or: [c, c++, cuda, inc] diff --git a/example_plugins/pair_plugin/__init__.py b/example_plugins/pair_plugin/__init__.py index b876a975e8..9a0ee365eb 100644 --- a/example_plugins/pair_plugin/__init__.py +++ b/example_plugins/pair_plugin/__init__.py @@ -5,4 +5,4 @@ from hoomd.pair_plugin import pair -__all__ = ['pair'] +__all__ = ["pair"] diff --git a/example_plugins/pair_plugin/pair.py b/example_plugins/pair_plugin/pair.py index 2779368c50..ed25101ecf 100644 --- a/example_plugins/pair_plugin/pair.py +++ b/example_plugins/pair_plugin/pair.py @@ -20,9 +20,11 @@ class ExamplePair(pair.Pair): _cpp_class_name = "PotentialPairExample" _accepted_modes = ("none", "shift", "xplor") - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(k=float, sigma=float, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(k=float, sigma=float, len_keys=2), + ) self._add_typeparam(params) diff --git a/example_plugins/pair_plugin/pytest/test_example_pair.py b/example_plugins/pair_plugin/pytest/test_example_pair.py index dc81079122..e78f55fda7 100644 --- a/example_plugins/pair_plugin/pytest/test_example_pair.py +++ b/example_plugins/pair_plugin/pytest/test_example_pair.py @@ -14,16 +14,15 @@ # Python implementation of the pair force and energy. def harm_force_and_energy(dx, k, sigma, r_cut, shift=False): - dr = np.linalg.norm(dx) if dr >= r_cut: return np.array([0.0, 0.0, 0.0], dtype=np.float64), 0.0 f = k * (sigma - dr) * np.array(dx, dtype=np.float64) / dr - e = 0.5 * k * (sigma - dr)**2 + e = 0.5 * k * (sigma - dr) ** 2 if shift: - e -= 0.5 * k * (r_cut - sigma)**2 + e -= 0.5 * k * (r_cut - sigma) ** 2 return f, e @@ -39,10 +38,9 @@ def harm_force_and_energy(dx, k, sigma, r_cut, shift=False): @pytest.mark.parametrize("distance, k, sigma, mode", testdata) -def test_force_and_energy_eval(simulation_factory, - two_particle_snapshot_factory, distance, k, - sigma, mode): - +def test_force_and_energy_eval( + simulation_factory, two_particle_snapshot_factory, distance, k, sigma, mode +): # Build the simulation from the factory fixtures defined in # hoomd/conftest.py. 
sim = simulation_factory(two_particle_snapshot_factory(d=distance)) @@ -53,7 +51,8 @@ def test_force_and_energy_eval(simulation_factory, cell = hoomd.md.nlist.Cell(buffer=0.4) example_pair: hoomd.md.pair.Pair = pair_plugin.pair.ExamplePair( - cell, default_r_cut=sigma, mode=mode) + cell, default_r_cut=sigma, mode=mode + ) example_pair.params[("A", "A")] = dict(k=k, sigma=sigma) integrator.forces = [example_pair] integrator.methods = [nve] diff --git a/example_plugins/shape_plugin/__init__.py b/example_plugins/shape_plugin/__init__.py index 98fd76b5cf..75a31256d8 100644 --- a/example_plugins/shape_plugin/__init__.py +++ b/example_plugins/shape_plugin/__init__.py @@ -5,4 +5,4 @@ from . import integrate -__all__ = ['integrate'] +__all__ = ["integrate"] diff --git a/example_plugins/shape_plugin/integrate.py b/example_plugins/shape_plugin/integrate.py index 31aa708580..5e3f3bbeba 100644 --- a/example_plugins/shape_plugin/integrate.py +++ b/example_plugins/shape_plugin/integrate.py @@ -20,26 +20,29 @@ class MySphere(hpmc.integrate.HPMCIntegrator): _ext_module = _shape_plugin _cpp_cls = "IntegratorHPMCMonoMySphere" - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) - - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - radius=float, - ignore_statistics=False, - orientable=False, - len_keys=1)) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) + + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + radius=float, ignore_statistics=False, orientable=False, len_keys=1 + ), + ) self._add_typeparam(typeparam_shape) - @log(category='object', requires_run=True) + @log(category="object", requires_run=True) def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format.""" return super()._return_type_shapes() diff --git a/example_plugins/updater_plugin/__init__.py b/example_plugins/updater_plugin/__init__.py index 6b21bd9c31..df00a18961 100644 --- a/example_plugins/updater_plugin/__init__.py +++ b/example_plugins/updater_plugin/__init__.py @@ -5,4 +5,4 @@ from hoomd.updater_plugin import update -__all__ = ['update'] +__all__ = ["update"] diff --git a/example_plugins/updater_plugin/pytest/test_example_updater.py b/example_plugins/updater_plugin/pytest/test_example_updater.py index 283f6305fa..c9fa674fc6 100644 --- a/example_plugins/updater_plugin/pytest/test_example_updater.py +++ b/example_plugins/updater_plugin/pytest/test_example_updater.py @@ -23,7 +23,6 @@ # Use pytest decorator to automate testing over the sequence of parameters. @pytest.mark.parametrize("vel", velocities) def test_updater(simulation_factory, one_particle_snapshot_factory, vel): - # `one_particle_snapshot_factory` and `simulation_factory` are pytest # fixtures defined in hoomd/conftest.py. These factories automatically # handle iterating tests over different CPU and GPU devices. @@ -34,7 +33,8 @@ def test_updater(simulation_factory, one_particle_snapshot_factory, vel): # Add our plugin to the simulation. 
updater: operation.Updater = updater_plugin.update.ExampleUpdater( - hoomd.trigger.On(sim.timestep)) + hoomd.trigger.On(sim.timestep) + ) sim.operations.updaters.append(updater) # Test that the initial velocity matches our input. @@ -49,6 +49,6 @@ def test_updater(simulation_factory, one_particle_snapshot_factory, vel): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: velocity = snap.particles.velocity[0] - np.testing.assert_array_almost_equal(velocity, - np.array([0.0, 0.0, 0.0]), - decimal=6) + np.testing.assert_array_almost_equal( + velocity, np.array([0.0, 0.0, 0.0]), decimal=6 + ) diff --git a/example_plugins/updater_plugin/update.py b/example_plugins/updater_plugin/update.py index 2da44a9e2b..418faf6faf 100644 --- a/example_plugins/updater_plugin/update.py +++ b/example_plugins/updater_plugin/update.py @@ -22,7 +22,9 @@ def _attach_hook(self): # initialize the reflected c++ class if isinstance(self._simulation.device, hoomd.device.CPU): self._cpp_obj = _updater_plugin.ExampleUpdater( - self._simulation.state._cpp_sys_def, self.trigger) + self._simulation.state._cpp_sys_def, self.trigger + ) else: self._cpp_obj = _updater_plugin.ExampleUpdaterGPU( - self._simulation.state._cpp_sys_def, self.trigger) + self._simulation.state._cpp_sys_def, self.trigger + ) diff --git a/hoomd.code-workspace b/hoomd.code-workspace deleted file mode 100644 index 91b85323ea..0000000000 --- a/hoomd.code-workspace +++ /dev/null @@ -1,174 +0,0 @@ -{ - "folders": [ - { - "path": "." - } - ], - "settings": { - "files.associations": { - "**/.azp/**/*.yml": "azure-pipelines", - "array": "cpp", - "atomic": "cpp", - "hash_map": "cpp", - "bit": "cpp", - "*.tcc": "cpp", - "bitset": "cpp", - "cctype": "cpp", - "chrono": "cpp", - "cinttypes": "cpp", - "clocale": "cpp", - "cmath": "cpp", - "codecvt": "cpp", - "compare": "cpp", - "complex": "cpp", - "concepts": "cpp", - "condition_variable": "cpp", - "cstdarg": "cpp", - "cstddef": "cpp", - "cstdint": "cpp", - "cstdio": "cpp", - "cstdlib": "cpp", - "cstring": "cpp", - "ctime": "cpp", - "cwchar": "cpp", - "cwctype": "cpp", - "deque": "cpp", - "forward_list": "cpp", - "list": "cpp", - "map": "cpp", - "set": "cpp", - "unordered_map": "cpp", - "unordered_set": "cpp", - "vector": "cpp", - "exception": "cpp", - "algorithm": "cpp", - "functional": "cpp", - "iterator": "cpp", - "memory": "cpp", - "memory_resource": "cpp", - "numeric": "cpp", - "optional": "cpp", - "random": "cpp", - "ratio": "cpp", - "regex": "cpp", - "string": "cpp", - "string_view": "cpp", - "system_error": "cpp", - "tuple": "cpp", - "type_traits": "cpp", - "utility": "cpp", - "fstream": "cpp", - "future": "cpp", - "initializer_list": "cpp", - "iomanip": "cpp", - "iosfwd": "cpp", - "iostream": "cpp", - "istream": "cpp", - "limits": "cpp", - "mutex": "cpp", - "new": "cpp", - "ostream": "cpp", - "ranges": "cpp", - "shared_mutex": "cpp", - "sstream": "cpp", - "stdexcept": "cpp", - "stop_token": "cpp", - "streambuf": "cpp", - "thread": "cpp", - "typeindex": "cpp", - "typeinfo": "cpp", - "valarray": "cpp", - "variant": "cpp", - "*.cuh": "cpp", - "*.in": "cpp", - "cfenv": "cpp", - "fft": "cpp", - "__nullptr": "cpp", - "numbers": "cpp", - "semaphore": "cpp", - "any": "cpp", - "*.inc": "cpp" - }, - "cSpell.words": [ - "angmom", - "arange", - "asarray", - "athermal", - "atol", - "barostat", - "Berendsen", - "cmake", - "cpptools", - "cuda", - "dtype", - "enthalpic", - "eqnarray", - "Frenkel", - "GPUs", - "gyroid", - "Hirasawa", - "HOOMD", - "HOOMDGPU", - "hpmc", - "infty", - "itertools", - "Iverson", - 
"Jens", - "joaander", - "Jupyter", - "Jupyter's", - "Kamberaj", - "Ladd", - "langevin", - "mathbb", - "mathbf", - "mathrm", - "mpcd", - "nabla", - "ndarray", - "nlist", - "noqa", - "nosignatures", - "nselect", - "numpy", - "OPLS", - "OVITO", - "parameterdicts", - "pdata", - "picklable", - "polyedron", - "pyargs", - "pybind", - "pycodestyle", - "pylint", - "pyside", - "pytest", - "rtag", - "rtol", - "rtype", - "Simulation", - "sphereopolyhedra", - "spherocylinder", - "Spheropolygon", - "spheropolygons", - "spheropolyhedra", - "Spheropolyhedron", - "stkb", - "struct", - "thermalize", - "tobytes", - "twxs", - "uint", - "unapply", - "unmap", - "unsync", - "Verlet", - "verts", - "virial", - "virials", - "xmax", - "yapf", - "Yoneya" - ] - } -} diff --git a/hoomd/__init__.py b/hoomd/__init__.py index 62da085c76..7dbfbbca7e 100644 --- a/hoomd/__init__.py +++ b/hoomd/__init__.py @@ -37,6 +37,7 @@ (`hoomd.write.GSD.flush`) when a user's process is terminated. Use `signal.signal` to adjust this behavior as needed. """ + import sys import pathlib import os @@ -46,15 +47,17 @@ # Work around /usr/lib64/slurm/auth_munge.so: undefined symbol: slurm_conf # error on Purdue Anvil. -if os.environ.get('RCAC_CLUSTER') == 'anvil': +if os.environ.get("RCAC_CLUSTER") == "anvil": sys.setdlopenflags(os.RTLD_NOW | os.RTLD_GLOBAL) -if ((pathlib.Path(__file__).parent / 'CMakeLists.txt').exists() - and 'SPHINX' not in os.environ): +if ( + pathlib.Path(__file__).parent / "CMakeLists.txt" +).exists() and "SPHINX" not in os.environ: print("It appears that hoomd is being imported from the source directory:") print(pathlib.Path(__file__).parent) print() - print("""Compile the package and import from the build directory or install + print( + """Compile the package and import from the build directory or install the package and import from the Python environment. To run pytest, either: @@ -62,7 +65,8 @@ (2) compile and install. Then, ensuring your current working directory is outside the hoomd source directory, execute `python3 -m pytest --pyargs hoomd`. """, - file=sys.stderr) + file=sys.stderr, + ) from hoomd import version from hoomd import trigger @@ -85,6 +89,7 @@ from hoomd import tune from hoomd import logging from hoomd import custom + if version.md_built: from hoomd import md if version.hpmc_built: @@ -122,30 +127,30 @@ def _hoomd_sys_excepthook(type, value, traceback): pass __all__ = [ - 'Box', - 'Operations', - 'Simulation', - 'Snapshot', - 'State', - 'box', - 'communicator', - 'custom', - 'data', - 'device', - 'error', - 'filter', - 'hpmc', - 'logging', - 'md', - 'mesh', - 'mpcd', - 'operation', - 'trigger', - 'tune', - 'update', - 'util', - 'variant', - 'version', - 'wall', - 'write', + "Box", + "Operations", + "Simulation", + "Snapshot", + "State", + "box", + "communicator", + "custom", + "data", + "device", + "error", + "filter", + "hpmc", + "logging", + "md", + "mesh", + "mpcd", + "operation", + "trigger", + "tune", + "update", + "util", + "variant", + "version", + "wall", + "write", ] diff --git a/hoomd/box.py b/hoomd/box.py index 81897b4d86..acadad33bf 100644 --- a/hoomd/box.py +++ b/hoomd/box.py @@ -32,24 +32,23 @@ class (e.g. Scalar3, Int3). 
return vec_factory(v, v, v) if l_vec == 3: try: - return vec_factory(scalar_type(vec[0]), scalar_type(vec[1]), - scalar_type(vec[2])) + return vec_factory( + scalar_type(vec[0]), scalar_type(vec[1]), scalar_type(vec[2]) + ) except (ValueError, TypeError): raise ValueError("Expected values of type {}.".format(scalar_type)) else: - raise ValueError("Expected a sequence of three values or a single " - "value. Received {} values.".format(len(vec))) + raise ValueError( + "Expected a sequence of three values or a single " + "value. Received {} values.".format(len(vec)) + ) -_make_scalar3 = partial(_make_vec3, - vec_factory=_hoomd.make_scalar3, - scalar_type=float) +_make_scalar3 = partial(_make_vec3, vec_factory=_hoomd.make_scalar3, scalar_type=float) _make_int3 = partial(_make_vec3, vec_factory=_hoomd.make_int3, scalar_type=int) -_make_char3 = partial(_make_vec3, - vec_factory=_hoomd.make_char3, - scalar_type=int) +_make_char3 = partial(_make_vec3, vec_factory=_hoomd.make_char3, scalar_type=int) def _vec3_to_array(vec, dtype=None): @@ -251,9 +250,8 @@ def from_basis_vectors(cls, box_matrix): points = np.array([[0, 0, 0], [0.5, 0, 0], [0.25, 0.25, 0]]) box, rotation = hoomd.Box.from_basis_vectors( - box_matrix = [[ 1, 1, 0], - [ 1, -1, 0], - [ 0, 0, 1]]) + box_matrix=[[1, 1, 0], [1, -1, 0], [0, 0, 1]] + ) rotated_points = rotation @ points """ box_matrix = np.asarray(box_matrix, dtype=np.float64) @@ -273,16 +271,21 @@ def from_basis_vectors(cls, box_matrix): a3x = np.dot(v0, v2) / Lx xz = a3x / Lz yz = (np.dot(v1, v2) - a2x * a3x) / (Ly * Lz) - upper_triangular_box_matrix = np.array([[Lx, Ly * xy, Lz * xz], - [0, Ly, Lz * yz], - [0, 0, Lz]]) + upper_triangular_box_matrix = np.array( + [[Lx, Ly * xy, Lz * xz], [0, Ly, Lz * yz], [0, 0, Lz]] + ) else: xz = yz = 0 - if not (np.allclose(v2, [0, 0, 0]) and np.allclose(v0[2], 0) - and np.allclose(v1[2], 0)): - error_string = ("A 2D box matrix must have a third vector and" - "third component of first two vectors set to" - "zero.") + if not ( + np.allclose(v2, [0, 0, 0]) + and np.allclose(v0[2], 0) + and np.allclose(v1[2], 0) + ): + error_string = ( + "A 2D box matrix must have a third vector and" + "third component of first two vectors set to" + "zero." 
+ ) raise ValueError(error_string) upper_triangular_box_matrix = np.array([[Lx, Ly * xy], [0, Ly]]) box_matrix = box_matrix[:2, :2] @@ -333,8 +336,12 @@ def from_matrix(cls, box_matrix): if not np.allclose(box_matrix, np.triu(box_matrix)): raise ValueError("Box matrix must be upper triangular.") L = np.diag(box_matrix) - return cls(*L, box_matrix[0, 1] / L[1], box_matrix[0, 2] / L[2], - box_matrix[1, 2] / L[2]) + return cls( + *L, + box_matrix[0, 1] / L[1], + box_matrix[0, 2] / L[2], + box_matrix[1, 2] / L[2], + ) @classmethod def _from_cpp(cls, cpp_obj): @@ -375,24 +382,25 @@ def from_box(cls, box): # Handles hoomd.box.Box and objects with attributes Lx = box.Lx Ly = box.Ly - Lz = getattr(box, 'Lz', 0) - xy = getattr(box, 'xy', 0) - xz = getattr(box, 'xz', 0) - yz = getattr(box, 'yz', 0) + Lz = getattr(box, "Lz", 0) + xy = getattr(box, "xy", 0) + xz = getattr(box, "xz", 0) + yz = getattr(box, "yz", 0) except AttributeError: try: # Handle dictionary-like - Lx = box['Lx'] - Ly = box['Ly'] - Lz = box.get('Lz', 0) - xy = box.get('xy', 0) - xz = box.get('xz', 0) - yz = box.get('yz', 0) + Lx = box["Lx"] + Ly = box["Ly"] + Lz = box.get("Lz", 0) + xy = box.get("xy", 0) + xz = box.get("xz", 0) + yz = box.get("yz", 0) except (IndexError, KeyError, TypeError): if len(box) not in [2, 3, 6]: raise ValueError( "List-like objects must have length 2, 3, or 6 to be " - "converted to hoomd.Box.") + "converted to hoomd.Box." + ) # Handle list-like Lx = box[0] Ly = box[1] @@ -625,7 +633,7 @@ def volume(self): @volume.setter def volume(self, volume): - self.scale((volume / self.volume)**(1 / self.dimensions)) + self.scale((volume / self.volume) ** (1 / self.dimensions)) def to_matrix(self): """(3, 3) `numpy.ndarray` `float`: The upper triangular matrix that \ @@ -679,7 +687,8 @@ def scale(self, s): def __repr__(self): """Executable representation of the object.""" return "hoomd.box.Box(Lx={}, Ly={}, Lz={}, xy={}, xz={}, yz={})".format( - self.Lx, self.Ly, self.Lz, self.xy, self.xz, self.yz) + self.Lx, self.Ly, self.Lz, self.xy, self.xz, self.yz + ) def __eq__(self, other): """Test if boxes are equal.""" @@ -743,8 +752,9 @@ def yz(self) -> float: pass -box_like = typing.Union[Box, BoxInterface, typing.Sequence[float], - typing.Mapping[str, float], np.ndarray] +box_like = typing.Union[ + Box, BoxInterface, typing.Sequence[float], typing.Mapping[str, float], np.ndarray +] """Objects that are or can be converted to `Box`. 
This includes @@ -762,6 +772,6 @@ def yz(self) -> float: """ __all__ = [ - 'BoxInterface', - 'box_like', + "BoxInterface", + "box_like", ] diff --git a/hoomd/communicator.py b/hoomd/communicator.py index f1fe9ec204..2dbadb1101 100644 --- a/hoomd/communicator.py +++ b/hoomd/communicator.py @@ -64,7 +64,6 @@ class Communicator(object): """ def __init__(self, mpi_comm=None, ranks_per_partition=None): - # check ranks_per_partition if ranks_per_partition is not None: if not hoomd.version.mpi_enabled: @@ -88,10 +87,12 @@ def __init__(self, mpi_comm=None, ranks_per_partition=None): # pass in pointer to MPI_Comm object provided by mpi4py try: import mpi4py + if isinstance(mpi_comm, mpi4py.MPI.Comm): addr = mpi4py.MPI._addressof(mpi_comm) - self.cpp_mpi_conf = \ - _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr) + self.cpp_mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm( + addr + ) handled = True except ImportError: # silently ignore when mpi4py is missing @@ -100,19 +101,20 @@ def __init__(self, mpi_comm=None, ranks_per_partition=None): # undocumented case: handle plain integers as pointers to MPI_Comm # objects if not handled and isinstance(mpi_comm, int): - self.cpp_mpi_conf = \ - _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm) + self.cpp_mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm( + mpi_comm + ) handled = True if not handled: - raise RuntimeError( - "Invalid mpi_comm object: {}".format(mpi_comm)) + raise RuntimeError("Invalid mpi_comm object: {}".format(mpi_comm)) if ranks_per_partition is not None: # check validity - if (self.cpp_mpi_conf.getNRanksGlobal() % ranks_per_partition): - raise RuntimeError('Total number of ranks is not a multiple of ' - 'ranks_per_partition.') + if self.cpp_mpi_conf.getNRanksGlobal() % ranks_per_partition: + raise RuntimeError( + "Total number of ranks is not a multiple of " "ranks_per_partition." + ) # split the communicator into partitions self.cpp_mpi_conf.splitPartitions(ranks_per_partition) @@ -276,5 +278,5 @@ def walltime(self): _current_communicator = Communicator() __all__ = [ - 'Communicator', + "Communicator", ] diff --git a/hoomd/conftest.py b/hoomd/conftest.py index aefdb0c29c..476e7df7d2 100644 --- a/hoomd/conftest.py +++ b/hoomd/conftest.py @@ -15,6 +15,7 @@ import os import numpy import math + try: import sybil import sybil.parsers.rest @@ -31,11 +32,11 @@ devices = [hoomd.device.CPU] _n_available_gpu = len(hoomd.device.GPU.get_available_devices()) -_require_gpu_tests = (os.environ.get('_HOOMD_REQUIRE_GPU_TESTS_IN_GPU_BUILDS_') - is not None) +_require_gpu_tests = ( + os.environ.get("_HOOMD_REQUIRE_GPU_TESTS_IN_GPU_BUILDS_") is not None +) if hoomd.version.gpu_enabled and (_n_available_gpu > 0 or _require_gpu_tests): - - if os.environ.get('_HOOMD_SKIP_CPU_TESTS_WHEN_GPUS_PRESENT_') is not None: + if os.environ.get("_HOOMD_SKIP_CPU_TESTS_WHEN_GPUS_PRESENT_") is not None: devices.pop(0) devices.append(hoomd.device.GPU) @@ -44,31 +45,33 @@ def setup_sybil_tests(namespace): """Sybil setup function.""" # Common imports. 
- namespace['numpy'] = numpy - namespace['hoomd'] = hoomd - namespace['math'] = math + namespace["numpy"] = numpy + namespace["hoomd"] = hoomd + namespace["math"] = math - namespace['gpu_not_available'] = _n_available_gpu == 0 + namespace["gpu_not_available"] = _n_available_gpu == 0 try: import cupy except ImportError: cupy = None - namespace['cupy_not_available'] = cupy is None + namespace["cupy_not_available"] = cupy is None if sybil is not None: - pytest_collect_file = sybil.Sybil(parsers=[ - sybil.parsers.rest.PythonCodeBlockParser(), - sybil.parsers.rest.SkipParser(), - ], - pattern='*.py', - setup=setup_sybil_tests, - fixtures=['tmp_path']).pytest() - - -@pytest.fixture(scope='session', params=devices) + pytest_collect_file = sybil.Sybil( + parsers=[ + sybil.parsers.rest.PythonCodeBlockParser(), + sybil.parsers.rest.SkipParser(), + ], + pattern="*.py", + setup=setup_sybil_tests, + fixtures=["tmp_path"], + ).pytest() + + +@pytest.fixture(scope="session", params=devices) def device(request): """Parameterized Device fixture. @@ -85,7 +88,7 @@ def device(request): return d -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def simulation_factory(device): """Make a Simulation object from a snapshot. @@ -111,15 +114,17 @@ def make_simulation(snapshot=None, domain_decomposition=None): return make_simulation -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def one_particle_snapshot_factory(device): """Make a snapshot with a single particle.""" - def make_snapshot(particle_types=['A'], - dimensions=3, - position=(0, 0, 0), - orientation=(1, 0, 0, 0), - L=20): + def make_snapshot( + particle_types=["A"], + dimensions=3, + position=(0, 0, 0), + orientation=(1, 0, 0, 0), + L=20, + ): """Make the snapshot. Args: @@ -138,8 +143,7 @@ def make_snapshot(particle_types=['A'], N = 1 if dimensions == 2 and position[2] != 0: - raise ValueError( - 'z component of position must be zero for 2D simulation.') + raise ValueError("z component of position must be zero for 2D simulation.") if s.communicator.rank == 0: box = [L, L, L, 0, 0, 0] @@ -156,11 +160,11 @@ def make_snapshot(particle_types=['A'], return make_snapshot -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def two_particle_snapshot_factory(device): """Make a snapshot with two particles.""" - def make_snapshot(particle_types=['A'], dimensions=3, d=1, L=20): + def make_snapshot(particle_types=["A"], dimensions=3, d=1, L=20): """Make the snapshot. Args: @@ -183,7 +187,7 @@ def make_snapshot(particle_types=['A'], dimensions=3, d=1, L=20): s.configuration.box = box s.particles.N = N # shift particle positions slightly in z so MPI tests pass - s.particles.position[:] = [[-d / 2, 0, .1], [d / 2, 0, .1]] + s.particles.position[:] = [[-d / 2, 0, 0.1], [d / 2, 0, 0.1]] s.particles.types = particle_types if dimensions == 2: box[2] = 0 @@ -194,11 +198,11 @@ def make_snapshot(particle_types=['A'], dimensions=3, d=1, L=20): return make_snapshot -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def lattice_snapshot_factory(device): """Make a snapshot with particles on a cubic/square lattice.""" - def make_snapshot(particle_types=['A'], dimensions=3, a=1, n=7, r=0): + def make_snapshot(particle_types=["A"], dimensions=3, a=1, n=7, r=0): """Make the snapshot. 
Args: @@ -239,8 +243,7 @@ def make_snapshot(particle_types=['A'], dimensions=3, a=1, n=7, r=0): # create the lattice ranges = [numpy.arange(-nx / 2, nx / 2) for nx in n] x, y, z = numpy.meshgrid(*ranges) - lattice_position = numpy.vstack( - (x.flatten(), y.flatten(), z.flatten())).T + lattice_position = numpy.vstack((x.flatten(), y.flatten(), z.flatten())).T pos = (lattice_position + 0.5) * a if dimensions == 2: pos[:, 2] = 0 @@ -257,11 +260,11 @@ def make_snapshot(particle_types=['A'], dimensions=3, a=1, n=7, r=0): return make_snapshot -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def fcc_snapshot_factory(device): """Make a snapshot with particles in a fcc structure.""" - def make_snapshot(particle_types=['A'], a=1, n=7, r=0): + def make_snapshot(particle_types=["A"], a=1, n=7, r=0): """Make a snapshot with particles in a fcc structure. Args: @@ -302,39 +305,37 @@ def make_snapshot(particle_types=['A'], a=1, n=7, r=0): @pytest.fixture(autouse=True) def skip_mpi(request): """Skip tests marked ``serial`` when running with MPI.""" - if request.node.get_closest_marker('serial'): - if 'device' in request.fixturenames: - if request.getfixturevalue('device').communicator.num_ranks > 1: - pytest.skip('Test does not support MPI execution') + if request.node.get_closest_marker("serial"): + if "device" in request.fixturenames: + if request.getfixturevalue("device").communicator.num_ranks > 1: + pytest.skip("Test does not support MPI execution") else: - raise ValueError('skip_mpi requires the *device* fixture') + raise ValueError("skip_mpi requires the *device* fixture") @pytest.fixture(autouse=True) def only_gpu(request): """Skip CPU tests marked ``gpu``.""" - if request.node.get_closest_marker('gpu'): - if 'device' in request.fixturenames: - if not isinstance(request.getfixturevalue('device'), - hoomd.device.GPU): - pytest.skip('Test is run only on GPU(s).') + if request.node.get_closest_marker("gpu"): + if "device" in request.fixturenames: + if not isinstance(request.getfixturevalue("device"), hoomd.device.GPU): + pytest.skip("Test is run only on GPU(s).") else: - raise ValueError('only_gpu requires the *device* fixture') + raise ValueError("only_gpu requires the *device* fixture") @pytest.fixture(autouse=True) def only_cpu(request): """Skip GPU tests marked ``cpu``.""" - if request.node.get_closest_marker('cpu'): - if 'device' in request.fixturenames: - if not isinstance(request.getfixturevalue('device'), - hoomd.device.CPU): - pytest.skip('Test is run only on CPU(s).') + if request.node.get_closest_marker("cpu"): + if "device" in request.fixturenames: + if not isinstance(request.getfixturevalue("device"), hoomd.device.CPU): + pytest.skip("Test is run only on CPU(s).") else: - raise ValueError('only_cpu requires the *device* fixture') + raise ValueError("only_cpu requires the *device* fixture") -@pytest.fixture(scope='function', autouse=True) +@pytest.fixture(scope="function", autouse=True) def numpy_random_seed(): """Seed the numpy random number generator. 
@@ -353,13 +354,12 @@ def rng(): def pytest_configure(config): """Add markers to pytest configuration.""" config.addinivalue_line( - "markers", - "serial: Tests that will not execute with more than 1 MPI process") - config.addinivalue_line("markers", - "gpu: Tests that should only run on the gpu.") + "markers", "serial: Tests that will not execute with more than 1 MPI process" + ) + config.addinivalue_line("markers", "gpu: Tests that should only run on the gpu.") config.addinivalue_line( - "markers", - "cupy_optional: tests that should pass with and without CuPy.") + "markers", "cupy_optional: tests that should pass with and without CuPy." + ) config.addinivalue_line("markers", "cpu: Tests that only run on the CPU.") config.addinivalue_line("markers", "gpu: Tests that only run on the GPU.") @@ -385,34 +385,13 @@ def pytest_sessionfinish(session, exitstatus): expected_loggable_params = { - 'energy': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'energies': { - 'category': LoggerCategories.particle, - 'default': True - }, - 'forces': { - 'category': LoggerCategories.particle, - 'default': True - }, - 'torques': { - 'category': LoggerCategories.particle, - 'default': True - }, - 'virials': { - 'category': LoggerCategories.particle, - 'default': True - }, - 'additional_energy': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'additional_virial': { - 'category': LoggerCategories.sequence, - 'default': True - } + "energy": {"category": LoggerCategories.scalar, "default": True}, + "energies": {"category": LoggerCategories.particle, "default": True}, + "forces": {"category": LoggerCategories.particle, "default": True}, + "torques": {"category": LoggerCategories.particle, "default": True}, + "virials": {"category": LoggerCategories.particle, "default": True}, + "additional_energy": {"category": LoggerCategories.scalar, "default": True}, + "additional_virial": {"category": LoggerCategories.sequence, "default": True}, } @@ -432,8 +411,10 @@ def logging_check(cls, expected_namespace, expected_loggables): expected value of each for the loggable. """ # Check namespace - assert all(log_quantity.namespace == (*expected_namespace, cls.__name__) - for log_quantity in cls._export_dict.values()) + assert all( + log_quantity.namespace == (*expected_namespace, cls.__name__) + for log_quantity in cls._export_dict.values() + ) # Check specific loggables def check_loggable(cls, name, properties): @@ -473,8 +454,9 @@ def _check_obj_attr_compatibility(a, b): if compatible: return True - logger.debug(f"In equality check, incompatible attrs found " - f"{filtered_differences}.") + logger.debug( + f"In equality check, incompatible attrs found " f"{filtered_differences}." + ) return False @@ -528,8 +510,11 @@ def check_item(x, y, attr): # Check item equality for key in keys: for type_, value in a._typeparam_dict[key].items(): - check_item(value, b._typeparam_dict[key][type_], ".".join( - (key, str(type_)))) + check_item( + value, + b._typeparam_dict[key][type_], + ".".join((key, str(type_))), + ) continue check_item(a.__dict__[attr], b.__dict__[attr], attr) @@ -676,9 +661,8 @@ class Generator: well for instance a ``Float`` class which specified the range of values to assume would be quite simple to add. 
""" - alphabet = [ - char for char in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" - ] + + alphabet = [char for char in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"] def __init__(self, rng, max_float=1e9, max_int=1_000_000): self.rng = rng @@ -704,7 +688,7 @@ def __call__(self, spec): bool: self.bool, numpy.ndarray: self.ndarray, hoomd.variant.Variant: self.variant, - None: self.none + None: self.none, }[spec]() def tuple(self, spec): @@ -741,8 +725,7 @@ def str(self, max_length=20): """Return a random string.""" length = self.int(max_length) + 1 characters = [ - self.rng.choice(self.alphabet) - for _ in range(self.rng.integers(length)) + self.rng.choice(self.alphabet) for _ in range(self.rng.integers(length)) ] return "".join(characters) @@ -752,17 +735,19 @@ def ndarray(self, shape=(None,), dtype="float64"): A value of None in shape means any length. """ shape = tuple(i if i is not None else self.int(20) for i in shape) - return (100 * self.rng.random(numpy.prod(shape)) - - 50).reshape(shape).astype(dtype) + return ( + (100 * self.rng.random(numpy.prod(shape)) - 50).reshape(shape).astype(dtype) + ) def variant(self): """Return a random `hoomd.variant.Variant` or `float`.""" - classes = ((hoomd.variant.Constant, (float,)), - (hoomd.variant.Cycle, (float, float, int, int, int, int, - int)), (hoomd.variant.Ramp, - (float, float, int, int)), - (hoomd.variant.Power, (float, float, int, int, - int)), (float, (float,))) + classes = ( + (hoomd.variant.Constant, (float,)), + (hoomd.variant.Cycle, (float, float, int, int, int, int, int)), + (hoomd.variant.Ramp, (float, float, int, int)), + (hoomd.variant.Power, (float, float, int, int, int)), + (float, (float,)), + ) cls, spec = classes[self.rng.integers(len(classes))] return cls(*self(spec)) @@ -807,9 +792,7 @@ def generate_init_args(self): def generate_all_attr_change(self): """Get arguments to test setting attributes.""" - return { - k: self.generator(spec) for k, spec in self.attribute_spec.items() - } + return {k: self.generator(spec) for k, spec in self.attribute_spec.items()} class BaseCollectionsTest: @@ -956,7 +939,8 @@ def test_contains(self, populated_collection, generate_plain_collection): if isinstance(item, numpy.ndarray): contains = any( test_collection._numpy_equality(item, item2) - for item2 in plain_collection) + for item2 in plain_collection + ) else: if any(isinstance(a, numpy.ndarray) for a in plain_collection): contains = False @@ -988,6 +972,7 @@ def test_iter(self, populated_collection): class BaseSequenceTest(BaseCollectionsTest): """Basic extensible test suite for tuple-like classes.""" + _negative_indexing = True _allow_slices = True @@ -1001,10 +986,12 @@ def test_getitem(self, populated_collection): if self._allow_slices: assert all( self.is_equal(t, p) - for t, p in zip(test_collection[:], plain_collection)) + for t, p in zip(test_collection[:], plain_collection) + ) assert all( self.is_equal(t, p) - for t, p in zip(test_collection[1:], plain_collection[1:])) + for t, p in zip(test_collection[1:], plain_collection[1:]) + ) if self._negative_indexing: for i in range(-1, -len(plain_collection), -1): assert self.is_equal(test_collection[i], plain_collection[i]) @@ -1051,8 +1038,7 @@ def test_delitem(self, delete_index, populated_collection): old_items = test_list[1:] del test_list[1:] assert len(test_list) == 1 - assert all( - self.to_base(old_item) not in test_list for old_item in old_items) + assert all(self.to_base(old_item) not in test_list for old_item in old_items) self.final_check(test_list) 
def test_append(self, empty_collection, plain_collection): @@ -1123,8 +1109,9 @@ def setitem_index(self, request): """ return request.param - def test_setitem(self, setitem_index, populated_collection, - generate_plain_collection): + def test_setitem( + self, setitem_index, populated_collection, generate_plain_collection + ): """Test __setitem__.""" if not self._negative_indexing and setitem_index < 0: return diff --git a/hoomd/custom/__init__.py b/hoomd/custom/__init__.py index 4fd2bb189d..99b7703d87 100644 --- a/hoomd/custom/__init__.py +++ b/hoomd/custom/__init__.py @@ -16,9 +16,9 @@ `hoomd.md.force.Custom` """ -from hoomd.custom.custom_operation import (Action, CustomOperation) +from hoomd.custom.custom_operation import Action, CustomOperation __all__ = [ - 'Action', - 'CustomOperation', + "Action", + "CustomOperation", ] diff --git a/hoomd/custom/custom_action.py b/hoomd/custom/custom_action.py index 16d1f521ce..9916b23fc6 100644 --- a/hoomd/custom/custom_action.py +++ b/hoomd/custom/custom_action.py @@ -49,9 +49,11 @@ def act(self, timestep): .. code-block:: python class ExampleAction(hoomd.custom.Action): - flags = [Action.Flags.ROTATIONAL_KINETIC_ENERGY, - Action.Flags.PRESSURE_TENSOR, - Action.Flags.EXTERNAL_FIELD_VIRIAL] + flags = [ + Action.Flags.ROTATIONAL_KINETIC_ENERGY, + Action.Flags.PRESSURE_TENSOR, + Action.Flags.EXTERNAL_FIELD_VIRIAL, + ] def act(self, timestep): pass @@ -61,7 +63,6 @@ def act(self, timestep): .. code-block:: python class ExampleAction(hoomd.custom.Action): - @hoomd.logging.log def answer(self): return 42 @@ -69,8 +70,9 @@ def answer(self): def act(self, timestep): pass + example_action = ExampleAction() - logger.add(example_action, quantities=['answer']) + logger.add(example_action, quantities=["answer"]) Attributes: flags (list[Action.Flags]): List of flags from the @@ -85,6 +87,7 @@ class Flags(IntEnum): * ROTATIONAL_KINETIC_ENERGY = 1 * EXTERNAL_FIELD_VIRIAL = 2 """ + PRESSURE_TENSOR = 0 ROTATIONAL_KINETIC_ENERGY = 1 EXTERNAL_FIELD_VIRIAL = 2 @@ -109,7 +112,7 @@ def attach(self, simulation): @property def _attached(self): - return getattr(self, '_state', None) is not None + return getattr(self, "_state", None) is not None def detach(self): """Detaches the Action from the `hoomd.Simulation`.""" diff --git a/hoomd/custom/custom_operation.py b/hoomd/custom/custom_operation.py index 327dcd7de3..c6c43382f8 100644 --- a/hoomd/custom/custom_operation.py +++ b/hoomd/custom/custom_operation.py @@ -43,7 +43,9 @@ class CustomOperation(TriggeredOperation, metaclass=_AbstractLoggable): __doc__ = __doc__.replace("{inherited}", TriggeredOperation._doc_inherited) - _doc_inherited = TriggeredOperation._doc_inherited + """ + _doc_inherited = ( + TriggeredOperation._doc_inherited + + """ ---------- **Members inherited from** @@ -59,8 +61,9 @@ class CustomOperation(TriggeredOperation, metaclass=_AbstractLoggable): Action that this operation wraps. `Read more... ` """ + ) - _override_setattr = {'_action', "_export_dict", "_simulation"} + _override_setattr = {"_action", "_export_dict", "_simulation"} @abstractmethod def _cpp_class_name(self): @@ -69,13 +72,14 @@ def _cpp_class_name(self): def __init__(self, trigger, action): if not isinstance(action, Action): - raise ValueError("action must be a subclass of " - "hoomd.custom_action.custom.Action.") + raise ValueError( + "action must be a subclass of " "hoomd.custom_action.custom.Action." 
+ ) self._action = action self._export_dict = action._export_dict param_dict = ParameterDict(trigger=Trigger) - param_dict['trigger'] = trigger + param_dict["trigger"] = trigger self._param_dict.update(param_dict) def __getattr__(self, attr): @@ -86,8 +90,9 @@ def __getattr__(self, attr): try: return getattr(self._action, attr) except AttributeError: - raise AttributeError("{} object has no attribute {}".format( - type(self), attr)) + raise AttributeError( + "{} object has no attribute {}".format(type(self), attr) + ) def _setattr_hook(self, attr, value): """This implements the __setattr__ pass through to the Action.""" @@ -99,7 +104,8 @@ def _setattr_hook(self, attr, value): def _attach_hook(self): """Create the C++ custom operation.""" self._cpp_obj = getattr(_hoomd, self._cpp_class_name)( - self._simulation.state._cpp_sys_def, self.trigger, self._action) + self._simulation.state._cpp_sys_def, self.trigger, self._action + ) self._action.attach(self._simulation) def _detach_hook(self): @@ -154,7 +160,8 @@ def __init__(cls, name, base, dct): # noqa: N805 extra_methods = dct.get("_wrap_methods", []) for name in itertools.chain(action_cls._export_dict, extra_methods): wrapped_method = _AbstractLoggableWithPassthrough._wrap_loggable( - name, getattr(action_cls, name)) + name, getattr(action_cls, name) + ) setattr(cls, name, wrapped_method) cls._export_dict = action_cls._export_dict _AbstractLoggable.__init__(cls, name, base, dct) @@ -193,12 +200,14 @@ def __getattr__(self, attr): # classmethods in the wrapping operation should be fine. return getattr(self._internal_class, attr) except AttributeError: - raise AttributeError("{} object {} has no attribute {}".format( - type(self), self, attr)) + raise AttributeError( + "{} object {} has no attribute {}".format(type(self), self, attr) + ) -class _InternalCustomOperation(CustomOperation, - metaclass=_AbstractLoggableWithPassthrough): +class _InternalCustomOperation( + CustomOperation, metaclass=_AbstractLoggableWithPassthrough +): """Internal class for Python `Action`s. Offers a streamlined ``__init__``. Adds a wrapper around an hoomd Python action. This extends the attribute @@ -210,18 +219,20 @@ class _InternalCustomOperation(CustomOperation, # These attributes are not accessible or able to be passed through to # prevent leaky abstractions and help promote the illusion of a single # object for cases of internal custom actions. 
- _disallowed_attrs = {'detach', 'attach', 'action', "act"} + _disallowed_attrs = {"detach", "attach", "action", "act"} def __getattribute__(self, attr): if attr in object.__getattribute__(self, "_disallowed_attrs"): - raise AttributeError("{} object {} has no attribute {}.".format( - type(self), self, attr)) + raise AttributeError( + "{} object {} has no attribute {}.".format(type(self), self, attr) + ) return object.__getattribute__(self, attr) def __getattr__(self, attr): if attr in self._disallowed_attrs: - raise AttributeError("{} object {} has no attribute {}.".format( - type(self), self, attr)) + raise AttributeError( + "{} object {} has no attribute {}.".format(type(self), self, attr) + ) return super().__getattr__(attr) @property @@ -242,9 +253,7 @@ def __dir__(self): """Expose all attributes for dynamic querying in notebooks and IDEs.""" list_ = super().__dir__() act = self._action - action_list = [ - k for k in itertools.chain(act._param_dict, act._typeparam_dict) - ] + action_list = [k for k in itertools.chain(act._param_dict, act._typeparam_dict)] list_.remove("action") list_.remove("act") return list_ + action_list diff --git a/hoomd/data/__init__.py b/hoomd/data/__init__.py index 1d30e91869..de7d4730f6 100644 --- a/hoomd/data/__init__.py +++ b/hoomd/data/__init__.py @@ -9,25 +9,30 @@ """ from .array import HOOMDArray, HOOMDGPUArray -from .local_access import (AngleLocalAccessBase, BondLocalAccessBase, - ConstraintLocalAccessBase, DihedralLocalAccessBase, - ImproperLocalAccessBase, PairLocalAccessBase, - ParticleLocalAccessBase) +from .local_access import ( + AngleLocalAccessBase, + BondLocalAccessBase, + ConstraintLocalAccessBase, + DihedralLocalAccessBase, + ImproperLocalAccessBase, + PairLocalAccessBase, + ParticleLocalAccessBase, +) from .local_access_cpu import LocalSnapshot from .local_access_gpu import LocalSnapshotGPU from .typeparam import TypeParameter __all__ = [ - 'AngleLocalAccessBase', - 'BondLocalAccessBase', - 'ConstraintLocalAccessBase', - 'DihedralLocalAccessBase', - 'HOOMDArray', - 'HOOMDGPUArray', - 'ImproperLocalAccessBase', - 'LocalSnapshot', - 'LocalSnapshotGPU', - 'PairLocalAccessBase', - 'ParticleLocalAccessBase', - 'TypeParameter', + "AngleLocalAccessBase", + "BondLocalAccessBase", + "ConstraintLocalAccessBase", + "DihedralLocalAccessBase", + "HOOMDArray", + "HOOMDGPUArray", + "ImproperLocalAccessBase", + "LocalSnapshot", + "LocalSnapshotGPU", + "PairLocalAccessBase", + "ParticleLocalAccessBase", + "TypeParameter", ] diff --git a/hoomd/data/array.py b/hoomd/data/array.py index a97da05081..c52a7b1042 100644 --- a/hoomd/data/array.py +++ b/hoomd/data/array.py @@ -14,13 +14,13 @@ class HOOMDArrayError(RuntimeError): """Error when accessing HOOMD buffers outside a context manager.""" + pass -def _wrap_class_factory(methods_wrap_func_list, - *args, - allow_exceptions=False, - **kwargs): +def _wrap_class_factory( + methods_wrap_func_list, *args, allow_exceptions=False, **kwargs +): """Factory function for metaclasses that produce methods via a functor. Applies the functor to each method given in methods. This occurs before @@ -40,7 +40,6 @@ class methods, and the method raising the error is skipped. 
""" class _WrapClass(type): - def __new__(cls, name, bases, class_dict): for methods, functor in methods_wrap_func_list: for method in methods: @@ -94,37 +93,38 @@ def wrapped(self, *args, **kwargs): _ndarray_ops_ = ( [ # Comparison - '__lt__', - '__le__', - '__gt__', - '__ge__', - '__eq__', - '__ne__', - '__bool__', + "__lt__", + "__le__", + "__gt__", + "__ge__", + "__eq__", + "__ne__", + "__bool__", # Unary - '__neg__', - '__pos__', - '__abs__', - '__invert__', + "__neg__", + "__pos__", + "__abs__", + "__invert__", # Arithmetic - '__add__', - '__sub__', - '__mul__', - '__truediv__', - '__floordiv__', - '__mod__', - '__divmod__', - '__pow__', + "__add__", + "__sub__", + "__mul__", + "__truediv__", + "__floordiv__", + "__mod__", + "__divmod__", + "__pow__", # Bitwise - '__lshift__', - '__rshift__', - '__and__', - '__or__', - '__xor__', + "__lshift__", + "__rshift__", + "__and__", + "__or__", + "__xor__", # Matrix - '__matmul__', + "__matmul__", ], - _op_wrap) + _op_wrap, +) # Magic methods that never return an array to the same memory buffer # ---------------------------------------------------------------------- @@ -137,21 +137,22 @@ def wrapped(self, *args, **kwargs): _ndarray_magic_safe_ = ( [ # Copy - '__copy__', - '__deepcopy__', + "__copy__", + "__deepcopy__", # Pickling - '__reduce__', - '__setstate__', + "__reduce__", + "__setstate__", # Container based - '__len__', - '__setitem__', - '__contains__', + "__len__", + "__setitem__", + "__contains__", # Conversion - '__int__', - '__float__', - '__complex__' + "__int__", + "__float__", + "__complex__", ], - _magic_wrap) + _magic_wrap, +) # Magic methods that may return an array pointing to the same buffer # ------------------------------------------------------------------ @@ -179,9 +180,10 @@ def wrapped(self, *args, **kwargs): _ndarray_magic_unsafe_ = ( [ # Container based - '__getitem__', + "__getitem__", ], - _magic_wrap_with_check) + _magic_wrap_with_check, +) # Operations that return an array pointing to the same buffer # ---------------------------------------------------------- @@ -206,21 +208,22 @@ def wrapped(self, *args, **kwargs): _ndarray_iops_ = ( [ # Inplace Arithmetic - '__iadd__', - '__isub__', - '__imul__', - '__itruediv__', - '__ifloordiv__', - '__imod__', - '__ipow__', + "__iadd__", + "__isub__", + "__imul__", + "__itruediv__", + "__ifloordiv__", + "__imod__", + "__ipow__", # Inplace Bitwise - '__ilshift__', - '__irshift__', - '__iand__', - '__ior__', - '__ixor__' + "__ilshift__", + "__irshift__", + "__iand__", + "__ior__", + "__ixor__", ], - _iop_wrap) + _iop_wrap, +) # Regular methods that may return an array pointing to the same buffer # -------------------------------------------------------------------- @@ -232,15 +235,16 @@ def wrapped(self, *args, **kwargs): _ndarray_std_funcs_ = ( [ # Select subset of array - 'diagonal', + "diagonal", # Reshapes array - 'reshape', - 'transpose', - 'swapaxes', - 'ravel', - 'squeeze', + "reshape", + "transpose", + "swapaxes", + "ravel", + "squeeze", ], - _std_func_with_check) + _std_func_with_check, +) # Functions that we disallow # -------------------------- @@ -250,17 +254,17 @@ def wrapped(self, *args, **kwargs): def _disallowed_wrap(method): - def raise_error(self, *args, **kwargs): raise HOOMDArrayError( "The {} method is not allowed for {} objects.".format( - method, self.__class__)) + method, self.__class__ + ) + ) return raise_error -_ndarray_disallow_funcs_ = (['view', 'resize', 'flat', - 'flatiter'], _disallowed_wrap) +_ndarray_disallow_funcs_ = (["view", "resize", 
"flat", "flatiter"], _disallowed_wrap) # Properties that can return an array pointing to the same buffer # --------------------------------------------------------------- @@ -289,7 +293,7 @@ def wrapped(self, value): return wrapped -_ndarray_properties_ = (['T'], _wrap_properties_with_check) +_ndarray_properties_ = (["T"], _wrap_properties_with_check) # Properties we disallow # -------------------------------- @@ -298,17 +302,18 @@ def wrapped(self, value): def _disallowed_property_wrap(method): - @property def raise_error(self): raise HOOMDArrayError( "The {} property is not allowed for {} objects.".format( - method, self.__class__)) + method, self.__class__ + ) + ) return raise_error -_ndarray_disallow_properties_ = (['data', 'base'], _disallowed_property_wrap) +_ndarray_disallow_properties_ = (["data", "base"], _disallowed_property_wrap) _wrap_list = [ _ndarray_ops_, @@ -327,8 +332,7 @@ def coerce_mock_to_array(val): Coerces ``HOOMDArray`` objects into ``numpy.ndarray`` objects. """ - if isinstance(val, Iterable) and not isinstance(val, - (np.ndarray, HOOMDArray)): + if isinstance(val, Iterable) and not isinstance(val, (np.ndarray, HOOMDArray)): return [coerce_mock_to_array(v) for v in val] return val if not isinstance(val, HOOMDArray) else val._coerce_to_ndarray() @@ -390,11 +394,12 @@ def __init__(self, buffer, callback, read_only=None): self._read_only = buffer.read_only except AttributeError: try: - self._read_only = not buffer.flags['WRITEABLE'] + self._read_only = not buffer.flags["WRITEABLE"] except AttributeError: raise ValueError( "Whether the buffer is read only could not be " - "discerned. Pass read_only manually.") + "discerned. Pass read_only manually." + ) else: self._read_only = bool(read_only) @@ -407,8 +412,7 @@ def __array_function__(self, func, types, args, kwargs): new_inputs = [coerce_mock_to_array(val) for val in args] for key, value in kwargs.items(): if type(value) is tuple: - kwargs[key] = tuple( - [coerce_mock_to_array(val) for val in value]) + kwargs[key] = tuple([coerce_mock_to_array(val) for val in value]) else: kwargs[key] = coerce_mock_to_array(value) arr = func(*new_inputs, **kwargs) @@ -430,7 +434,7 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): kwargs = {k: coerce_mock_to_array(v) for k, v in kwargs} if out is not None: if any(isinstance(o, HOOMDArray) for o in out): - kwargs['out'] = tuple((coerce_mock_to_array(v) for v in out)) + kwargs["out"] = tuple((coerce_mock_to_array(v) for v in out)) getattr(ufunc, method)(*new_inputs, **kwargs) return self return getattr(ufunc, method)(*new_inputs, **kwargs) @@ -449,8 +453,7 @@ def __array_interface__(self): `numpy.ndarray` pointing to the underlying buffer we cannot guarantee safety. """ - return np.array(self._coerce_to_ndarray(), - copy=True).__array_interface__ + return np.array(self._coerce_to_ndarray(), copy=True).__array_interface__ def _coerce_to_ndarray(self): """Provide a `numpy.ndarray` interface to the underlying buffer. @@ -460,7 +463,7 @@ def _coerce_to_ndarray(self): if self._callback(): if self._read_only: arr = np.array(self._buffer, copy=False) - arr.flags['WRITEABLE'] = False + arr.flags["WRITEABLE"] = False return arr else: return np.array(self._buffer, copy=False) @@ -468,7 +471,9 @@ def _coerce_to_ndarray(self): raise HOOMDArrayError( "Cannot access {} outside context manager. 
Use " "numpy.array inside context manager instead.".format( - self.__class__.__name__)) + self.__class__.__name__ + ) + ) @property def shape(self): @@ -477,9 +482,11 @@ def shape(self): @shape.setter def shape(self, value): - raise HOOMDArrayError("Shape cannot be set on a {}. Use " - "``array.reshape`` instead.".format( - self.__class__.__name__)) + raise HOOMDArrayError( + "Shape cannot be set on a {}. Use " "``array.reshape`` instead.".format( + self.__class__.__name__ + ) + ) @property def read_only(self): @@ -506,11 +513,11 @@ def _repr_html_(self): """str: Format the array in HTML.""" name = self.__class__.__name__ if self._callback(): - return "" + name + "" \ - + "(" + str(self._coerce_to_ndarray()) + ")" + return ( + "" + name + "" + "(" + str(self._coerce_to_ndarray()) + ")" + ) else: - return "" + name + "" \ - + "(INVALID)" + return "" + name + "" + "(INVALID)" if hoomd.version.gpu_enabled: @@ -547,15 +554,15 @@ def read_only(self): return self._buffer.read_only try: - if os.environ.get('_HOOMD_DISABLE_CUPY_') is not None: + if os.environ.get("_HOOMD_DISABLE_CUPY_") is not None: raise ImportError import cupy except ImportError: _wrap_gpu_array_list = [] - class HOOMDGPUArray(_HOOMDGPUArrayBase, - metaclass=_wrap_class_factory(_wrap_gpu_array_list) - ): + class HOOMDGPUArray( + _HOOMDGPUArrayBase, metaclass=_wrap_class_factory(_wrap_gpu_array_list) + ): """Zero copy access to HOOMD data on the GPU.""" def __len__(self): @@ -566,19 +573,20 @@ def __len__(self): def shape(self): """tuple: Array shape.""" protocol = self._buffer.__cuda_array_interface__ - return protocol['shape'] + return protocol["shape"] @shape.setter def shape(self, value): - raise HOOMDArrayError("Shape cannot be set on a {}. Use " - "``array.reshape`` instead.".format( - self.__class__.__name__)) + raise HOOMDArrayError( + "Shape cannot be set on a {}. 
Use " + "``array.reshape`` instead.".format(self.__class__.__name__) + ) @property def strides(self): """tuple: Array strides.""" protocol = self._buffer.__cuda_array_interface__ - return protocol['strides'] + return protocol["strides"] @property def ndim(self): @@ -589,14 +597,20 @@ def ndim(self): def dtype(self): """Data type.""" protocol = self._buffer.__cuda_array_interface__ - return protocol['typestr'] + return protocol["typestr"] def __str__(self): """str: Convert array to a string.""" name = self.__class__ if self._callback(): - return name + "(shape=(" + str(self.shape) \ - + "), dtype=(" + str(self.dtype) + "))" + return ( + name + + "(shape=(" + + str(self.shape) + + "), dtype=(" + + str(self.dtype) + + "))" + ) else: return name + "(INVALID)" @@ -604,8 +618,14 @@ def __repr__(self): """str: Convert array to an string that can be evaluated.""" name = self.__class__.__name__ if self._callback(): - return name + "(shape=(" + str(self.shape) \ - + "), dtype=(" + str(self.dtype) + "))" + return ( + name + + "(shape=(" + + str(self.shape) + + "), dtype=(" + + str(self.dtype) + + "))" + ) else: return name + "(INVALID)" @@ -613,26 +633,41 @@ def _repr_html_(self): """str: Format the array in HTML.""" name = self.__class__.__name__ if self._callback(): - return "" + name + "" + "(shape=(" \ - + str(self.shape) + "), dtype=(" \ - + str(self.dtype) + "))" + return ( + "" + + name + + "" + + "(shape=(" + + str(self.shape) + + "), dtype=(" + + str(self.dtype) + + "))" + ) else: return "" + name + "(INVALID)" else: - _cupy_ndarray_magic_safe_ = ([ - item for item in _ndarray_magic_safe_[0] if item not in - {'__copy__', '__setstate__', '__contains__', '__setitem__'} - ], _ndarray_magic_safe_[1]) + _cupy_ndarray_magic_safe_ = ( + [ + item + for item in _ndarray_magic_safe_[0] + if item + not in {"__copy__", "__setstate__", "__contains__", "__setitem__"} + ], + _ndarray_magic_safe_[1], + ) _wrap_gpu_array_list = [ - _ndarray_iops_, _cupy_ndarray_magic_safe_, _ndarray_std_funcs_, - _ndarray_disallow_funcs_, _ndarray_properties_, - _ndarray_disallow_properties_ + _ndarray_iops_, + _cupy_ndarray_magic_safe_, + _ndarray_std_funcs_, + _ndarray_disallow_funcs_, + _ndarray_properties_, + _ndarray_disallow_properties_, ] - _GPUArrayMeta = _wrap_class_factory(_wrap_gpu_array_list, - allow_exceptions=True, - cls=cupy.ndarray) + _GPUArrayMeta = _wrap_class_factory( + _wrap_gpu_array_list, allow_exceptions=True, cls=cupy.ndarray + ) class HOOMDGPUArray(_HOOMDGPUArrayBase, metaclass=_GPUArrayMeta): """Zero copy access to HOOMD data on the GPU.""" @@ -662,9 +697,10 @@ def shape(self): @shape.setter def shape(self, value): - raise HOOMDArrayError("Shape cannot be set on a {}. Use " - "``array.reshape`` instead.".format( - self.__class__.__name__)) + raise HOOMDArrayError( + "Shape cannot be set on a {}. Use " + "``array.reshape`` instead.".format(self.__class__.__name__) + ) def _coerce_to_ndarray(self): """Provide a `cupy.ndarray` interface to the underlying buffer. @@ -678,14 +714,14 @@ def _coerce_to_ndarray(self): raise HOOMDArrayError( "Cannot access {} outside context manager. 
Use " "cupy.array(obj, copy=True) inside context manager " - "instead.".format(self.__class__.__name__)) + "instead.".format(self.__class__.__name__) + ) def __str__(self): """str: Convert array to a string.""" name = self.__class__.__name__ if self._callback(): - return name + "(" \ - + str(self._coerce_to_ndarray()) + ")" + return name + "(" + str(self._coerce_to_ndarray()) + ")" else: return name + "(INVALID)" @@ -693,8 +729,7 @@ def __repr__(self): """str: Convert array to an string that can be evaluated.""" name = self.__class__.__name__ if self._callback(): - return name + "(" + str(self._coerce_to_ndarray()) \ - + ")" + return name + "(" + str(self._coerce_to_ndarray()) + ")" else: return name + "(INVALID)" @@ -702,16 +737,22 @@ def _repr_html_(self): """str: Format the array in HTML.""" name = self.__class__.__name__ if self._callback(): - return "" + name + "" \ - + "(" + str(self._coerce_to_ndarray()) + ")" + return ( + "" + + name + + "" + + "(" + + str(self._coerce_to_ndarray()) + + ")" + ) else: - return "" + name + "" \ - + "(INVALID)" + return "" + name + "" + "(INVALID)" else: from hoomd.error import _NoGPU class HOOMDGPUArray(_NoGPU): """GPU arrays are not available on the CPU.""" + pass diff --git a/hoomd/data/collections.py b/hoomd/data/collections.py index 5dfceec576..16608ba674 100644 --- a/hoomd/data/collections.py +++ b/hoomd/data/collections.py @@ -273,11 +273,13 @@ def _isolate(self): self._isolated = True def _to_hoomd_data(self, schema, data): - validated_value = _to_hoomd_data(root=self, - schema=schema, - parent=self._parent, - identity=self._identity, - data=data) + validated_value = _to_hoomd_data( + root=self, + schema=schema, + parent=self._parent, + identity=self._identity, + data=data, + ) if isinstance(validated_value, _HOOMDSyncedCollection): if self._isolated: @@ -562,8 +564,7 @@ def count(self, obj): return self._data.count(obj) def _update(self, new_value): - if (not isinstance(new_value, abc.Sequence) - or len(new_value) != len(self._data)): + if not isinstance(new_value, abc.Sequence) or len(new_value) != len(self._data): self._isolate() warnings.warn(hoomd.error.IsolationWarning()) return False @@ -592,20 +593,20 @@ def _to_hoomd_data(root, schema, parent=None, identity=None, data=None): # remains a ndarray and not a list when the validation is for an array. In # addition, this would error if we allowed the MutableSequence conditional # to execute. 
- if (isinstance(data, np.ndarray) - and isinstance(schema, _typeconverter.NDArrayValidator)): + if isinstance(data, np.ndarray) and isinstance( + schema, _typeconverter.NDArrayValidator + ): return data if isinstance(data, abc.MutableMapping): - spec = _find_structural_validator(schema, - _typeconverter.TypeConverterMapping) + spec = _find_structural_validator(schema, _typeconverter.TypeConverterMapping) return _HOOMDDict(root, spec, parent, identity, data) if isinstance(data, abc.MutableSequence): - spec = _find_structural_validator(schema, - _typeconverter.TypeConverterSequence) + spec = _find_structural_validator(schema, _typeconverter.TypeConverterSequence) return _HOOMDList(root, spec.converter, parent, identity, data) if not isinstance(data, str) and isinstance(data, abc.Sequence): spec = _find_structural_validator( - schema, _typeconverter.TypeConverterFixedLengthSequence) + schema, _typeconverter.TypeConverterFixedLengthSequence + ) return _HOOMDTuple(root, spec, parent, identity, data) return data @@ -617,10 +618,7 @@ def _to_base(collection): # Suspending reading and writing will also prevent isolation warnings. with collection._suspend_read_and_write: if isinstance(collection, _HOOMDDict): - return { - key: _to_base(value) - for key, value in collection._data.items() - } + return {key: _to_base(value) for key, value in collection._data.items()} if isinstance(collection, _HOOMDList): return [_to_base(value) for value in collection._data] if isinstance(collection, _HOOMDTuple): diff --git a/hoomd/data/local_access.py b/hoomd/data/local_access.py index ed5c1d85e4..b4e2c747a0 100644 --- a/hoomd/data/local_access.py +++ b/hoomd/data/local_access.py @@ -9,8 +9,8 @@ class _LocalAccess(ABC): - __slots__ = ('_accessed_fields', '_cpp_obj', '_entered') - _global_fields = {'rtag': 'getRTags'} + __slots__ = ("_accessed_fields", "_cpp_obj", "_entered") + _global_fields = {"rtag": "getRTags"} @property @abstractmethod @@ -36,11 +36,11 @@ def __getattr__(self, attr): if raw_attr in self._fields: buff = getattr(self._cpp_obj, self._fields[raw_attr])(flag) else: - raise AttributeError("{} object has no attribute {}".format( - type(self), attr)) + raise AttributeError( + "{} object has no attribute {}".format(type(self), attr) + ) - self._accessed_fields[attr] = arr = self._array_cls( - buff, lambda: self._entered) + self._accessed_fields[attr] = arr = self._array_cls(buff, lambda: self._entered) return arr def _get_raw_attr_and_flag(self, attr): @@ -48,8 +48,10 @@ def _get_raw_attr_and_flag(self, attr): with_ghosts = attr.endswith("_with_ghost") raw_attr = attr.replace("_with_ghost", "").replace("ghost_", "") if ghosts_only and with_ghosts: - raise ValueError("Attribute cannot be both prefixed with ghost_ " - "and suffixed with _with_ghost") + raise ValueError( + "Attribute cannot be both prefixed with ghost_ " + "and suffixed with _with_ghost" + ) elif ghosts_only: return raw_attr, _hoomd.GhostDataFlag.ghost elif with_ghosts: @@ -64,8 +66,9 @@ def __setattr__(self, attr, value): try: arr = getattr(self, attr) except AttributeError: - raise AttributeError("{} object has no attribute {}.".format( - self.__class__, attr)) + raise AttributeError( + "{} object has no attribute {}.".format(self.__class__, attr) + ) else: if arr.read_only: raise RuntimeError("Attribute {} is not settable.".format(attr)) @@ -152,24 +155,24 @@ def _cpp_cls(self): pass _fields = { - 'position': 'getPosition', - 'typeid': 'getTypes', - 'velocity': 'getVelocities', - 'mass': 'getMasses', - 'acceleration': 
'getAcceleration', - 'orientation': 'getOrientation', - 'angmom': 'getAngularMomentum', - 'moment_inertia': 'getMomentsOfInertia', - 'charge': 'getCharge', - 'diameter': 'getDiameter', - 'image': 'getImages', - 'tag': 'getTags', - 'rtag': 'getRTags', - 'body': 'getBodies', - 'net_force': 'getNetForce', - 'net_torque': 'getNetTorque', - 'net_virial': 'getNetVirial', - 'net_energy': 'getNetEnergy' + "position": "getPosition", + "typeid": "getTypes", + "velocity": "getVelocities", + "mass": "getMasses", + "acceleration": "getAcceleration", + "orientation": "getOrientation", + "angmom": "getAngularMomentum", + "moment_inertia": "getMomentsOfInertia", + "charge": "getCharge", + "diameter": "getDiameter", + "image": "getImages", + "tag": "getTags", + "rtag": "getRTags", + "body": "getBodies", + "net_force": "getNetForce", + "net_torque": "getNetTorque", + "net_virial": "getNetVirial", + "net_energy": "getNetEnergy", } def __init__(self, state): @@ -178,7 +181,6 @@ def __init__(self, state): class _GroupLocalAccess(_LocalAccess): - @property @abstractmethod def _cpp_cls(self): @@ -190,16 +192,17 @@ def _cpp_get_data_method_name(self): pass _fields = { - 'typeid': 'getTypeVal', - 'group': 'getMembers', - 'tag': 'getTags', - 'rtag': 'getRTags' + "typeid": "getTypeVal", + "group": "getMembers", + "tag": "getTags", + "rtag": "getRTags", } def __init__(self, state): super().__init__() self._cpp_obj = self._cpp_cls( - getattr(state._cpp_sys_def, self._cpp_get_data_method_name)()) + getattr(state._cpp_sys_def, self._cpp_get_data_method_name)() + ) class BondLocalAccessBase(_GroupLocalAccess): @@ -224,6 +227,7 @@ class BondLocalAccessBase(_GroupLocalAccess): ``i = bonds.rtag[tag]`` is the array index holding that bond. """ + _cpp_get_data_method_name = "getBondData" @@ -248,6 +252,7 @@ class AngleLocalAccessBase(_GroupLocalAccess): The angle reverse tags. For a given angle tag ``tag``, ``i = angles.rtag[tag]`` is the array index holding that angle. """ + _cpp_get_data_method_name = "getAngleData" @@ -272,6 +277,7 @@ class DihedralLocalAccessBase(_GroupLocalAccess): The dihedral reverse tags. For a given dihedral tag ``tag``, ``i = dihedrals.rtag[tag]`` is the array index holding that dihedral. """ + _cpp_get_data_method_name = "getDihedralData" @@ -296,6 +302,7 @@ class ImproperLocalAccessBase(_GroupLocalAccess): The improper reverse tags. For a given improper tag ``tag``, ``i = impropers.rtag[tag]`` is the array index holding that improper. """ + _cpp_get_data_method_name = "getImproperData" @@ -321,11 +328,12 @@ class ConstraintLocalAccessBase(_GroupLocalAccess): ``i = constraints.rtag[tag]`` is the array index holding that constraint. """ + _fields = { - 'value': 'getTypeVal', - 'group': 'getMembers', - 'tag': 'getTags', - 'rtag': 'getRTags' + "value": "getTypeVal", + "group": "getMembers", + "tag": "getTags", + "rtag": "getRTags", } _cpp_get_data_method_name = "getConstraintData" @@ -352,11 +360,11 @@ class PairLocalAccessBase(_GroupLocalAccess): ``tag``, ``i = pairs.rtag[tag]`` is the array index holding that special pair. 
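For context on the ``_fields`` tables requoted above, a hedged sketch of how those names surface through the local snapshot interface; the ``ghost_`` prefix and ``_with_ghost`` suffix follow ``_get_raw_attr_and_flag``.

import hoomd

simulation = hoomd.util.make_example_simulation()
with simulation.state.cpu_local_snapshot as snapshot:
    snapshot.particles.position  # owned particles, backed by getPosition
    snapshot.particles.ghost_velocity  # ghost particles only
    snapshot.particles.net_force_with_ghost  # owned and ghost particles
    snapshot.bonds.group  # bond member tags, backed by getMembers
    snapshot.bonds.typeid  # bond type ids, backed by getTypeVal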
""" + _cpp_get_data_method_name = "getPairData" class _LocalSnapshot: - def __init__(self, state): self._state = state self._box = state.box diff --git a/hoomd/data/local_access_cpu.py b/hoomd/data/local_access_cpu.py index 3e603ed51e..f88e2680c4 100644 --- a/hoomd/data/local_access_cpu.py +++ b/hoomd/data/local_access_cpu.py @@ -3,54 +3,65 @@ """Implement local access classes for the CPU.""" -from hoomd.data.local_access import (ParticleLocalAccessBase, - BondLocalAccessBase, AngleLocalAccessBase, - DihedralLocalAccessBase, - ImproperLocalAccessBase, - ConstraintLocalAccessBase, - PairLocalAccessBase, _LocalSnapshot) +from hoomd.data.local_access import ( + ParticleLocalAccessBase, + BondLocalAccessBase, + AngleLocalAccessBase, + DihedralLocalAccessBase, + ImproperLocalAccessBase, + ConstraintLocalAccessBase, + PairLocalAccessBase, + _LocalSnapshot, +) from hoomd.data.array import HOOMDArray from hoomd import _hoomd class ParticleLocalAccessCPU(ParticleLocalAccessBase): """Access particle data on the CPU.""" + _cpp_cls = _hoomd.LocalParticleDataHost _array_cls = HOOMDArray class BondLocalAccessCPU(BondLocalAccessBase): """Access bond data on the GPU.""" + _cpp_cls = _hoomd.LocalBondDataHost _array_cls = HOOMDArray class AngleLocalAccessCPU(AngleLocalAccessBase): """Access angle data on the GPU.""" + _cpp_cls = _hoomd.LocalAngleDataHost _array_cls = HOOMDArray class DihedralLocalAccessCPU(DihedralLocalAccessBase): """Access dihedral data on the GPU.""" + _cpp_cls = _hoomd.LocalDihedralDataHost _array_cls = HOOMDArray class ImproperLocalAccessCPU(ImproperLocalAccessBase): """Access improper data on the GPU.""" + _cpp_cls = _hoomd.LocalImproperDataHost _array_cls = HOOMDArray class ConstraintLocalAccessCPU(ConstraintLocalAccessBase): """Access constraint data on the GPU.""" + _cpp_cls = _hoomd.LocalConstraintDataHost _array_cls = HOOMDArray class PairLocalAccessCPU(PairLocalAccessBase): """Access special pair data on the GPU.""" + _cpp_cls = _hoomd.LocalPairDataHost _array_cls = HOOMDArray diff --git a/hoomd/data/local_access_gpu.py b/hoomd/data/local_access_gpu.py index c524364d4c..9c113d8b58 100644 --- a/hoomd/data/local_access_gpu.py +++ b/hoomd/data/local_access_gpu.py @@ -5,9 +5,15 @@ from hoomd import _hoomd from hoomd.data.local_access import ( - ParticleLocalAccessBase, BondLocalAccessBase, ConstraintLocalAccessBase, - DihedralLocalAccessBase, AngleLocalAccessBase, ImproperLocalAccessBase, - PairLocalAccessBase, _LocalSnapshot) + ParticleLocalAccessBase, + BondLocalAccessBase, + ConstraintLocalAccessBase, + DihedralLocalAccessBase, + AngleLocalAccessBase, + ImproperLocalAccessBase, + PairLocalAccessBase, + _LocalSnapshot, +) from hoomd.data.array import HOOMDGPUArray import hoomd @@ -16,36 +22,43 @@ class ParticleLocalAccessGPU(ParticleLocalAccessBase): """Access particle data on the GPU.""" + _cpp_cls = _hoomd.LocalParticleDataDevice _array_cls = HOOMDGPUArray class BondLocalAccessGPU(BondLocalAccessBase): """Access bond data on the GPU.""" + _cpp_cls = _hoomd.LocalBondDataDevice _array_cls = HOOMDGPUArray class AngleLocalAccessGPU(AngleLocalAccessBase): """Access angle data on the GPU.""" + _cpp_cls = _hoomd.LocalAngleDataDevice _array_cls = HOOMDGPUArray class DihedralLocalAccessGPU(DihedralLocalAccessBase): """Access dihedral data on the GPU.""" + _cpp_cls = _hoomd.LocalDihedralDataDevice _array_cls = HOOMDGPUArray class ImproperLocalAccessGPU(ImproperLocalAccessBase): """Access improper data on the GPU.""" + _cpp_cls = _hoomd.LocalImproperDataDevice _array_cls = HOOMDGPUArray 
class ConstraintLocalAccessGPU(ConstraintLocalAccessBase): """Access constraint data on the GPU.""" + _cpp_cls = _hoomd.LocalConstraintDataDevice _array_cls = HOOMDGPUArray class PairLocalAccessGPU(PairLocalAccessBase): """Access special pair data on the GPU.""" + _cpp_cls = _hoomd.LocalPairDataDevice _array_cls = HOOMDGPUArray @@ -67,34 +80,42 @@ def __init__(self, state): class BondLocalAccessGPU(_NoGPU): """GPU data access is not available in CPU builds.""" + pass class AngleLocalAccessGPU(_NoGPU): """GPU data access is not available in CPU builds.""" + pass class DihedralLocalAccessGPU(_NoGPU): """GPU data access is not available in CPU builds.""" + pass class ImproperLocalAccessGPU(_NoGPU): """GPU data access is not available in CPU builds.""" + pass class ConstraintLocalAccessGPU(_NoGPU): """GPU data access is not available in CPU builds.""" + pass class PairLocalAccessGPU(_NoGPU): """GPU data access is not available in CPU builds.""" + pass class ParticleLocalAccessGPU(_NoGPU): """GPU data access is not available in CPU builds.""" + pass class LocalSnapshotGPU(_NoGPU, _LocalSnapshot): """GPU data access is not available in CPU builds.""" + pass diff --git a/hoomd/data/parameterdicts.py b/hoomd/data/parameterdicts.py index 2c01ff8096..37d75716d6 100644 --- a/hoomd/data/parameterdicts.py +++ b/hoomd/data/parameterdicts.py @@ -10,14 +10,22 @@ import numpy as np -from hoomd.data.collections import (_HOOMDSyncedCollection, _to_hoomd_data, - _to_base) +from hoomd.data.collections import _HOOMDSyncedCollection, _to_hoomd_data, _to_base from hoomd.error import MutabilityError, TypeConversionError from hoomd.util import _to_camel_case, _is_iterable -from hoomd.data.typeconverter import (to_type_converter, RequiredArg, - TypeConverterMapping, OnlyIf, Either) -from hoomd.data.smart_default import (_to_base_defaults, _to_default, - _SmartDefault, _NoDefault) +from hoomd.data.typeconverter import ( + to_type_converter, + RequiredArg, + TypeConverterMapping, + OnlyIf, + Either, +) +from hoomd.data.smart_default import ( + _to_base_defaults, + _to_default, + _SmartDefault, + _NoDefault, +) from hoomd.error import IncompleteSpecificationError @@ -72,7 +80,6 @@ def _raise_error_with_context(context): class _SmartTypeIndexer: - def __init__(self, len_key, valid_types=None): self.len_key = len_key self._valid_types = valid_types @@ -84,8 +91,7 @@ def __call__(self, key): else: for k in self.raw_yield(key): if not self.are_valid_types(k): - raise KeyError( - f"Key {k} from key {key} is not of valid types.") + raise KeyError(f"Key {k} from key {key} is not of valid types.") yield k def raw_yield(self, key): @@ -133,10 +139,7 @@ def validate_and_split_len(self, key): iterable of type strings. 
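The GPU access classes above mirror the CPU ones; a hedged usage sketch (the snapshot only exists on GPU devices, hence the guard):

import hoomd

simulation = hoomd.util.make_example_simulation()
if isinstance(simulation.device, hoomd.device.GPU):
    with simulation.state.gpu_local_snapshot as snapshot:
        velocity = snapshot.particles.velocity  # HOOMDGPUArray
        # In-place operations on this array are backed by cupy when it is
        # installed; otherwise only buffer metadata (shape, dtype) is exposed.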
""" if isinstance(key, tuple) and len(key) == self.len_key: - if any([ - not _is_key_iterable(v) and not isinstance(v, str) - for v in key - ]): + if any([not _is_key_iterable(v) and not isinstance(v, str) for v in key]): raise KeyError("The key {} is not valid.".format(key)) # convert str to single item list for proper enumeration using # product @@ -172,9 +175,12 @@ def yield_all_keys(self): elif self.len_key == 1: yield from self._valid_types elif isinstance(self._valid_types, set): - yield from (tuple(sorted(key)) - for key in combinations_with_replacement( - self._valid_types, self.len_key)) + yield from ( + tuple(sorted(key)) + for key in combinations_with_replacement( + self._valid_types, self.len_key + ) + ) class _ValidatedDefaultDict(MutableMapping): @@ -208,14 +214,17 @@ class _ValidatedDefaultDict(MutableMapping): """ def _set_validation_and_defaults(self, *args, **kwargs): - defaults = kwargs.pop('_defaults', _NoDefault) + defaults = kwargs.pop("_defaults", _NoDefault) if len(kwargs) != 0 and len(args) != 0: - raise ValueError("Positional argument(s) and keyword argument(s) " - "cannot both be specified.") + raise ValueError( + "Positional argument(s) and keyword argument(s) " + "cannot both be specified." + ) if len(kwargs) == 0 and len(args) == 0: - raise ValueError("Either a positional or keyword " - "argument must be specified.") + raise ValueError( + "Either a positional or keyword " "argument must be specified." + ) if len(args) > 1: raise ValueError("Only one positional argument allowed.") @@ -245,8 +254,7 @@ def __setitem__(self, keys, item): try: validated_value = self._validate_values(item) except ValueError as err: - raise TypeConversionError( - f"For types {list(keys)}: {err!s}.") from err + raise TypeConversionError(f"For types {list(keys)}: {err!s}.") from err for key in keys: self._single_setitem(key, validated_value) @@ -322,16 +330,21 @@ def _validate_values(self, value): expected_keys = set(self._type_converter.cond.keys()) elif isinstance(self._type_converter, Either): mapping = next( - filter(lambda x: isinstance(x, TypeConverterMapping), - self._type_converter.specs)) + filter( + lambda x: isinstance(x, TypeConverterMapping), + self._type_converter.specs, + ) + ) expected_keys = set(mapping.keys()) else: # the code shouldn't reach here so raise an error. raise ValueError("Couid not identify specification.") bad_keys = set(validated_value.keys()) - expected_keys if len(bad_keys) != 0: - raise KeyError("Keys must be a subset of available keys. " - "Bad keys are {}".format(bad_keys)) + raise KeyError( + "Keys must be a subset of available keys. " + "Bad keys are {}".format(bad_keys) + ) # update validated_value with the default (specifically to add dict keys # that have defaults and were not manually specified). if isinstance(self._default, _SmartDefault): @@ -406,7 +419,6 @@ class to interface setting and getting type parameters with. """ def __init__(self, *args, len_keys, **kwargs): - # Validate proper key constraint if len_keys < 1 or len_keys != int(len_keys): raise ValueError("len_keys must be a positive integer.") @@ -429,17 +441,20 @@ def _single_getitem(self, key): # We always attempt to keep the _dict up to date with the C++ values, # and isolate existing components otherwise. 
validated_cpp_value = self._validate_values( - getattr(self._cpp_obj, self._getter)(key)) + getattr(self._cpp_obj, self._getter)(key) + ) if isinstance(self._dict[key], _HOOMDSyncedCollection): if self._dict[key]._update(validated_cpp_value): return self._dict[key] else: self._dict[key]._isolate() - self._dict[key] = _to_hoomd_data(root=self, - schema=self._type_converter, - data=validated_cpp_value, - parent=None, - identity=key) + self._dict[key] = _to_hoomd_data( + root=self, + schema=self._type_converter, + data=validated_cpp_value, + parent=None, + identity=key, + ) return self._dict[key] def _single_setitem(self, key, item): @@ -449,11 +464,9 @@ def _single_setitem(self, key, item): """ if isinstance(self._dict.get(key), _HOOMDSyncedCollection): self._dict[key]._isolate() - self._dict[key] = _to_hoomd_data(root=self, - schema=self._type_converter, - data=item, - parent=None, - identity=key) + self._dict[key] = _to_hoomd_data( + root=self, schema=self._type_converter, data=item, parent=None, identity=key + ) if not self._attached: return # We don't need to set the _dict yet since we will query C++ when @@ -558,7 +571,7 @@ def __getstate__(self): "_default": self._default, "_type_converter": self._type_converter, "_dict": dict_, - "_cpp_obj": None + "_cpp_obj": None, } @@ -643,10 +656,9 @@ def __setitem__(self, key, value): if self._attached: try: self._cpp_setting(key, validated_value) - except (AttributeError): + except AttributeError: raise MutabilityError(key) - if key in self._dict and isinstance(self._dict[key], - _HOOMDSyncedCollection): + if key in self._dict and isinstance(self._dict[key], _HOOMDSyncedCollection): self._dict[key]._isolate() self._dict[key] = self._to_hoomd_data(key, validated_value) @@ -684,7 +696,8 @@ def __delitem__(self, key): """Remove parameter by key.""" if self._attached: raise RuntimeError( - "Item deletion is not supported after calling Simulation.run()") + "Item deletion is not supported after calling Simulation.run()" + ) del self._type_converter[key] self._dict.pop(key, None) @@ -706,8 +719,9 @@ def __eq__(self, other): """Determine equality between ParameterDict objects.""" if not isinstance(other, ParameterDict): return NotImplemented - return (set(self.keys()) == set(other.keys()) - and np.all([np.all(self[key] == other[key]) for key in self])) + return set(self.keys()) == set(other.keys()) and np.all( + [np.all(self[key] == other[key]) for key in self] + ) def update(self, other): """Add keys and values to the dictionary.""" @@ -743,11 +757,13 @@ def _detach(self): self._cpp_obj = None def _to_hoomd_data(self, key, value): - return _to_hoomd_data(root=self, - schema=self._type_converter[key], - parent=None, - identity=key, - data=value) + return _to_hoomd_data( + root=self, + schema=self._type_converter[key], + parent=None, + identity=key, + data=value, + ) def _write(self, obj): if self._attached: @@ -773,7 +789,7 @@ def __getstate__(self): "_type_converter": self._type_converter, "_cpp_obj": None, "_setters": self._setters, - "_getters": self._getters + "_getters": self._getters, } def __repr__(self): diff --git a/hoomd/data/smart_default.py b/hoomd/data/smart_default.py index 329d522d83..66ac7ee7e9 100644 --- a/hoomd/data/smart_default.py +++ b/hoomd/data/smart_default.py @@ -16,7 +16,6 @@ class _NoDefault: class _SmartDefault(ABC): - @abstractmethod def __init__(self, *args, **kwargs): pass @@ -31,15 +30,12 @@ def to_base(self): class _SmartDefaultSequence(_SmartDefault): - def __init__(self, sequence, default): if _is_iterable(default): 
dft_iter = cycle(default) else: dft_iter = repeat(default) - self.default = [ - _to_default(item, dft) for item, dft in zip(sequence, dft_iter) - ] + self.default = [_to_default(item, dft) for item, dft in zip(sequence, dft_iter)] def __call__(self, sequence): if sequence is None: @@ -78,14 +74,14 @@ def to_base(self): class _SmartDefaultFixedLengthSequence(_SmartDefault): - def __init__(self, sequence, default): if _is_iterable(default): dft_iter = cycle(default) else: dft_iter = repeat(default) self.default = tuple( - [_to_default(item, dft) for item, dft in zip(sequence, dft_iter)]) + [_to_default(item, dft) for item, dft in zip(sequence, dft_iter)] + ) def __call__(self, sequence): if sequence is None: @@ -114,12 +110,10 @@ def to_base(self): class _SmartDefaultMapping(_SmartDefault): - def __init__(self, mapping, defaults): if defaults is _NoDefault: self.default = { - key: _to_default(value, _NoDefault) - for key, value in mapping.items() + key: _to_default(value, _NoDefault) for key, value in mapping.items() } else: self.default = { @@ -199,8 +193,7 @@ def _to_base_defaults(value, _defaults=_NoDefault): if isinstance(_defaults, Mapping): for key, dft in value.items(): sub_explicit_default = _defaults.get(key, _NoDefault) - new_default[key] = _to_base_defaults( - dft, sub_explicit_default) + new_default[key] = _to_base_defaults(dft, sub_explicit_default) else: return None else: diff --git a/hoomd/data/syncedlist.py b/hoomd/data/syncedlist.py index 1fd9a06a20..a49aa49df7 100644 --- a/hoomd/data/syncedlist.py +++ b/hoomd/data/syncedlist.py @@ -64,12 +64,14 @@ class SyncedList(MutableSequence): # Also guarantees that lists remain in same order when using the public API. - def __init__(self, - validation, - to_synced_list=None, - iterable=None, - callable_class=False, - attach_members=True): + def __init__( + self, + validation, + to_synced_list=None, + iterable=None, + callable_class=False, + attach_members=True, + ): self._attach_members = attach_members self._simulation_ = None if to_synced_list is None: @@ -103,8 +105,7 @@ def __setitem__(self, index, value): # If synced need to change cpp_list and detach operation before # changing python list if self._synced: - self._synced_list[index] = \ - self._to_synced_list_conversion(value) + self._synced_list[index] = self._to_synced_list_conversion(value) self._unregister_item(self._list[index]) self._list[index] = value @@ -148,8 +149,7 @@ def insert(self, index, value): else: index = self._handle_int(index) if self._synced: - self._synced_list.insert(index, - self._to_synced_list_conversion(value)) + self._synced_list.insert(index, self._to_synced_list_conversion(value)) self._list.insert(index, value) def _handle_int(self, integer): @@ -158,12 +158,13 @@ def _handle_int(self, integer): if -integer > len(self): raise IndexError( f"Negative index {integer} is too small for list of length " - f"{len(self)}") + f"{len(self)}" + ) return integer % max(1, len(self)) if integer >= len(self): raise IndexError( - f"Index {integer} is outside bounds of a length {len(self)}" - f"list.") + f"Index {integer} is outside bounds of a length {len(self)}" f"list." + ) return integer def _handle_index(self, index): @@ -191,8 +192,7 @@ def _register_item(self, value): return else: if value._attached: - raise RuntimeError( - f"Cannot place {value} into two simulations.") + raise RuntimeError(f"Cannot place {value} into two simulations.") def _unregister_item(self, value): """Detaches and/or removes value to simulation if attached. 
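SyncedList, whose indexing paths are touched above, is the container behind sequence attributes such as ``Integrator.forces`` and ``Operations.writers``; a hedged sketch of the list-like behavior (MD component assumed):

import hoomd

lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(buffer=0.4))
lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0)
integrator = hoomd.md.Integrator(dt=0.005)
integrator.forces.append(lj)  # validated now, mirrored to C++ once attached
force = integrator.forces[-1]  # negative indices go through _handle_int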
@@ -263,11 +263,10 @@ def _unsync(self): def __getstate__(self): """Get state for pickling.""" state = copy(self.__dict__) - state['_simulation_'] = None - state.pop('_synced_list', None) + state["_simulation_"] = None + state.pop("_synced_list", None) return state def __eq__(self, other): """Test for equality.""" - return (len(self) == len(other) - and all(a == b for a, b in zip(self, other))) + return len(self) == len(other) and all(a == b for a, b in zip(self, other)) diff --git a/hoomd/data/typeconverter.py b/hoomd/data/typeconverter.py index b0c6eef9f7..618d8408e1 100644 --- a/hoomd/data/typeconverter.py +++ b/hoomd/data/typeconverter.py @@ -17,6 +17,7 @@ class RequiredArg: """Define a parameter as required.""" + pass @@ -59,8 +60,10 @@ def box_preprocessing(box): try: return hoomd.Box.from_box(box) except Exception: - raise ValueError(f"{box} is not convertible into a hoomd.Box object" - f". using hoomd.Box.from_box") + raise ValueError( + f"{box} is not convertible into a hoomd.Box object" + f". using hoomd.Box.from_box" + ) def box_variant_preprocessing(input): @@ -75,8 +78,10 @@ def box_variant_preprocessing(input): try: return hoomd.variant.box.Constant(box_preprocessing(input)) except Exception: - raise ValueError(f"{input} is not convertible into a " - f"hoomd.variant.box.BoxVariant object.") + raise ValueError( + f"{input} is not convertible into a " + f"hoomd.variant.box.BoxVariant object." + ) def positive_real(number): @@ -84,8 +89,7 @@ def positive_real(number): try: float_number = float(number) except Exception as err: - raise TypeConversionError( - f"{number} not convertible to float.") from err + raise TypeConversionError(f"{number} not convertible to float.") from err if float_number <= 0: raise TypeConversionError("Expected a number greater than zero.") return float_number @@ -96,8 +100,7 @@ def nonnegative_real(number): try: float_number = float(number) except Exception as err: - raise TypeConversionError( - f"{number} not convertible to float.") from err + raise TypeConversionError(f"{number} not convertible to float.") from err if float_number < 0: raise TypeConversionError("Expected a nonnegative real number.") return float_number @@ -135,8 +138,7 @@ def __call__(self, value): except Exception as err: if isinstance(err, TypeConversionError): raise err - raise TypeConversionError( - f"Error raised in conversion: {err!s}") from err + raise TypeConversionError(f"Error raised in conversion: {err!s}") from err @abstractmethod def _validate(self, value): @@ -164,7 +166,10 @@ class Either(_HelpValidate): Example:: - e = Either(to_type_converter((float,) * 6), to_type_converter(float)) + e = Either( + to_type_converter((float,) * 6), + to_type_converter(float), + ) would allow either value to pass. """ @@ -179,8 +184,10 @@ def _validate(self, value): return spec(value) except Exception: continue - raise ValueError(f"value {value} not converible using " - f"{[str(spec) for spec in self.specs]}") + raise ValueError( + f"value {value} not converible using " + f"{[str(spec) for spec in self.specs]}" + ) def __str__(self): """str: String representation of the validator.""" @@ -194,11 +201,7 @@ class OnlyIf(_HelpValidate): pre/post-processing and optionally allows None. 
""" - def __init__(self, - cond, - preprocess=None, - postprocess=None, - allow_none=False): + def __init__(self, cond, preprocess=None, postprocess=None, allow_none=False): super().__init__(preprocess, postprocess, allow_none) self.cond = cond @@ -220,13 +223,15 @@ class OnlyTypes(_HelpValidate): order of the ``types`` sequence. """ - def __init__(self, - *types, - disallow_types=None, - strict=False, - preprocess=None, - postprocess=None, - allow_none=False): + def __init__( + self, + *types, + disallow_types=None, + strict=False, + preprocess=None, + postprocess=None, + allow_none=False, + ): super().__init__(preprocess, postprocess, allow_none) # Handle if a class is passed rather than an iterable of classes self.types = types @@ -238,13 +243,13 @@ def __init__(self, def _validate(self, value): if isinstance(value, self.disallow_types): - raise TypeConversionError( - f"Value {value} cannot be of type {type(value)}") + raise TypeConversionError(f"Value {value} cannot be of type {type(value)}") if isinstance(value, self.types): return value elif self.strict: raise ValueError( - f"Value {value} is not an instance of any of {self.types}.") + f"Value {value} is not an instance of any of {self.types}." + ) else: for type_ in self.types: try: @@ -253,7 +258,8 @@ def _validate(self, value): pass raise ValueError( f"Value {value} is not convertable into any of these types " - f"{self.types}") + f"{self.types}" + ) def __str__(self): """str: String representation of the validator.""" @@ -267,11 +273,7 @@ class OnlyFrom(_HelpValidate): that generator expressions are fine. """ - def __init__(self, - options, - preprocess=None, - postprocess=None, - allow_none=False): + def __init__(self, options, preprocess=None, postprocess=None, allow_none=False): super().__init__(preprocess, postprocess, allow_none) self.options = set(options) @@ -364,13 +366,15 @@ class NDArrayValidator(_HelpValidate): in. """ - def __init__(self, - dtype, - shape=(None,), - order="K", - preprocess=None, - postprocess=None, - allow_none=False): + def __init__( + self, + dtype, + shape=(None,), + order="K", + preprocess=None, + postprocess=None, + allow_none=False, + ): """Create a NDArrayValidator object.""" super().__init__(preprocess, postprocess, allow_none) self._dtype = dtype @@ -383,37 +387,34 @@ def _validate(self, arr): if len(typed_and_ordered.shape) != len(self._shape): raise ValueError( f"Expected array of {len(self._shape)} dimensions, but " - f"recieved array of {len(typed_and_ordered.shape)} dimensions.") + f"recieved array of {len(typed_and_ordered.shape)} dimensions." + ) for i, dim in enumerate(self._shape): if dim is not None: if typed_and_ordered.shape[i] != dim: raise ValueError( f"In dimension {i}, expected size {dim}, but got size " - f"{typed_and_ordered.shape[i]}") + f"{typed_and_ordered.shape[i]}" + ) return typed_and_ordered class _BaseConverter: """Get the base level (i.e. 
no deeper level exists) validator.""" + _conversion_func_dict = { - Variant: - OnlyTypes(Variant, preprocess=variant_preprocessing), - ParticleFilter: - OnlyTypes(ParticleFilter, CustomFilter, strict=True), - str: - OnlyTypes(str, strict=True), - Trigger: - OnlyTypes(Trigger, preprocess=trigger_preprocessing), - hoomd.Box: - OnlyTypes(hoomd.Box, preprocess=box_preprocessing), - hoomd.variant.box.BoxVariant: - OnlyTypes(hoomd.variant.box.BoxVariant, - preprocess=box_variant_preprocessing), + Variant: OnlyTypes(Variant, preprocess=variant_preprocessing), + ParticleFilter: OnlyTypes(ParticleFilter, CustomFilter, strict=True), + str: OnlyTypes(str, strict=True), + Trigger: OnlyTypes(Trigger, preprocess=trigger_preprocessing), + hoomd.Box: OnlyTypes(hoomd.Box, preprocess=box_preprocessing), + hoomd.variant.box.BoxVariant: OnlyTypes( + hoomd.variant.box.BoxVariant, preprocess=box_variant_preprocessing + ), # arrays default to float of one dimension of arbitrary length and # ordering - np.ndarray: - NDArrayValidator(float), + np.ndarray: NDArrayValidator(float), } @classmethod @@ -472,15 +473,15 @@ def _validate(self, sequence): if not _is_iterable(sequence): raise TypeConversionError( f"Expected a sequence like instance. Received {sequence} of " - f"type {type(sequence)}.") + f"type {type(sequence)}." + ) else: new_sequence = [] try: for i, v in enumerate(sequence): new_sequence.append(self.converter(v)) except (ValueError, TypeError) as err: - raise TypeConversionError( - f"In list item number {i}: {err!s}") from err + raise TypeConversionError(f"In list item number {i}: {err!s}") from err return new_sequence @@ -516,19 +517,20 @@ def _validate(self, sequence): if not _is_iterable(sequence): raise TypeConversionError( f"Expected a tuple like object. Received {sequence} of type " - f"{type(sequence)}.") + f"{type(sequence)}." + ) elif len(sequence) != len(self.converter): raise TypeConversionError( f"Expected exactly {len(self.converter)} items. Received " - f"{len(sequence)}.") + f"{len(sequence)}." + ) else: new_sequence = [] try: for i, (v, c) in enumerate(zip(sequence, self)): new_sequence.append(c(v)) except (ValueError, TypeError) as err: - raise TypeConversionError( - f"In tuple item number {i}: {err!s}") from err + raise TypeConversionError(f"In tuple item number {i}: {err!s}") from err return tuple(new_sequence) def __iter__(self): @@ -559,13 +561,13 @@ class TypeConverterMapping(TypeConverter, MutableMapping): Example:: - t = TypeConverterMapping({'str': str, 'list_of_floats': [float]}) + t = TypeConverterMapping({"str": str, "list_of_floats": [float]}) # valid - t({'str': 'hello'}) + t({"str": "hello"}) # invalid - t({'new_key': None}) + t({"new_key": None}) """ def __init__(self, mapping): @@ -578,7 +580,8 @@ def _validate(self, mapping): if not isinstance(mapping, Mapping): raise TypeConversionError( f"Expected a dict like value. Recieved {mapping} of type " - f"{type(mapping)}.") + f"{type(mapping)}." 
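The ``_conversion_func_dict`` entries above are why plain Python values are accepted where Variant or Trigger objects are expected; a hedged sketch (the GSD filename is illustrative only):

import hoomd

# An int passes trigger_preprocessing and is stored as
# hoomd.trigger.Periodic(10_000).
gsd_writer = hoomd.write.GSD(filename="trajectory.gsd", trigger=10_000)

# A float passes variant_preprocessing and is stored as
# hoomd.variant.Constant(1.5).
langevin = hoomd.md.methods.Langevin(filter=hoomd.filter.All(), kT=1.5)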
+ ) new_mapping = {} for key, value in mapping.items(): @@ -622,7 +625,8 @@ def to_type_converter(value): # list take a list of tuples of 3 floats each validation = to_type_converter( - {'str': str, 'list': [(float, float, float)]}) + {"str": str, "list": [(float, float, float)]} + ) """ if isinstance(value, tuple): return TypeConverterFixedLengthSequence(value) diff --git a/hoomd/data/typeparam.py b/hoomd/data/typeparam.py index 8b9f75c9cd..c514f32d0e 100644 --- a/hoomd/data/typeparam.py +++ b/hoomd/data/typeparam.py @@ -52,6 +52,7 @@ class TypeParameter(MutableMapping): lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(buffer=0.4)) langevin = hoomd.md.methods.Langevin(filter=hoomd.filter.All(), kT=1.0) """ + __slots__ = ("name", "param_dict", "type_kind") def __init__(self, name, type_kind, param_dict): @@ -66,8 +67,9 @@ def __getattr__(self, attr): try: return getattr(self.param_dict, attr) except AttributeError: - raise AttributeError("'{}' object has no attribute " - "'{}'".format(type(self), attr)) + raise AttributeError( + "'{}' object has no attribute " "'{}'".format(type(self), attr) + ) def __setitem__(self, key, value): """Set parameters for a given type (or type pair). @@ -80,7 +82,7 @@ def __setitem__(self, key, value): .. code-block:: python - langevin.gamma['A'] = 2.0 + langevin.gamma["A"] = 2.0 Set parameters for multiple types: @@ -88,7 +90,7 @@ def __setitem__(self, key, value): .. code-block:: python - langevin.gamma[['B', 'C']] = 3.0 + langevin.gamma[["B", "C"]] = 3.0 Set type pair parameters with a tuple of names: @@ -96,7 +98,7 @@ def __setitem__(self, key, value): .. code-block:: python - lj.params[('A', 'A')] = dict(epsilon=1.5, sigma=2.0) + lj.params[("A", "A")] = dict(epsilon=1.5, sigma=2.0) Set parameters for multiple pairs (e.g. ('A', 'B') and ('A', 'C')): @@ -104,7 +106,7 @@ def __setitem__(self, key, value): .. code-block:: python - lj.params[('A', ['B', 'C'])] = dict(epsilon=0, sigma=0) + lj.params[("A", ["B", "C"])] = dict(epsilon=0, sigma=0) Set parameters for multiple pairs (e.g. ('B', 'B'), ('B', 'C'), ('C', 'B'), and ('C', 'C')): @@ -113,7 +115,7 @@ def __setitem__(self, key, value): .. code-block:: python - lj.params[(['B', 'C'], ['B', 'C'])] = dict(epsilon=1, sigma=1) + lj.params[(["B", "C"], ["B", "C"])] = dict(epsilon=1, sigma=1) Note: Setting the value for *(a,b)* automatically sets the symmetric @@ -130,13 +132,13 @@ def __getitem__(self, key): .. code-block:: python - gamma_A = langevin.gamma['A'] + gamma_A = langevin.gamma["A"] .. skip: next if(not hoomd.version.md_built) .. code-block:: python - lj_epsilon_AB = lj.params[('A', 'B')]['epsilon'] + lj_epsilon_AB = lj.params[("A", "B")]["epsilon"] .. rubric:: Multiple keys @@ -147,7 +149,7 @@ def __getitem__(self, key): .. code-block:: python - gammas = langevin.gamma[['A', 'B']] + gammas = langevin.gamma[["A", "B"]] is equivalent to: @@ -155,7 +157,7 @@ def __getitem__(self, key): .. code-block:: python - gammas = {key: langevin.gamma[key] for key in ['A', 'B']} + gammas = {key: langevin.gamma[key] for key in ["A", "B"]} """ return self.param_dict[key] @@ -182,7 +184,7 @@ def get(self, key, default): .. code-block:: python - gamma_D = langevin.gamma.get('D', default=5.0) + gamma_D = langevin.gamma.get("D", default=5.0) """ return self.param_dict.get(key, default) @@ -201,7 +203,7 @@ def setdefault(self, key, default): .. 
code-block:: python - langevin.gamma.setdefault('D', default=5.0) + langevin.gamma.setdefault("D", default=5.0) """ self.param_dict.setdefault(key, default) @@ -214,9 +216,11 @@ def __eq__(self, other): langevin.gamma == lj.params """ - return self.name == other.name and \ - self.type_kind == other.type_kind and \ - self.param_dict == other.param_dict + return ( + self.name == other.name + and self.type_kind == other.type_kind + and self.param_dict == other.param_dict + ) @property def default(self): @@ -253,8 +257,7 @@ def default(self, value): self.param_dict.default = value def _attach(self, cpp_obj, state): - self.param_dict._attach(cpp_obj, self.name, - getattr(state, self.type_kind)) + self.param_dict._attach(cpp_obj, self.name, getattr(state, self.type_kind)) return self def _detach(self): diff --git a/hoomd/device.py b/hoomd/device.py index e6f53bca46..f662b6e046 100644 --- a/hoomd/device.py +++ b/hoomd/device.py @@ -74,7 +74,7 @@ def write(self, message): .. code-block:: python - notice_file.write('Message\\n') + notice_file.write("Message\\n") """ self._buff += message @@ -145,8 +145,9 @@ def __init__(self, communicator, notice_level, message_filename): self._comm = communicator # c++ messenger object - self._cpp_msg = _create_messenger(self.communicator.cpp_mpi_conf, - notice_level, message_filename) + self._cpp_msg = _create_messenger( + self.communicator.cpp_mpi_conf, notice_level, message_filename + ) # c++ execution configuration mirror class self._cpp_exec_conf = None @@ -198,7 +199,7 @@ def message_filename(self): .. code-block:: python - device.message_filename = str(path / 'messages.log') + device.message_filename = str(path / "messages.log") .. code-block:: python @@ -214,11 +215,11 @@ def message_filename(self): .. code-block:: python - communicator = hoomd.communicator.Communicator( - ranks_per_partition=2) - filename = f'messages.{communicator.partition}' - device = hoomd.device.CPU(communicator=communicator, - message_filename=filename) + communicator = hoomd.communicator.Communicator(ranks_per_partition=2) + filename = f"messages.{communicator.partition}" + device = hoomd.device.CPU( + communicator=communicator, message_filename=filename + ) """ return self._message_filename @@ -249,7 +250,7 @@ def notice(self, message, level=1): .. 
code-block:: python - device.notice('Message') + device.notice("Message") Hint: Use `notice` instead of `print` to write status messages and your @@ -346,7 +347,6 @@ def __init__( notice_level=2, gpu_id=None, ): - super().__init__(communicator, notice_level, message_filename) if gpu_id is None: @@ -354,8 +354,11 @@ def __init__( # convert None options to defaults self._cpp_exec_conf = _hoomd.ExecutionConfiguration( - _hoomd.ExecutionConfiguration.executionMode.GPU, gpu_id, - self.communicator.cpp_mpi_conf, self._cpp_msg) + _hoomd.ExecutionConfiguration.executionMode.GPU, + gpu_id, + self.communicator.cpp_mpi_conf, + self._cpp_msg, + ) @property def gpu_error_checking(self): @@ -475,12 +478,14 @@ def __init__( message_filename=None, notice_level=2, ): - super().__init__(communicator, notice_level, message_filename) self._cpp_exec_conf = _hoomd.ExecutionConfiguration( - _hoomd.ExecutionConfiguration.executionMode.CPU, -1, - self.communicator.cpp_mpi_conf, self._cpp_msg) + _hoomd.ExecutionConfiguration.executionMode.CPU, + -1, + self.communicator.cpp_mpi_conf, + self._cpp_msg, + ) def auto_select( @@ -517,9 +522,9 @@ def auto_select( __all__ = [ - 'CPU', - 'GPU', - 'Device', - 'NoticeFile', - 'auto_select', + "CPU", + "GPU", + "Device", + "NoticeFile", + "auto_select", ] diff --git a/hoomd/error.py b/hoomd/error.py index bac64037eb..0dfe8e1a20 100644 --- a/hoomd/error.py +++ b/hoomd/error.py @@ -16,8 +16,10 @@ def __init__(self, attribute_name): def __str__(self): """Returns the error message.""" - return (f'The attribute {self.attribute_name} is immutable after ' - 'simulation has been run.') + return ( + f"The attribute {self.attribute_name} is immutable after " + "simulation has been run." + ) class DataAccessError(RuntimeError): @@ -28,18 +30,22 @@ def __init__(self, data_name): def __str__(self): """Returns the error message.""" - return (f'The property {self.data_name} is not available until the ' - 'operation is added to a simulation AND `simulation.run` ' - 'has been called.') + return ( + f"The property {self.data_name} is not available until the " + "operation is added to a simulation AND `simulation.run` " + "has been called." + ) class TypeConversionError(ValueError): """Error when converting a parameter.""" + pass class GPUNotAvailableError(NotImplementedError): """Error for when a GPU specific feature was requested without a GPU.""" + pass @@ -47,22 +53,24 @@ class _NoGPU: """Used in nonGPU builds of hoomd to raise errors for attempted use.""" def __init__(self, *args, **kwargs): - raise GPUNotAvailableError( - "This build of HOOMD-blue does not support GPUs.") + raise GPUNotAvailableError("This build of HOOMD-blue does not support GPUs.") class MPINotAvailableError(NotImplementedError): """Error for when a feature is not implemented for MPI.""" + pass class IncompleteSpecificationError(ValueError): """Error when a value is missing.""" + pass class SimulationDefinitionError(RuntimeError): """Error in definition of simulation internal state.""" + pass @@ -71,18 +79,20 @@ class IsolationWarning(UserWarning): def __str__(self): """Returns the error message.""" - return ("The data structure is removed from its original data source, " - "and updates will no longer modify the previously composing " - "object. Call obj.to_base() to remove this warning.") + return ( + "The data structure is removed from its original data source, " + "and updates will no longer modify the previously composing " + "object. Call obj.to_base() to remove this warning." 
+ ) __all__ = [ - 'DataAccessError', - 'GPUNotAvailableError', - 'IncompleteSpecificationError', - 'IsolationWarning', - 'MPINotAvailableError', - 'MutabilityError', - 'SimulationDefinitionError', - 'TypeConversionError', + "DataAccessError", + "GPUNotAvailableError", + "IncompleteSpecificationError", + "IsolationWarning", + "MPINotAvailableError", + "MutabilityError", + "SimulationDefinitionError", + "TypeConversionError", ] diff --git a/hoomd/filter/__init__.py b/hoomd/filter/__init__.py index 4ed32dd805..503b0d8f23 100644 --- a/hoomd/filter/__init__.py +++ b/hoomd/filter/__init__.py @@ -42,15 +42,15 @@ """ __all__ = [ - 'All', - 'CustomFilter', - 'Intersection', - 'Null', - 'ParticleFilter', - 'Rigid', - 'SetDifference', - 'Tags', - 'Type', - 'Union', - 'filter_like', + "All", + "CustomFilter", + "Intersection", + "Null", + "ParticleFilter", + "Rigid", + "SetDifference", + "Tags", + "Type", + "Union", + "filter_like", ] diff --git a/hoomd/filter/custom.py b/hoomd/filter/custom.py index 267802fe8e..dbb3ef684a 100644 --- a/hoomd/filter/custom.py +++ b/hoomd/filter/custom.py @@ -7,6 +7,7 @@ simulation = hoomd.util.make_example_simulation() """ + from abc import abstractmethod from collections.abc import Hashable, Callable @@ -38,17 +39,21 @@ def __hash__(self): return hash((self.min_mass, self.max_mass)) def __eq__(self, other): - return (isinstance(other, MassRangeFilter) - and self.min_mass == other.min_mass - and self.max_mass == other.max_mass) + return ( + isinstance(other, MassRangeFilter) + and self.min_mass == other.min_mass + and self.max_mass == other.max_mass + ) def __call__(self, state): with state.cpu_local_snapshot as snap: masses = snap.particles.mass - indices = ((masses > self.min_mass) - & (masses < self.max_mass)) + indices = (masses > self.min_mass) & ( + masses < self.max_mass + ) return numpy.copy(snap.particles.tag[indices]) + mass_range_filter = MassRangeFilter(1.0, 5.0) print(mass_range_filter(simulation.state)) diff --git a/hoomd/filter/filter_.py b/hoomd/filter/filter_.py index 6ac1837a58..073660ca59 100644 --- a/hoomd/filter/filter_.py +++ b/hoomd/filter/filter_.py @@ -69,8 +69,9 @@ def __eq__(self, other): if filter == other: pass """ - raise NotImplementedError("Equality between {} is not defined.".format( - self.__class__)) + raise NotImplementedError( + "Equality between {} is not defined.".format(self.__class__) + ) def __str__(self): """Format a human readable string describing the filter. diff --git a/hoomd/filter/rigid.py b/hoomd/filter/rigid.py index 0951cbfbef..6916a36c5e 100644 --- a/hoomd/filter/rigid.py +++ b/hoomd/filter/rigid.py @@ -26,11 +26,11 @@ class Rigid(ParticleFilter, ParticleFilterRigid): .. code-block:: python - rigid_center_and_free = hoomd.filter.Rigid(flags=('center', 'free')) + rigid_center_and_free = hoomd.filter.Rigid(flags=("center", "free")) .. code-block:: python - rigid_center = hoomd.filter.Rigid(flags=('center',)) + rigid_center = hoomd.filter.Rigid(flags=("center",)) """ __doc__ += ParticleFilter._doc_inherited @@ -38,7 +38,8 @@ class Rigid(ParticleFilter, ParticleFilterRigid): def __init__(self, flags=("center",)): if not all(flag in {"center", "constituent", "free"} for flag in flags): raise ValueError( - "Only allowed flags are 'center', 'constituent', and 'free'.") + "Only allowed flags are 'center', 'constituent', and 'free'." 
+ ) ParticleFilter.__init__(self) ParticleFilterRigid.__init__(self, flags) self._flags = flags diff --git a/hoomd/filter/set_.py b/hoomd/filter/set_.py index c464824780..8e259fcb70 100644 --- a/hoomd/filter/set_.py +++ b/hoomd/filter/set_.py @@ -40,8 +40,9 @@ def __init__(self, f, g): ParticleFilter.__init__(self) if f == g: - raise ValueError("Cannot use same filter for {}" - "".format(self.__class__.__name__)) + raise ValueError( + "Cannot use same filter for {}" "".format(self.__class__.__name__) + ) else: self._f = f self._g = g @@ -54,20 +55,24 @@ def __hash__(self): def __eq__(self, other): if self._symmetric: - return type(self) is type(other) and \ - (self._f == other._f or self._f == other._g) and \ - (self._g == other._g or self._g == other._f) + return ( + type(self) is type(other) + and (self._f == other._f or self._f == other._g) + and (self._g == other._g or self._g == other._f) + ) else: - return type(self) is type(other) and \ - self._f == other._f and self._g == other._g + return ( + type(self) is type(other) + and self._f == other._f + and self._g == other._g + ) def __reduce__(self): """Enable (deep)copying and pickling of set based particle filters.""" return (type(self), (self._f, self._g)) -class SetDifference(_ParticleFilterSetOperations, - _hoomd.ParticleFilterSetDifference): +class SetDifference(_ParticleFilterSetOperations, _hoomd.ParticleFilterSetDifference): r"""Set difference operation. Args: @@ -83,7 +88,8 @@ class SetDifference(_ParticleFilterSetOperations, set_difference = hoomd.filter.SetDifference(filter1, filter2) """ - _cpp_cls_name = 'ParticleFilterSetDifference' + + _cpp_cls_name = "ParticleFilterSetDifference" _symmetric = False __doc__ += ParticleFilter._doc_inherited @@ -104,13 +110,13 @@ class Union(_ParticleFilterSetOperations, _hoomd.ParticleFilterUnion): union = hoomd.filter.Union(filter1, filter2) """ - _cpp_cls_name = 'ParticleFilterUnion' + + _cpp_cls_name = "ParticleFilterUnion" _symmetric = True __doc__ += ParticleFilter._doc_inherited -class Intersection(_ParticleFilterSetOperations, - _hoomd.ParticleFilterIntersection): +class Intersection(_ParticleFilterSetOperations, _hoomd.ParticleFilterIntersection): r"""Set intersection operation. Args: @@ -126,6 +132,7 @@ class Intersection(_ParticleFilterSetOperations, intersection = hoomd.filter.Intersection(filter1, filter2) """ - _cpp_cls_name = 'ParticleFilterIntersection' + + _cpp_cls_name = "ParticleFilterIntersection" _symmetric = True __doc__ += ParticleFilter._doc_inherited diff --git a/hoomd/filter/tags.py b/hoomd/filter/tags.py index 8866cc557c..35b06e1ec4 100644 --- a/hoomd/filter/tags.py +++ b/hoomd/filter/tags.py @@ -42,14 +42,13 @@ def __init__(self, tags): def __hash__(self): """Return a hash of the filter parameters.""" - if not hasattr(self, '_id'): + if not hasattr(self, "_id"): self._id = hash(self._tags.tobytes()) return self._id def __eq__(self, other): """Test for equality between two particle filters.""" - return type(self) is type(other) and np.array_equal( - self.tags, other.tags) + return type(self) is type(other) and np.array_equal(self.tags, other.tags) @property def tags(self): diff --git a/hoomd/filter/type_.py b/hoomd/filter/type_.py index 0cdeb58f04..7c8cc8f6c9 100644 --- a/hoomd/filter/type_.py +++ b/hoomd/filter/type_.py @@ -19,7 +19,7 @@ class Type(ParticleFilter, ParticleFilterType): .. 
code-block:: python - type_A_B = hoomd.filter.Type(['A', 'B']) + type_A_B = hoomd.filter.Type(["A", "B"]) {inherited} diff --git a/hoomd/hpmc/__init__.py b/hoomd/hpmc/__init__.py index 6a9f6603ca..a704ac5a76 100644 --- a/hoomd/hpmc/__init__.py +++ b/hoomd/hpmc/__init__.py @@ -33,12 +33,12 @@ from hoomd.hpmc import shape_move __all__ = [ - 'compute', - 'external', - 'integrate', - 'nec', - 'pair', - 'shape_move', - 'tune', - 'update', + "compute", + "external", + "integrate", + "nec", + "pair", + "shape_move", + "tune", + "update", ] diff --git a/hoomd/hpmc/compute.py b/hoomd/hpmc/compute.py index 027cf17d48..b1361aefdf 100644 --- a/hoomd/hpmc/compute.py +++ b/hoomd/hpmc/compute.py @@ -85,8 +85,9 @@ class FreeVolume(Compute): Examples:: - fv = hoomd.hpmc.compute.FreeVolume(test_particle_type='B', - num_samples=1000) + fv = hoomd.hpmc.compute.FreeVolume( + test_particle_type="B", num_samples=1000 + ) {inherited} @@ -108,8 +109,8 @@ def __init__(self, test_particle_type, num_samples): # store metadata param_dict = ParameterDict(test_particle_type=str, num_samples=int) param_dict.update( - dict(test_particle_type=test_particle_type, - num_samples=num_samples)) + dict(test_particle_type=test_particle_type, num_samples=num_samples) + ) self._param_dict.update(param_dict) def _attach_hook(self): @@ -121,16 +122,16 @@ def _attach_hook(self): integrator_name = integrator.__class__.__name__ try: if isinstance(self._simulation.device, hoomd.device.CPU): - cpp_cls = getattr(_hpmc, 'ComputeFreeVolume' + integrator_name) + cpp_cls = getattr(_hpmc, "ComputeFreeVolume" + integrator_name) else: - cpp_cls = getattr(_hpmc, - 'ComputeFreeVolume' + integrator_name + 'GPU') + cpp_cls = getattr(_hpmc, "ComputeFreeVolume" + integrator_name + "GPU") except AttributeError: raise RuntimeError("Unsupported integrator.") cl = _hoomd.CellList(self._simulation.state._cpp_sys_def) - self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, - integrator._cpp_obj, cl) + self._cpp_obj = cpp_cls( + self._simulation.state._cpp_sys_def, integrator._cpp_obj, cl + ) @log(requires_run=True) def free_volume(self): @@ -351,7 +352,7 @@ def _attach_hook(self): # Extract 'Shape' from '' integrator_name = integrator.__class__.__name__ - cpp_cls = getattr(_hpmc, 'ComputeSDF' + integrator_name) + cpp_cls = getattr(_hpmc, "ComputeSDF" + integrator_name) self._cpp_obj = cpp_cls( self._simulation.state._cpp_sys_def, @@ -360,7 +361,7 @@ def _attach_hook(self): self.dx, ) - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def sdf_compression(self): """(*N_bins*,) `numpy.ndarray` of `float`): :math:`s_\\mathrm{comp}[k]`\ - The scale distribution function for compression moves \ @@ -376,7 +377,7 @@ def sdf_compression(self): self._cpp_obj.compute(self._simulation.timestep) return self._cpp_obj.sdf_compression - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def sdf_expansion(self): """(*N_bins*,) `numpy.ndarray` of `float`): :math:`s_\\mathrm{exp}[k]` \ - The scale distribution function for the expansion moves \ @@ -397,7 +398,7 @@ def sdf_expansion(self): else: return None - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def x_compression(self): """(*N_bins*,) `numpy.ndarray` of `float`): The x \ values at the center of each bin corresponding to the scale \ @@ -410,7 +411,7 @@ def x_compression(self): x = numpy.arange(0, self._cpp_obj.num_bins, 1) * self.dx + self.dx / 2 return x - @log(category='sequence', 
requires_run=True) + @log(category="sequence", requires_run=True) def x_expansion(self): """(*N_bins*,) `numpy.ndarray` of `float`): The x \ values at the center of each bin corresponding to the scale \ @@ -461,8 +462,7 @@ def betaP(self): # noqa: N802 - allow function name # perform the fit and extrapolation p = numpy.polyfit(x_fit_compression, sdf_fit_compression, 5) p0_compression = numpy.polyval(p, 0.0) - compression_contribution = rho * p0_compression / (2 - * box.dimensions) + compression_contribution = rho * p0_compression / (2 * box.dimensions) # expansive contribution # perform the fit and extrapolation @@ -499,6 +499,6 @@ def P(self): # noqa: N802 - allow function name __all__ = [ - 'SDF', - 'FreeVolume', + "SDF", + "FreeVolume", ] diff --git a/hoomd/hpmc/external/__init__.py b/hoomd/hpmc/external/__init__.py index 816e69055d..9ec8929fde 100644 --- a/hoomd/hpmc/external/__init__.py +++ b/hoomd/hpmc/external/__init__.py @@ -14,8 +14,8 @@ from .wall import WallPotential __all__ = [ - 'External', - 'Harmonic', - 'Linear', - 'WallPotential', + "External", + "Harmonic", + "Linear", + "WallPotential", ] diff --git a/hoomd/hpmc/external/harmonic.py b/hoomd/hpmc/external/harmonic.py index 9db1c2490e..d17a6df3f3 100644 --- a/hoomd/hpmc/external/harmonic.py +++ b/hoomd/hpmc/external/harmonic.py @@ -12,7 +12,7 @@ from .external import External -@hoomd.logging.modify_namespace(('hpmc', 'external', 'Harmonic')) +@hoomd.logging.modify_namespace(("hpmc", "external", "Harmonic")) class Harmonic(External): r"""Restrain particle positions and orientations with harmonic springs. @@ -83,22 +83,26 @@ class Harmonic(External): __doc__ = __doc__.replace("{inherited}", External._doc_inherited) - def __init__(self, reference_positions, reference_orientations, - k_translational, k_rotational, symmetries): + def __init__( + self, + reference_positions, + reference_orientations, + k_translational, + k_rotational, + symmetries, + ): param_dict = ParameterDict( - reference_positions=NDArrayValidator(dtype=np.double, - shape=(None, 3)), - reference_orientations=NDArrayValidator(dtype=np.double, - shape=(None, 4)), + reference_positions=NDArrayValidator(dtype=np.double, shape=(None, 3)), + reference_orientations=NDArrayValidator(dtype=np.double, shape=(None, 4)), k_translational=hoomd.variant.Variant, k_rotational=hoomd.variant.Variant, symmetries=NDArrayValidator(dtype=np.double, shape=(None, 4)), ) - param_dict['k_translational'] = k_translational - param_dict['k_rotational'] = k_rotational - param_dict['reference_positions'] = reference_positions - param_dict['reference_orientations'] = reference_orientations - param_dict['symmetries'] = symmetries + param_dict["k_translational"] = k_translational + param_dict["k_rotational"] = k_rotational + param_dict["reference_positions"] = reference_positions + param_dict["reference_orientations"] = reference_orientations + param_dict["symmetries"] = symmetries self._param_dict.update(param_dict) def _make_cpp_obj(self): diff --git a/hoomd/hpmc/external/linear.py b/hoomd/hpmc/external/linear.py index c0488bdb36..077f537883 100644 --- a/hoomd/hpmc/external/linear.py +++ b/hoomd/hpmc/external/linear.py @@ -16,7 +16,7 @@ from .external import External -@hoomd.logging.modify_namespace(('hpmc', 'external', 'Linear')) +@hoomd.logging.modify_namespace(("hpmc", "external", "Linear")) class Linear(External): """Linear external potential (HPMC). @@ -46,7 +46,7 @@ class Linear(External): .. 
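A small self-contained sketch of the extrapolation ``betaP`` performs above; the values below are synthetic stand-ins for ``x_compression`` and ``sdf_compression``, not data from a run:

import numpy

x = numpy.arange(0, 200) * 1e-4 + 0.5e-4  # bin centers, like x_compression
s = 2.0 + 30.0 * x + 4.0e2 * x**2  # made-up compression SDF values
p = numpy.polyfit(x, s, 5)  # the same degree-5 fit used in betaP
s0 = numpy.polyval(p, 0.0)  # extrapolate s(x) to x -> 0+
# betaP then combines rho * s0 / (2 * box.dimensions) with the matching
# expansion-side term to form the pressure estimate.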
code-block:: python linear = hoomd.hpmc.external.Linear() - linear.alpha['A'] = 0.2 + linear.alpha["A"] = 0.2 simulation.operations.integrator.external_potentials = [linear] {inherited} @@ -75,19 +75,21 @@ class Linear(External): Type: (`float`, `float`, `float`) """ + _cpp_class_name = "ExternalPotentialLinear" __doc__ = __doc__.replace("{inherited}", External._doc_inherited) - def __init__(self, - default_alpha=None, - plane_origin=(0, 0, 0), - plane_normal=(0, 1, 0)): + def __init__( + self, default_alpha=None, plane_origin=(0, 0, 0), plane_normal=(0, 1, 0) + ): if default_alpha is not None: default_alpha = float(default_alpha) alpha = hoomd.data.typeparam.TypeParameter( - 'alpha', 'particle_types', - hoomd.data.parameterdicts.TypeParameterDict(float, len_keys=1)) + "alpha", + "particle_types", + hoomd.data.parameterdicts.TypeParameterDict(float, len_keys=1), + ) if default_alpha is not None: alpha.default = default_alpha @@ -95,9 +97,9 @@ def __init__(self, self._add_typeparam(alpha) self._param_dict.update( - hoomd.data.parameterdicts.ParameterDict(plane_origin=(float, float, - float), - plane_normal=(float, float, - float))) + hoomd.data.parameterdicts.ParameterDict( + plane_origin=(float, float, float), plane_normal=(float, float, float) + ) + ) self.plane_origin = plane_origin self.plane_normal = plane_normal diff --git a/hoomd/hpmc/external/wall.py b/hoomd/hpmc/external/wall.py index 1e1c11b6ea..ea8b33f5c0 100644 --- a/hoomd/hpmc/external/wall.py +++ b/hoomd/hpmc/external/wall.py @@ -25,14 +25,15 @@ def _to_hpmc_cpp_wall(wall): if isinstance(wall, hoomd.wall.Sphere): - return hoomd.hpmc._hpmc.SphereWall(wall.radius, wall.origin.to_base(), - wall.inside) + return hoomd.hpmc._hpmc.SphereWall( + wall.radius, wall.origin.to_base(), wall.inside + ) if isinstance(wall, hoomd.wall.Cylinder): - return hoomd.hpmc._hpmc.CylinderWall(wall.radius, wall.origin.to_base(), - wall.axis.to_base(), wall.inside) + return hoomd.hpmc._hpmc.CylinderWall( + wall.radius, wall.origin.to_base(), wall.axis.to_base(), wall.inside + ) if isinstance(wall, hoomd.wall.Plane): - return hoomd.hpmc._hpmc.PlaneWall(wall.origin.to_base(), - wall.normal.to_base()) + return hoomd.hpmc._hpmc.PlaneWall(wall.origin.to_base(), wall.normal.to_base()) raise TypeError(f"Unknown wall type encountered {type(wall)}.") @@ -44,16 +45,19 @@ class _HPMCWallsMetaList(_WallsMetaList): compatible with integrator in the simulation to which the `WallPotential` is attached. """ + _supported_shape_wall_pairs = { hpmc.integrate.Sphere: [ - hoomd.wall.Sphere, hoomd.wall.Cylinder, hoomd.wall.Plane + hoomd.wall.Sphere, + hoomd.wall.Cylinder, + hoomd.wall.Plane, ], hpmc.integrate.ConvexPolyhedron: [ - hoomd.wall.Sphere, hoomd.wall.Cylinder, hoomd.wall.Plane + hoomd.wall.Sphere, + hoomd.wall.Cylinder, + hoomd.wall.Plane, ], - hpmc.integrate.ConvexSpheropolyhedron: [ - hoomd.wall.Sphere, hoomd.wall.Plane - ] + hpmc.integrate.ConvexSpheropolyhedron: [hoomd.wall.Sphere, hoomd.wall.Plane], } def _check_wall_compatibility(self, wall): @@ -61,10 +65,9 @@ def _check_wall_compatibility(self, wall): return integrator = self._wall_potential._simulation.operations.integrator integrator_type = type(integrator) - if type(wall) not in self._supported_shape_wall_pairs.get( - integrator_type, []): - msg = f'Overlap checks between {type(wall)} and {integrator_type} ' - msg += 'are not supported.' 
+ if type(wall) not in self._supported_shape_wall_pairs.get(integrator_type, []): + msg = f"Overlap checks between {type(wall)} and {integrator_type} " + msg += "are not supported." raise NotImplementedError(msg) def _validate_walls(self): @@ -172,13 +175,14 @@ def _make_cpp_obj(self): cpp_cls_name += integrator.__class__.__name__ cpp_cls = getattr(hoomd.hpmc._hpmc, cpp_cls_name) - cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, - integrator._cpp_obj) - self._walls._sync({ - hoomd.wall.Sphere: cpp_obj.SphereWalls, - hoomd.wall.Cylinder: cpp_obj.CylinderWalls, - hoomd.wall.Plane: cpp_obj.PlaneWalls, - }) + cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, integrator._cpp_obj) + self._walls._sync( + { + hoomd.wall.Sphere: cpp_obj.SphereWalls, + hoomd.wall.Cylinder: cpp_obj.CylinderWalls, + hoomd.wall.Plane: cpp_obj.PlaneWalls, + } + ) return cpp_obj @@ -200,11 +204,13 @@ def walls(self, wall_list): return self._walls = _HPMCWallsMetaList(self, wall_list, _to_hpmc_cpp_wall) if self._attached: - self._walls._sync({ - hoomd.wall.Sphere: self._cpp_obj.SphereWalls, - hoomd.wall.Cylinder: self._cpp_obj.CylinderWalls, - hoomd.wall.Plane: self._cpp_obj.PlaneWalls, - }) + self._walls._sync( + { + hoomd.wall.Sphere: self._cpp_obj.SphereWalls, + hoomd.wall.Cylinder: self._cpp_obj.CylinderWalls, + hoomd.wall.Plane: self._cpp_obj.PlaneWalls, + } + ) @log(requires_run=True) def overlaps(self): diff --git a/hoomd/hpmc/integrate.py b/hoomd/hpmc/integrate.py index 317439b97b..ffadcd42a0 100644 --- a/hoomd/hpmc/integrate.py +++ b/hoomd/hpmc/integrate.py @@ -333,12 +333,15 @@ class HPMCIntegrator(Integrator): kT (hoomd.variant.Variant): Temperature set point :math:`[\\mathrm{energy}]`. """ + _ext_module = _hpmc - _remove_for_pickling = (*Integrator._remove_for_pickling, '_cpp_cell') - _skip_for_equality = Integrator._skip_for_equality | {'_cpp_cell'} + _remove_for_pickling = (*Integrator._remove_for_pickling, "_cpp_cell") + _skip_for_equality = Integrator._skip_for_equality | {"_cpp_cell"} _cpp_cls = None __doc__ = __doc__.replace("{inherited}", Integrator._doc_inherited) - _doc_inherited = Integrator._doc_inherited + """ + _doc_inherited = ( + Integrator._doc_inherited + + """ ---------- **Members inherited from** @@ -424,44 +427,48 @@ class HPMCIntegrator(Integrator): Count of the accepted and rejected translate moves. `Read more... 
` """ + ) - def __init__(self, default_d, default_a, translation_move_probability, - nselect, kT): + def __init__(self, default_d, default_a, translation_move_probability, nselect, kT): super().__init__() # Set base parameter dict for hpmc integrators param_dict = ParameterDict( translation_move_probability=float(translation_move_probability), nselect=int(nselect), - kT=hoomd.variant.Variant) + kT=hoomd.variant.Variant, + ) self._param_dict.update(param_dict) self.kT = kT # Set standard typeparameters for hpmc integrators - typeparam_d = TypeParameter('d', - type_kind='particle_types', - param_dict=TypeParameterDict( - float(default_d), len_keys=1)) - typeparam_a = TypeParameter('a', - type_kind='particle_types', - param_dict=TypeParameterDict( - float(default_a), len_keys=1)) - - typeparam_inter_matrix = TypeParameter('interaction_matrix', - type_kind='particle_types', - param_dict=TypeParameterDict( - True, len_keys=2)) - - self._extend_typeparam( - [typeparam_d, typeparam_a, typeparam_inter_matrix]) + typeparam_d = TypeParameter( + "d", + type_kind="particle_types", + param_dict=TypeParameterDict(float(default_d), len_keys=1), + ) + typeparam_a = TypeParameter( + "a", + type_kind="particle_types", + param_dict=TypeParameterDict(float(default_a), len_keys=1), + ) + + typeparam_inter_matrix = TypeParameter( + "interaction_matrix", + type_kind="particle_types", + param_dict=TypeParameterDict(True, len_keys=2), + ) + + self._extend_typeparam([typeparam_d, typeparam_a, typeparam_inter_matrix]) self._pair_potentials = hoomd.data.syncedlist.SyncedList( - hoomd.hpmc.pair.Pair, - hoomd.data.syncedlist._PartialGetAttr('_cpp_obj')) + hoomd.hpmc.pair.Pair, hoomd.data.syncedlist._PartialGetAttr("_cpp_obj") + ) self._external_potentials = hoomd.data.syncedlist.SyncedList( hoomd.hpmc.external.External, - hoomd.data.syncedlist._PartialGetAttr('_cpp_obj')) + hoomd.data.syncedlist._PartialGetAttr("_cpp_obj"), + ) def _attach_hook(self): """Initialize the reflected c++ class. @@ -470,24 +477,27 @@ def _attach_hook(self): """ self._simulation._warn_if_seed_unset() sys_def = self._simulation.state._cpp_sys_def - if (isinstance(self._simulation.device, hoomd.device.GPU) - and (self._cpp_cls + 'GPU') in _hpmc.__dict__): + if ( + isinstance(self._simulation.device, hoomd.device.GPU) + and (self._cpp_cls + "GPU") in _hpmc.__dict__ + ): self._cpp_cell = _hoomd.CellListGPU(sys_def) - self._cpp_obj = getattr(self._ext_module, - self._cpp_cls + 'GPU')(sys_def, - self._cpp_cell) + self._cpp_obj = getattr(self._ext_module, self._cpp_cls + "GPU")( + sys_def, self._cpp_cell + ) else: if isinstance(self._simulation.device, hoomd.device.GPU): self._simulation.device._cpp_msg.warning( - "Falling back on CPU. No GPU implementation for shape.\n") + "Falling back on CPU. 
No GPU implementation for shape.\n" + ) self._cpp_obj = getattr(self._ext_module, self._cpp_cls)(sys_def) self._cpp_cell = None - self._pair_potentials._sync(self._simulation, - self._cpp_obj.pair_potentials) + self._pair_potentials._sync(self._simulation, self._cpp_obj.pair_potentials) - self._external_potentials._sync(self._simulation, - self._cpp_obj.external_potentials) + self._external_potentials._sync( + self._simulation, self._cpp_obj.external_potentials + ) super()._attach_hook() @@ -556,7 +566,7 @@ def external_potentials(self, value): self._external_potentials.clear() self._external_potentials.extend(value) - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def map_overlaps(self): """list[tuple[int, int]]: List of overlapping particles. @@ -579,7 +589,7 @@ def overlaps(self): self._cpp_obj.communicate(True) return self._cpp_obj.countOverlaps(False) - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def translate_moves(self): """tuple[int, int]: Count of the accepted and rejected translate moves. @@ -589,7 +599,7 @@ def translate_moves(self): """ return self._cpp_obj.getCounters(1).translate - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def rotate_moves(self): """tuple[int, int]: Count of the accepted and rejected rotate moves. @@ -644,8 +654,7 @@ def external_energy(self): """float: Total external energy contributed by all external potentials \ :math:`[\\mathrm{energy}]`. """ - return self._cpp_obj.computeTotalExternalEnergy( - self._simulation.timestep) + return self._cpp_obj.computeTotalExternalEnergy(self._simulation.timestep) class Sphere(HPMCIntegrator): @@ -693,7 +702,7 @@ class Sphere(HPMCIntegrator): mc.shape["A"] = dict(diameter=1.0) mc.shape["B"] = dict(diameter=2.0) mc.shape["C"] = dict(diameter=1.0, orientable=True) - print('diameter = ', mc.shape["A"]["diameter"]) + print("diameter = ", mc.shape["A"]["diameter"]) {inherited} @@ -713,30 +722,33 @@ class Sphere(HPMCIntegrator): * ``orientable`` (`bool`, **default:** `False`) - set to `True` to allow rotation moves on this particle type. """ - _cpp_cls = 'IntegratorHPMCMonoSphere' - __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): + _cpp_cls = "IntegratorHPMCMonoSphere" + __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) - - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - diameter=float, - ignore_statistics=False, - orientable=False, - len_keys=1)) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) + + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + diameter=float, ignore_statistics=False, orientable=False, len_keys=1 + ), + ) self._add_typeparam(typeparam_shape) - @log(category='object', requires_run=True) + @log(category="object", requires_run=True) def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format. 
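The loggable properties reformatted above (`map_overlaps`, `translate_moves`, `rotate_moves`, `external_energy`) and the `Sphere` shape parameters keep their existing semantics; only quoting and line wrapping change. A minimal usage sketch, reading one of those counters from an attached integrator — the GSD file name, seed, and run length here are illustrative and not part of this diff:

    import hoomd

    mc = hoomd.hpmc.integrate.Sphere(default_d=0.1, nselect=4)
    mc.shape["A"] = dict(diameter=1.0)

    simulation = hoomd.Simulation(device=hoomd.device.CPU(), seed=12)
    simulation.create_state_from_gsd(filename="init.gsd")  # illustrative initial state
    simulation.operations.integrator = mc

    simulation.run(1000)
    # translate_moves is a sequence loggable: (accepted, rejected) counts.
    accepted, rejected = mc.translate_moves
    print("translate acceptance:", accepted / max(accepted + rejected, 1))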
@@ -785,11 +797,15 @@ class ConvexPolygon(HPMCIntegrator): Examples:: mc = hoomd.hpmc.integrate.ConvexPolygon(default_d=0.3, default_a=0.4) - mc.shape["A"] = dict(vertices=[(-0.5, -0.5), - (0.5, -0.5), - (0.5, 0.5), - (-0.5, 0.5)]); - print('vertices = ', mc.shape["A"]["vertices"]) + mc.shape["A"] = dict( + vertices=[ + (-0.5, -0.5), + (0.5, -0.5), + (0.5, 0.5), + (-0.5, 0.5), + ] + ) + print("vertices = ", mc.shape["A"]["vertices"]) {inherited} @@ -823,32 +839,37 @@ class ConvexPolygon(HPMCIntegrator): Undefined behavior **will result** when they are violated. """ - _cpp_cls = 'IntegratorHPMCMonoConvexPolygon' - __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): + _cpp_cls = "IntegratorHPMCMonoConvexPolygon" + __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) - - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - vertices=[(float, float)], - ignore_statistics=False, - sweep_radius=0.0, - len_keys=1, - )) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) + + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + vertices=[(float, float)], + ignore_statistics=False, + sweep_radius=0.0, + len_keys=1, + ), + ) self._add_typeparam(typeparam_shape) - @log(category='object', requires_run=True) + @log(category="object", requires_run=True) def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format. @@ -901,19 +922,24 @@ class ConvexSpheropolygon(HPMCIntegrator): Examples:: - mc = hoomd.hpmc.integrate.ConvexSpheropolygon(default_d=0.3, - default_a=0.4) - mc.shape["A"] = dict(vertices=[(-0.5, -0.5), - (0.5, -0.5), - (0.5, 0.5), - (-0.5, 0.5)], - sweep_radius=0.1); - - mc.shape["A"] = dict(vertices=[(0,0)], - sweep_radius=0.5, - ignore_statistics=True); - - print('vertices = ', mc.shape["A"]["vertices"]) + mc = hoomd.hpmc.integrate.ConvexSpheropolygon( + default_d=0.3, default_a=0.4 + ) + mc.shape["A"] = dict( + vertices=[ + (-0.5, -0.5), + (0.5, -0.5), + (0.5, 0.5), + (-0.5, 0.5), + ], + sweep_radius=0.1, + ) + mc.shape["A"] = dict( + vertices=[(0, 0)], + sweep_radius=0.5, + ignore_statistics=True, + ) + print("vertices = ", mc.shape["A"]["vertices"]) {inherited} @@ -945,31 +971,36 @@ class ConvexSpheropolygon(HPMCIntegrator): Undefined behavior will result when they are violated. 
""" - _cpp_cls = 'IntegratorHPMCMonoSpheropolygon' + _cpp_cls = "IntegratorHPMCMonoSpheropolygon" __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): - + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - vertices=[(float, float)], - sweep_radius=0.0, - ignore_statistics=False, - len_keys=1)) + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + vertices=[(float, float)], + sweep_radius=0.0, + ignore_statistics=False, + len_keys=1, + ), + ) self._add_typeparam(typeparam_shape) - @log(category='object', requires_run=True) + @log(category="object", requires_run=True) def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format. @@ -1016,11 +1047,15 @@ class SimplePolygon(HPMCIntegrator): Examples:: mc = hpmc.integrate.SimplePolygon(default_d=0.3, default_a=0.4) - mc.shape["A"] = dict(vertices=[(0, 0.5), - (-0.5, -0.5), - (0, 0), - (0.5, -0.5)]); - print('vertices = ', mc.shape["A"]["vertices"]) + mc.shape["A"] = dict( + vertices=[ + (0, 0.5), + (-0.5, -0.5), + (0, 0), + (0.5, -0.5), + ] + ) + print("vertices = ", mc.shape["A"]["vertices"]) {inherited} @@ -1054,31 +1089,36 @@ class SimplePolygon(HPMCIntegrator): Undefined behavior will result when they are violated. """ - _cpp_cls = 'IntegratorHPMCMonoSimplePolygon' + _cpp_cls = "IntegratorHPMCMonoSimplePolygon" __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): - + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - vertices=[(float, float)], - ignore_statistics=False, - sweep_radius=0, - len_keys=1)) + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + vertices=[(float, float)], + ignore_statistics=False, + sweep_radius=0, + len_keys=1, + ), + ) self._add_typeparam(typeparam_shape) - @log(category='object', requires_run=True) + @log(category="object", requires_run=True) def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format. 
@@ -1126,28 +1166,34 @@ class Polyhedron(HPMCIntegrator): Example:: mc = hpmc.integrate.Polyhedron(default_d=0.3, default_a=0.4) - mc.shape["A"] = dict(vertices=[(-0.5, -0.5, -0.5), - (-0.5, -0.5, 0.5), - (-0.5, 0.5, -0.5), - (-0.5, 0.5, 0.5), - (0.5, -0.5, -0.5), - (0.5, -0.5, 0.5), - (0.5, 0.5, -0.5), - (0.5, 0.5, 0.5)], - faces=[[0, 2, 6], - [6, 4, 0], - [5, 0, 4], - [5, 1, 0], - [5, 4, 6], - [5, 6, 7], - [3, 2, 0], - [3, 0, 1], - [3, 6, 2], - [3, 7, 6], - [3, 1, 5], - [3, 5, 7]]) - print('vertices = ', mc.shape["A"]["vertices"]) - print('faces = ', mc.shape["A"]["faces"]) + mc.shape["A"] = dict( + vertices=[ + (-0.5, -0.5, -0.5), + (-0.5, -0.5, 0.5), + (-0.5, 0.5, -0.5), + (-0.5, 0.5, 0.5), + (0.5, -0.5, -0.5), + (0.5, -0.5, 0.5), + (0.5, 0.5, -0.5), + (0.5, 0.5, 0.5), + ], + faces=[ + [0, 2, 6], + [6, 4, 0], + [5, 0, 4], + [5, 1, 0], + [5, 4, 6], + [5, 6, 7], + [3, 2, 0], + [3, 0, 1], + [3, 6, 2], + [3, 7, 6], + [3, 1, 5], + [3, 5, 7], + ], + ) + print("vertices = ", mc.shape["A"]["vertices"]) + print("faces = ", mc.shape["A"]["faces"]) {inherited} @@ -1199,39 +1245,42 @@ class Polyhedron(HPMCIntegrator): Undefined behavior will result when they are violated. """ - _cpp_cls = 'IntegratorHPMCMonoPolyhedron' + _cpp_cls = "IntegratorHPMCMonoPolyhedron" __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): - + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) - - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - vertices=[(float, float, float)], - faces=[(int, int, int)], - sweep_radius=0.0, - capacity=4, - origin=(0., 0., 0.), - hull_only=False, - overlap=OnlyIf(to_type_converter( - [bool]), - allow_none=True), - ignore_statistics=False, - len_keys=1, - _defaults={'overlap': None})) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) + + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + vertices=[(float, float, float)], + faces=[(int, int, int)], + sweep_radius=0.0, + capacity=4, + origin=(0.0, 0.0, 0.0), + hull_only=False, + overlap=OnlyIf(to_type_converter([bool]), allow_none=True), + ignore_statistics=False, + len_keys=1, + _defaults={"overlap": None}, + ), + ) self._add_typeparam(typeparam_shape) - @log(category='object', requires_run=True) + @log(category="object", requires_run=True) def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format. @@ -1276,11 +1325,15 @@ class ConvexPolyhedron(HPMCIntegrator): Example:: mc = hpmc.integrate.ConvexPolyhedron(default_d=0.3, default_a=0.4) - mc.shape["A"] = dict(vertices=[(0.5, 0.5, 0.5), - (0.5, -0.5, -0.5), - (-0.5, 0.5, -0.5), - (-0.5, -0.5, 0.5)]); - print('vertices = ', mc.shape["A"]["vertices"]) + mc.shape["A"] = dict( + vertices=[ + (0.5, 0.5, 0.5), + (0.5, -0.5, -0.5), + (-0.5, 0.5, -0.5), + (-0.5, -0.5, 0.5), + ] + ) + print("vertices = ", mc.shape["A"]["vertices"]) {inherited} @@ -1312,30 +1365,35 @@ class ConvexPolyhedron(HPMCIntegrator): Undefined behavior will result when they are violated. 
""" - _cpp_cls = 'IntegratorHPMCMonoConvexPolyhedron' + _cpp_cls = "IntegratorHPMCMonoConvexPolyhedron" __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): - + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) - - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - vertices=[(float, float, float)], - sweep_radius=0.0, - ignore_statistics=False, - len_keys=1)) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) + + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + vertices=[(float, float, float)], + sweep_radius=0.0, + ignore_statistics=False, + len_keys=1, + ), + ) self._add_typeparam(typeparam_shape) - @log(category='object', requires_run=True) + @log(category="object", requires_run=True) def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format. @@ -1382,27 +1440,42 @@ class FacetedEllipsoid(HPMCIntegrator): mc = hpmc.integrate.FacetedEllipsoid(default_d=0.3, default_a=0.4) # half-space intersection - slab_normals = [(-1,0,0),(1,0,0),(0,-1,0),(0,1,0),(0,0,-1),(0,0,1)] - slab_offsets = [-0.1,-1,-.5,-.5,-.5,-.5] + slab_normals = [ + (-1, 0, 0), + (1, 0, 0), + (0, -1, 0), + (0, 1, 0), + (0, 0, -1), + (0, 0, 1), + ] + slab_offsets = [-0.1, -1, -0.5, -0.5, -0.5, -0.5] # polyedron vertices - slab_verts = [[-.1,-.5,-.5], - [-.1,-.5,.5], - [-.1,.5,.5], - [-.1,.5,-.5], - [1,-.5,-.5], - [1,-.5,.5], - [1,.5,.5], - [1,.5,-.5]] - - mc.shape["A"] = dict(normals=slab_normals, - offsets=slab_offsets, - vertices=slab_verts, - a=1.0, - b=0.5, - c=0.5); - print('a = {}, b = {}, c = {}', - mc.shape["A"]["a"], mc.shape["A"]["b"], mc.shape["A"]["c"]) + slab_verts = [ + [-0.1, -0.5, -0.5], + [-0.1, -0.5, 0.5], + [-0.1, 0.5, 0.5], + [-0.1, 0.5, -0.5], + [1, -0.5, -0.5], + [1, -0.5, 0.5], + [1, 0.5, 0.5], + [1, 0.5, -0.5], + ] + + mc.shape["A"] = dict( + normals=slab_normals, + offsets=slab_offsets, + vertices=slab_verts, + a=1.0, + b=0.5, + c=0.5, + ) + print( + "a = {}, b = {}, c = {}", + mc.shape["A"]["a"], + mc.shape["A"]["b"], + mc.shape["A"]["c"], + ) {inherited} @@ -1449,36 +1522,40 @@ class FacetedEllipsoid(HPMCIntegrator): the half-space intersection is **not** calculated automatically. 
""" - _cpp_cls = 'IntegratorHPMCMonoFacetedEllipsoid' + _cpp_cls = "IntegratorHPMCMonoFacetedEllipsoid" __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): - + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) - - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - normals=[(float, float, float)], - offsets=[float], - a=float, - b=float, - c=float, - vertices=OnlyIf(to_type_converter([ - (float, float, float) - ]), - allow_none=True), - origin=(0.0, 0.0, 0.0), - ignore_statistics=False, - len_keys=1, - _defaults={'vertices': None})) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) + + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + normals=[(float, float, float)], + offsets=[float], + a=float, + b=float, + c=float, + vertices=OnlyIf( + to_type_converter([(float, float, float)]), allow_none=True + ), + origin=(0.0, 0.0, 0.0), + ignore_statistics=False, + len_keys=1, + _defaults={"vertices": None}, + ), + ) self._add_typeparam(typeparam_shape) @@ -1517,8 +1594,10 @@ class Sphinx(HPMCIntegrator): Example:: mc = hpmc.integrate.Sphinx(default_d=0.3, default_a=0.4) - mc.shape["A"] = dict(centers=[(0,0,0),(1,0,0)], diameters=[1,.25]) - print('diameters = ', mc.shape["A"]["diameters"]) + mc.shape["A"] = dict( + centers=[(0, 0, 0), (1, 0, 0)], diameters=[1, 0.25] + ) + print("diameters = ", mc.shape["A"]["diameters"]) {inherited} @@ -1541,27 +1620,32 @@ class Sphinx(HPMCIntegrator): `True` to ignore tracked statistics. """ - _cpp_cls = 'IntegratorHPMCMonoSphinx' + _cpp_cls = "IntegratorHPMCMonoSphinx" __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): - + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) - - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - diameters=[float], - centers=[(float, float, float)], - ignore_statistics=False, - len_keys=1)) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) + + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + diameters=[float], + centers=[(float, float, float)], + ignore_statistics=False, + len_keys=1, + ), + ) self._add_typeparam(typeparam_shape) @@ -1598,10 +1682,14 @@ class ConvexSpheropolyhedron(HPMCIntegrator): Example:: mc = hpmc.integrate.ConvexSpheropolyhedron(default_d=0.3, default_a=0.4) - mc.shape['tetrahedron'] = dict(vertices=[(0.5, 0.5, 0.5), - (0.5, -0.5, -0.5), - (-0.5, 0.5, -0.5), - (-0.5, -0.5, 0.5)]); + mc.shape["tetrahedron"] = dict( + vertices=[ + (0.5, 0.5, 0.5), + (0.5, -0.5, -0.5), + (-0.5, 0.5, -0.5), + (-0.5, -0.5, 0.5), + ] + ) {inherited} @@ -1634,30 +1722,35 @@ class ConvexSpheropolyhedron(HPMCIntegrator): Undefined behavior will result when they are violated. 
""" - _cpp_cls = 'IntegratorHPMCMonoSpheropolyhedron' + _cpp_cls = "IntegratorHPMCMonoSpheropolyhedron" __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): - + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) - - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - vertices=[(float, float, float)], - sweep_radius=0.0, - ignore_statistics=False, - len_keys=1)) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) + + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + vertices=[(float, float, float)], + sweep_radius=0.0, + ignore_statistics=False, + len_keys=1, + ), + ) self._add_typeparam(typeparam_shape) - @log(category='object', requires_run=True) + @log(category="object", requires_run=True) def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format. @@ -1705,11 +1798,13 @@ class Ellipsoid(HPMCIntegrator): Example:: mc = hpmc.integrate.Ellipsoid(default_d=0.3, default_a=0.4) - mc.shape["A"] = dict(a=0.5, b=0.25, c=0.125); - print('ellipsoids parameters (a,b,c) = ', - mc.shape["A"]["a"], - mc.shape["A"]["b"], - mc.shape["A"]["c"]) + mc.shape["A"] = dict(a=0.5, b=0.25, c=0.125) + print( + "ellipsoids parameters (a,b,c) = ", + mc.shape["A"]["a"], + mc.shape["A"]["b"], + mc.shape["A"]["c"], + ) {inherited} @@ -1732,32 +1827,33 @@ class Ellipsoid(HPMCIntegrator): `True` to ignore tracked statistics. """ - _cpp_cls = 'IntegratorHPMCMonoEllipsoid' + _cpp_cls = "IntegratorHPMCMonoEllipsoid" __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): - + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) - - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - a=float, - b=float, - c=float, - ignore_statistics=False, - len_keys=1)) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) + + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + a=float, b=float, c=float, ignore_statistics=False, len_keys=1 + ), + ) self._extend_typeparam([typeparam_shape]) - @log(category='object', requires_run=True) + @log(category="object", requires_run=True) def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format. @@ -1852,44 +1948,43 @@ class SphereUnion(HPMCIntegrator): `True` to ignore tracked statistics. 
""" - _cpp_cls = 'IntegratorHPMCMonoSphereUnion' + _cpp_cls = "IntegratorHPMCMonoSphereUnion" __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): - + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) typeparam_shape = TypeParameter( - 'shape', - type_kind='particle_types', - param_dict=TypeParameterDict(shapes=[ - dict(diameter=float, ignore_statistics=False, orientable=False) - ], - positions=[(float, float, float)], - orientations=OnlyIf(to_type_converter([ - (float, float, float, float) - ]), - allow_none=True), - capacity=4, - overlap=OnlyIf(to_type_converter([int - ]), - allow_none=True), - ignore_statistics=False, - len_keys=1, - _defaults={ - 'orientations': None, - 'overlap': None - })) + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + shapes=[ + dict(diameter=float, ignore_statistics=False, orientable=False) + ], + positions=[(float, float, float)], + orientations=OnlyIf( + to_type_converter([(float, float, float, float)]), allow_none=True + ), + capacity=4, + overlap=OnlyIf(to_type_converter([int]), allow_none=True), + ignore_statistics=False, + len_keys=1, + _defaults={"orientations": None, "overlap": None}, + ), + ) self._add_typeparam(typeparam_shape) - @log(category='object', requires_run=True) + @log(category="object", requires_run=True) def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format. @@ -2001,47 +2096,48 @@ class ConvexSpheropolyhedronUnion(HPMCIntegrator): `True` to ignore tracked statistics. 
""" - _cpp_cls = 'IntegratorHPMCMonoConvexPolyhedronUnion' + _cpp_cls = "IntegratorHPMCMonoConvexPolyhedronUnion" __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): - + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) typeparam_shape = TypeParameter( - 'shape', - type_kind='particle_types', - param_dict=TypeParameterDict(shapes=[ - dict(vertices=[(float, float, float)], - sweep_radius=0.0, - ignore_statistics=False) - ], - positions=[(float, float, float)], - orientations=OnlyIf(to_type_converter([ - (float, float, float, float) - ]), - allow_none=True), - overlap=OnlyIf(to_type_converter([int - ]), - allow_none=True), - ignore_statistics=False, - capacity=4, - len_keys=1, - _defaults={ - 'orientations': None, - 'overlap': None - })) + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + shapes=[ + dict( + vertices=[(float, float, float)], + sweep_radius=0.0, + ignore_statistics=False, + ) + ], + positions=[(float, float, float)], + orientations=OnlyIf( + to_type_converter([(float, float, float, float)]), allow_none=True + ), + overlap=OnlyIf(to_type_converter([int]), allow_none=True), + ignore_statistics=False, + capacity=4, + len_keys=1, + _defaults={"orientations": None, "overlap": None}, + ), + ) self._add_typeparam(typeparam_shape) # meta data - self.metadata_fields = ['capacity'] + self.metadata_fields = ["capacity"] class FacetedEllipsoidUnion(HPMCIntegrator): @@ -2161,64 +2257,65 @@ class FacetedEllipsoidUnion(HPMCIntegrator): `True` to ignore tracked statistics. 
""" - _cpp_cls = 'IntegratorHPMCMonoFacetedEllipsoidUnion' + _cpp_cls = "IntegratorHPMCMonoFacetedEllipsoidUnion" __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - translation_move_probability=0.5, - nselect=4, - kT=1.0): - + def __init__( + self, + default_d=0.1, + default_a=0.1, + translation_move_probability=0.5, + nselect=4, + kT=1.0, + ): # initialize base class - super().__init__(default_d, default_a, translation_move_probability, - nselect, kT) + super().__init__( + default_d, default_a, translation_move_probability, nselect, kT + ) typeparam_shape = TypeParameter( - 'shape', - type_kind='particle_types', - param_dict=TypeParameterDict(shapes=[ - dict(a=float, - b=float, - c=float, - normals=[(float, float, float)], - offsets=[float], - vertices=[(float, float, float)], - origin=(float, float, float), - ignore_statistics=False) - ], - positions=[(float, float, float)], - orientations=OnlyIf(to_type_converter([ - (float, float, float, float) - ]), - allow_none=True), - overlap=OnlyIf(to_type_converter([int - ]), - allow_none=True), - ignore_statistics=False, - capacity=4, - len_keys=1, - _defaults={ - 'orientations': None, - 'overlap': None - })) + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + shapes=[ + dict( + a=float, + b=float, + c=float, + normals=[(float, float, float)], + offsets=[float], + vertices=[(float, float, float)], + origin=(float, float, float), + ignore_statistics=False, + ) + ], + positions=[(float, float, float)], + orientations=OnlyIf( + to_type_converter([(float, float, float, float)]), allow_none=True + ), + overlap=OnlyIf(to_type_converter([int]), allow_none=True), + ignore_statistics=False, + capacity=4, + len_keys=1, + _defaults={"orientations": None, "overlap": None}, + ), + ) self._add_typeparam(typeparam_shape) __all__ = [ - 'ConvexPolygon', - 'ConvexPolyhedron', - 'ConvexSpheropolygon', - 'ConvexSpheropolyhedron', - 'ConvexSpheropolyhedronUnion', - 'Ellipsoid', - 'FacetedEllipsoid', - 'FacetedEllipsoidUnion', - 'HPMCIntegrator', - 'Polyhedron', - 'SimplePolygon', - 'Sphere', - 'SphereUnion', - 'Sphinx', + "ConvexPolygon", + "ConvexPolyhedron", + "ConvexSpheropolygon", + "ConvexSpheropolyhedron", + "ConvexSpheropolyhedronUnion", + "Ellipsoid", + "FacetedEllipsoid", + "FacetedEllipsoidUnion", + "HPMCIntegrator", + "Polyhedron", + "SimplePolygon", + "Sphere", + "SphereUnion", + "Sphinx", ] diff --git a/hoomd/hpmc/nec/__init__.py b/hoomd/hpmc/nec/__init__.py index 733a4bf7c3..5dc87b78d8 100644 --- a/hoomd/hpmc/nec/__init__.py +++ b/hoomd/hpmc/nec/__init__.py @@ -50,6 +50,6 @@ from . import tune __all__ = [ - 'integrate', - 'tune', + "integrate", + "tune", ] diff --git a/hoomd/hpmc/nec/integrate.py b/hoomd/hpmc/nec/integrate.py index db8f0209ad..4e6a6ab324 100644 --- a/hoomd/hpmc/nec/integrate.py +++ b/hoomd/hpmc/nec/integrate.py @@ -28,8 +28,11 @@ class HPMCNECIntegrator(HPMCIntegrator): **Members defined in** `HPMCNECIntegrator`: """ + _cpp_cls = None - _doc_inherited = HPMCIntegrator._doc_inherited + """ + _doc_inherited = ( + HPMCIntegrator._doc_inherited + + """ ---------- **Members inherited from** @@ -55,26 +58,30 @@ class HPMCNECIntegrator(HPMCIntegrator): Virial pressure. `Read more... 
` """ + ) __doc__ = __doc__.replace("{inherited}", HPMCIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - chain_probability=0.5, - chain_time=0.5, - update_fraction=0.5, - nselect=1): + def __init__( + self, + default_d=0.1, + default_a=0.1, + chain_probability=0.5, + chain_time=0.5, + update_fraction=0.5, + nselect=1, + ): # initialize base class super().__init__(default_d, default_a, 0.5, nselect) # Set base parameter dict for hpmc chain integrators param_dict = ParameterDict( chain_probability=OnlyTypes( - float, postprocess=self._process_chain_probability), + float, postprocess=self._process_chain_probability + ), chain_time=OnlyTypes(float, postprocess=self._process_chain_time), - update_fraction=OnlyTypes( - float, postprocess=self._process_update_fraction)) + update_fraction=OnlyTypes(float, postprocess=self._process_update_fraction), + ) self._param_dict.update(param_dict) self.chain_probability = chain_probability self.chain_time = chain_time @@ -86,16 +93,15 @@ def _process_chain_probability(value): return value else: raise ValueError( - "chain_probability has to be between 0 and 1 (got {}).".format( - value)) + "chain_probability has to be between 0 and 1 (got {}).".format(value) + ) @staticmethod def _process_chain_time(value): if 0.0 <= value: return value else: - raise ValueError( - "chain_time has to be positive (got {}).".format(value)) + raise ValueError("chain_time has to be positive (got {}).".format(value)) @staticmethod def _process_update_fraction(value): @@ -103,8 +109,8 @@ def _process_update_fraction(value): return value else: raise ValueError( - "update_fraction has to be between 0 and 1. (got {})".format( - value)) + "update_fraction has to be between 0 and 1. (got {})".format(value) + ) @property def nec_counters(self): @@ -147,8 +153,7 @@ def particles_per_chain(self): The statistics are reset at every `hoomd.Simulation.run`. """ necCounts = self._cpp_obj.getNECCounters(1) - return (necCounts.chain_at_collision_count * 1.0 - / necCounts.chain_start_count) + return necCounts.chain_at_collision_count * 1.0 / necCounts.chain_start_count @log(requires_run=True) def chains_in_space(self): @@ -158,9 +163,9 @@ def chains_in_space(self): The statistics are reset at every `hoomd.Simulation.run`. """ necCounts = self._cpp_obj.getNECCounters(1) - return (necCounts.chain_no_collision_count - necCounts.chain_start_count - ) / (necCounts.chain_at_collision_count - + necCounts.chain_no_collision_count) + return (necCounts.chain_no_collision_count - necCounts.chain_start_count) / ( + necCounts.chain_at_collision_count + necCounts.chain_no_collision_count + ) class Sphere(HPMCNECIntegrator): @@ -220,32 +225,30 @@ class Sphere(HPMCNECIntegrator): allow rotation moves on this particle type. 
""" - _cpp_cls = 'IntegratorHPMCMonoNECSphere' + _cpp_cls = "IntegratorHPMCMonoNECSphere" __doc__ = __doc__.replace("{inherited}", HPMCNECIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - chain_time=0.5, - update_fraction=0.5, - nselect=1): + def __init__(self, default_d=0.1, chain_time=0.5, update_fraction=0.5, nselect=1): # initialize base class - super().__init__(default_d=default_d, - default_a=0.1, - chain_probability=1.0, - chain_time=chain_time, - update_fraction=update_fraction, - nselect=nselect) - - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - diameter=float, - ignore_statistics=False, - orientable=False, - len_keys=1)) + super().__init__( + default_d=default_d, + default_a=0.1, + chain_probability=1.0, + chain_time=chain_time, + update_fraction=update_fraction, + nselect=nselect, + ) + + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + diameter=float, ignore_statistics=False, orientable=False, len_keys=1 + ), + ) self._add_typeparam(typeparam_shape) - @log(category='object') + @log(category="object") def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format. @@ -295,10 +298,21 @@ class ConvexPolyhedron(HPMCNECIntegrator): Example:: - mc = hoomd.hpmc.nec.integrate.ConvexPolyhedron(d=1.0, a=0.05, - chain_probability=0.1, nselect=10) - mc.shape['A'] = dict(vertices=[[1,1,1], [1,1,-1], [1,-1,1], [1,-1,-1], - [-1,1,1], [-1,1,-1], [-1,-1,1], [-1,-1,-1]]) + mc = hoomd.hpmc.nec.integrate.ConvexPolyhedron( + d=1.0, a=0.05, chain_probability=0.1, nselect=10 + ) + mc.shape["A"] = dict( + vertices=[ + [1, 1, 1], + [1, 1, -1], + [1, -1, 1], + [1, -1, -1], + [-1, 1, 1], + [-1, 1, -1], + [-1, -1, 1], + [-1, -1, -1], + ] + ) {inherited} @@ -336,34 +350,41 @@ class ConvexPolyhedron(HPMCNECIntegrator): HPMC does not check that all vertex requirements are met. Undefined behavior will result when they are violated. """ - _cpp_cls = 'IntegratorHPMCMonoNECConvexPolyhedron' + + _cpp_cls = "IntegratorHPMCMonoNECConvexPolyhedron" __doc__ = __doc__.replace("{inherited}", HPMCNECIntegrator._doc_inherited) - def __init__(self, - default_d=0.1, - default_a=0.1, - chain_probability=0.5, - chain_time=0.5, - update_fraction=0.5, - nselect=1): - - super().__init__(default_d=default_d, - default_a=default_a, - chain_probability=chain_probability, - chain_time=chain_time, - update_fraction=update_fraction, - nselect=nselect) - - typeparam_shape = TypeParameter('shape', - type_kind='particle_types', - param_dict=TypeParameterDict( - vertices=[(float, float, float)], - sweep_radius=0.0, - ignore_statistics=False, - len_keys=1)) + def __init__( + self, + default_d=0.1, + default_a=0.1, + chain_probability=0.5, + chain_time=0.5, + update_fraction=0.5, + nselect=1, + ): + super().__init__( + default_d=default_d, + default_a=default_a, + chain_probability=chain_probability, + chain_time=chain_time, + update_fraction=update_fraction, + nselect=nselect, + ) + + typeparam_shape = TypeParameter( + "shape", + type_kind="particle_types", + param_dict=TypeParameterDict( + vertices=[(float, float, float)], + sweep_radius=0.0, + ignore_statistics=False, + len_keys=1, + ), + ) self._add_typeparam(typeparam_shape) - @log(category='object') + @log(category="object") def type_shapes(self): """list[dict]: Description of shapes in ``type_shapes`` format. 
@@ -377,7 +398,7 @@ def type_shapes(self): __all__ = [ - 'ConvexPolyhedron', - 'HPMCNECIntegrator', - 'Sphere', + "ConvexPolyhedron", + "HPMCNECIntegrator", + "Sphere", ] diff --git a/hoomd/hpmc/nec/tune.py b/hoomd/hpmc/nec/tune.py index 35af257985..1cc7651999 100644 --- a/hoomd/hpmc/nec/tune.py +++ b/hoomd/hpmc/nec/tune.py @@ -56,11 +56,9 @@ def _get_y(self): # If we have recorded a previous total larger than the current one # then this condition implies a new run call. We should be able to # tune here as we have no other indication the system has changed. - elif ((self.previous_start > chain_start) - or (self.previous_hit > chain_hit)): + elif (self.previous_start > chain_start) or (self.previous_hit > chain_hit): particles_per_chain = chain_hit / chain_start else: - # yapf incorrectly formats this when written on one line delta_prev = chain_hit - self.previous_hit delta_start = chain_start - self.previous_start particles_per_chain = delta_prev / delta_start @@ -83,15 +81,15 @@ def __hash__(self): return hash(("chain_time", self._target, self._domain)) def __eq__(self, other): - return (self._target == other._target and self._domain == other._domain) + return self._target == other._target and self._domain == other._domain class _InternalChainTime(_InternalAction): """Internal class for the ChainTime tuner.""" + _min_chain_time = 1e-7 def __init__(self, target, solver, max_chain_time=None): - # A flag for knowing when to update the maximum move sizes self._update_chain_time = False @@ -106,18 +104,17 @@ def __init__(self, target, solver, max_chain_time=None): self._is_attached = False self._chain_time_def = _ChainTimeTuneDefinition( - target, (self._min_chain_time, max_chain_time)) + target, (self._min_chain_time, max_chain_time) + ) param_dict = ParameterDict( - target=OnlyTypes(float, - postprocess=self._process_chain_time_target), + target=OnlyTypes(float, postprocess=self._process_chain_time_target), solver=RootSolver, max_chain_time=OnlyTypes( - float, - allow_none=True, - postprocess=self._process_chain_time_range), - min_chain_time=OnlyTypes( - float, postprocess=self._process_chain_time_range)) + float, allow_none=True, postprocess=self._process_chain_time_range + ), + min_chain_time=OnlyTypes(float, postprocess=self._process_chain_time_range), + ) self._param_dict.update(param_dict) self.target = target @@ -135,8 +132,7 @@ def _process_chain_time_range(self, target): def _process_chain_time_target(self, target): # check range if not (0 <= target <= 1000): - raise ValueError( - "Value {} should be between 0 and 1000.".format(target)) + raise ValueError("Value {} should be between 0 and 1000.".format(target)) self._chain_time_def.target = target self._tuned = 0 @@ -145,7 +141,8 @@ def _process_chain_time_target(self, target): def attach(self, simulation): if not isinstance(simulation.operations.integrator, HPMCNECIntegrator): raise RuntimeError( - "ChainTimeTuner can only be used in HPMC-NEC simulations.") + "ChainTimeTuner can only be used in HPMC-NEC simulations." 
+ ) self._chain_time_def.integrator = simulation.operations.integrator self._is_attached = True @@ -177,8 +174,7 @@ def act(self, timestep=None): if self._is_attached: # update maximum chain time if self._update_chain_time: - self._chain_time_def.domain = (self.min_chain_time, - self.max_chain_time) + self._chain_time_def.domain = (self.min_chain_time, self.max_chain_time) tuned = self.solver.solve([self._chain_time_def]) self._tuned = self._tuned + 1 if tuned else 0 @@ -213,17 +209,14 @@ class ChainTime(_InternalCustomTuner): the specified target. max_chain_time (float): The maximum value of chain time to attempt. """ + _internal_class = _InternalChainTime __doc__ = __doc__.replace("{inherited}", Tuner._doc_inherited) @classmethod - def scale_solver(cls, - trigger, - target, - max_chain_time=None, - max_scale=2., - gamma=1., - tol=1e-2): + def scale_solver( + cls, trigger, target, max_chain_time=None, max_scale=2.0, gamma=1.0, tol=1e-2 + ): """Create a `ChainTime` tuner with a `hoomd.tune.ScaleSolver`. Args: @@ -244,16 +237,11 @@ def scale_solver(cls, than the default of 0.01 as acceptance rates can vary significantly at typical tuning rates. """ - solver = ScaleSolver(max_scale, gamma, 'positive', tol) + solver = ScaleSolver(max_scale, gamma, "positive", tol) return cls(trigger, target, solver, max_chain_time) @classmethod - def secant_solver(cls, - trigger, - target, - max_chain_time=None, - gamma=0.8, - tol=1e-2): + def secant_solver(cls, trigger, target, max_chain_time=None, gamma=0.8, tol=1e-2): """Create a `ChainTime` tuner with a `hoomd.tune.SecantSolver`. This solver can be faster than `hoomd.tune.ScaleSolver`, but depending @@ -287,5 +275,5 @@ def secant_solver(cls, __all__ = [ - 'ChainTime', + "ChainTime", ] diff --git a/hoomd/hpmc/pair/__init__.py b/hoomd/hpmc/pair/__init__.py index 90ff862d4a..e3246fc138 100644 --- a/hoomd/hpmc/pair/__init__.py +++ b/hoomd/hpmc/pair/__init__.py @@ -35,12 +35,12 @@ from .step import Step __all__ = [ - 'OPP', - 'AngularStep', - 'ExpandedGaussian', - 'LJGauss', - 'LennardJones', - 'Pair', - 'Step', - 'Union', + "OPP", + "AngularStep", + "ExpandedGaussian", + "LJGauss", + "LennardJones", + "Pair", + "Step", + "Union", ] diff --git a/hoomd/hpmc/pair/angular_step.py b/hoomd/hpmc/pair/angular_step.py index 377ce86cf0..b30de9a32d 100644 --- a/hoomd/hpmc/pair/angular_step.py +++ b/hoomd/hpmc/pair/angular_step.py @@ -22,7 +22,7 @@ from .pair import Pair -@hoomd.logging.modify_namespace(('hpmc', 'pair', 'AngularStep')) +@hoomd.logging.modify_namespace(("hpmc", "pair", "AngularStep")) class AngularStep(Pair): r"""Angular-step pair potential (HPMC). 
@@ -99,21 +99,28 @@ class AngularStep(Pair): Type: `TypeParameter` [``particle_type``, `dict`] """ + _cpp_class_name = "PairPotentialAngularStep" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) def __init__(self, isotropic_potential): mask = TypeParameter( - 'mask', 'particle_types', - TypeParameterDict(OnlyIf(to_type_converter( - dict(directors=[(float,) * 3], deltas=[float])), - allow_none=True), - len_keys=1)) + "mask", + "particle_types", + TypeParameterDict( + OnlyIf( + to_type_converter(dict(directors=[(float,) * 3], deltas=[float])), + allow_none=True, + ), + len_keys=1, + ), + ) self._add_typeparam(mask) if not isinstance(isotropic_potential, hoomd.hpmc.pair.Pair): raise TypeError( - "isotropic_potential must be subclass of hoomd.hpmc.pair.Pair") + "isotropic_potential must be subclass of hoomd.hpmc.pair.Pair" + ) self._isotropic_potential = isotropic_potential @property diff --git a/hoomd/hpmc/pair/expanded_gaussian.py b/hoomd/hpmc/pair/expanded_gaussian.py index 304293c51a..61f77c04eb 100644 --- a/hoomd/hpmc/pair/expanded_gaussian.py +++ b/hoomd/hpmc/pair/expanded_gaussian.py @@ -17,7 +17,7 @@ from hoomd.data.typeconverter import positive_real -@hoomd.logging.modify_namespace(('hpmc', 'pair', 'ExpandedGaussian')) +@hoomd.logging.modify_namespace(("hpmc", "pair", "ExpandedGaussian")) class ExpandedGaussian(Pair): """Expanded Gaussian pair potential (HPMC). @@ -41,11 +41,10 @@ class ExpandedGaussian(Pair): .. code-block:: python - expanded_gaussian = hoomd.hpmc.pair.ExpandedGaussian() - expanded_gaussian.params[('A', 'A')] = dict(epsilon=1.0, - sigma=1.0, - delta=1.0, - r_cut=2.5) + expanded_gaussian = hoomd.hpmc.pair.ExpandedGaussian() + expanded_gaussian.params[("A", "A")] = dict( + epsilon=1.0, sigma=1.0, delta=1.0, r_cut=2.5 + ) simulation.operations.integrator.pair_potentials = [expanded_gaussian] {inherited} @@ -83,32 +82,37 @@ class ExpandedGaussian(Pair): .. code-block:: python - expanded_gaussian.mode = 'shift' + expanded_gaussian.mode = "shift" Type: `str` """ + _cpp_class_name = "PairPotentialExpandedGaussian" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, default_r_cut=None, default_r_on=0.0, mode='none'): + def __init__(self, default_r_cut=None, default_r_on=0.0, mode="none"): if default_r_cut is None: default_r_cut = float else: default_r_cut = float(default_r_cut) params = hoomd.data.typeparam.TypeParameter( - 'params', 'particle_types', + "params", + "particle_types", hoomd.data.parameterdicts.TypeParameterDict( epsilon=float, sigma=positive_real, delta=float, r_cut=default_r_cut, r_on=float(default_r_on), - len_keys=2)) + len_keys=2, + ), + ) self._add_typeparam(params) self._param_dict.update( hoomd.data.parameterdicts.ParameterDict( - mode=hoomd.data.typeconverter.OnlyFrom(("none", "shift", - "xplor")))) + mode=hoomd.data.typeconverter.OnlyFrom(("none", "shift", "xplor")) + ) + ) self.mode = mode diff --git a/hoomd/hpmc/pair/lennard_jones.py b/hoomd/hpmc/pair/lennard_jones.py index 8bff8693c7..6c190bf5ee 100644 --- a/hoomd/hpmc/pair/lennard_jones.py +++ b/hoomd/hpmc/pair/lennard_jones.py @@ -16,7 +16,7 @@ from .pair import Pair -@hoomd.logging.modify_namespace(('hpmc', 'pair', 'LennardJones')) +@hoomd.logging.modify_namespace(("hpmc", "pair", "LennardJones")) class LennardJones(Pair): """Lennard-Jones pair potential (HPMC). @@ -41,7 +41,7 @@ class LennardJones(Pair): .. 
code-block:: python lennard_jones = hoomd.hpmc.pair.LennardJones() - lennard_jones.params[('A', 'A')] = dict(epsilon=1, sigma=1, r_cut=2.5) + lennard_jones.params[("A", "A")] = dict(epsilon=1, sigma=1, r_cut=2.5) simulation.operations.integrator.pair_potentials = [lennard_jones] {inherited} @@ -76,31 +76,36 @@ class LennardJones(Pair): .. code-block:: python - lennard_jones.mode = 'shift' + lennard_jones.mode = "shift" Type: `str` """ + _cpp_class_name = "PairPotentialLennardJones" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, default_r_cut=None, default_r_on=0.0, mode='none'): + def __init__(self, default_r_cut=None, default_r_on=0.0, mode="none"): if default_r_cut is None: default_r_cut = float else: default_r_cut = float(default_r_cut) params = hoomd.data.typeparam.TypeParameter( - 'params', 'particle_types', + "params", + "particle_types", hoomd.data.parameterdicts.TypeParameterDict( epsilon=float, sigma=float, r_cut=default_r_cut, r_on=float(default_r_on), - len_keys=2)) + len_keys=2, + ), + ) self._add_typeparam(params) self._param_dict.update( hoomd.data.parameterdicts.ParameterDict( - mode=hoomd.data.typeconverter.OnlyFrom(("none", "shift", - "xplor")))) + mode=hoomd.data.typeconverter.OnlyFrom(("none", "shift", "xplor")) + ) + ) self.mode = mode diff --git a/hoomd/hpmc/pair/lj_gauss.py b/hoomd/hpmc/pair/lj_gauss.py index 8f248b3a05..ec6e935844 100644 --- a/hoomd/hpmc/pair/lj_gauss.py +++ b/hoomd/hpmc/pair/lj_gauss.py @@ -17,7 +17,7 @@ from hoomd.data.typeconverter import positive_real -@hoomd.logging.modify_namespace(('hpmc', 'pair', 'LJGauss')) +@hoomd.logging.modify_namespace(("hpmc", "pair", "LJGauss")) class LJGauss(Pair): """Lennard-Jones-Gauss pair potential (HPMC). @@ -42,8 +42,8 @@ class LJGauss(Pair): .. code-block:: python lj_gauss = hoomd.hpmc.pair.LJGauss() - lj_gauss.params[('A', 'A')] = dict( - epsilon=1.0, sigma=0.02, r0=1.6, r_cut=2.5 + lj_gauss.params[("A", "A")] = dict( + epsilon=1.0, sigma=0.02, r0=1.6, r_cut=2.5 ) simulation.operations.integrator.pair_potentials = [lj_gauss] @@ -82,32 +82,37 @@ class LJGauss(Pair): .. code-block:: python - lj_gauss.mode = 'shift' + lj_gauss.mode = "shift" Type: `str` """ + _cpp_class_name = "PairPotentialLJGauss" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, default_r_cut=None, default_r_on=0.0, mode='none'): + def __init__(self, default_r_cut=None, default_r_on=0.0, mode="none"): if default_r_cut is None: default_r_cut = float else: default_r_cut = float(default_r_cut) params = hoomd.data.typeparam.TypeParameter( - 'params', 'particle_types', + "params", + "particle_types", hoomd.data.parameterdicts.TypeParameterDict( epsilon=float, sigma=positive_real, r0=float, r_cut=default_r_cut, r_on=float(default_r_on), - len_keys=2)) + len_keys=2, + ), + ) self._add_typeparam(params) self._param_dict.update( hoomd.data.parameterdicts.ParameterDict( - mode=hoomd.data.typeconverter.OnlyFrom(("none", "shift", - "xplor")))) + mode=hoomd.data.typeconverter.OnlyFrom(("none", "shift", "xplor")) + ) + ) self.mode = mode diff --git a/hoomd/hpmc/pair/opp.py b/hoomd/hpmc/pair/opp.py index 5f7e6c765f..dd7f0387b2 100644 --- a/hoomd/hpmc/pair/opp.py +++ b/hoomd/hpmc/pair/opp.py @@ -16,7 +16,7 @@ from .pair import Pair -@hoomd.logging.modify_namespace(('hpmc', 'pair', 'OPP')) +@hoomd.logging.modify_namespace(("hpmc", "pair", "OPP")) class OPP(Pair): """Oscillating pair potential (HPMC). @@ -41,8 +41,14 @@ class OPP(Pair): .. 
code-block:: python opp = hoomd.hpmc.pair.OPP() - opp.params[('A', 'A')] = dict( - C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=3.14, r_cut=3.0 + opp.params[("A", "A")] = dict( + C1=1.0, + C2=1.0, + eta1=15, + eta2=3, + k=1.0, + phi=3.14, + r_cut=3.0, ) simulation.operations.integrator.pair_potentials = [opp] @@ -89,21 +95,23 @@ class OPP(Pair): .. code-block:: python - opp.mode = 'shift' + opp.mode = "shift" Type: `str` """ + _cpp_class_name = "PairPotentialOPP" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, default_r_cut=None, default_r_on=0.0, mode='none'): + def __init__(self, default_r_cut=None, default_r_on=0.0, mode="none"): if default_r_cut is None: default_r_cut = float else: default_r_cut = float(default_r_cut) params = hoomd.data.typeparam.TypeParameter( - 'params', 'particle_types', + "params", + "particle_types", hoomd.data.parameterdicts.TypeParameterDict( C1=float, C2=float, @@ -113,11 +121,14 @@ def __init__(self, default_r_cut=None, default_r_on=0.0, mode='none'): phi=float, r_cut=default_r_cut, r_on=float(default_r_on), - len_keys=2)) + len_keys=2, + ), + ) self._add_typeparam(params) self._param_dict.update( hoomd.data.parameterdicts.ParameterDict( - mode=hoomd.data.typeconverter.OnlyFrom(("none", "shift", - "xplor")))) + mode=hoomd.data.typeconverter.OnlyFrom(("none", "shift", "xplor")) + ) + ) self.mode = mode diff --git a/hoomd/hpmc/pair/step.py b/hoomd/hpmc/pair/step.py index 851d1a9003..9d6a896e94 100644 --- a/hoomd/hpmc/pair/step.py +++ b/hoomd/hpmc/pair/step.py @@ -17,7 +17,7 @@ from hoomd.data.typeconverter import OnlyIf, to_type_converter -@hoomd.logging.modify_namespace(('hpmc', 'pair', 'Step')) +@hoomd.logging.modify_namespace(("hpmc", "pair", "Step")) class Step(Pair): r"""Step function pair potential (HPMC). @@ -64,17 +64,25 @@ class Step(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PairPotentialStep" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) def __init__(self): params = hoomd.data.typeparam.TypeParameter( - 'params', 'particle_types', - hoomd.data.parameterdicts.TypeParameterDict(OnlyIf( - to_type_converter({ - 'epsilon': [float], - 'r': [float], - }), - allow_none=True), - len_keys=2)) + "params", + "particle_types", + hoomd.data.parameterdicts.TypeParameterDict( + OnlyIf( + to_type_converter( + { + "epsilon": [float], + "r": [float], + } + ), + allow_none=True, + ), + len_keys=2, + ), + ) self._add_typeparam(params) diff --git a/hoomd/hpmc/pair/union.py b/hoomd/hpmc/pair/union.py index dbfa0c27f3..a78f38e0fd 100644 --- a/hoomd/hpmc/pair/union.py +++ b/hoomd/hpmc/pair/union.py @@ -25,7 +25,7 @@ from .pair import Pair -@hoomd.logging.modify_namespace(('hpmc', 'pair', 'Union')) +@hoomd.logging.modify_namespace(("hpmc", "pair", "Union")) class Union(Pair): r"""Treat particles as extended bodies. @@ -81,9 +81,11 @@ class Union(Pair): .. 
code-block:: python union = hoomd.hpmc.pair.Union(constituent_potential=lennard_jones) - union.body['R'] = dict(types=['A', 'A', 'A'], - positions=[(-1,0,0), (0,0,0), (1,0,0)]) - union.body['A'] = None + union.body["R"] = dict( + types=["A", "A", "A"], + positions=[(-1, 0, 0), (0, 0, 0), (1, 0, 0)], + ) + union.body["A"] = None simulation.operations.integrator.pair_potentials = [union] @@ -134,35 +136,40 @@ class Union(Pair): union.leaf_capacity = 4 """ + _cpp_class_name = "PairPotentialUnion" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) def __init__(self, constituent_potential, leaf_capacity=0): body = TypeParameter( - 'body', 'particle_types', - TypeParameterDict(OnlyIf(to_type_converter( - dict(types=[str], - positions=[(float,) * 3], - orientations=OnlyIf(to_type_converter([(float,) * 4]), - allow_none=True), - charges=OnlyIf(to_type_converter([float]), - allow_none=True))), - allow_none=True), - len_keys=1, - _defaults={ - 'orientations': None, - 'charges': None - })) + "body", + "particle_types", + TypeParameterDict( + OnlyIf( + to_type_converter( + dict( + types=[str], + positions=[(float,) * 3], + orientations=OnlyIf( + to_type_converter([(float,) * 4]), allow_none=True + ), + charges=OnlyIf(to_type_converter([float]), allow_none=True), + ) + ), + allow_none=True, + ), + len_keys=1, + _defaults={"orientations": None, "charges": None}, + ), + ) self._add_typeparam(body) - param_dict = ParameterDict( - leaf_capacity=OnlyTypes(int, allow_none=True)) + param_dict = ParameterDict(leaf_capacity=OnlyTypes(int, allow_none=True)) param_dict.update(dict(leaf_capacity=leaf_capacity)) self._param_dict.update(param_dict) if not isinstance(constituent_potential, hoomd.hpmc.pair.Pair): - raise TypeError( - "constituent_potential must subclass hoomd.hpmc.pair.Pair") + raise TypeError("constituent_potential must subclass hoomd.hpmc.pair.Pair") self._constituent_potential = constituent_potential @property diff --git a/hoomd/hpmc/pytest/conftest.py b/hoomd/hpmc/pytest/conftest.py index 5a17d16e88..4e854439f2 100644 --- a/hoomd/hpmc/pytest/conftest.py +++ b/hoomd/hpmc/pytest/conftest.py @@ -3,236 +3,248 @@ import pytest import hoomd -from hoomd.hpmc.integrate import (ConvexPolygon, ConvexPolyhedron, - ConvexSpheropolygon, Ellipsoid, - FacetedEllipsoid, FacetedEllipsoidUnion, - Polyhedron, SimplePolygon, Sphere, - SphereUnion, ConvexSpheropolyhedron, - ConvexSpheropolyhedronUnion, Sphinx) +from hoomd.hpmc.integrate import ( + ConvexPolygon, + ConvexPolyhedron, + ConvexSpheropolygon, + Ellipsoid, + FacetedEllipsoid, + FacetedEllipsoidUnion, + Polyhedron, + SimplePolygon, + Sphere, + SphereUnion, + ConvexSpheropolyhedron, + ConvexSpheropolyhedronUnion, + Sphinx, +) from copy import deepcopy from collections import Counter _valid_args = [ - (ConvexPolygon, { - 'vertices': [(0, (0.75**0.5) / 2), (-0.5, -(0.75**0.5) / 2), - (0.5, -(0.75**0.5) / 2)] - }, 2), - (ConvexPolygon, { - 'vertices': [(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)] - }, 2), - (ConvexPolygon, { - 'vertices': [(-0.125, -0.125), (0.375, 0.125), (0.125, 0.375), - (-0.125, 0.125)], - 'sweep_radius': 0.3 - }, 2), - (ConvexPolygon, { - 'vertices': [(0, 0), (0.25, 0), (0.5, 0.25), (0.25, 0.5), (0, 0.25)], - 'ignore_statistics': 1 - }, 2), - (ConvexPolyhedron, { - 'vertices': [(0, (0.75**0.5) / 2, -0.5), (-0.5, -(0.75**0.5) / 2, -0.5), - (0.5, -(0.75**0.5) / 2, -0.5), (0, 0, 0.5)] - }, 3), - (ConvexPolyhedron, { - 'vertices': [(0, 0.25, 0), (0.375, 0.375, 0.375), (0.375, 0, 0.375), - (0, 0.375, 0.375), (0.375, 0.375, 0), 
(0, 0, 0.375)], - 'ignore_statistics': 1, - 'sweep_radius': 0.125 - }, 3), - (ConvexPolyhedron, { - 'vertices': [(0.25, 0, 0), (0.25, 0.25, 0), (0.25, 0.5, 0.25), - (0, 0.25, 0.25), (0.25, 0.25, 0.5), (0, 0, 0.25)], - 'sweep_radius': 0.2 - }, 3), - (ConvexPolyhedron, { - 'vertices': [(0, 0, 0), (0.25, 0.25, 0.25), (0.25, 0, 0.5), - (0.5, 0.25, 0.25)], - 'ignore_statistics': 1 - }, 3), - (ConvexSpheropolygon, { - 'vertices': [(0, (0.75**0.5) / 2), (-0.5, -(0.75**0.5) / 2), - (0.5, -(0.75**0.5) / 2)] - }, 2), - (ConvexSpheropolygon, { - 'vertices': [(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)] - }, 2), - (ConvexSpheropolygon, { - 'vertices': [(-0.125, -0.125), (0.375, 0.125), (0.125, 0.375), - (-0.125, 0.125)], - 'sweep_radius': 0.3 - }, 2), - (ConvexSpheropolygon, { - 'vertices': [(0, 0), (0.25, 0), (0.5, 0.25), (0.25, 0.5), (0, 0.25)], - 'ignore_statistics': 1 - }, 2), - (ConvexSpheropolyhedron, { - 'vertices': [(0, (0.75**0.5) / 2, -0.5), (-0.5, -(0.75**0.5) / 2, -0.5), - (0.5, -(0.75**0.5) / 2, -0.5), (0, 0, 0.5)] - }, 3), - (ConvexSpheropolyhedron, { - 'vertices': [(0, 0.25, 0), (0.375, 0.375, 0.375), (0.375, 0, 0.375), - (0, 0.375, 0.375), (0.375, 0.375, 0), (0, 0, 0.375)], - 'ignore_statistics': 1, - 'sweep_radius': 0.125 - }, 3), - (ConvexSpheropolyhedron, { - 'vertices': [(0.25, 0, 0), (0.25, 0.25, 0), (0.25, 0.5, 0.25), - (0, 0.25, 0.25), (0.25, 0.25, 0.5), (0, 0, 0.25)], - 'sweep_radius': 0.2 - }, 3), - (ConvexSpheropolyhedron, { - 'vertices': [(0, 0, 0), (0.25, 0.25, 0.25), (0.25, 0, 0.5), - (0.5, 0.25, 0.25)], - 'ignore_statistics': 1 - }, 3), - (Ellipsoid, { - 'a': 0.125, - 'b': 0.375, - 'c': 0.5 - }, 3), - (Ellipsoid, { - 'a': 1.0 / 6.0, - 'b': 2.0 / 6.0, - 'c': 0.5 - }, 3), - (Ellipsoid, { - 'a': 0.5, - 'b': 1.0 / 8.0, - 'c': 3.0 / 8.0, - 'ignore_statistics': 1 - }, 3), - (Ellipsoid, { - 'a': 1.0 / 12.0, - 'b': 5.0 / 12.0, - 'c': 0.5, - 'ignore_statistics': 0 - }, 3), - (FacetedEllipsoid, { - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }, 3), - (FacetedEllipsoid, { - "normals": [(0, 0, 1), (0, 1, 0), (1, 0, 0)], - "offsets": [0.1, 0.25, 0.25], - "a": 0.5, - "b": 0.25, - "c": 0.125, - "vertices": [], - "origin": (0, 0, 0) - }, 3), - (FacetedEllipsoid, { - "normals": [(1, 0, 0)], - "offsets": [0.25], - "a": 0.5, - "b": 0.25, - "c": 0.5, - "vertices": [], - "origin": (0, 0, 0.125), - "ignore_statistics": 1 - }, 3), - (FacetedEllipsoid, { - "normals": [(-1, 0, 0), (1, 0, 0), (0, -1, 0), (0, 1, 0), (0, 0, -1), - (0, 0, 1)], - "offsets": [-0.125, -1, -.5, -.5, -.5, -.5], - "a": 0.5, - "b": 0.5, - "c": 0.5, - "vertices": [[-.125, -.5, -.5], [-.125, -.5, .5], [-.125, .5, .5], - [-.125, .5, -.5], [1, -.5, -.5], [1, -.5, .5], [1, .5, .5], - [1, .5, -.5]], - "origin": (0, 0.125, 0) - }, 3), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': [{ + ( + ConvexPolygon, + { + "vertices": [ + (0, (0.75**0.5) / 2), + (-0.5, -(0.75**0.5) / 2), + (0.5, -(0.75**0.5) / 2), + ] + }, + 2, + ), + ( + ConvexPolygon, + {"vertices": [(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)]}, + 2, + ), + ( + ConvexPolygon, + { + "vertices": [ + (-0.125, -0.125), + (0.375, 0.125), + (0.125, 0.375), + (-0.125, 0.125), + ], + "sweep_radius": 0.3, + }, + 2, + ), + ( + ConvexPolygon, + { + "vertices": [(0, 0), (0.25, 0), (0.5, 0.25), (0.25, 0.5), (0, 0.25)], + "ignore_statistics": 1, + }, + 2, + ), + ( + ConvexPolyhedron, + { + "vertices": [ + (0, (0.75**0.5) / 2, -0.5), + (-0.5, -(0.75**0.5) / 2, -0.5), + (0.5, 
-(0.75**0.5) / 2, -0.5), + (0, 0, 0.5), + ] + }, + 3, + ), + ( + ConvexPolyhedron, + { + "vertices": [ + (0, 0.25, 0), + (0.375, 0.375, 0.375), + (0.375, 0, 0.375), + (0, 0.375, 0.375), + (0.375, 0.375, 0), + (0, 0, 0.375), + ], + "ignore_statistics": 1, + "sweep_radius": 0.125, + }, + 3, + ), + ( + ConvexPolyhedron, + { + "vertices": [ + (0.25, 0, 0), + (0.25, 0.25, 0), + (0.25, 0.5, 0.25), + (0, 0.25, 0.25), + (0.25, 0.25, 0.5), + (0, 0, 0.25), + ], + "sweep_radius": 0.2, + }, + 3, + ), + ( + ConvexPolyhedron, + { + "vertices": [ + (0, 0, 0), + (0.25, 0.25, 0.25), + (0.25, 0, 0.5), + (0.5, 0.25, 0.25), + ], + "ignore_statistics": 1, + }, + 3, + ), + ( + ConvexSpheropolygon, + { + "vertices": [ + (0, (0.75**0.5) / 2), + (-0.5, -(0.75**0.5) / 2), + (0.5, -(0.75**0.5) / 2), + ] + }, + 2, + ), + ( + ConvexSpheropolygon, + {"vertices": [(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)]}, + 2, + ), + ( + ConvexSpheropolygon, + { + "vertices": [ + (-0.125, -0.125), + (0.375, 0.125), + (0.125, 0.375), + (-0.125, 0.125), + ], + "sweep_radius": 0.3, + }, + 2, + ), + ( + ConvexSpheropolygon, + { + "vertices": [(0, 0), (0.25, 0), (0.5, 0.25), (0.25, 0.5), (0, 0.25)], + "ignore_statistics": 1, + }, + 2, + ), + ( + ConvexSpheropolyhedron, + { + "vertices": [ + (0, (0.75**0.5) / 2, -0.5), + (-0.5, -(0.75**0.5) / 2, -0.5), + (0.5, -(0.75**0.5) / 2, -0.5), + (0, 0, 0.5), + ] + }, + 3, + ), + ( + ConvexSpheropolyhedron, + { + "vertices": [ + (0, 0.25, 0), + (0.375, 0.375, 0.375), + (0.375, 0, 0.375), + (0, 0.375, 0.375), + (0.375, 0.375, 0), + (0, 0, 0.375), + ], + "ignore_statistics": 1, + "sweep_radius": 0.125, + }, + 3, + ), + ( + ConvexSpheropolyhedron, + { + "vertices": [ + (0.25, 0, 0), + (0.25, 0.25, 0), + (0.25, 0.5, 0.25), + (0, 0.25, 0.25), + (0.25, 0.25, 0.5), + (0, 0, 0.25), + ], + "sweep_radius": 0.2, + }, + 3, + ), + ( + ConvexSpheropolyhedron, + { + "vertices": [ + (0, 0, 0), + (0.25, 0.25, 0.25), + (0.25, 0, 0.5), + (0.5, 0.25, 0.25), + ], + "ignore_statistics": 1, + }, + 3, + ), + (Ellipsoid, {"a": 0.125, "b": 0.375, "c": 0.5}, 3), + (Ellipsoid, {"a": 1.0 / 6.0, "b": 2.0 / 6.0, "c": 0.5}, 3), + (Ellipsoid, {"a": 0.5, "b": 1.0 / 8.0, "c": 3.0 / 8.0, "ignore_statistics": 1}, 3), + ( + Ellipsoid, + {"a": 1.0 / 12.0, "b": 5.0 / 12.0, "c": 0.5, "ignore_statistics": 0}, + 3, + ), + ( + FacetedEllipsoid, + { "normals": [(0, 0, 1)], "a": 0.5, "b": 0.5, "c": 0.25, "vertices": [], "origin": (0, 0, 0), - "offsets": [0.125] - }, { - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }, 3), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': [{ - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }, { - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }], - 'positions': [(-0.1, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 1, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 0], - 'capacity': 3, - 'ignore_statistics': False - }, 3), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': [{ - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }, { - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }], - 'positions': 
[(0.1, 0, 0.1), (0, 0, 0)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [0, 1] - }, 3), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': [{ - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }, { + "offsets": [0.125], + }, + 3, + ), + ( + FacetedEllipsoid, + { "normals": [(0, 0, 1), (0, 1, 0), (1, 0, 0)], "offsets": [0.1, 0.25, 0.25], "a": 0.5, "b": 0.25, "c": 0.125, "vertices": [], - "origin": (0, 0, 0) - }, { + "origin": (0, 0, 0), + }, + 3, + ), + ( + FacetedEllipsoid, + { "normals": [(1, 0, 0)], "offsets": [0.25], "a": 0.5, @@ -240,703 +252,1176 @@ "c": 0.5, "vertices": [], "origin": (0, 0, 0.125), - "ignore_statistics": 1 - }], - 'positions': [(0, 0, 0), (0, 0, -0.1), (0.1, 0.1, 0.1)], - 'orientations': [(1, 1, 1, 1), (1, 0, 0, 0), (1, 0, 0, 1)], - 'overlap': [1, 1, 1], - 'capacity': 4, - 'ignore_statistics': 1 - }, 3), - (Polyhedron, { - "vertices": [(0.25, 0.25, 0.25), (-0.25, -0.25, 0.25), - (0.25, -0.25, -0.25), (-0.25, 0.25, -0.25)], - "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]] - }, 3), - (Polyhedron, { - 'vertices': [(-0.25, -0.25, -0.25), (-0.25, -0.25, 0.25), - (-0.25, 0.25, -0.25), (-0.25, 0.25, 0.25), - (0.25, -0.25, -0.25), (0.25, -0.25, 0.25), - (0.25, 0.25, -0.25), (0.25, 0.25, 0.25)], - 'faces': [[0, 2, 6], [6, 4, 0], [5, 0, 4], [5, 1, 0], [5, 4, 6], - [5, 6, 7], [3, 2, 0], [3, 0, 1], [3, 6, 2], [3, 7, 6], - [3, 1, 5], [3, 5, 7]], - 'overlap': [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], - 'sweep_radius': 0.1 - }, 3), - (Polyhedron, { - 'vertices': [(0, 0.3, 0), (0.2, 0.1, 0), (0.1, 0.3, 0.1), - (0.1, 0.1, 0.1), (0.1, 0.2, 0.5), (0.3, 0, 0.1), - (0, 0.3, 0.3)], - 'faces': [(0, 1, 2), (3, 2, 6), (1, 2, 4), (6, 1, 3), (3, 4, 6), - (4, 5, 1), (6, 2, 5)], - 'ignore_statistics': 1, - 'capacity': 4 - }, 3), - (Polyhedron, { - 'vertices': [(0, 0.5, 0), (1 / 3, 1 / 6, 0), (0.5, 0, 1 / 6), - (0, 0.5, 0.5)], - 'faces': [(0, 1, 2), (3, 2, 1), (1, 2, 0), (3, 2, 1)], - 'capacity': 5, - 'hull_only': True - }, 3), - (SimplePolygon, { - "vertices": [(0, (0.75**0.5) / 2), (0, 0), (-0.5, -(0.75**0.5) / 2), - (0.5, -(0.75**0.5) / 2)] - }, 2), - (SimplePolygon, { - "vertices": [(-0.5, 0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, -0.5)] - }, 2), - (SimplePolygon, { - "vertices": [(-0.5, 0.5), (0.5, -0.5), (0.5, 0.5)], - "ignore_statistics": 1 - }, 2), - (SimplePolygon, { - "vertices": [(-0.5, 0.5), (0.5, -0.5), (0.5, 0.5)], - }, 2), - (Sphere, { - "diameter": 1 - }, 3), - (Sphere, { - 'diameter': 1.1, - 'ignore_statistics': 1 - }, 3), - (Sphere, { - 'diameter': 0.9, - 'orientable': 1 - }, 3), - (Sphere, { - 'diameter': 0.8, - 'orientable': 1, - 'ignore_statistics': 1 - }, 3), - ((Sphere, SphereUnion), { - 'shapes': [{ - "diameter": 1 - }, { - "diameter": 1 - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }, 3), - ((Sphere, SphereUnion), { - 'shapes': [{ - "diameter": 1 - }, { - "diameter": 0.5 - }], - 'positions': [(0.2, 0, 0), (0, 0, 0.2)], - 'orientations': [(2**0.5, 2**0.5, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }, 3), - ((Sphere, SphereUnion), { - 'shapes': [{ - "diameter": 1 - }, { - "diameter": 1 - }], - 'positions': [(0.2, 0.2, 0), (-0.1, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 0] - }, 3), - ((Sphere, SphereUnion), { - 'shapes': [{ - "diameter": 1 - }, { - "diameter": 0.5 - }, { - "diameter": 0.75 - }], - 'positions': [(0, 0, 0), (0, -0.1, -0.1), (0.1, 0.1, 0.1)], - 
'orientations': [(1, 0, 0, 0), (1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1, 1], - 'capacity': 5, - 'ignore_statistics': 1 - }, 3), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': [{ - 'vertices': [(0.25, 0, 0), (0.25, 0.25, 0), (0.25, 0.5, 0.25), - (0, 0.25, 0.25), (0.25, 0.25, 0.5), (0, 0, 0.25)] - }, { - 'vertices': [(0.25, 0, 0), (0.25, 0.25, 0), (0.25, 0.5, 0.25), - (0, 0.25, 0.25), (0.25, 0.25, 0.5), (0, 0, 0.25)] - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }, 3), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': [{ - 'vertices': [(0.25, 0, 0), (0.25, 0.25, 0), (0.25, 0.5, 0.25), - (0, 0.25, 0.25), (0.25, 0.25, 0.5), (0, 0, 0.25)] - }, { - 'vertices': [(0, 0, 0), (0.25, 0.25, 0.25), (0.25, 0, 0.5), - (0.5, 0.25, 0.25)] - }], - 'positions': [(-0.1, 0, 0), (0, 0, 0.1)], - 'orientations': [(2**0.5, 2**0.5, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }, 3), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': [{ - 'vertices': [(0, 0, 0), (0.25, 0.25, 0.25), (0.25, 0, 0.5), - (0.5, 0.25, 0.25)] - }, { - 'vertices': [(0, 0, 0), (0.25, 0.25, 0.25), (0.25, 0, 0.5), - (0.5, 0.25, 0.25)] - }], - 'positions': [(-0.1, -0.1, 0), (0.1, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 0] - }, 3), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': [{ - 'vertices': [(0, 0, 0), (0.25, 0.25, 0.25), (0.25, 0, 0.5), - (0.5, 0.25, 0.25)] - }, { - 'vertices': [(0.25, 0, 0), (0.25, 0.25, 0), (0.25, 0.5, 0.25), - (0, 0.25, 0.25), (0.25, 0.25, 0.5), (0, 0, 0.25)] - }, { - 'vertices': [(0, 0, 0), (0.25, 0.25, 0.25), (0.25, 0, 0.5), - (0.5, 0.25, 0.25)] - }], - 'positions': [(0, 0, 0), (0, -0.1, -0.1), (0.1, 0.1, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1, 1], - 'capacity': 5, - 'ignore_statistics': 1 - }, 3), - (Sphinx, { - 'diameters': [1, -.001], - 'centers': [(0, 0, 0), (0.5, 0, 0)] - }, 3), - (Sphinx, { - 'diameters': [1, -1], - 'centers': [(0, 0, 0), (0.75, 0, 0)] - }, 3), - (Sphinx, { - 'diameters': [1, -0.5], - 'centers': [(0, 0, 0), (0, 0, .6)] - }, 3), - (Sphinx, { - 'diameters': [1, -0.25], - 'centers': [(0, 0, 0), (0.6, 0, 0)], - 'ignore_statistics': 1 - }, 3), + "ignore_statistics": 1, + }, + 3, + ), + ( + FacetedEllipsoid, + { + "normals": [ + (-1, 0, 0), + (1, 0, 0), + (0, -1, 0), + (0, 1, 0), + (0, 0, -1), + (0, 0, 1), + ], + "offsets": [-0.125, -1, -0.5, -0.5, -0.5, -0.5], + "a": 0.5, + "b": 0.5, + "c": 0.5, + "vertices": [ + [-0.125, -0.5, -0.5], + [-0.125, -0.5, 0.5], + [-0.125, 0.5, 0.5], + [-0.125, 0.5, -0.5], + [1, -0.5, -0.5], + [1, -0.5, 0.5], + [1, 0.5, 0.5], + [1, 0.5, -0.5], + ], + "origin": (0, 0.125, 0), + }, + 3, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": [ + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + ], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + 3, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": [ + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": 
[], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + ], + "positions": [(-0.1, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 1, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 0], + "capacity": 3, + "ignore_statistics": False, + }, + 3, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": [ + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + ], + "positions": [(0.1, 0, 0.1), (0, 0, 0)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [0, 1], + }, + 3, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": [ + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + { + "normals": [(0, 0, 1), (0, 1, 0), (1, 0, 0)], + "offsets": [0.1, 0.25, 0.25], + "a": 0.5, + "b": 0.25, + "c": 0.125, + "vertices": [], + "origin": (0, 0, 0), + }, + { + "normals": [(1, 0, 0)], + "offsets": [0.25], + "a": 0.5, + "b": 0.25, + "c": 0.5, + "vertices": [], + "origin": (0, 0, 0.125), + "ignore_statistics": 1, + }, + ], + "positions": [(0, 0, 0), (0, 0, -0.1), (0.1, 0.1, 0.1)], + "orientations": [(1, 1, 1, 1), (1, 0, 0, 0), (1, 0, 0, 1)], + "overlap": [1, 1, 1], + "capacity": 4, + "ignore_statistics": 1, + }, + 3, + ), + ( + Polyhedron, + { + "vertices": [ + (0.25, 0.25, 0.25), + (-0.25, -0.25, 0.25), + (0.25, -0.25, -0.25), + (-0.25, 0.25, -0.25), + ], + "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]], + }, + 3, + ), + ( + Polyhedron, + { + "vertices": [ + (-0.25, -0.25, -0.25), + (-0.25, -0.25, 0.25), + (-0.25, 0.25, -0.25), + (-0.25, 0.25, 0.25), + (0.25, -0.25, -0.25), + (0.25, -0.25, 0.25), + (0.25, 0.25, -0.25), + (0.25, 0.25, 0.25), + ], + "faces": [ + [0, 2, 6], + [6, 4, 0], + [5, 0, 4], + [5, 1, 0], + [5, 4, 6], + [5, 6, 7], + [3, 2, 0], + [3, 0, 1], + [3, 6, 2], + [3, 7, 6], + [3, 1, 5], + [3, 5, 7], + ], + "overlap": [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], + "sweep_radius": 0.1, + }, + 3, + ), + ( + Polyhedron, + { + "vertices": [ + (0, 0.3, 0), + (0.2, 0.1, 0), + (0.1, 0.3, 0.1), + (0.1, 0.1, 0.1), + (0.1, 0.2, 0.5), + (0.3, 0, 0.1), + (0, 0.3, 0.3), + ], + "faces": [ + (0, 1, 2), + (3, 2, 6), + (1, 2, 4), + (6, 1, 3), + (3, 4, 6), + (4, 5, 1), + (6, 2, 5), + ], + "ignore_statistics": 1, + "capacity": 4, + }, + 3, + ), + ( + Polyhedron, + { + "vertices": [ + (0, 0.5, 0), + (1 / 3, 1 / 6, 0), + (0.5, 0, 1 / 6), + (0, 0.5, 0.5), + ], + "faces": [(0, 1, 2), (3, 2, 1), (1, 2, 0), (3, 2, 1)], + "capacity": 5, + "hull_only": True, + }, + 3, + ), + ( + SimplePolygon, + { + "vertices": [ + (0, (0.75**0.5) / 2), + (0, 0), + (-0.5, -(0.75**0.5) / 2), + (0.5, -(0.75**0.5) / 2), + ] + }, + 2, + ), + ( + SimplePolygon, + {"vertices": [(-0.5, 0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, -0.5)]}, + 2, + ), + ( + SimplePolygon, + {"vertices": [(-0.5, 0.5), (0.5, -0.5), (0.5, 0.5)], "ignore_statistics": 1}, + 2, + ), + ( + SimplePolygon, + { + "vertices": [(-0.5, 0.5), (0.5, -0.5), (0.5, 0.5)], + }, + 2, + ), + (Sphere, {"diameter": 1}, 3), + (Sphere, {"diameter": 1.1, "ignore_statistics": 1}, 3), + (Sphere, {"diameter": 0.9, "orientable": 1}, 3), + (Sphere, {"diameter": 0.8, "orientable": 1, "ignore_statistics": 1}, 3), + ( + (Sphere, SphereUnion), + { + "shapes": [{"diameter": 1}, {"diameter": 1}], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + 
"overlap": [1, 1], + }, + 3, + ), + ( + (Sphere, SphereUnion), + { + "shapes": [{"diameter": 1}, {"diameter": 0.5}], + "positions": [(0.2, 0, 0), (0, 0, 0.2)], + "orientations": [(2**0.5, 2**0.5, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + 3, + ), + ( + (Sphere, SphereUnion), + { + "shapes": [{"diameter": 1}, {"diameter": 1}], + "positions": [(0.2, 0.2, 0), (-0.1, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 0], + }, + 3, + ), + ( + (Sphere, SphereUnion), + { + "shapes": [{"diameter": 1}, {"diameter": 0.5}, {"diameter": 0.75}], + "positions": [(0, 0, 0), (0, -0.1, -0.1), (0.1, 0.1, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1, 1], + "capacity": 5, + "ignore_statistics": 1, + }, + 3, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": [ + { + "vertices": [ + (0.25, 0, 0), + (0.25, 0.25, 0), + (0.25, 0.5, 0.25), + (0, 0.25, 0.25), + (0.25, 0.25, 0.5), + (0, 0, 0.25), + ] + }, + { + "vertices": [ + (0.25, 0, 0), + (0.25, 0.25, 0), + (0.25, 0.5, 0.25), + (0, 0.25, 0.25), + (0.25, 0.25, 0.5), + (0, 0, 0.25), + ] + }, + ], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + 3, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": [ + { + "vertices": [ + (0.25, 0, 0), + (0.25, 0.25, 0), + (0.25, 0.5, 0.25), + (0, 0.25, 0.25), + (0.25, 0.25, 0.5), + (0, 0, 0.25), + ] + }, + { + "vertices": [ + (0, 0, 0), + (0.25, 0.25, 0.25), + (0.25, 0, 0.5), + (0.5, 0.25, 0.25), + ] + }, + ], + "positions": [(-0.1, 0, 0), (0, 0, 0.1)], + "orientations": [(2**0.5, 2**0.5, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + 3, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": [ + { + "vertices": [ + (0, 0, 0), + (0.25, 0.25, 0.25), + (0.25, 0, 0.5), + (0.5, 0.25, 0.25), + ] + }, + { + "vertices": [ + (0, 0, 0), + (0.25, 0.25, 0.25), + (0.25, 0, 0.5), + (0.5, 0.25, 0.25), + ] + }, + ], + "positions": [(-0.1, -0.1, 0), (0.1, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 0], + }, + 3, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": [ + { + "vertices": [ + (0, 0, 0), + (0.25, 0.25, 0.25), + (0.25, 0, 0.5), + (0.5, 0.25, 0.25), + ] + }, + { + "vertices": [ + (0.25, 0, 0), + (0.25, 0.25, 0), + (0.25, 0.5, 0.25), + (0, 0.25, 0.25), + (0.25, 0.25, 0.5), + (0, 0, 0.25), + ] + }, + { + "vertices": [ + (0, 0, 0), + (0.25, 0.25, 0.25), + (0.25, 0, 0.5), + (0.5, 0.25, 0.25), + ] + }, + ], + "positions": [(0, 0, 0), (0, -0.1, -0.1), (0.1, 0.1, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1, 1], + "capacity": 5, + "ignore_statistics": 1, + }, + 3, + ), + (Sphinx, {"diameters": [1, -0.001], "centers": [(0, 0, 0), (0.5, 0, 0)]}, 3), + (Sphinx, {"diameters": [1, -1], "centers": [(0, 0, 0), (0.75, 0, 0)]}, 3), + (Sphinx, {"diameters": [1, -0.5], "centers": [(0, 0, 0), (0, 0, 0.6)]}, 3), + ( + Sphinx, + { + "diameters": [1, -0.25], + "centers": [(0, 0, 0), (0.6, 0, 0)], + "ignore_statistics": 1, + }, + 3, + ), ] _invalid_args = [ - (ConvexPolygon, { - 'vertices': "str" - }), - (ConvexPolygon, { - 'vertices': 1 - }), - (ConvexPolygon, { - 'vertices': [(0, 0), (1, 1), (1, 0), (0, 1), (1, 1), (0, 0), (2, 1), - (1, 3)], - 'sweep_radius': "str" - }), - (ConvexPolyhedron, { - 'vertices': "str" - }), - (ConvexPolyhedron, { - 'vertices': 1 - }), - (ConvexPolyhedron, { - 'vertices': [(0, 0, 0), (1, 1, 1), (1, 0, 
2), (2, 1, 1)], - 'sweep_radius': "str" - }), - (ConvexSpheropolygon, { - 'vertices': "str" - }), - (ConvexSpheropolygon, { - 'vertices': 1 - }), - (ConvexSpheropolygon, { - 'vertices': [(0, 0), (1, 1), (1, 0), (0, 1), (1, 1), (0, 0), (2, 1), - (1, 3)], - 'sweep_radius': "str" - }), - (ConvexSpheropolyhedron, { - 'vertices': "str" - }), - (ConvexSpheropolyhedron, { - 'vertices': 1 - }), - (ConvexSpheropolyhedron, { - 'vertices': [(0, 0, 0), (1, 1, 1), (1, 0, 2), (2, 1, 1)], - 'sweep_radius': "str" - }), - (Ellipsoid, { - 'a': 'str', - 'b': 'str', - 'c': 'str' - }), - (Ellipsoid, { - 'a': 1, - 'b': 3, - 'c': 'str' - }), - (Ellipsoid, { - 'a': [1, 2, 3], - 'b': [3, 7, 7], - 'c': [2, 5, 9] - }), - (FacetedEllipsoid, { - "normals": "str", - "a": 1, - "b": 1, - "c": 0.5, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0] - }), - (FacetedEllipsoid, { - "normals": [(0, 0, 1)], - "a": "str", - "b": 1, - "c": 0.5, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0] - }), - (FacetedEllipsoid, { - "normals": [(0, 0, 1)], - "a": 1, - "b": 1, - "c": 0.5, - "vertices": "str", - "origin": (0, 0, 0), - "offsets": [0] - }), - (FacetedEllipsoid, { - "normals": [(0, 0, 1)], - "a": 1, - "b": 1, - "c": 0.5, - "vertices": [], - "origin": (0, 0, 0), - "offsets": "str" - }), - (FacetedEllipsoid, { - "normals": 1, - "a": 1, - "b": 1, - "c": 0.5, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0] - }), - (FacetedEllipsoid, { - "normals": [(0, 0, 1)], - "a": [1, 2, 3], - "b": 1, - "c": 0.5, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0] - }), - (FacetedEllipsoid, { - "normals": [(0, 0, 1)], - "a": 1, - "b": 1, - "c": 0.5, - "vertices": 4, - "origin": (0, 0, 0), - "offsets": [0] - }), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': "str", - 'positions': [(1, 0, 0), (0, 0, 1)], - 'orientations': [(1, 1, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 0] - }), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': [{ - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }, { - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }], - 'positions': "str", - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [0, 1] - }), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': [{ - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }, { - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': "str", - 'overlap': [1, 1] - }), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': [{ - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0.125] - }, { - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, + (ConvexPolygon, {"vertices": "str"}), + (ConvexPolygon, {"vertices": 1}), + ( + ConvexPolygon, + { + "vertices": [ + (0, 0), + (1, 1), + (1, 0), + (0, 1), + (1, 1), + (0, 0), + (2, 1), + (1, 3), + ], + "sweep_radius": "str", + }, + ), + (ConvexPolyhedron, {"vertices": "str"}), + (ConvexPolyhedron, {"vertices": 1}), + ( + ConvexPolyhedron, + { + "vertices": [(0, 0, 0), (1, 1, 1), (1, 0, 2), (2, 1, 1)], + "sweep_radius": "str", + }, + ), + (ConvexSpheropolygon, {"vertices": "str"}), + (ConvexSpheropolygon, {"vertices": 1}), + ( + ConvexSpheropolygon, + 
{ + "vertices": [ + (0, 0), + (1, 1), + (1, 0), + (0, 1), + (1, 1), + (0, 0), + (2, 1), + (1, 3), + ], + "sweep_radius": "str", + }, + ), + (ConvexSpheropolyhedron, {"vertices": "str"}), + (ConvexSpheropolyhedron, {"vertices": 1}), + ( + ConvexSpheropolyhedron, + { + "vertices": [(0, 0, 0), (1, 1, 1), (1, 0, 2), (2, 1, 1)], + "sweep_radius": "str", + }, + ), + (Ellipsoid, {"a": "str", "b": "str", "c": "str"}), + (Ellipsoid, {"a": 1, "b": 3, "c": "str"}), + (Ellipsoid, {"a": [1, 2, 3], "b": [3, 7, 7], "c": [2, 5, 9]}), + ( + FacetedEllipsoid, + { + "normals": "str", + "a": 1, + "b": 1, + "c": 0.5, "vertices": [], "origin": (0, 0, 0), - "offsets": [0.125] - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': "str" - }), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': 1, - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': [{ + "offsets": [0], + }, + ), + ( + FacetedEllipsoid, + { "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, + "a": "str", + "b": 1, + "c": 0.5, "vertices": [], "origin": (0, 0, 0), - "offsets": [0.125] - }, { + "offsets": [0], + }, + ), + ( + FacetedEllipsoid, + { "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], + "a": 1, + "b": 1, + "c": 0.5, + "vertices": "str", "origin": (0, 0, 0), - "offsets": [0.125] - }], - 'positions': 1, - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': [{ + "offsets": [0], + }, + ), + ( + FacetedEllipsoid, + { "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, + "a": 1, + "b": 1, + "c": 0.5, "vertices": [], "origin": (0, 0, 0), - "offsets": [0.125] - }, { - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, + "offsets": "str", + }, + ), + ( + FacetedEllipsoid, + { + "normals": 1, + "a": 1, + "b": 1, + "c": 0.5, "vertices": [], "origin": (0, 0, 0), - "offsets": [0.125] - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': 1, - 'overlap': [1, 1] - }), - ((FacetedEllipsoid, FacetedEllipsoidUnion), { - 'shapes': [{ + "offsets": [0], + }, + ), + ( + FacetedEllipsoid, + { "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, + "a": [1, 2, 3], + "b": 1, + "c": 0.5, "vertices": [], "origin": (0, 0, 0), - "offsets": [0.125] - }, { + "offsets": [0], + }, + ), + ( + FacetedEllipsoid, + { "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 0.25, - "vertices": [], + "a": 1, + "b": 1, + "c": 0.5, + "vertices": 4, "origin": (0, 0, 0), - "offsets": [0.125] - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': 1 - }), - (Polyhedron, { - "vertices": "str", - "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]] - }), - (Polyhedron, { - "vertices": [(0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, -0.5), - (-0.5, 0.5, -0.5)], - "faces": "str" - }), - (Polyhedron, { - "vertices": 1, - "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]] - }), - (Polyhedron, { - "vertices": [(0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, -0.5), - (-0.5, 0.5, -0.5)], - "faces": 1 - }), - (Polyhedron, { - "vertices": [(0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, -0.5), - (-0.5, 0.5, -0.5)], - "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]], - 'overlap': "str" - }), - (Polyhedron, { - "vertices": [(0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, -0.5), - (-0.5, 0.5, -0.5)], - 
"faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]], - 'overlap': 1 - }), - (Polyhedron, { - "vertices": [(0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, -0.5), - (-0.5, 0.5, -0.5)], - "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]], - 'sweep_radius': "str" - }), - (Polyhedron, { - "vertices": [(0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, -0.5), - (-0.5, 0.5, -0.5)], - "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]], - 'capacity': "str" - }), - (SimplePolygon, { - "vertices": "str" - }), - (SimplePolygon, { - "vertices": 1 - }), - (SimplePolygon, { - "vertices": [(-1, 1), (1, -1), (1, 1)], - "sweep_radius": "str" - }), - (Sphere, { - "diameter": "str" - }), - (Sphere, { - 'diameter': [1, 2, 3] - }), - ((Sphere, SphereUnion), { - 'shapes': "str", - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }), - ((Sphere, SphereUnion), { - 'shapes': [{ - "diameter": 1 - }, { - "diameter": 1 - }], - 'positions': "str", - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }), - ((Sphere, SphereUnion), { - 'shapes': [{ - "diameter": 1 - }, { - "diameter": 1 - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': "str", - 'overlap': [1, 1] - }), - ((Sphere, SphereUnion), { - 'shapes': [{ - "diameter": 1 - }, { - "diameter": 1 - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': "str" - }), - ((Sphere, SphereUnion), { - 'shapes': 1, - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }), - ((Sphere, SphereUnion), { - 'shapes': [{ - "diameter": 1 - }, { - "diameter": 1 - }], - 'positions': 1, - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }), - ((Sphere, SphereUnion), { - 'shapes': [{ - "diameter": 1 - }, { - "diameter": 1 - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': 1, - 'overlap': [1, 1] - }), - ((Sphere, SphereUnion), { - 'shapes': [{ - "diameter": 1 - }, { - "diameter": 1 - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': 1 - }), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': "str", - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': [{ - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)] - }, { - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)] - }], - 'positions': "str", - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': [{ - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)] - }, { - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)] - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': "str", - 'overlap': [1, 1] - }), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': [{ - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)] - }, { - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)] - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': "str" - }), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': 1, - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 
'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': [{ - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)] - }, { - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)] - }], - 'positions': 1, - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': [1, 1] - }), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': [{ - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)] - }, { - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)] - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': 1, - 'overlap': [1, 1] - }), - ((ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), { - 'shapes': [{ - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)] - }, { - 'vertices': [(1, 0, 0), (1, 1, 0), (1, 2, 1), (0, 1, 1), (1, 1, 2), - (0, 0, 1)], - 'sweep_radius': 0.3 - }], - 'positions': [(0, 0, 0), (0, 0, 0.1)], - 'orientations': [(1, 0, 0, 0), (1, 0, 0, 0)], - 'overlap': 1 - }), - (Sphinx, { - 'diameters': "str", - 'centers': [(0, 0, 0), (0.8, 0, 0)] - }), - (Sphinx, { - 'diameters': [1, -1], - 'centers': "str" - }), - (Sphinx, { - 'diameters': 1, - 'centers': [(0, 0, 0), (0, 0, .6)] - }), - (Sphinx, { - 'diameters': [0.5, -0.25], - 'centers': 1 - }), + "offsets": [0], + }, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": "str", + "positions": [(1, 0, 0), (0, 0, 1)], + "orientations": [(1, 1, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 0], + }, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": [ + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + ], + "positions": "str", + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [0, 1], + }, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": [ + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + ], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": "str", + "overlap": [1, 1], + }, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": [ + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + ], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": "str", + }, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": 1, + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": [ + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + ], + "positions": 1, + "orientations": 
[(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": [ + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + ], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": 1, + "overlap": [1, 1], + }, + ), + ( + (FacetedEllipsoid, FacetedEllipsoidUnion), + { + "shapes": [ + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 0.25, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0.125], + }, + ], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": 1, + }, + ), + ( + Polyhedron, + {"vertices": "str", "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]]}, + ), + ( + Polyhedron, + { + "vertices": [ + (0.5, 0.5, 0.5), + (-0.5, -0.5, 0.5), + (0.5, -0.5, -0.5), + (-0.5, 0.5, -0.5), + ], + "faces": "str", + }, + ), + ( + Polyhedron, + {"vertices": 1, "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]]}, + ), + ( + Polyhedron, + { + "vertices": [ + (0.5, 0.5, 0.5), + (-0.5, -0.5, 0.5), + (0.5, -0.5, -0.5), + (-0.5, 0.5, -0.5), + ], + "faces": 1, + }, + ), + ( + Polyhedron, + { + "vertices": [ + (0.5, 0.5, 0.5), + (-0.5, -0.5, 0.5), + (0.5, -0.5, -0.5), + (-0.5, 0.5, -0.5), + ], + "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]], + "overlap": "str", + }, + ), + ( + Polyhedron, + { + "vertices": [ + (0.5, 0.5, 0.5), + (-0.5, -0.5, 0.5), + (0.5, -0.5, -0.5), + (-0.5, 0.5, -0.5), + ], + "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]], + "overlap": 1, + }, + ), + ( + Polyhedron, + { + "vertices": [ + (0.5, 0.5, 0.5), + (-0.5, -0.5, 0.5), + (0.5, -0.5, -0.5), + (-0.5, 0.5, -0.5), + ], + "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]], + "sweep_radius": "str", + }, + ), + ( + Polyhedron, + { + "vertices": [ + (0.5, 0.5, 0.5), + (-0.5, -0.5, 0.5), + (0.5, -0.5, -0.5), + (-0.5, 0.5, -0.5), + ], + "faces": [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]], + "capacity": "str", + }, + ), + (SimplePolygon, {"vertices": "str"}), + (SimplePolygon, {"vertices": 1}), + (SimplePolygon, {"vertices": [(-1, 1), (1, -1), (1, 1)], "sweep_radius": "str"}), + (Sphere, {"diameter": "str"}), + (Sphere, {"diameter": [1, 2, 3]}), + ( + (Sphere, SphereUnion), + { + "shapes": "str", + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + ), + ( + (Sphere, SphereUnion), + { + "shapes": [{"diameter": 1}, {"diameter": 1}], + "positions": "str", + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + ), + ( + (Sphere, SphereUnion), + { + "shapes": [{"diameter": 1}, {"diameter": 1}], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": "str", + "overlap": [1, 1], + }, + ), + ( + (Sphere, SphereUnion), + { + "shapes": [{"diameter": 1}, {"diameter": 1}], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": "str", + }, + ), + ( + (Sphere, SphereUnion), + { + "shapes": 1, + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + ), + ( + (Sphere, SphereUnion), + { + "shapes": [{"diameter": 1}, {"diameter": 1}], + "positions": 1, + "orientations": [(1, 
0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + ), + ( + (Sphere, SphereUnion), + { + "shapes": [{"diameter": 1}, {"diameter": 1}], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": 1, + "overlap": [1, 1], + }, + ), + ( + (Sphere, SphereUnion), + { + "shapes": [{"diameter": 1}, {"diameter": 1}], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": 1, + }, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": "str", + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": [ + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ] + }, + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ] + }, + ], + "positions": "str", + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": [ + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ] + }, + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ] + }, + ], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": "str", + "overlap": [1, 1], + }, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": [ + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ] + }, + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ] + }, + ], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": "str", + }, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": 1, + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": [ + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ] + }, + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ] + }, + ], + "positions": 1, + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": [1, 1], + }, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": [ + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ] + }, + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ] + }, + ], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": 1, + "overlap": [1, 1], + }, + ), + ( + (ConvexSpheropolyhedron, ConvexSpheropolyhedronUnion), + { + "shapes": [ + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ] + }, + { + "vertices": [ + (1, 0, 0), + (1, 1, 0), + (1, 2, 1), + (0, 1, 1), + (1, 1, 2), + (0, 0, 1), + ], + "sweep_radius": 0.3, + }, + ], + "positions": [(0, 0, 0), (0, 0, 0.1)], + "orientations": [(1, 0, 0, 0), (1, 0, 0, 0)], + "overlap": 1, + }, + ), + (Sphinx, {"diameters": "str", "centers": [(0, 0, 0), (0.8, 0, 0)]}), + (Sphinx, {"diameters": [1, -1], "centers": "str"}), + (Sphinx, {"diameters": 1, "centers": [(0, 0, 0), (0, 0, 0.6)]}), + (Sphinx, {"diameters": [0.5, -0.25], "centers": 1}), ] class CounterWrapper: - def 
__init__(self, func): self.func = func self._counter = [] def __call__(self, *args, **kwargs): - self._counter.append(str(args[0][0]).split('.')[-1][:-2]) + self._counter.append(str(args[0][0]).split(".")[-1][:-2]) return self.func(*args, **kwargs) def count(self, integrator): - return Counter(self._counter)[str(integrator).split('.')[-1][:-2]] + return Counter(self._counter)[str(integrator).split(".")[-1][:-2]] @CounterWrapper @@ -947,7 +1432,7 @@ def valid_args_id(args): else: name = integrator.__name__ - return name + '-' + str(valid_args_id.count(str(integrator))) + return name + "-" + str(valid_args_id.count(str(integrator))) @pytest.fixture(scope="function", params=_valid_args, ids=valid_args_id) @@ -962,7 +1447,7 @@ def invalid_args_id(args): name = integrator[1].__name__ else: name = integrator.__name__ - return name + '-' + str(valid_args_id.count(str(integrator))) + return name + "-" + str(valid_args_id.count(str(integrator))) @pytest.fixture(scope="function", params=_invalid_args, ids=invalid_args_id) @@ -987,9 +1472,9 @@ def _test_moves_args(_valid_args): return args_list -@pytest.fixture(scope="function", - params=_test_moves_args(_valid_args), - ids=_test_moves_id) +@pytest.fixture( + scope="function", params=_test_moves_args(_valid_args), ids=_test_moves_id +) def test_moves_args(request): return deepcopy(request.param) @@ -998,23 +1483,23 @@ def _cpp_args(_valid_args): args_list = [] for integrator, args, n_dimensions in _valid_args: cpp_shape = None - if 'SphereUnion' in str(integrator): + if "SphereUnion" in str(integrator): cpp_shape = hoomd.hpmc._hpmc.SphereUnionParams - elif 'Sphere' in str(integrator): + elif "Sphere" in str(integrator): cpp_shape = hoomd.hpmc._hpmc.SphereParams - elif 'Sphinx' in str(integrator): + elif "Sphinx" in str(integrator): cpp_shape = hoomd.hpmc._hpmc.SphinxParams - elif 'FacetedEllipsoidUnion' in str(integrator): + elif "FacetedEllipsoidUnion" in str(integrator): cpp_shape = hoomd.hpmc._hpmc.mfellipsoid_params - elif 'FacetedEllipsoid' in str(integrator): + elif "FacetedEllipsoid" in str(integrator): cpp_shape = hoomd.hpmc._hpmc.FacetedEllipsoidParams - elif 'Ellipsoid' in str(integrator): + elif "Ellipsoid" in str(integrator): cpp_shape = hoomd.hpmc._hpmc.EllipsoidParams - elif 'ConvexSpheropolyhedronUnion' in str(integrator): + elif "ConvexSpheropolyhedronUnion" in str(integrator): cpp_shape = hoomd.hpmc._hpmc.mpoly3d_params - elif 'polygon' in str(integrator).lower(): + elif "polygon" in str(integrator).lower(): cpp_shape = hoomd.hpmc._hpmc.PolygonVertices - elif 'Convex' in str(integrator): + elif "Convex" in str(integrator): cpp_shape = hoomd.hpmc._hpmc.PolyhedronVertices if cpp_shape: if isinstance(integrator, tuple): @@ -1027,8 +1512,8 @@ def _cpp_args(_valid_args): inner_mc.shape["A"] = args["shapes"][i] args["shapes"][i] = inner_mc.shape["A"].to_base() mc = integrator() - mc.shape['A'] = args - args_list.append((cpp_shape, mc.shape['A'].to_base())) + mc.shape["A"] = args + args_list.append((cpp_shape, mc.shape["A"].to_base())) return args_list @@ -1038,8 +1523,6 @@ def cpp_args_id(args): return str(integrator) + str(valid_args_id.count(str(integrator))) -@pytest.fixture(scope="function", - params=_cpp_args(_valid_args), - ids=cpp_args_id) +@pytest.fixture(scope="function", params=_cpp_args(_valid_args), ids=cpp_args_id) def cpp_args(request): return deepcopy(request.param) diff --git a/hoomd/hpmc/pytest/test_boxmc.py b/hoomd/hpmc/pytest/test_boxmc.py index 4288a0312d..f4aacedac0 100644 --- a/hoomd/hpmc/pytest/test_boxmc.py +++ 
b/hoomd/hpmc/pytest/test_boxmc.py @@ -13,79 +13,41 @@ valid_constructor_args = [ dict(trigger=hoomd.trigger.Periodic(10), P=10), dict(trigger=hoomd.trigger.After(100), P=hoomd.variant.Ramp(1, 5, 0, 100)), - dict(trigger=hoomd.trigger.Before(100), - P=hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15)), - dict(trigger=hoomd.trigger.Periodic(1000), - P=hoomd.variant.Power(1, 5, 3, 0, 100)), + dict( + trigger=hoomd.trigger.Before(100), + P=hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15), + ), + dict(trigger=hoomd.trigger.Periodic(1000), P=hoomd.variant.Power(1, 5, 3, 0, 100)), ] -valid_attrs = [('P', hoomd.variant.Constant(10)), - ('P', hoomd.variant.Ramp(1, 5, 0, 100)), - ('P', hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15)), - ('P', hoomd.variant.Power(1, 5, 3, 0, 100)), - ('volume', { - 'mode': 'standard', - 'weight': 0.7, - 'delta': 0.3 - }), ('volume', { - 'mode': 'ln', - 'weight': 0.1, - 'delta': 1.2 - }), ('aspect', { - 'weight': 0.3, - 'delta': 0.1 - }), ('length', { - 'weight': 0.5, - 'delta': [0.8] * 3 - }), ('shear', { - 'weight': 0.7, - 'delta': [0.3] * 3, - 'reduce': 0.1 - })] - -box_moves_attrs = [{ - 'move': 'volume', - "params": { - 'mode': 'standard', - 'weight': 1, - 'delta': 0.001 - } -}, { - 'move': 'volume', - "params": { - 'mode': 'ln', - 'weight': 1, - 'delta': 0.001 - } -}, { - 'move': 'aspect', - "params": { - 'weight': 1, - 'delta': 0.001 - } -}, { - 'move': 'shear', - "params": { - 'weight': 1, - 'delta': (0.001,) * 3, - 'reduce': 0.2 - } -}, { - 'move': 'length', - "params": { - 'weight': 1, - 'delta': (0.001,) * 3 - } -}] +valid_attrs = [ + ("P", hoomd.variant.Constant(10)), + ("P", hoomd.variant.Ramp(1, 5, 0, 100)), + ("P", hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15)), + ("P", hoomd.variant.Power(1, 5, 3, 0, 100)), + ("volume", {"mode": "standard", "weight": 0.7, "delta": 0.3}), + ("volume", {"mode": "ln", "weight": 0.1, "delta": 1.2}), + ("aspect", {"weight": 0.3, "delta": 0.1}), + ("length", {"weight": 0.5, "delta": [0.8] * 3}), + ("shear", {"weight": 0.7, "delta": [0.3] * 3, "reduce": 0.1}), +] + +box_moves_attrs = [ + {"move": "volume", "params": {"mode": "standard", "weight": 1, "delta": 0.001}}, + {"move": "volume", "params": {"mode": "ln", "weight": 1, "delta": 0.001}}, + {"move": "aspect", "params": {"weight": 1, "delta": 0.001}}, + {"move": "shear", "params": {"weight": 1, "delta": (0.001,) * 3, "reduce": 0.2}}, + {"move": "length", "params": {"weight": 1, "delta": (0.001,) * 3}}, +] @pytest.fixture def counter_attrs(): return { - 'volume': "volume_moves", - 'length': "volume_moves", - 'aspect': "aspect_moves", - 'shear': "shear_moves" + "volume": "volume_moves", + "length": "volume_moves", + "aspect": "aspect_moves", + "shear": "shear_moves", } @@ -101,8 +63,9 @@ def _is_close(v1, v2): def obj_attr_check(boxmc, mapping): for attr, value in mapping.items(): obj_value = getattr(boxmc, attr) - if (isinstance(obj_value, hoomd.variant.Constant) - and not isinstance(value, hoomd.variant.Constant)): + if isinstance(obj_value, hoomd.variant.Constant) and not isinstance( + value, hoomd.variant.Constant + ): assert obj_value(0) == value continue assert getattr(boxmc, attr) == value @@ -118,9 +81,9 @@ def test_valid_construction(constructor_args): @pytest.mark.parametrize("constructor_args", valid_constructor_args) -def test_valid_construction_and_attach(simulation_factory, - two_particle_snapshot_factory, - constructor_args): +def test_valid_construction_and_attach( + simulation_factory, two_particle_snapshot_factory, constructor_args +): """Test that BoxMC can be attached 
with valid arguments.""" boxmc = hoomd.hpmc.update.BoxMC(**constructor_args) @@ -129,7 +92,7 @@ def test_valid_construction_and_attach(simulation_factory, # BoxMC requires an HPMC integrator mc = hoomd.hpmc.integrate.Sphere() - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc # create C++ mirror classes and set parameters @@ -155,8 +118,9 @@ def test_valid_setattr(attr, value): @pytest.mark.parametrize("attr,value", valid_attrs) -def test_valid_setattr_attached(attr, value, simulation_factory, - two_particle_snapshot_factory): +def test_valid_setattr_attached( + attr, value, simulation_factory, two_particle_snapshot_factory +): """Test that BoxMC can get and set attributes while attached.""" boxmc = hoomd.hpmc.update.BoxMC(trigger=hoomd.trigger.Periodic(10), P=10) @@ -165,7 +129,7 @@ def test_valid_setattr_attached(attr, value, simulation_factory, # BoxMC requires an HPMC integrator mc = hoomd.hpmc.integrate.Sphere() - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc # create C++ mirror classes and set parameters @@ -183,8 +147,7 @@ def test_valid_setattr_attached(attr, value, simulation_factory, @pytest.mark.parametrize("P", [1, 3, 5, 7, 10]) @pytest.mark.parametrize("box_move", box_moves_attrs) -def test_sphere_compression(P, box_move, simulation_factory, - lattice_snapshot_factory): +def test_sphere_compression(P, box_move, simulation_factory, lattice_snapshot_factory): """Test that BoxMC can compress (and expand) simulation boxes.""" n = 7 snap = lattice_snapshot_factory(dimensions=3, n=n, a=1.3) @@ -196,7 +159,7 @@ def test_sphere_compression(P, box_move, simulation_factory, sim.operations.updaters.append(boxmc) mc = hoomd.hpmc.integrate.Sphere(default_d=0.05) - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc # run w/o setting any of the box moves @@ -207,7 +170,7 @@ def test_sphere_compression(P, box_move, simulation_factory, assert sim.state.box == initial_box # add a box move - setattr(boxmc, box_move['move'], box_move['params']) + setattr(boxmc, box_move["move"], box_move["params"]) sim.run(5) # check that box is changed @@ -217,8 +180,7 @@ def test_sphere_compression(P, box_move, simulation_factory, @pytest.mark.parametrize("P", [1, 3, 5, 7, 10]) @pytest.mark.parametrize("box_move", box_moves_attrs) -def test_disk_compression(P, box_move, simulation_factory, - lattice_snapshot_factory): +def test_disk_compression(P, box_move, simulation_factory, lattice_snapshot_factory): """Test that BoxMC can compress (and expand) simulation boxes.""" n = 7 snap = lattice_snapshot_factory(dimensions=2, n=n, a=1.3) @@ -230,7 +192,7 @@ def test_disk_compression(P, box_move, simulation_factory, sim.operations.updaters.append(boxmc) mc = hoomd.hpmc.integrate.Sphere(default_d=0.05) - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc # run w/o setting any of the box moves @@ -241,7 +203,7 @@ def test_disk_compression(P, box_move, simulation_factory, assert sim.state.box == initial_box # add a box move - setattr(boxmc, box_move['move'], box_move['params']) + setattr(boxmc, box_move["move"], box_move["params"]) sim.run(50) # check that box is changed @@ -250,8 +212,9 @@ def test_disk_compression(P, box_move, simulation_factory, @pytest.mark.parametrize("box_move", box_moves_attrs) -def test_counters(box_move, simulation_factory, lattice_snapshot_factory, - counter_attrs): +def test_counters( + box_move, 
simulation_factory, lattice_snapshot_factory, counter_attrs +): """Test that BoxMC counters count corectly.""" boxmc = hoomd.hpmc.update.BoxMC(P=3, trigger=1) # check result when box object is unattached @@ -264,7 +227,7 @@ def test_counters(box_move, simulation_factory, lattice_snapshot_factory, sim.operations.updaters.append(boxmc) mc = hoomd.hpmc.integrate.Sphere(default_d=0.05) - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc # run w/o setting any of the box moves @@ -275,13 +238,13 @@ def test_counters(box_move, simulation_factory, lattice_snapshot_factory, assert getattr(boxmc, v) == (0, 0) # add a box move - setattr(boxmc, box_move['move'], box_move['params']) + setattr(boxmc, box_move["move"], box_move["params"]) # run with box move sim.run(10) # check some moves are accepted after properly setting a box move - for (k, v) in counter_attrs.items(): - if k == box_move['move']: + for k, v in counter_attrs.items(): + if k == box_move["move"]: ctr = getattr(boxmc, v) assert ctr[0] > 0 assert ctr[0] + ctr[1] == 10 @@ -290,27 +253,21 @@ def test_counters(box_move, simulation_factory, lattice_snapshot_factory, @pytest.mark.parametrize("box_move", box_moves_attrs) def test_pickling(box_move, simulation_factory, two_particle_snapshot_factory): boxmc = hoomd.hpmc.update.BoxMC(P=3, trigger=1) - setattr(boxmc, box_move['move'], box_move['params']) + setattr(boxmc, box_move["move"], box_move["params"]) sim = simulation_factory(two_particle_snapshot_factory()) mc = hoomd.hpmc.integrate.Sphere() - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc operation_pickling_check(boxmc, sim) def test_logging(): logging_check( - hoomd.hpmc.update.BoxMC, ('hpmc', 'update'), { - 'aspect_moves': { - 'category': LoggerCategories.sequence, - 'default': True - }, - 'shear_moves': { - 'category': LoggerCategories.sequence, - 'default': True - }, - 'volume_moves': { - 'category': LoggerCategories.sequence, - 'default': True - } - }) + hoomd.hpmc.update.BoxMC, + ("hpmc", "update"), + { + "aspect_moves": {"category": LoggerCategories.sequence, "default": True}, + "shear_moves": {"category": LoggerCategories.sequence, "default": True}, + "volume_moves": {"category": LoggerCategories.sequence, "default": True}, + }, + ) diff --git a/hoomd/hpmc/pytest/test_boxmc_move_tuner.py b/hoomd/hpmc/pytest/test_boxmc_move_tuner.py index 37f4fa2155..9ca5accae0 100644 --- a/hoomd/hpmc/pytest/test_boxmc_move_tuner.py +++ b/hoomd/hpmc/pytest/test_boxmc_move_tuner.py @@ -7,11 +7,18 @@ import hoomd from hoomd import hpmc from hoomd.conftest import operation_pickling_check -from hoomd.hpmc.tune.boxmc_move_size import (_MoveSizeTuneDefinition, - BoxMCMoveSize) +from hoomd.hpmc.tune.boxmc_move_size import _MoveSizeTuneDefinition, BoxMCMoveSize -MOVE_TYPES = ("aspect", "volume", "shear_x", "shear_y", "shear_z", "length_x", - "length_y", "length_z") +MOVE_TYPES = ( + "aspect", + "volume", + "shear_x", + "shear_y", + "shear_z", + "length_x", + "length_y", + "length_z", +) def generate_move_definition(rng, move=None): @@ -45,8 +52,7 @@ def simulation(device, simulation_factory, lattice_snapshot_factory): n = (4, 4, 8) else: n = (6, 6, 8) - snap = lattice_snapshot_factory(dimensions=3, r=1e-2, n=n, - a=2) # 72 particles + snap = lattice_snapshot_factory(dimensions=3, r=1e-2, n=n, a=2) # 72 particles sim = simulation_factory(snap) integrator = hpmc.integrate.Sphere(default_d=0.01) integrator.shape["A"] = dict(diameter=0.9) @@ -77,15 +83,14 @@ def 
move_size_definition(move_definition_dict, boxmc): class TestMoveSizeTuneDefinition: - def test_getting_attrs(self, move_definition_dict, move_size_definition): for attr in move_definition_dict: if attr == "attr": assert move_definition_dict[attr].split("_")[0] == getattr( - move_size_definition, attr) + move_size_definition, attr + ) continue - assert move_definition_dict[attr] == getattr( - move_size_definition, attr) + assert move_definition_dict[attr] == getattr(move_size_definition, attr) def test_setting_attrs(self, move_size_definition): move_size_definition.domain = (None, 5) @@ -95,8 +100,7 @@ def test_setting_attrs(self, move_size_definition): move_size_definition.target = 0.9 assert move_size_definition.target == 0.9 - def test_getting_acceptance_rate(self, move_size_definition, simulation, - boxmc): + def test_getting_acceptance_rate(self, move_size_definition, simulation, boxmc): simulation.operations += boxmc simulation.run(0) # needed to set previous values need to to calculate acceptance rate @@ -114,8 +118,9 @@ def test_getting_acceptance_rate(self, move_size_definition, simulation, calc_acceptance_rate = (accepted) / (accepted + rejected) assert isclose(move_size_definition.y, calc_acceptance_rate) - def test_getting_setting_move_size(self, rng, boxmc, move_size_definition, - simulation): + def test_getting_setting_move_size( + self, rng, boxmc, move_size_definition, simulation + ): attr = move_size_definition.attr def set_move_size(new_value): @@ -141,24 +146,26 @@ def get_move_size(): move_size_definition.x = rng.uniform(0, 10) assert move_size_definition.x == get_move_size() - def test_hash(self, move_size_definition, move_definition_dict, simulation, - boxmc): - identical_definition = _MoveSizeTuneDefinition(**move_definition_dict, - boxmc=boxmc) + def test_hash(self, move_size_definition, move_definition_dict, simulation, boxmc): + identical_definition = _MoveSizeTuneDefinition( + **move_definition_dict, boxmc=boxmc + ) assert hash(identical_definition) == hash(move_size_definition) move_definition_dict["domain"] = (None, 5) - different_definition = _MoveSizeTuneDefinition(**move_definition_dict, - boxmc=boxmc) + different_definition = _MoveSizeTuneDefinition( + **move_definition_dict, boxmc=boxmc + ) assert hash(different_definition) != hash(move_size_definition) - def test_eq(self, move_size_definition, move_definition_dict, simulation, - boxmc): - identical_definition = _MoveSizeTuneDefinition(**move_definition_dict, - boxmc=boxmc) + def test_eq(self, move_size_definition, move_definition_dict, simulation, boxmc): + identical_definition = _MoveSizeTuneDefinition( + **move_definition_dict, boxmc=boxmc + ) assert identical_definition == move_size_definition move_definition_dict["domain"] = (None, 5) - different_definition = _MoveSizeTuneDefinition(**move_definition_dict, - boxmc=boxmc) + different_definition = _MoveSizeTuneDefinition( + **move_definition_dict, boxmc=boxmc + ) assert different_definition != move_size_definition @@ -190,11 +197,7 @@ def boxmc_with_tuner(rng, boxmc_tuner_method_and_kwargs): elif move.startswith("l"): delta = [0.0, 0.0, 0.0] delta[["x", "y", "z"].index(move[-1])] = 0.05 - setattr(boxmc, - move.split("_")[0], { - "weight": 1.0, - "delta": tuple(delta) - }) + setattr(boxmc, move.split("_")[0], {"weight": 1.0, "delta": tuple(delta)}) else: boxmc.shear = {"weight": 1.0, "delta": (1e-1,) * 3, "reduce": 1.0} cls_methods = (BoxMCMoveSize.secant_solver, BoxMCMoveSize.scale_solver) @@ -203,7 +206,6 @@ def boxmc_with_tuner(rng, 
boxmc_tuner_method_and_kwargs): class TestMoveSize: - def test_construction(self, boxmc_tuner_method_and_kwargs, boxmc): cls, params = boxmc_tuner_method_and_kwargs move_size = cls(**params, boxmc=boxmc) @@ -248,7 +250,7 @@ def test_set_params(self, boxmc_with_tuner): assert all(target == t.target for t in move_size_tuner._tunables) assert target == move_size_tuner.target - max_move = 4. + max_move = 4.0 move_size_tuner.max_move_size["volume"] = max_move assert move_size_tuner.max_move_size["volume"] == max_move diff --git a/hoomd/hpmc/pytest/test_compute_free_volume.py b/hoomd/hpmc/pytest/test_compute_free_volume.py index 585145fc58..670253de76 100644 --- a/hoomd/hpmc/pytest/test_compute_free_volume.py +++ b/hoomd/hpmc/pytest/test_compute_free_volume.py @@ -11,31 +11,29 @@ def test_before_attaching(): - free_volume = hoomd.hpmc.compute.FreeVolume(test_particle_type='B', - num_samples=100) + free_volume = hoomd.hpmc.compute.FreeVolume(test_particle_type="B", num_samples=100) - assert free_volume.test_particle_type == 'B' + assert free_volume.test_particle_type == "B" assert free_volume.num_samples == 100 with pytest.raises(DataAccessError): free_volume.free_volume def test_after_attaching(simulation_factory, lattice_snapshot_factory): - snap = lattice_snapshot_factory(particle_types=['A', 'B']) + snap = lattice_snapshot_factory(particle_types=["A", "B"]) sim = simulation_factory(snap) mc = hoomd.hpmc.integrate.Sphere() - mc.shape["A"] = {'diameter': 1.0} - mc.shape["B"] = {'diameter': 0.2} + mc.shape["A"] = {"diameter": 1.0} + mc.shape["B"] = {"diameter": 0.2} sim.operations.add(mc) - free_volume = hoomd.hpmc.compute.FreeVolume(test_particle_type='B', - num_samples=100) + free_volume = hoomd.hpmc.compute.FreeVolume(test_particle_type="B", num_samples=100) sim.operations.add(free_volume) assert len(sim.operations.computes) == 1 sim.run(0) - assert free_volume.test_particle_type == 'B' + assert free_volume.test_particle_type == "B" assert free_volume.num_samples == 100 sim.run(10) @@ -50,32 +48,29 @@ def test_after_attaching(simulation_factory, lattice_snapshot_factory): @pytest.mark.parametrize("radius1, radius2", _radii) -def test_validation_systems(simulation_factory, lattice_snapshot_factory, - radius1, radius2): +def test_validation_systems( + simulation_factory, lattice_snapshot_factory, radius1, radius2 +): n = 7 - free_volume = (n**3) * (1 - (4 / 3) * np.pi * (radius1 + radius2)**3) + free_volume = (n**3) * (1 - (4 / 3) * np.pi * (radius1 + radius2) ** 3) free_volume = max([0.0, free_volume]) sim = simulation_factory( - lattice_snapshot_factory(particle_types=['A', 'B'], - n=n, - a=1, - dimensions=3, - r=0)) + lattice_snapshot_factory(particle_types=["A", "B"], n=n, a=1, dimensions=3, r=0) + ) mc = hoomd.hpmc.integrate.Sphere() - mc.shape["A"] = {'diameter': radius1 * 2} - mc.shape["B"] = {'diameter': radius2 * 2} + mc.shape["A"] = {"diameter": radius1 * 2} + mc.shape["B"] = {"diameter": radius2 * 2} sim.operations.add(mc) - free_volume_compute = hoomd.hpmc.compute.FreeVolume(test_particle_type='B', - num_samples=10000) + free_volume_compute = hoomd.hpmc.compute.FreeVolume( + test_particle_type="B", num_samples=10000 + ) sim.operations.add(free_volume_compute) sim.run(0) # rtol is fairly high as the free volume available to a sized particle # is less than the total available volume - np.testing.assert_allclose(free_volume, - free_volume_compute.free_volume, - rtol=2e-2) + np.testing.assert_allclose(free_volume, free_volume_compute.free_volume, rtol=2e-2) # Tet the kernel parameter 
tuner. def activate_tuner(): @@ -83,17 +78,17 @@ def activate_tuner(): # We need to make the kernel be called. free_volume_compute.free_volume - autotuned_kernel_parameter_check(instance=free_volume_compute, - activate=activate_tuner) + autotuned_kernel_parameter_check( + instance=free_volume_compute, activate=activate_tuner + ) def test_logging(): logging_check( - hoomd.hpmc.compute.FreeVolume, ('hpmc', 'compute'), - {'free_volume': { - 'category': LoggerCategories.scalar, - 'default': True - }}) + hoomd.hpmc.compute.FreeVolume, + ("hpmc", "compute"), + {"free_volume": {"category": LoggerCategories.scalar, "default": True}}, + ) def test_2d_free_volume(simulation_factory): @@ -101,15 +96,16 @@ def test_2d_free_volume(simulation_factory): if snapshot.communicator.rank == 0: snapshot.configuration.box = (100, 100, 0, 0, 0, 0) snapshot.particles.N = 1 - snapshot.particles.types = ['A'] + snapshot.particles.types = ["A"] sim = simulation_factory(snapshot) mc = hoomd.hpmc.integrate.Sphere() - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) - free_volume = hoomd.hpmc.compute.FreeVolume(test_particle_type='A', - num_samples=100000) + free_volume = hoomd.hpmc.compute.FreeVolume( + test_particle_type="A", num_samples=100000 + ) sim.operations.integrator = mc sim.operations.computes.append(free_volume) diff --git a/hoomd/hpmc/pytest/test_compute_sdf.py b/hoomd/hpmc/pytest/test_compute_sdf.py index b49e5ae70d..8899279103 100644 --- a/hoomd/hpmc/pytest/test_compute_sdf.py +++ b/hoomd/hpmc/pytest/test_compute_sdf.py @@ -33,11 +33,9 @@ def test_before_attaching(): sdf.betaP -def test_after_attaching(valid_args, simulation_factory, - lattice_snapshot_factory): +def test_after_attaching(valid_args, simulation_factory, lattice_snapshot_factory): integrator, args, n_dimensions = valid_args - snap = lattice_snapshot_factory(particle_types=['A'], - dimensions=n_dimensions) + snap = lattice_snapshot_factory(particle_types=["A"], dimensions=n_dimensions) sim = simulation_factory(snap) # Need to unpack union integrators @@ -119,83 +117,415 @@ def test_after_attaching(valid_args, simulation_factory, assert betaP is None -_avg = numpy.array([ - 55.20126953, 54.89853516, 54.77910156, 54.56660156, 54.22255859, - 53.83935547, 53.77617188, 53.42109375, 53.05546875, 52.86376953, - 52.65576172, 52.21240234, 52.07402344, 51.88974609, 51.69990234, - 51.32099609, 51.09775391, 51.06533203, 50.61923828, 50.35566406, - 50.07197266, 49.92275391, 49.51914062, 49.39013672, 49.17597656, - 48.91982422, 48.64580078, 48.30712891, 48.12207031, 47.815625, 47.57744141, - 47.37099609, 47.14765625, 46.92382812, 46.6984375, 46.66943359, 46.18203125, - 45.95615234, 45.66650391, 45.52714844, 45.39951172, 45.04599609, - 44.90908203, 44.62197266, 44.37460937, 44.02998047, 43.84306641, - 43.53310547, 43.55, 43.29589844, 43.06054688, 42.85097656, 42.58837891, - 42.39326172, 42.21152344, 41.91777344, 41.71054687, 41.68232422, - 41.42177734, 41.08085938, 40.91435547, 40.76123047, 40.45380859, 40.178125, - 40.14853516, 39.81972656, 39.60585938, 39.44169922, 39.34179688, - 39.09541016, 38.78105469, 38.60087891, 38.56572266, 38.27158203, - 38.02011719, 37.865625, 37.77851562, 37.51113281, 37.25615234, 37.23857422, - 36.91757812, 36.68486328, 36.57675781, 36.39140625, 36.06240234, - 36.01962891, 35.8375, 35.51914062, 35.3640625, 35.29042969, 34.86337891, - 34.72460938, 34.73964844, 34.57871094, 34.32685547, 34.02607422, - 33.78271484, 33.82548828, 33.53808594, 33.40341797, 33.17861328, - 33.05439453, 32.80361328, 32.55478516, 
32.53759766, 32.28447266, - 32.26513672, 32.05732422, 31.82294922, 31.83535156, 31.56376953, - 31.46337891, 31.27431641, 30.88310547, 30.85107422, 30.63320313, - 30.57822266, 30.28886719, 30.28183594, 30.05927734, 29.98896484, 29.690625, - 29.51816406, 29.40742188, 29.2328125, 29.19853516, 28.94599609, 28.80449219, - 28.47480469, 28.48476563, 28.31738281, 28.21455078, 28.00878906, - 27.90458984, 27.84970703, 27.54052734, 27.43818359, 27.31064453, - 27.12773437, 26.91464844, 26.84511719, 26.78701172, 26.53603516, - 26.39853516, 26.13779297, 26.16269531, 25.92138672, 25.80244141, - 25.75234375, 25.49384766, 25.37197266, 25.26962891, 25.14287109, - 24.87558594, 24.778125, 24.68320312, 24.65957031, 24.44404297, 24.31621094, - 24.203125, 24.12402344, 23.89628906, 23.76621094, 23.56923828, 23.38095703, - 23.32724609, 23.25498047, 23.09697266, 23.04716797, 22.90712891, - 22.68662109, 22.59970703, 22.54824219, 22.53632813, 22.29267578, - 22.08613281, 21.98398437, 21.89169922, 21.74550781, 21.75878906, 21.45625, - 21.37529297, 21.1890625, 21.18417969, 21.0671875, 20.95087891, 20.81650391, - 20.60390625, 20.66953125, 20.4640625, 20.47021484, 20.12988281, 20.17099609, - 20.05224609, 19.89619141, 19.80859375, 19.72558594, 19.64990234, - 19.43525391, 19.38203125 -]) - -_err = numpy.array([ - 1.21368492, 1.07520243, 1.22496485, 1.07203861, 1.31918198, 1.15482965, - 1.11606943, 1.12342247, 1.1214123, 1.2033176, 1.14923442, 1.11741796, - 1.08633901, 1.10809585, 1.13268611, 1.17159683, 1.12298656, 1.27754418, - 1.09430177, 1.08989947, 1.051715, 1.13990382, 1.16086636, 1.19538929, - 1.09450355, 1.10057404, 0.98204849, 1.02542969, 1.10736805, 1.18062055, - 1.12365972, 1.12265463, 1.06131492, 1.15169701, 1.13772836, 1.03968987, - 1.04348243, 1.00617502, 1.02450203, 1.08293272, 1.02187476, 1.00072731, - 1.0267637, 1.08289546, 1.03696814, 1.01035732, 1.05730499, 1.07088231, - 1.00528653, 0.9195167, 0.99235353, 1.00839744, 0.98700882, 0.87196929, - 1.00124084, 0.96481759, 0.9412312, 1.04691734, 0.92419062, 0.89478269, - 0.85106599, 1.0143535, 1.07011876, 0.88196475, 0.8708013, 0.91838154, - 0.9309356, 0.97521482, 0.94277816, 0.86336248, 0.8845162, 1.00421706, - 0.87940419, 0.85516477, 0.86071935, 0.96725404, 0.87175829, 0.86386878, - 0.96833751, 0.87554994, 0.8449041, 0.77404494, 0.92879454, 0.95780868, - 0.84341047, 0.88067771, 0.83393048, 0.94414754, 0.94671484, 0.84554255, - 0.8906436, 0.84538732, 0.78517686, 0.89134056, 0.78446042, 0.8952503, - 0.84624311, 0.79573064, 0.85422345, 0.88918562, 0.75531048, 0.82884413, - 0.83369698, 0.77627999, 0.84187759, 0.87986859, 0.86356705, 0.90929237, - 0.83017397, 0.86393341, 0.81426374, 0.80991068, 0.86676111, 0.75232448, - 0.8021119, 0.68794232, 0.69039919, 0.71421068, 0.77667793, 0.82113389, - 0.70256397, 0.83293526, 0.69512453, 0.75148262, 0.7407287, 0.74124134, - 0.77846167, 0.7941425, 0.81125561, 0.73334183, 0.76452184, 0.71159507, - 0.67302729, 0.66175046, 0.84778683, 0.66273563, 0.76777339, 0.71355888, - 0.74460445, 0.76623613, 0.63883733, 0.6887326, 0.74616778, 0.65223179, - 0.76358086, 0.68985286, 0.66273563, 0.72437662, 0.77382571, 0.66234322, - 0.74757211, 0.62809942, 0.75606851, 0.65375498, 0.65920693, 0.64767863, - 0.67683992, 0.63170556, 0.69891621, 0.70708048, 0.64583276, 0.73903135, - 0.60068155, 0.66055863, 0.69614341, 0.61515868, 0.63001311, 0.68602529, - 0.7014929, 0.61950453, 0.60049188, 0.6259654, 0.55819764, 0.65039367, - 0.67079534, 0.60552195, 0.64864663, 0.59901689, 0.65517427, 0.55348699, - 0.57578738, 0.6253923, 0.62679547, 0.61274744, 
0.5681065, 0.6065114, - 0.61170127, 0.60009145, 0.61583989, 0.63889728, 0.66477228, 0.60133457, - 0.56484264, 0.5676353, 0.55359946, 0.59000379, 0.60483562, 0.57305916, - 0.57591598, 0.66462928 -]) +_avg = numpy.array( + [ + 55.20126953, + 54.89853516, + 54.77910156, + 54.56660156, + 54.22255859, + 53.83935547, + 53.77617188, + 53.42109375, + 53.05546875, + 52.86376953, + 52.65576172, + 52.21240234, + 52.07402344, + 51.88974609, + 51.69990234, + 51.32099609, + 51.09775391, + 51.06533203, + 50.61923828, + 50.35566406, + 50.07197266, + 49.92275391, + 49.51914062, + 49.39013672, + 49.17597656, + 48.91982422, + 48.64580078, + 48.30712891, + 48.12207031, + 47.815625, + 47.57744141, + 47.37099609, + 47.14765625, + 46.92382812, + 46.6984375, + 46.66943359, + 46.18203125, + 45.95615234, + 45.66650391, + 45.52714844, + 45.39951172, + 45.04599609, + 44.90908203, + 44.62197266, + 44.37460937, + 44.02998047, + 43.84306641, + 43.53310547, + 43.55, + 43.29589844, + 43.06054688, + 42.85097656, + 42.58837891, + 42.39326172, + 42.21152344, + 41.91777344, + 41.71054687, + 41.68232422, + 41.42177734, + 41.08085938, + 40.91435547, + 40.76123047, + 40.45380859, + 40.178125, + 40.14853516, + 39.81972656, + 39.60585938, + 39.44169922, + 39.34179688, + 39.09541016, + 38.78105469, + 38.60087891, + 38.56572266, + 38.27158203, + 38.02011719, + 37.865625, + 37.77851562, + 37.51113281, + 37.25615234, + 37.23857422, + 36.91757812, + 36.68486328, + 36.57675781, + 36.39140625, + 36.06240234, + 36.01962891, + 35.8375, + 35.51914062, + 35.3640625, + 35.29042969, + 34.86337891, + 34.72460938, + 34.73964844, + 34.57871094, + 34.32685547, + 34.02607422, + 33.78271484, + 33.82548828, + 33.53808594, + 33.40341797, + 33.17861328, + 33.05439453, + 32.80361328, + 32.55478516, + 32.53759766, + 32.28447266, + 32.26513672, + 32.05732422, + 31.82294922, + 31.83535156, + 31.56376953, + 31.46337891, + 31.27431641, + 30.88310547, + 30.85107422, + 30.63320313, + 30.57822266, + 30.28886719, + 30.28183594, + 30.05927734, + 29.98896484, + 29.690625, + 29.51816406, + 29.40742188, + 29.2328125, + 29.19853516, + 28.94599609, + 28.80449219, + 28.47480469, + 28.48476563, + 28.31738281, + 28.21455078, + 28.00878906, + 27.90458984, + 27.84970703, + 27.54052734, + 27.43818359, + 27.31064453, + 27.12773437, + 26.91464844, + 26.84511719, + 26.78701172, + 26.53603516, + 26.39853516, + 26.13779297, + 26.16269531, + 25.92138672, + 25.80244141, + 25.75234375, + 25.49384766, + 25.37197266, + 25.26962891, + 25.14287109, + 24.87558594, + 24.778125, + 24.68320312, + 24.65957031, + 24.44404297, + 24.31621094, + 24.203125, + 24.12402344, + 23.89628906, + 23.76621094, + 23.56923828, + 23.38095703, + 23.32724609, + 23.25498047, + 23.09697266, + 23.04716797, + 22.90712891, + 22.68662109, + 22.59970703, + 22.54824219, + 22.53632813, + 22.29267578, + 22.08613281, + 21.98398437, + 21.89169922, + 21.74550781, + 21.75878906, + 21.45625, + 21.37529297, + 21.1890625, + 21.18417969, + 21.0671875, + 20.95087891, + 20.81650391, + 20.60390625, + 20.66953125, + 20.4640625, + 20.47021484, + 20.12988281, + 20.17099609, + 20.05224609, + 19.89619141, + 19.80859375, + 19.72558594, + 19.64990234, + 19.43525391, + 19.38203125, + ] +) + +_err = numpy.array( + [ + 1.21368492, + 1.07520243, + 1.22496485, + 1.07203861, + 1.31918198, + 1.15482965, + 1.11606943, + 1.12342247, + 1.1214123, + 1.2033176, + 1.14923442, + 1.11741796, + 1.08633901, + 1.10809585, + 1.13268611, + 1.17159683, + 1.12298656, + 1.27754418, + 1.09430177, + 1.08989947, + 1.051715, + 1.13990382, + 1.16086636, + 
1.19538929, + 1.09450355, + 1.10057404, + 0.98204849, + 1.02542969, + 1.10736805, + 1.18062055, + 1.12365972, + 1.12265463, + 1.06131492, + 1.15169701, + 1.13772836, + 1.03968987, + 1.04348243, + 1.00617502, + 1.02450203, + 1.08293272, + 1.02187476, + 1.00072731, + 1.0267637, + 1.08289546, + 1.03696814, + 1.01035732, + 1.05730499, + 1.07088231, + 1.00528653, + 0.9195167, + 0.99235353, + 1.00839744, + 0.98700882, + 0.87196929, + 1.00124084, + 0.96481759, + 0.9412312, + 1.04691734, + 0.92419062, + 0.89478269, + 0.85106599, + 1.0143535, + 1.07011876, + 0.88196475, + 0.8708013, + 0.91838154, + 0.9309356, + 0.97521482, + 0.94277816, + 0.86336248, + 0.8845162, + 1.00421706, + 0.87940419, + 0.85516477, + 0.86071935, + 0.96725404, + 0.87175829, + 0.86386878, + 0.96833751, + 0.87554994, + 0.8449041, + 0.77404494, + 0.92879454, + 0.95780868, + 0.84341047, + 0.88067771, + 0.83393048, + 0.94414754, + 0.94671484, + 0.84554255, + 0.8906436, + 0.84538732, + 0.78517686, + 0.89134056, + 0.78446042, + 0.8952503, + 0.84624311, + 0.79573064, + 0.85422345, + 0.88918562, + 0.75531048, + 0.82884413, + 0.83369698, + 0.77627999, + 0.84187759, + 0.87986859, + 0.86356705, + 0.90929237, + 0.83017397, + 0.86393341, + 0.81426374, + 0.80991068, + 0.86676111, + 0.75232448, + 0.8021119, + 0.68794232, + 0.69039919, + 0.71421068, + 0.77667793, + 0.82113389, + 0.70256397, + 0.83293526, + 0.69512453, + 0.75148262, + 0.7407287, + 0.74124134, + 0.77846167, + 0.7941425, + 0.81125561, + 0.73334183, + 0.76452184, + 0.71159507, + 0.67302729, + 0.66175046, + 0.84778683, + 0.66273563, + 0.76777339, + 0.71355888, + 0.74460445, + 0.76623613, + 0.63883733, + 0.6887326, + 0.74616778, + 0.65223179, + 0.76358086, + 0.68985286, + 0.66273563, + 0.72437662, + 0.77382571, + 0.66234322, + 0.74757211, + 0.62809942, + 0.75606851, + 0.65375498, + 0.65920693, + 0.64767863, + 0.67683992, + 0.63170556, + 0.69891621, + 0.70708048, + 0.64583276, + 0.73903135, + 0.60068155, + 0.66055863, + 0.69614341, + 0.61515868, + 0.63001311, + 0.68602529, + 0.7014929, + 0.61950453, + 0.60049188, + 0.6259654, + 0.55819764, + 0.65039367, + 0.67079534, + 0.60552195, + 0.64864663, + 0.59901689, + 0.65517427, + 0.55348699, + 0.57578738, + 0.6253923, + 0.62679547, + 0.61274744, + 0.5681065, + 0.6065114, + 0.61170127, + 0.60009145, + 0.61583989, + 0.63889728, + 0.66477228, + 0.60133457, + 0.56484264, + 0.5676353, + 0.55359946, + 0.59000379, + 0.60483562, + 0.57305916, + 0.57591598, + 0.66462928, + ] +) @pytest.mark.validate @@ -214,18 +544,16 @@ def test_values(simulation_factory, lattice_snapshot_factory): sim.seed = 10 mc = hoomd.hpmc.integrate.ConvexPolygon(default_d=0.1) - mc.shape["A"] = { - 'vertices': [(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)] - } + mc.shape["A"] = {"vertices": [(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)]} sim.operations.add(mc) sdf = hoomd.hpmc.compute.SDF(xmax=0.02, dx=1e-4) sim.operations.add(sdf) - sdf_log = hoomd.conftest.ListWriter(sdf, 'sdf_compression') + sdf_log = hoomd.conftest.ListWriter(sdf, "sdf_compression") sim.operations.writers.append( - hoomd.write.CustomWriter(action=sdf_log, - trigger=hoomd.trigger.Periodic(10))) + hoomd.write.CustomWriter(action=sdf_log, trigger=hoomd.trigger.Periodic(10)) + ) sim.run(6000) @@ -257,7 +585,7 @@ def test_linear_search_path(simulation_factory, two_particle_snapshot_factory): sim = simulation_factory(two_particle_snapshot_factory(d=1.001101081081081)) sim.seed = 0 mc = hoomd.hpmc.integrate.Sphere(default_d=0.0) - mc.shape['A'] = {'diameter': 2 * r_core} + mc.shape["A"] = 
{"diameter": 2 * r_core} sim.operations.add(mc) # sdf compute @@ -270,11 +598,11 @@ def test_linear_search_path(simulation_factory, two_particle_snapshot_factory): norm_factor = 1 / sdf.dx sdf_result = sdf.sdf_compression if sim.device.communicator.rank == 0: - assert (sdf_result[1] == norm_factor) - assert (numpy.count_nonzero(sdf_result) == 1) + assert sdf_result[1] == norm_factor + assert numpy.count_nonzero(sdf_result) == 1 square_well = hoomd.hpmc.pair.Step() - square_well.params[('A', 'A')] = {'epsilon': [-epsilon], 'r': [r_patch]} + square_well.params[("A", "A")] = {"epsilon": [-epsilon], "r": [r_patch]} mc.pair_potentials.append(square_well) # for a soft overlap with a negative change in energy, the weight of the @@ -283,46 +611,40 @@ def test_linear_search_path(simulation_factory, two_particle_snapshot_factory): neg_mayerF = 1 - numpy.exp(epsilon) sdf_result = sdf.sdf_compression if sim.device.communicator.rank == 0: - assert (numpy.count_nonzero(sdf_result) == 0) + assert numpy.count_nonzero(sdf_result) == 0 # change sign of epsilon, so that now there is a positive energy soft # overlap in bin 0 epsilon *= -1 - square_well.params[('A', 'A')] = {'epsilon': [-epsilon], 'r': [r_patch]} + square_well.params[("A", "A")] = {"epsilon": [-epsilon], "r": [r_patch]} sim.run(1) neg_mayerF = 1 - numpy.exp(epsilon) sdf_result = sdf.sdf_compression if sim.device.communicator.rank == 0: - assert (numpy.count_nonzero(sdf_result) == 1) - assert (sdf_result[0] == neg_mayerF * norm_factor) + assert numpy.count_nonzero(sdf_result) == 1 + assert sdf_result[0] == neg_mayerF * norm_factor # extend patches so that there is a soft overlap with positive energy # in the configuration and there will be a change in energy on expansions # the change in energy is < 0 so the weight should be 0 and # sdf_expansion should be all zeros - square_well.params[('A', 'A')] = { - 'epsilon': [-epsilon], - 'r': [r_patch + 2 * dx] - } + square_well.params[("A", "A")] = {"epsilon": [-epsilon], "r": [r_patch + 2 * dx]} sim.run(1) sdf_result = sdf.sdf_expansion if sim.device.communicator.rank == 0: - assert (numpy.count_nonzero(sdf_result) == 0) + assert numpy.count_nonzero(sdf_result) == 0 # change sign of epsilon so now the change in energy on expansion is # positive and the weight is nonzero and one of the sdf_expansion counts # is nonzero epsilon *= -1 - square_well.params[('A', 'A')] = { - 'epsilon': [-epsilon], - 'r': [r_patch + 2 * dx] - } + square_well.params[("A", "A")] = {"epsilon": [-epsilon], "r": [r_patch + 2 * dx]} sim.run(1) neg_mayerF = 1 - numpy.exp(-epsilon) sdf_result = sdf.sdf_expansion if sim.device.communicator.rank == 0: - assert (numpy.count_nonzero(sdf_result) == 1) - assert (sdf_result[-1] == neg_mayerF * norm_factor) + assert numpy.count_nonzero(sdf_result) == 1 + assert sdf_result[-1] == neg_mayerF * norm_factor @pytest.mark.cpu @@ -342,16 +664,18 @@ def test_sdf_expansion(simulation_factory, two_particle_snapshot_factory): sim = simulation_factory(snapshot) mc = hoomd.hpmc.integrate.SimplePolygon() - mc.shape['A'] = dict(vertices=[ - (-2, 1), - (-2, -1), - (2, -1), - (2, 1), - (1, 1), - (1, -0.9), - (-1, -0.9), - (-1, 1), - ]) + mc.shape["A"] = dict( + vertices=[ + (-2, 1), + (-2, -1), + (2, -1), + (2, 1), + (1, 1), + (1, -0.9), + (-1, -0.9), + (-1, 1), + ] + ) sim.operations.add(mc) # sdf compute @@ -374,25 +698,13 @@ def test_sdf_expansion(simulation_factory, two_particle_snapshot_factory): def test_logging(): logging_check( - hoomd.hpmc.compute.SDF, ('hpmc', 'compute'), { - 'betaP': { - 
'category': LoggerCategories.scalar, - 'default': True - }, - 'sdf_compression': { - 'category': LoggerCategories.sequence, - 'default': True - }, - 'sdf_expansion': { - 'category': LoggerCategories.sequence, - 'default': True - }, - 'x_compression': { - 'category': LoggerCategories.sequence, - 'default': True - }, - 'x_expansion': { - 'category': LoggerCategories.sequence, - 'default': True - }, - }) + hoomd.hpmc.compute.SDF, + ("hpmc", "compute"), + { + "betaP": {"category": LoggerCategories.scalar, "default": True}, + "sdf_compression": {"category": LoggerCategories.sequence, "default": True}, + "sdf_expansion": {"category": LoggerCategories.sequence, "default": True}, + "x_compression": {"category": LoggerCategories.sequence, "default": True}, + "x_expansion": {"category": LoggerCategories.sequence, "default": True}, + }, + ) diff --git a/hoomd/hpmc/pytest/test_external_field.py b/hoomd/hpmc/pytest/test_external_field.py index 82243cbfb6..a1013197a6 100644 --- a/hoomd/hpmc/pytest/test_external_field.py +++ b/hoomd/hpmc/pytest/test_external_field.py @@ -8,26 +8,34 @@ import numpy as np valid_constructor_args = [ - dict(reference_positions=[[0, 0, 0]], - reference_orientations=[[1, 0, 0, 0]], - k_translational=hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15), - k_rotational=hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15), - symmetries=[[1, 0, 0, 0]]), - dict(reference_positions=[[0, 0, 0]], - reference_orientations=[[1, 0, 0, 0]], - k_translational=hoomd.variant.Power(1, 5, 3, 0, 100), - k_rotational=hoomd.variant.Power(1, 5, 3, 0, 100), - symmetries=[[1, 0, 0, 0]]), - dict(reference_positions=[[0, 0, 0]], - reference_orientations=[[1, 0, 0, 0]], - k_translational=hoomd.variant.Ramp(100, 0, 0, 1000), - k_rotational=hoomd.variant.Ramp(10, 0, 0, 500), - symmetries=[[1, 0, 0, 0]]), - dict(reference_positions=[[0, 0, 0]], - reference_orientations=[[1, 0, 0, 0]], - k_translational=hoomd.variant.Constant(10), - k_rotational=hoomd.variant.Constant(0), - symmetries=[[1, 0, 0, 0]]), + dict( + reference_positions=[[0, 0, 0]], + reference_orientations=[[1, 0, 0, 0]], + k_translational=hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15), + k_rotational=hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15), + symmetries=[[1, 0, 0, 0]], + ), + dict( + reference_positions=[[0, 0, 0]], + reference_orientations=[[1, 0, 0, 0]], + k_translational=hoomd.variant.Power(1, 5, 3, 0, 100), + k_rotational=hoomd.variant.Power(1, 5, 3, 0, 100), + symmetries=[[1, 0, 0, 0]], + ), + dict( + reference_positions=[[0, 0, 0]], + reference_orientations=[[1, 0, 0, 0]], + k_translational=hoomd.variant.Ramp(100, 0, 0, 1000), + k_rotational=hoomd.variant.Ramp(10, 0, 0, 500), + symmetries=[[1, 0, 0, 0]], + ), + dict( + reference_positions=[[0, 0, 0]], + reference_orientations=[[1, 0, 0, 0]], + k_translational=hoomd.variant.Constant(10), + k_rotational=hoomd.variant.Constant(0), + symmetries=[[1, 0, 0, 0]], + ), ] @@ -44,12 +52,11 @@ def test_valid_construction_harmonicfield(device, constructor_args): @pytest.fixture(scope="module") def add_default_integrator(): - def add(simulation, field_type): mc = hoomd.hpmc.integrate.Sphere() - mc.shape['A'] = dict(diameter=0) + mc.shape["A"] = dict(diameter=0) snapshot = simulation.state.get_snapshot() - if field_type == 'harmonic': + if field_type == "harmonic": if simulation.device.communicator.rank == 0: reference_positions = snapshot.particles.position reference_orientations = snapshot.particles.orientation @@ -61,9 +68,10 @@ def add(simulation, field_type): reference_orientations=reference_orientations, 
k_translational=1.0, k_rotational=1.0, - symmetries=[[1, 0, 0, 0]]) + symmetries=[[1, 0, 0, 0]], + ) mc.external_potentials = [field] - elif field_type == 'linear': + elif field_type == "linear": field = hoomd.hpmc.external.Linear(default_alpha=3.0) mc.external_potentials = [field] simulation.operations.integrator = mc @@ -74,12 +82,12 @@ def add(simulation, field_type): @pytest.mark.cpu class TestExternalPotentialHarmonic: - - def test_attaching(self, simulation_factory, two_particle_snapshot_factory, - add_default_integrator): + def test_attaching( + self, simulation_factory, two_particle_snapshot_factory, add_default_integrator + ): # create simulation & attach objects sim = simulation_factory(two_particle_snapshot_factory()) - mc, lattice = add_default_integrator(sim, 'harmonic') + mc, lattice = add_default_integrator(sim, "harmonic") # create C++ mirror classes and set parameters sim.run(0) @@ -88,11 +96,12 @@ def test_attaching(self, simulation_factory, two_particle_snapshot_factory, assert mc._attached assert lattice._attached - def test_detaching(self, simulation_factory, two_particle_snapshot_factory, - add_default_integrator): + def test_detaching( + self, simulation_factory, two_particle_snapshot_factory, add_default_integrator + ): # create simulation & attach objects sim = simulation_factory(two_particle_snapshot_factory()) - mc, lattice = add_default_integrator(sim, 'harmonic') + mc, lattice = add_default_integrator(sim, "harmonic") # create C++ mirror classes and set parameters sim.run(0) @@ -102,14 +111,14 @@ def test_detaching(self, simulation_factory, two_particle_snapshot_factory, assert not mc._attached assert not lattice._attached - def test_harmonic_displacement_energy(self, simulation_factory, - two_particle_snapshot_factory, - add_default_integrator): + def test_harmonic_displacement_energy( + self, simulation_factory, two_particle_snapshot_factory, add_default_integrator + ): """Ensure harmonic displacements result in expected energy.""" # create simulation & attach objects sim = simulation_factory(two_particle_snapshot_factory()) - mc, lattice = add_default_integrator(sim, 'harmonic') - mc.shape['A'] = dict(diameter=0, orientable=True) + mc, lattice = add_default_integrator(sim, "harmonic") + mc.shape["A"] = dict(diameter=0, orientable=True) dx = 0.01 disp = np.array([dx, 0, 0]) @@ -119,22 +128,22 @@ def test_harmonic_displacement_energy(self, simulation_factory, sim.run(0) k_translational = lattice.k_translational(sim.timestep) assert np.allclose( - lattice.energy, - 0.5 * dx**2 * k_translational * sim.state.N_particles) + lattice.energy, 0.5 * dx**2 * k_translational * sim.state.N_particles + ) # make some moves and make sure the energy is not zero sim.run(10) assert lattice.energy != 0.0 - def test_harmonic_displacement(self, simulation_factory, - two_particle_snapshot_factory, - add_default_integrator): + def test_harmonic_displacement( + self, simulation_factory, two_particle_snapshot_factory, add_default_integrator + ): """Ensure particles remain close to reference positions.""" # create simulation & attach objects sim = simulation_factory(two_particle_snapshot_factory()) - mc, lattice = add_default_integrator(sim, 'harmonic') + mc, lattice = add_default_integrator(sim, "harmonic") particle_diameter = 0.5 - mc.shape['A'] = dict(diameter=particle_diameter) + mc.shape["A"] = dict(diameter=particle_diameter) k_trans = 200.0 lattice.k_translational = k_trans lattice.k_rotational = 0 @@ -145,23 +154,22 @@ def test_harmonic_displacement(self, simulation_factory, 
snapshot = sim.state.get_snapshot() if snapshot.communicator.rank == 0: new_positions = snapshot.particles.position - dx = np.linalg.norm(new_positions - lattice.reference_positions, - axis=1) + dx = np.linalg.norm(new_positions - lattice.reference_positions, axis=1) assert np.all(np.less(dx, particle_diameter / 2)) @pytest.mark.cpu class TestExternalPotentialLinear: - def test_valid_construction_linearfield(self, device): """Test that Linear can be constructed with valid arguments.""" hoomd.hpmc.external.Linear(default_alpha=1.0) - def test_attaching(self, simulation_factory, two_particle_snapshot_factory, - add_default_integrator): + def test_attaching( + self, simulation_factory, two_particle_snapshot_factory, add_default_integrator + ): # create simulation & attach objects sim = simulation_factory(two_particle_snapshot_factory()) - mc, field = add_default_integrator(sim, 'linear') + mc, field = add_default_integrator(sim, "linear") # create C++ mirror classes and set parameters sim.run(0) @@ -170,11 +178,12 @@ def test_attaching(self, simulation_factory, two_particle_snapshot_factory, assert mc._attached assert field._attached - def test_detaching(self, simulation_factory, two_particle_snapshot_factory, - add_default_integrator): + def test_detaching( + self, simulation_factory, two_particle_snapshot_factory, add_default_integrator + ): # create simulation & attach objects sim = simulation_factory(two_particle_snapshot_factory()) - mc, lattice = add_default_integrator(sim, 'linear') + mc, lattice = add_default_integrator(sim, "linear") # create C++ mirror classes and set parameters sim.run(0) @@ -184,15 +193,16 @@ def test_detaching(self, simulation_factory, two_particle_snapshot_factory, assert not mc._attached assert not lattice._attached - def test_energy(self, simulation_factory, two_particle_snapshot_factory, - add_default_integrator): + def test_energy( + self, simulation_factory, two_particle_snapshot_factory, add_default_integrator + ): """Verify energy is what it should be with known particle positions.""" sim = simulation_factory(two_particle_snapshot_factory(d=1.0)) - mc, field = add_default_integrator(sim, 'linear') + mc, field = add_default_integrator(sim, "linear") field.plane_normal = (1, 0, 0) field.plane_origin = (0.3, 0.2, 0.4) - field.alpha['A'] = 1.23456 - mc.shape['A'] = dict(diameter=0, orientable=True) + field.alpha["A"] = 1.23456 + mc.shape["A"] = dict(diameter=0, orientable=True) sim.run(0) # move one particle to break symmetry and give non-zero energy @@ -205,9 +215,10 @@ def test_energy(self, simulation_factory, two_particle_snapshot_factory, if sim.device.communicator.rank == 0: field_reference_energy = 0.0 for r in snapshot.particles.position: - field_reference_energy += np.dot( - r - field.plane_origin, - field.plane_normal) * field.alpha['A'] + field_reference_energy += ( + np.dot(r - field.plane_origin, field.plane_normal) + * field.alpha["A"] + ) field_energy = field.energy mc_external_energy = mc.external_energy @@ -217,27 +228,29 @@ def test_energy(self, simulation_factory, two_particle_snapshot_factory, assert field_energy == pytest.approx(mc_external_energy) # Test that HPMCIntegrator correctly handles multiple fields. 
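The reference values rebuilt by hand in this test follow E = sum over particles of alpha * dot(r - plane_origin, plane_normal) for each Linear field, and mc.external_energy is asserted to equal the sum of the per-field energies. A minimal standalone sketch of that bookkeeping, assuming hypothetical positions and field parameters and using plain numpy rather than the HOOMD objects:

import numpy as np

# Hypothetical particle positions and two hypothetical Linear-field parameter
# sets, standing in for the quantities the test reconstructs by hand.
positions = np.array([[0.5, 0.0, 0.0], [-0.5, 0.0, 0.0]])
fields = [
    dict(alpha=1.23456, origin=np.array([0.3, 0.2, 0.4]), normal=np.array([1.0, 0.0, 0.0])),
    dict(alpha=2.345, origin=np.array([0.1, 0.2, 0.3]), normal=np.array([-0.2, 5.0, -2.0])),
]

def linear_field_energy(field, positions):
    # Per-field energy: sum_i alpha * dot(r_i - origin, normal).
    return sum(np.dot(r - field["origin"], field["normal"]) * field["alpha"] for r in positions)

# The total external energy is expected to be additive over the attached fields.
total_external_energy = sum(linear_field_energy(f, positions) for f in fields)
print(total_external_energy)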
- field2 = hoomd.hpmc.external.Linear(default_alpha=2.345, - plane_origin=(0.1, 0.2, 0.3), - plane_normal=(-0.2, 5, -2)) + field2 = hoomd.hpmc.external.Linear( + default_alpha=2.345, + plane_origin=(0.1, 0.2, 0.3), + plane_normal=(-0.2, 5, -2), + ) mc.external_potentials.append(field2) field2_reference_energy = 0 if sim.device.communicator.rank == 0: for r in snapshot.particles.position: - field2_reference_energy += np.dot( - r - field2.plane_origin, - field2.plane_normal) * field2.alpha['A'] + field2_reference_energy += ( + np.dot(r - field2.plane_origin, field2.plane_normal) + * field2.alpha["A"] + ) field2_energy = field2.energy mc_external_energy = mc.external_energy if sim.device.communicator.rank == 0: assert field2_energy == pytest.approx(field2_reference_energy) - assert field_energy + field2_energy == pytest.approx( - mc_external_energy) + assert field_energy + field2_energy == pytest.approx(mc_external_energy) # Test that origin shifting does not change the energy - mc.d['A'] = 0 + mc.d["A"] = 0 sim.run(1000) field_energy = field.energy field2_energy = field2.energy @@ -246,20 +259,19 @@ def test_energy(self, simulation_factory, two_particle_snapshot_factory, assert field_energy == pytest.approx(field_reference_energy) assert field2_energy == pytest.approx(field2_reference_energy) - def test_normalization_of_plane_normal(self, simulation_factory, - two_particle_snapshot_factory, - add_default_integrator): + def test_normalization_of_plane_normal( + self, simulation_factory, two_particle_snapshot_factory, add_default_integrator + ): # create simulation & attach objects sim = simulation_factory(two_particle_snapshot_factory()) - mc, field = add_default_integrator(sim, 'linear') + mc, field = add_default_integrator(sim, "linear") field.plane_normal = (1, 2, 3) magnitude = np.linalg.norm(field.plane_normal) # create C++ mirror classes and set parameters assert field.plane_normal == (1, 2, 3) sim.run(0) # normalization occurs on attaching - np.testing.assert_allclose( - np.array(field.plane_normal) * magnitude, (1, 2, 3)) + np.testing.assert_allclose(np.array(field.plane_normal) * magnitude, (1, 2, 3)) @pytest.mark.validate def test_z_bias(self, device, simulation_factory, lattice_snapshot_factory): @@ -271,18 +283,16 @@ def test_z_bias(self, device, simulation_factory, lattice_snapshot_factory): """ sim = simulation_factory(lattice_snapshot_factory(a=1.1, n=5)) mc = hoomd.hpmc.integrate.Sphere(default_d=0.01) - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) mc.nselect = 1 # expand box and add external field old_box = sim.state.box - new_box = hoomd.Box(Lx=3 * old_box.Lx, - Ly=3 * old_box.Ly, - Lz=3 * old_box.Lz) + new_box = hoomd.Box(Lx=3 * old_box.Lx, Ly=3 * old_box.Ly, Lz=3 * old_box.Lz) sim.state.set_box(new_box) - ext = hoomd.hpmc.external.Linear(default_alpha=1000, - plane_origin=(0, 0, 0), - plane_normal=(0, 0, 1)) + ext = hoomd.hpmc.external.Linear( + default_alpha=1000, plane_origin=(0, 0, 0), plane_normal=(0, 0, 1) + ) mc.external_potentials = [ext] sim.operations.integrator = mc @@ -304,8 +314,8 @@ def test_z_bias(self, device, simulation_factory, lattice_snapshot_factory): # prefactor, and mean_z is the average position of the # particles. verify that this equality holds. 
np.testing.assert_allclose( - new_energy, - snapshot.particles.N * ext.alpha['A'] * new_mean_z) + new_energy, snapshot.particles.N * ext.alpha["A"] * new_mean_z + ) assert new_mean_z < old_mean_z assert new_energy < old_energy old_mean_z = new_mean_z diff --git a/hoomd/hpmc/pytest/test_external_wall.py b/hoomd/hpmc/pytest/test_external_wall.py index 835a271539..64364cd21f 100644 --- a/hoomd/hpmc/pytest/test_external_wall.py +++ b/hoomd/hpmc/pytest/test_external_wall.py @@ -14,7 +14,7 @@ wall_instances = [ hoomd.wall.Cylinder(1.0, (0, 0, 1)), hoomd.wall.Plane((0, 0, 0), (1, 1, 1)), - hoomd.wall.Sphere(1.0) + hoomd.wall.Sphere(1.0), ] valid_wall_lists = [] for r in 1, 2, 3: @@ -29,27 +29,22 @@ def test_valid_construction(device, wall_list): walls = hoomd.hpmc.external.WallPotential(wall_list) # validate the params were set properly - for wall_input, wall_in_object in itertools.zip_longest( - wall_list, walls.walls): + for wall_input, wall_in_object in itertools.zip_longest(wall_list, walls.walls): assert wall_input == wall_in_object default_wall_args = { hoomd.wall.Sphere: (1.0,), hoomd.wall.Cylinder: (1.0, (0, 0, 1)), - hoomd.wall.Plane: ((0, 0, 0), (1, 1, 1)) + hoomd.wall.Plane: ((0, 0, 0), (1, 1, 1)), } @pytest.fixture(scope="module") def add_default_integrator(): - - def add(simulation, - integrator_class, - wall_types, - use_default_wall_args=True): + def add(simulation, integrator_class, wall_types, use_default_wall_args=True): mc = integrator_class() - mc.shape['A'] = mc_params[integrator_class] + mc.shape["A"] = mc_params[integrator_class] if use_default_wall_args: wall_list = [wt(*default_wall_args[wt]) for wt in wall_types] else: @@ -77,44 +72,52 @@ def default_wall_compatibility(): return { hoomd.wall.Sphere: False, hoomd.wall.Cylinder: False, - hoomd.wall.Plane: False + hoomd.wall.Plane: False, } shape_wall_compatibilities = defaultdict(default_wall_compatibility) -shape_wall_compatibilities.update({ - hoomd.hpmc.integrate.ConvexPolyhedron: { - hoomd.wall.Sphere: True, - hoomd.wall.Cylinder: True, - hoomd.wall.Plane: True - }, - hoomd.hpmc.integrate.ConvexSpheropolyhedron: { - hoomd.wall.Sphere: True, - hoomd.wall.Cylinder: False, - hoomd.wall.Plane: True - }, - hoomd.hpmc.integrate.Sphere: { - hoomd.wall.Sphere: True, - hoomd.wall.Cylinder: True, - hoomd.wall.Plane: True - }, -}) +shape_wall_compatibilities.update( + { + hoomd.hpmc.integrate.ConvexPolyhedron: { + hoomd.wall.Sphere: True, + hoomd.wall.Cylinder: True, + hoomd.wall.Plane: True, + }, + hoomd.hpmc.integrate.ConvexSpheropolyhedron: { + hoomd.wall.Sphere: True, + hoomd.wall.Cylinder: False, + hoomd.wall.Plane: True, + }, + hoomd.hpmc.integrate.Sphere: { + hoomd.wall.Sphere: True, + hoomd.wall.Cylinder: True, + hoomd.wall.Plane: True, + }, + } +) valid_flattened_shape_wall_combos = [] invalid_flattened_shape_wall_combos = [] for shape in _integrator_classes: wall_info = shape_wall_compatibilities[shape] valid_flattened_shape_wall_combos.extend( - [shape, wall] for wall, v in wall_info.items() if v) + [shape, wall] for wall, v in wall_info.items() if v + ) invalid_flattened_shape_wall_combos.extend( - [shape, wall] for wall, v in wall_info.items() if not v) + [shape, wall] for wall, v in wall_info.items() if not v + ) @pytest.mark.cpu @pytest.mark.parametrize("shape_cls, wall", invalid_flattened_shape_wall_combos) -def test_attaching_invalid_combos(simulation_factory, - two_particle_snapshot_factory, - add_default_integrator, shape_cls, wall): +def test_attaching_invalid_combos( + simulation_factory, + 
two_particle_snapshot_factory, + add_default_integrator, + shape_cls, + wall, +): sim = simulation_factory(two_particle_snapshot_factory()) mc, walls = add_default_integrator(sim, shape_cls, [wall]) with pytest.raises(NotImplementedError): @@ -123,9 +126,13 @@ def test_attaching_invalid_combos(simulation_factory, @pytest.mark.cpu @pytest.mark.parametrize("shape_cls, wall", valid_flattened_shape_wall_combos) -def test_attaching_valid_combos(simulation_factory, - two_particle_snapshot_factory, - add_default_integrator, shape_cls, wall): +def test_attaching_valid_combos( + simulation_factory, + two_particle_snapshot_factory, + add_default_integrator, + shape_cls, + wall, +): sim = simulation_factory(two_particle_snapshot_factory()) mc, walls = add_default_integrator(sim, shape_cls, [wall]) sim.run(0) @@ -135,8 +142,13 @@ def test_attaching_valid_combos(simulation_factory, @pytest.mark.cpu @pytest.mark.parametrize("shape_cls, wall", valid_flattened_shape_wall_combos) -def test_detaching(simulation_factory, two_particle_snapshot_factory, - add_default_integrator, shape_cls, wall): +def test_detaching( + simulation_factory, + two_particle_snapshot_factory, + add_default_integrator, + shape_cls, + wall, +): sim = simulation_factory(two_particle_snapshot_factory()) mc, walls = add_default_integrator(sim, shape_cls, [wall]) sim.run(0) @@ -149,15 +161,18 @@ def test_detaching(simulation_factory, two_particle_snapshot_factory, for shape in _integrator_classes: wall_info = shape_wall_compatibilities[shape] if any((v for v in wall_info.values())): - shape_multiwall_combos.append( - (shape, [(w, v) for w, v in wall_info.items()])) + shape_multiwall_combos.append((shape, [(w, v) for w, v in wall_info.items()])) @pytest.mark.cpu @pytest.mark.parametrize("shape_cls, wall_validity", shape_multiwall_combos) -def test_multiple_wall_geometries(simulation_factory, shape_cls, wall_validity, - two_particle_snapshot_factory, - add_default_integrator): +def test_multiple_wall_geometries( + simulation_factory, + shape_cls, + wall_validity, + two_particle_snapshot_factory, + add_default_integrator, +): sim = simulation_factory(two_particle_snapshot_factory()) walls = [x[0] for x in wall_validity] is_valids = [x[1] for x in wall_validity] @@ -170,23 +185,22 @@ def test_multiple_wall_geometries(simulation_factory, shape_cls, wall_validity, @pytest.mark.cpu -def test_replace_with_invalid(simulation_factory, two_particle_snapshot_factory, - add_default_integrator): +def test_replace_with_invalid( + simulation_factory, two_particle_snapshot_factory, add_default_integrator +): sim = simulation_factory(two_particle_snapshot_factory()) integrator_class = hoomd.hpmc.integrate.ConvexSpheropolyhedron wall_types = [hoomd.wall.Sphere, hoomd.wall.Plane] mc, walls = add_default_integrator(sim, integrator_class, wall_types) sim.run(0) with pytest.raises(NotImplementedError): - mc.external_potentials[0].walls = [ - hoomd.wall.Cylinder(1.2345, (0, 0, 0)) - ] + mc.external_potentials[0].walls = [hoomd.wall.Cylinder(1.2345, (0, 0, 0))] @pytest.mark.cpu -def test_replace_with_invalid_by_append(simulation_factory, - two_particle_snapshot_factory, - add_default_integrator): +def test_replace_with_invalid_by_append( + simulation_factory, two_particle_snapshot_factory, add_default_integrator +): sim = simulation_factory(two_particle_snapshot_factory()) integrator_class = hoomd.hpmc.integrate.ConvexSpheropolyhedron wall_types = [hoomd.wall.Sphere, hoomd.wall.Plane] @@ -198,9 +212,9 @@ def 
test_replace_with_invalid_by_append(simulation_factory, @pytest.mark.cpu -def test_replace_with_invalid_by_extend(simulation_factory, - two_particle_snapshot_factory, - add_default_integrator): +def test_replace_with_invalid_by_extend( + simulation_factory, two_particle_snapshot_factory, add_default_integrator +): sim = simulation_factory(two_particle_snapshot_factory()) integrator_class = hoomd.hpmc.integrate.ConvexSpheropolyhedron wall_types = [hoomd.wall.Sphere, hoomd.wall.Plane] @@ -212,8 +226,9 @@ def test_replace_with_invalid_by_extend(simulation_factory, @pytest.mark.cpu -def test_replace_with_valid(simulation_factory, two_particle_snapshot_factory, - add_default_integrator): +def test_replace_with_valid( + simulation_factory, two_particle_snapshot_factory, add_default_integrator +): sim = simulation_factory(two_particle_snapshot_factory()) integrator_class = hoomd.hpmc.integrate.ConvexSpheropolyhedron wall_types = [hoomd.wall.Plane] @@ -223,9 +238,9 @@ def test_replace_with_valid(simulation_factory, two_particle_snapshot_factory, @pytest.mark.cpu -def test_replace_with_valid_by_append(simulation_factory, - two_particle_snapshot_factory, - add_default_integrator): +def test_replace_with_valid_by_append( + simulation_factory, two_particle_snapshot_factory, add_default_integrator +): sim = simulation_factory(two_particle_snapshot_factory()) integrator_class = hoomd.hpmc.integrate.ConvexSpheropolyhedron wall_types = [hoomd.wall.Plane] @@ -235,9 +250,9 @@ def test_replace_with_valid_by_append(simulation_factory, @pytest.mark.cpu -def test_replace_with_valid_by_extend(simulation_factory, - two_particle_snapshot_factory, - add_default_integrator): +def test_replace_with_valid_by_extend( + simulation_factory, two_particle_snapshot_factory, add_default_integrator +): sim = simulation_factory(two_particle_snapshot_factory()) integrator_class = hoomd.hpmc.integrate.ConvexSpheropolyhedron wall_types = [hoomd.wall.Plane] @@ -247,8 +262,7 @@ def test_replace_with_valid_by_extend(simulation_factory, L_cube = 1.0 -cube_vertices = np.array( - list(itertools.product((-L_cube / 2, L_cube / 2), repeat=3))) +cube_vertices = np.array(list(itertools.product((-L_cube / 2, L_cube / 2), repeat=3))) cube_rc = max(np.linalg.norm(cube_vertices, axis=1)) # circumsphere radius cube_face_rc = np.sqrt(2) / 2 cube_r_s = 0.1 # sweep radius for spherocube @@ -517,7 +531,6 @@ def test_replace_with_valid_by_extend(simulation_factory, dict(diameter=1.0), False, ), - # cube in middle of cylindrical pore with larger radius than the # circumsphere radius of the cube projected onto the circular cross-section # of the cylinder (i.e., the square face of the cube) @@ -564,19 +577,25 @@ def test_replace_with_valid_by_extend(simulation_factory, @pytest.mark.cpu @pytest.mark.parametrize( - "pos, orientation, shape, wall_list, shapedef, expecting_overlap", - overlap_test_info) -def test_overlaps(simulation_factory, one_particle_snapshot_factory, - add_default_integrator, pos, orientation, shape, wall_list, - shapedef, expecting_overlap): + "pos, orientation, shape, wall_list, shapedef, expecting_overlap", overlap_test_info +) +def test_overlaps( + simulation_factory, + one_particle_snapshot_factory, + add_default_integrator, + pos, + orientation, + shape, + wall_list, + shapedef, + expecting_overlap, +): sim = simulation_factory( - one_particle_snapshot_factory(position=pos, - orientation=orientation, - L=100)) - mc, walls = add_default_integrator(sim, - shape, - wall_list, - use_default_wall_args=False) - mc.shape['A'] = shapedef 
+ one_particle_snapshot_factory(position=pos, orientation=orientation, L=100) + ) + mc, walls = add_default_integrator( + sim, shape, wall_list, use_default_wall_args=False + ) + mc.shape["A"] = shapedef sim.run(0) assert (mc.external_potentials[0].overlaps > 0) == expecting_overlap diff --git a/hoomd/hpmc/pytest/test_gca.py b/hoomd/hpmc/pytest/test_gca.py index 26a02c4256..00626e4e7c 100644 --- a/hoomd/hpmc/pytest/test_gca.py +++ b/hoomd/hpmc/pytest/test_gca.py @@ -4,8 +4,11 @@ """Test hoomd.hpmc.update.GCA.""" import hoomd -from hoomd.conftest import (operation_pickling_check, logging_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + operation_pickling_check, + logging_check, + autotuned_kernel_parameter_check, +) from hoomd.logging import LoggerCategories import pytest import hoomd.hpmc.pytest.conftest @@ -13,26 +16,37 @@ # note: The parameterized tests validate parameters so we can't pass in values # here that require preprocessing valid_constructor_args = [ - dict(trigger=hoomd.trigger.Periodic(10), - pivot_move_probability=0.1, - flip_probability=0.8), - dict(trigger=hoomd.trigger.After(100), - pivot_move_probability=0.7, - flip_probability=1), - dict(trigger=hoomd.trigger.Before(100), - pivot_move_probability=0.7, - flip_probability=1), - dict(trigger=hoomd.trigger.Periodic(1000), - pivot_move_probability=0.7, - flip_probability=1), + dict( + trigger=hoomd.trigger.Periodic(10), + pivot_move_probability=0.1, + flip_probability=0.8, + ), + dict( + trigger=hoomd.trigger.After(100), pivot_move_probability=0.7, flip_probability=1 + ), + dict( + trigger=hoomd.trigger.Before(100), + pivot_move_probability=0.7, + flip_probability=1, + ), + dict( + trigger=hoomd.trigger.Periodic(1000), + pivot_move_probability=0.7, + flip_probability=1, + ), ] -valid_attrs = [('trigger', hoomd.trigger.Periodic(10000)), - ('trigger', hoomd.trigger.After(100)), - ('trigger', hoomd.trigger.Before(12345)), - ('flip_probability', 0.2), ('flip_probability', 0.5), - ('flip_probability', 0.8), ('pivot_move_probability', 0.2), - ('pivot_move_probability', 0.5), ('pivot_move_probability', 0.8)] +valid_attrs = [ + ("trigger", hoomd.trigger.Periodic(10000)), + ("trigger", hoomd.trigger.After(100)), + ("trigger", hoomd.trigger.Before(12345)), + ("flip_probability", 0.2), + ("flip_probability", 0.5), + ("flip_probability", 0.8), + ("pivot_move_probability", 0.2), + ("pivot_move_probability", 0.5), + ("pivot_move_probability", 0.8), +] @pytest.mark.serial @@ -48,9 +62,13 @@ def test_valid_construction(device, constructor_args): @pytest.mark.serial @pytest.mark.parametrize("constructor_args", valid_constructor_args) -def test_valid_construction_and_attach(device, simulation_factory, - two_particle_snapshot_factory, - constructor_args, valid_args): +def test_valid_construction_and_attach( + device, + simulation_factory, + two_particle_snapshot_factory, + constructor_args, + valid_args, +): """Test that GCA can be attached with valid arguments.""" integrator = valid_args[0] args = valid_args[1] @@ -70,10 +88,10 @@ def test_valid_construction_and_attach(device, simulation_factory, cl = hoomd.hpmc.update.GCA(**constructor_args) sim = simulation_factory( - two_particle_snapshot_factory(particle_types=['A', 'B'], - dimensions=n_dimensions, - d=2, - L=50)) + two_particle_snapshot_factory( + particle_types=["A", "B"], dimensions=n_dimensions, d=2, L=50 + ) + ) sim.operations.updaters.append(cl) sim.operations.integrator = mc @@ -96,8 +114,9 @@ def test_valid_setattr(device, attr, value): @pytest.mark.serial 
@pytest.mark.parametrize("attr,value", valid_attrs) -def test_valid_setattr_attached(device, attr, value, simulation_factory, - two_particle_snapshot_factory, valid_args): +def test_valid_setattr_attached( + device, attr, value, simulation_factory, two_particle_snapshot_factory, valid_args +): """Test that GCA can get and set attributes while attached.""" integrator = valid_args[0] args = valid_args[1] @@ -117,10 +136,10 @@ def test_valid_setattr_attached(device, attr, value, simulation_factory, cl = hoomd.hpmc.update.GCA(trigger=hoomd.trigger.Periodic(10)) sim = simulation_factory( - two_particle_snapshot_factory(particle_types=['A', 'B'], - dimensions=n_dimensions, - d=2, - L=50)) + two_particle_snapshot_factory( + particle_types=["A", "B"], dimensions=n_dimensions, d=2, L=50 + ) + ) sim.operations.updaters.append(cl) sim.operations.integrator = mc @@ -133,24 +152,23 @@ def test_valid_setattr_attached(device, attr, value, simulation_factory, @pytest.mark.serial def test_pivot_moves(device, simulation_factory, lattice_snapshot_factory): """Test that GCA produces finite size clusters.""" - if (isinstance(device, hoomd.device.GPU) - and hoomd.version.gpu_platform == 'ROCm'): + if isinstance(device, hoomd.device.GPU) and hoomd.version.gpu_platform == "ROCm": pytest.xfail("GCA fails on ROCm (#1605)") sim = simulation_factory( - lattice_snapshot_factory(particle_types=['A', 'B'], - dimensions=3, - a=4, - n=7, - r=0.1)) + lattice_snapshot_factory( + particle_types=["A", "B"], dimensions=3, a=4, n=7, r=0.1 + ) + ) mc = hoomd.hpmc.integrate.Sphere(default_d=0.1, default_a=0.1) - mc.shape['A'] = dict(diameter=1.1) - mc.shape['B'] = dict(diameter=1.3) + mc.shape["A"] = dict(diameter=1.1) + mc.shape["B"] = dict(diameter=1.3) sim.operations.integrator = mc - cl = hoomd.hpmc.update.GCA(trigger=hoomd.trigger.Periodic(5), - pivot_move_probability=0.5) + cl = hoomd.hpmc.update.GCA( + trigger=hoomd.trigger.Periodic(5), pivot_move_probability=0.5 + ) sim.operations.updaters.append(cl) sim.run(10) @@ -163,12 +181,13 @@ def test_pickling(simulation_factory, two_particle_snapshot_factory): """Test that Cluster objects are picklable.""" sim = simulation_factory(two_particle_snapshot_factory()) mc = hoomd.hpmc.integrate.Sphere(default_d=0.1, default_a=0.1) - mc.shape['A'] = dict(diameter=1.1) - mc.shape['B'] = dict(diameter=1.3) + mc.shape["A"] = dict(diameter=1.1) + mc.shape["B"] = dict(diameter=1.3) sim.operations.integrator = mc - cl = hoomd.hpmc.update.GCA(trigger=hoomd.trigger.Periodic(5), - pivot_move_probability=0.1) + cl = hoomd.hpmc.update.GCA( + trigger=hoomd.trigger.Periodic(5), pivot_move_probability=0.1 + ) operation_pickling_check(cl, sim) @@ -177,12 +196,13 @@ def test_kernel_parameters(simulation_factory, two_particle_snapshot_factory): """Test that Cluster objects tune their kernel parameters.""" sim = simulation_factory(two_particle_snapshot_factory()) mc = hoomd.hpmc.integrate.Sphere(default_d=0.1, default_a=0.1) - mc.shape['A'] = dict(diameter=1.1) - mc.shape['B'] = dict(diameter=1.3) + mc.shape["A"] = dict(diameter=1.1) + mc.shape["B"] = dict(diameter=1.3) sim.operations.integrator = mc - cl = hoomd.hpmc.update.GCA(trigger=hoomd.trigger.Periodic(1), - pivot_move_probability=0.1) + cl = hoomd.hpmc.update.GCA( + trigger=hoomd.trigger.Periodic(1), pivot_move_probability=0.1 + ) sim.operations.updaters.append(cl) sim.run(0) @@ -191,9 +211,8 @@ def test_kernel_parameters(simulation_factory, two_particle_snapshot_factory): def test_logging(): - logging_check(hoomd.hpmc.update.GCA, ('hpmc', 
'update'), { - 'avg_cluster_size': { - 'category': LoggerCategories.scalar, - 'default': True - } - }) + logging_check( + hoomd.hpmc.update.GCA, + ("hpmc", "update"), + {"avg_cluster_size": {"category": LoggerCategories.scalar, "default": True}}, + ) diff --git a/hoomd/hpmc/pytest/test_kt.py b/hoomd/hpmc/pytest/test_kt.py index da00d57b29..ce936036fb 100644 --- a/hoomd/hpmc/pytest/test_kt.py +++ b/hoomd/hpmc/pytest/test_kt.py @@ -11,10 +11,10 @@ # here that require preprocessing valid_attrs = [ - ('kT', hoomd.variant.Constant(10)), - ('kT', hoomd.variant.Ramp(1, 5, 0, 100)), - ('kT', hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15)), - ('kT', hoomd.variant.Power(1, 5, 3, 0, 100)), + ("kT", hoomd.variant.Constant(10)), + ("kT", hoomd.variant.Ramp(1, 5, 0, 100)), + ("kT", hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15)), + ("kT", hoomd.variant.Power(1, 5, 3, 0, 100)), ] @@ -35,14 +35,15 @@ def test_valid_setattr(device, attr, value): @pytest.mark.serial @pytest.mark.cpu @pytest.mark.parametrize("attr,value", valid_attrs) -def test_valid_setattr_attached(attr, value, simulation_factory, - two_particle_snapshot_factory): +def test_valid_setattr_attached( + attr, value, simulation_factory, two_particle_snapshot_factory +): """Test that integrator can get and set attributes while attached.""" sim = simulation_factory(two_particle_snapshot_factory()) # BoxMC requires an HPMC integrator mc = hoomd.hpmc.integrate.Sphere() - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc # create C++ mirror classes and set parameters @@ -55,11 +56,11 @@ def test_valid_setattr_attached(attr, value, simulation_factory, @pytest.mark.serial @pytest.mark.cpu @pytest.mark.parametrize("attr,value", valid_attrs) -def test_after_attaching(attr, value, valid_args, simulation_factory, - lattice_snapshot_factory): +def test_after_attaching( + attr, value, valid_args, simulation_factory, lattice_snapshot_factory +): integrator, args, n_dimensions = valid_args - snap = lattice_snapshot_factory(particle_types=['A'], - dimensions=n_dimensions) + snap = lattice_snapshot_factory(particle_types=["A"], dimensions=n_dimensions) sim = simulation_factory(snap) # Need to unpack union integrators diff --git a/hoomd/hpmc/pytest/test_move_size_tuner.py b/hoomd/hpmc/pytest/test_move_size_tuner.py index a2f2223ddc..e1367fdaab 100644 --- a/hoomd/hpmc/pytest/test_move_size_tuner.py +++ b/hoomd/hpmc/pytest/test_move_size_tuner.py @@ -6,12 +6,12 @@ from hoomd import hpmc from hoomd.conftest import operation_pickling_check -from hoomd.hpmc.tune.move_size import (_MoveSizeTuneDefinition, MoveSize) +from hoomd.hpmc.tune.move_size import _MoveSizeTuneDefinition, MoveSize @pytest.fixture def move_definition_dict(): - return dict(attr='d', type='A', target=0.5, domain=(1e-5, None)) + return dict(attr="d", type="A", target=0.5, domain=(1e-5, None)) @pytest.fixture @@ -24,25 +24,23 @@ def simulation(simulation_factory, lattice_snapshot_factory): snap = lattice_snapshot_factory(dimensions=2, r=1e-3, n=20) # 400 particles sim = simulation_factory(snap) integrator = hpmc.integrate.Sphere(default_d=0.01) - integrator.shape['A'] = dict(diameter=0.9) + integrator.shape["A"] = dict(diameter=0.9) sim.operations.integrator = integrator return sim class TestMoveSizeTuneDefinition: - def test_getting_attrs(self, move_definition_dict, move_size_definition): for attr in move_definition_dict: - assert move_definition_dict[attr] == getattr( - move_size_definition, attr) + assert move_definition_dict[attr] == 
getattr(move_size_definition, attr) def test_setting_attrs(self, move_size_definition): move_size_definition.domain = (None, 5) assert move_size_definition.domain == (None, 5) - move_size_definition.attr = 'a' - assert move_size_definition.attr == 'a' - move_size_definition.type = 'B' - assert move_size_definition.type == 'B' + move_size_definition.attr = "a" + assert move_size_definition.attr == "a" + move_size_definition.type = "B" + assert move_size_definition.type == "B" move_size_definition.target = 0.9 assert move_size_definition.target == 0.9 @@ -67,48 +65,55 @@ def test_getting_acceptance_rate(self, move_size_definition, simulation): def test_getting_setting_move_size(self, move_size_definition, simulation): integrator = simulation.operations.integrator move_size_definition.integrator = integrator - assert move_size_definition.x == integrator.d['A'] - d = integrator.d['A'] * 1.1 - integrator.d['A'] = d + assert move_size_definition.x == integrator.d["A"] + d = integrator.d["A"] * 1.1 + integrator.d["A"] = d assert move_size_definition.x == d d *= 1.1 move_size_definition.x = d - assert integrator.d['A'] == d + assert integrator.d["A"] == d def test_hash(self, move_size_definition, move_definition_dict, simulation): identical_definition = _MoveSizeTuneDefinition(**move_definition_dict) assert hash(identical_definition) == hash(move_size_definition) - move_definition_dict['domain'] = (None, 5) + move_definition_dict["domain"] = (None, 5) different_definition = _MoveSizeTuneDefinition(**move_definition_dict) assert hash(different_definition) != hash(move_size_definition) def test_eq(self, move_size_definition, move_definition_dict, simulation): identical_definition = _MoveSizeTuneDefinition(**move_definition_dict) assert identical_definition == move_size_definition - move_definition_dict['domain'] = (None, 5) + move_definition_dict["domain"] = (None, 5) different_definition = _MoveSizeTuneDefinition(**move_definition_dict) assert different_definition != move_size_definition -_move_size_options = [(MoveSize.scale_solver, - dict(trigger=300, - moves=['d'], - target=0.5, - types=['A'], - max_translation_move=5, - max_rotation_move=3., - tol=1e-1)), - (MoveSize.secant_solver, - dict( - trigger=300, - moves=['d'], - target=0.6, - types=['A'], - ))] - - -@pytest.fixture(params=_move_size_options, - ids=lambda x: 'MoveSize-' + x[0].__name__) +_move_size_options = [ + ( + MoveSize.scale_solver, + dict( + trigger=300, + moves=["d"], + target=0.5, + types=["A"], + max_translation_move=5, + max_rotation_move=3.0, + tol=1e-1, + ), + ), + ( + MoveSize.secant_solver, + dict( + trigger=300, + moves=["d"], + target=0.6, + types=["A"], + ), + ), +] + + +@pytest.fixture(params=_move_size_options, ids=lambda x: "MoveSize-" + x[0].__name__) def move_size_tuner_pairs(request): return request.param @@ -119,14 +124,13 @@ def move_size_tuner(move_size_tuner_pairs): class TestMoveSize: - def test_construction(self, move_size_tuner_pairs): move_size_dict = move_size_tuner_pairs[1] move_size = move_size_tuner_pairs[0](**move_size_dict) for attr in move_size_dict: - if attr == 'trigger': + if attr == "trigger": assert getattr(move_size, attr).period == move_size_dict[attr] - elif attr in ['max_rotation_move', 'max_translation_move']: + elif attr in ["max_rotation_move", "max_translation_move"]: assert getattr(move_size, attr).default == move_size_dict[attr] else: try: @@ -135,8 +139,7 @@ def test_construction(self, move_size_tuner_pairs): # have an attribute. 
This allows us to check that all attributes # are getting set correctly. except AttributeError: - assert getattr(move_size.solver, - attr) == move_size_dict[attr] + assert getattr(move_size.solver, attr) == move_size_dict[attr] def test_attach(self, move_size_tuner, simulation): simulation.operations.tuners.append(move_size_tuner) @@ -168,7 +171,7 @@ def test_set_params(self, move_size_tuner): assert all(target == t.target for t in move_size_tuner._tunables) assert target == move_size_tuner.target - max_move = 4. + max_move = 4.0 move_size_tuner.max_translation_move.default = max_move assert move_size_tuner.max_translation_move.default == max_move @@ -176,12 +179,12 @@ def test_set_params(self, move_size_tuner): assert move_size_tuner.max_rotation_move.default == max_move with pytest.raises(ValueError): - move_size_tuner.moves = ['f', 'a'] - move_size_tuner.moves = ['a'] + move_size_tuner.moves = ["f", "a"] + move_size_tuner.moves = ["a"] - move_size_tuner.types = ['A', 'B'] + move_size_tuner.types = ["A", "B"] with pytest.raises(ValueError): - move_size_tuner.types = 'foo' + move_size_tuner.types = "foo" # All tests (using differnt fixtures) combined take about 17 seconds, so # only test during validation diff --git a/hoomd/hpmc/pytest/test_muvt.py b/hoomd/hpmc/pytest/test_muvt.py index 29136493ae..22dcd7aff5 100644 --- a/hoomd/hpmc/pytest/test_muvt.py +++ b/hoomd/hpmc/pytest/test_muvt.py @@ -71,10 +71,10 @@ def test_valid_construction_and_attach( muvt = hoomd.hpmc.update.MuVT(**constructor_args) sim = simulation_factory( - two_particle_snapshot_factory(particle_types=["A", "B"], - dimensions=n_dimensions, - d=2, - L=50)) + two_particle_snapshot_factory( + particle_types=["A", "B"], dimensions=n_dimensions, d=2, L=50 + ) + ) sim.operations.updaters.append(muvt) sim.operations.integrator = mc @@ -89,8 +89,9 @@ def test_valid_construction_and_attach( @pytest.mark.parametrize("attr,value", valid_attrs) def test_valid_setattr(device, attr, value): """Test that MuVT can get and set attributes.""" - muvt = hoomd.hpmc.update.MuVT(trigger=hoomd.trigger.Periodic(10), - transfer_types=["A"]) + muvt = hoomd.hpmc.update.MuVT( + trigger=hoomd.trigger.Periodic(10), transfer_types=["A"] + ) setattr(muvt, attr, value) assert getattr(muvt, attr) == value @@ -98,8 +99,9 @@ def test_valid_setattr(device, attr, value): @pytest.mark.serial @pytest.mark.parametrize("attr,value", valid_attrs) -def test_valid_setattr_attached(device, attr, value, simulation_factory, - two_particle_snapshot_factory, valid_args): +def test_valid_setattr_attached( + device, attr, value, simulation_factory, two_particle_snapshot_factory, valid_args +): """Test that MuVT can get and set attributes while attached.""" integrator = valid_args[0] args = valid_args[1] @@ -117,13 +119,14 @@ def test_valid_setattr_attached(device, attr, value, simulation_factory, mc.shape["A"] = args mc.shape["B"] = args - muvt = hoomd.hpmc.update.MuVT(trigger=hoomd.trigger.Periodic(10), - transfer_types=["A"]) + muvt = hoomd.hpmc.update.MuVT( + trigger=hoomd.trigger.Periodic(10), transfer_types=["A"] + ) sim = simulation_factory( - two_particle_snapshot_factory(particle_types=["A", "B"], - dimensions=n_dimensions, - d=2, - L=50)) + two_particle_snapshot_factory( + particle_types=["A", "B"], dimensions=n_dimensions, d=2, L=50 + ) + ) sim.operations.updaters.append(muvt) sim.operations.integrator = mc @@ -133,23 +136,22 @@ def test_valid_setattr_attached(device, attr, value, simulation_factory, assert getattr(muvt, attr) == value -def 
test_insertion_removal(device, simulation_factory, - lattice_snapshot_factory): +def test_insertion_removal(device, simulation_factory, lattice_snapshot_factory): """Test that MuVT is able to insert and remove particles.""" sim = simulation_factory( - lattice_snapshot_factory(particle_types=["A", "B"], - dimensions=3, - a=4, - n=7, - r=0.1)) + lattice_snapshot_factory( + particle_types=["A", "B"], dimensions=3, a=4, n=7, r=0.1 + ) + ) mc = hoomd.hpmc.integrate.Sphere(default_d=0.1, default_a=0.1) mc.shape["A"] = dict(diameter=1.1) mc.shape["B"] = dict(diameter=1.3) sim.operations.integrator = mc - muvt = hoomd.hpmc.update.MuVT(trigger=hoomd.trigger.Periodic(5), - transfer_types=["B"]) + muvt = hoomd.hpmc.update.MuVT( + trigger=hoomd.trigger.Periodic(5), transfer_types=["B"] + ) sim.operations.updaters.append(muvt) sim.run(0) @@ -173,8 +175,7 @@ def test_insertion_removal(device, simulation_factory, @pytest.mark.cpu -def test_pair_remove_insert(device, simulation_factory, - one_particle_snapshot_factory): +def test_pair_remove_insert(device, simulation_factory, one_particle_snapshot_factory): """Test that MuVT considers pair potentials.""" sim = simulation_factory( one_particle_snapshot_factory( @@ -183,7 +184,8 @@ def test_pair_remove_insert(device, simulation_factory, position=(-5, 0, 0), orientation=(1, 0, 0, 0), L=20, - )) + ) + ) sphere_radius = 0.6 mc = hoomd.hpmc.integrate.Sphere(default_d=0.0, default_a=0.0) @@ -191,21 +193,21 @@ def test_pair_remove_insert(device, simulation_factory, # apply a potential gradient linear = hoomd.hpmc.external.Linear(plane_normal=(1, 0, 0)) - linear.alpha['A'] = 100 + linear.alpha["A"] = 100 mc.external_potentials.append(linear) sim.operations.integrator = mc - muvt = hoomd.hpmc.update.MuVT(trigger=hoomd.trigger.Periodic(1), - transfer_types=["A"]) + muvt = hoomd.hpmc.update.MuVT( + trigger=hoomd.trigger.Periodic(1), transfer_types=["A"] + ) muvt.fugacity["A"] = 1e6 sim.operations.updaters.append(muvt) sim.run(3000) snapshot = sim.state.get_snapshot() if snapshot.communicator.rank == 0: - pos = snapshot.particles.position # We should have added more than one particle to the box @@ -224,8 +226,9 @@ def test_pair_remove_insert(device, simulation_factory, @pytest.mark.cpu -def test_plane_wall_insertion(device, simulation_factory, - one_particle_snapshot_factory): +def test_plane_wall_insertion( + device, simulation_factory, one_particle_snapshot_factory +): """Test that MuVT considers a planar wall when inserting particles.""" sim = simulation_factory( one_particle_snapshot_factory( @@ -234,7 +237,8 @@ def test_plane_wall_insertion(device, simulation_factory, position=(0, 0, 5), orientation=(1, 0, 0, 0), L=20, - )) + ) + ) sphere_radius = 0.6 mc = hoomd.hpmc.integrate.Sphere(default_d=0.0, default_a=0.0) @@ -244,8 +248,9 @@ def test_plane_wall_insertion(device, simulation_factory, mc.external_potentials = [wall_potential] sim.operations.integrator = mc - muvt = hoomd.hpmc.update.MuVT(trigger=hoomd.trigger.Periodic(1), - transfer_types=["A"]) + muvt = hoomd.hpmc.update.MuVT( + trigger=hoomd.trigger.Periodic(1), transfer_types=["A"] + ) muvt.fugacity["A"] = 1000 sim.operations.updaters.append(muvt) sim.run(300) @@ -265,8 +270,9 @@ def test_plane_wall_insertion(device, simulation_factory, @pytest.mark.cpu -def test_spherical_wall_insertion(device, simulation_factory, - one_particle_snapshot_factory): +def test_spherical_wall_insertion( + device, simulation_factory, one_particle_snapshot_factory +): """Test that MuVT considers a spherical wall when inserting 
particles.""" sim = simulation_factory( one_particle_snapshot_factory( @@ -275,7 +281,8 @@ def test_spherical_wall_insertion(device, simulation_factory, position=(0, 0, 0), orientation=(1, 0, 0, 0), L=20, - )) + ) + ) mc = hoomd.hpmc.integrate.Sphere(default_d=0.1, default_a=0.1) sphere_radius = 0.6 @@ -285,8 +292,9 @@ def test_spherical_wall_insertion(device, simulation_factory, mc.external_potentials = [wall_potential] sim.operations.integrator = mc - muvt = hoomd.hpmc.update.MuVT(trigger=hoomd.trigger.Periodic(1), - transfer_types=["A"]) + muvt = hoomd.hpmc.update.MuVT( + trigger=hoomd.trigger.Periodic(1), transfer_types=["A"] + ) muvt.fugacity["A"] = 1000 sim.operations.updaters.append(muvt) sim.run(300) @@ -305,8 +313,9 @@ def test_spherical_wall_insertion(device, simulation_factory, @pytest.mark.cpu -def test_cylindrical_wall_insertion(device, simulation_factory, - one_particle_snapshot_factory): +def test_cylindrical_wall_insertion( + device, simulation_factory, one_particle_snapshot_factory +): """Test that MuVT considers a cylindrical wall when inserting particles.""" sim = simulation_factory( one_particle_snapshot_factory( @@ -315,7 +324,8 @@ def test_cylindrical_wall_insertion(device, simulation_factory, position=(0, 0, 0), orientation=(1, 0, 0, 0), L=20, - )) + ) + ) sphere_radius = 0.6 mc = hoomd.hpmc.integrate.Sphere(default_d=0.0, default_a=0.0) @@ -325,8 +335,9 @@ def test_cylindrical_wall_insertion(device, simulation_factory, mc.external_potentials = [wall_potential] sim.operations.integrator = mc - muvt = hoomd.hpmc.update.MuVT(trigger=hoomd.trigger.Periodic(1), - transfer_types=["A"]) + muvt = hoomd.hpmc.update.MuVT( + trigger=hoomd.trigger.Periodic(1), transfer_types=["A"] + ) muvt.fugacity["A"] = 1000 sim.operations.updaters.append(muvt) sim.run(300) @@ -334,8 +345,7 @@ def test_cylindrical_wall_insertion(device, simulation_factory, if snapshot.communicator.rank == 0: pos = snapshot.particles.position # Test if inserted spheres are inside the cylinder wall - assert numpy.max(numpy.linalg.norm(pos[:, :2], - axis=1)) - sphere_radius <= 5 + assert numpy.max(numpy.linalg.norm(pos[:, :2], axis=1)) - sphere_radius <= 5 assert len(pos) > 1 # We should have inserted particles successfully diff --git a/hoomd/hpmc/pytest/test_pair_angular_step.py b/hoomd/hpmc/pytest/test_pair_angular_step.py index f194cedb4a..af34516d3e 100644 --- a/hoomd/hpmc/pytest/test_pair_angular_step.py +++ b/hoomd/hpmc/pytest/test_pair_angular_step.py @@ -33,7 +33,7 @@ def test_contruction(pair_potential): hpmc.pair.AngularStep(LJ()) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def angular_step_potential(pair_potential): return hpmc.pair.AngularStep(pair_potential) @@ -41,9 +41,9 @@ def angular_step_potential(pair_potential): def _valid_particle_dicts(): valid_dicts = [ # numpy arrays - dict(directors=[np.array([1.0, 0, 0]), - np.array([0, 1.0, 0])], - deltas=[0.1, 0.2]), + dict( + directors=[np.array([1.0, 0, 0]), np.array([0, 1.0, 0])], deltas=[0.1, 0.2] + ), # lists dict(directors=[[1.0, 0, 0], [0, 1.0, 0]], deltas=[0.1, 0.2]), # tuples @@ -52,23 +52,26 @@ def _valid_particle_dicts(): return valid_dicts -@pytest.fixture(scope='module', params=_valid_particle_dicts()) +@pytest.fixture(scope="module", params=_valid_particle_dicts()) def valid_particle_dict(request): return copy.deepcopy(request.param) -@pytest.fixture(scope='module') -def pair_angular_step_simulation_factory(simulation_factory, - two_particle_snapshot_factory): +@pytest.fixture(scope="module") +def 
pair_angular_step_simulation_factory( + simulation_factory, two_particle_snapshot_factory +): """Make two particle sphere simulations with an angular step potential.""" def make_angular_step_sim(d=1, theta_0=0, theta_1=0): snapshot = two_particle_snapshot_factory(d=d) if snapshot.communicator.rank == 0: - snapshot.particles.orientation[0] = rowan.from_axis_angle((0, 0, 1), - theta_0) - snapshot.particles.orientation[1] = rowan.from_axis_angle((0, 0, 1), - theta_1) + snapshot.particles.orientation[0] = rowan.from_axis_angle( + (0, 0, 1), theta_0 + ) + snapshot.particles.orientation[1] = rowan.from_axis_angle( + (0, 0, 1), theta_1 + ) sim = simulation_factory(snapshot) sphere = hpmc.integrate.Sphere() @@ -80,8 +83,9 @@ def make_angular_step_sim(d=1, theta_0=0, theta_1=0): @pytest.mark.cpu -def test_valid_particle_params(pair_angular_step_simulation_factory, - angular_step_potential, valid_particle_dict): +def test_valid_particle_params( + pair_angular_step_simulation_factory, angular_step_potential, valid_particle_dict +): """Test we can set and attach with valid particle params.""" angular_step_potential.mask["A"] = valid_particle_dict sim = pair_angular_step_simulation_factory() @@ -102,26 +106,26 @@ def _invalid_particle_dicts(): # one of the directors tuples is 2 elements dict(directors=[(1.0, 0, 0), (0, 1.0)], deltas=[0.1, 0.2]), # set one of the values set to the wrong type - dict(directors=[(1.0, 0, 0), (0, 1.0, 0)], deltas='invalid'), + dict(directors=[(1.0, 0, 0), (0, 1.0, 0)], deltas="invalid"), # include an unexpected key - dict(directors=[(1.0, 0, 0), (0, 1.0, 0)], - deltas=[0.1, 0.2], - key='invalid'), + dict(directors=[(1.0, 0, 0), (0, 1.0, 0)], deltas=[0.1, 0.2], key="invalid"), ] return invalid_dicts -@pytest.fixture(scope='module', params=_invalid_particle_dicts()) +@pytest.fixture(scope="module", params=_invalid_particle_dicts()) def invalid_particle_dict(request): return copy.deepcopy(request.param) @pytest.mark.cpu -def test_invalid_particle_params(pair_angular_step_simulation_factory, - angular_step_potential, invalid_particle_dict): +def test_invalid_particle_params( + pair_angular_step_simulation_factory, angular_step_potential, invalid_particle_dict +): """Test that invalid parameter combinations result in errors.""" - with pytest.raises((IncompleteSpecificationError, TypeConversionError, - KeyError, RuntimeError)): + with pytest.raises( + (IncompleteSpecificationError, TypeConversionError, KeyError, RuntimeError) + ): angular_step_potential.mask["A"] = invalid_particle_dict sim = pair_angular_step_simulation_factory() sim.operations.integrator.pair_potentials = [angular_step_potential] @@ -129,14 +133,14 @@ def test_invalid_particle_params(pair_angular_step_simulation_factory, @pytest.mark.cpu -def test_get_set_patch_params(pair_angular_step_simulation_factory, - angular_step_potential): +def test_get_set_patch_params( + pair_angular_step_simulation_factory, angular_step_potential +): """Testing getting/setting in multiple ways, before and after attaching.""" # before attaching, setting as dict particle_dict = dict(directors=[(1.0, 0, 0)], deltas=[0.1]) angular_step_potential.mask["A"] = particle_dict - assert angular_step_potential.mask["A"]["directors"] == particle_dict[ - "directors"] + assert angular_step_potential.mask["A"]["directors"] == particle_dict["directors"] assert angular_step_potential.mask["A"]["deltas"] == particle_dict["deltas"] # after attaching, setting as dict @@ -145,10 +149,10 @@ def 
test_get_set_patch_params(pair_angular_step_simulation_factory, sim.run(0) new_particle_dict = dict(directors=[(0, 1, 0)], deltas=[0.2]) angular_step_potential.mask["A"] = new_particle_dict - assert angular_step_potential.mask["A"]["directors"] == new_particle_dict[ - "directors"] - assert angular_step_potential.mask["A"]["deltas"] == new_particle_dict[ - "deltas"] + assert ( + angular_step_potential.mask["A"]["directors"] == new_particle_dict["directors"] + ) + assert angular_step_potential.mask["A"]["deltas"] == new_particle_dict["deltas"] # after attaching, change the director value angular_step_potential.mask["A"]["directors"] = [(0, 0, 1.0)] @@ -163,8 +167,7 @@ def test_get_set_patch_params(pair_angular_step_simulation_factory, @pytest.mark.cpu def test_detach(pair_angular_step_simulation_factory, angular_step_potential): - particle_dict = dict(directors=[(1.0, 0, 0), (0, 1.0, 0)], - deltas=[0.1, 0.2]) + particle_dict = dict(directors=[(1.0, 0, 0), (0, 1.0, 0)], deltas=[0.1, 0.2]) angular_step_potential.mask["A"] = particle_dict sim = pair_angular_step_simulation_factory() sim.operations.integrator.pair_potentials = [angular_step_potential] @@ -179,7 +182,7 @@ def test_detach(pair_angular_step_simulation_factory, angular_step_potential): def lj(r, r_cut, epsilon, sigma): """Compute the lj energy.""" - return 4 * epsilon * ((sigma / r)**12 - (sigma / r)**6) + return 4 * epsilon * ((sigma / r) ** 12 - (sigma / r) ** 6) # Test 1 particle type @@ -263,25 +266,27 @@ def lj(r, r_cut, epsilon, sigma): ] -@pytest.mark.parametrize('params, theta_0, theta_1, d, expected_energy', - angular_step_test_parameters_one_type) +@pytest.mark.parametrize( + "params, theta_0, theta_1, d, expected_energy", + angular_step_test_parameters_one_type, +) @pytest.mark.cpu -def test_energy(pair_angular_step_simulation_factory, params, theta_0, theta_1, - d, expected_energy): +def test_energy( + pair_angular_step_simulation_factory, params, theta_0, theta_1, d, expected_energy +): """Test that LennardJones computes the correct energies for 1 pair.""" - lennard_jones = hpmc.pair.LennardJones(mode='none') - lennard_jones.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0, r_cut=4.0) + lennard_jones = hpmc.pair.LennardJones(mode="none") + lennard_jones.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0, r_cut=4.0) angular_step = hpmc.pair.AngularStep(isotropic_potential=lennard_jones) - angular_step.mask['A'] = params + angular_step.mask["A"] = params - simulation = pair_angular_step_simulation_factory(d=d, - theta_0=theta_0, - theta_1=theta_1) + simulation = pair_angular_step_simulation_factory( + d=d, theta_0=theta_0, theta_1=theta_1 + ) simulation.operations.integrator.pair_potentials = [angular_step] simulation.run(0) - assert angular_step.energy == pytest.approx(expected=expected_energy, - rel=1e-5) + assert angular_step.energy == pytest.approx(expected=expected_energy, rel=1e-5) # Test 2 particle types @@ -377,25 +382,32 @@ def test_energy(pair_angular_step_simulation_factory, params, theta_0, theta_1, @pytest.mark.parametrize( - 'params_0, params_1, theta_0, theta_1, d,' - 'expected_energy', angular_step_test_parameters_two_types) + "params_0, params_1, theta_0, theta_1, d," "expected_energy", + angular_step_test_parameters_two_types, +) @pytest.mark.cpu -def test_energy_two_types(pair_angular_step_simulation_factory, params_0, - params_1, theta_0, theta_1, d, expected_energy): +def test_energy_two_types( + pair_angular_step_simulation_factory, + params_0, + params_1, + theta_0, + theta_1, + d, + expected_energy, 
+): """Test that LennardJones computes the correct energies for 1 pair.""" - lennard_jones = hpmc.pair.LennardJones(mode='none') - lennard_jones.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0, r_cut=4.0) - lennard_jones.params[('A', 'B')] = dict(epsilon=2.0, sigma=1.0, r_cut=4.0) - lennard_jones.params[('B', 'B')] = dict(epsilon=3.0, sigma=1.0, r_cut=4.0) + lennard_jones = hpmc.pair.LennardJones(mode="none") + lennard_jones.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0, r_cut=4.0) + lennard_jones.params[("A", "B")] = dict(epsilon=2.0, sigma=1.0, r_cut=4.0) + lennard_jones.params[("B", "B")] = dict(epsilon=3.0, sigma=1.0, r_cut=4.0) angular_step = hpmc.pair.AngularStep(isotropic_potential=lennard_jones) - angular_step.mask['A'] = params_0 - angular_step.mask['B'] = params_1 + angular_step.mask["A"] = params_0 + angular_step.mask["B"] = params_1 - simulation = pair_angular_step_simulation_factory(d=d, - theta_0=theta_0, - theta_1=theta_1) + simulation = pair_angular_step_simulation_factory( + d=d, theta_0=theta_0, theta_1=theta_1 + ) simulation.operations.integrator.pair_potentials = [angular_step] simulation.run(0) - assert angular_step.energy == pytest.approx(expected=expected_energy, - rel=1e-5) + assert angular_step.energy == pytest.approx(expected=expected_energy, rel=1e-5) diff --git a/hoomd/hpmc/pytest/test_pair_expanded_gaussian.py b/hoomd/hpmc/pytest/test_pair_expanded_gaussian.py index e3040e8b5d..9bbaa9655f 100644 --- a/hoomd/hpmc/pytest/test_pair_expanded_gaussian.py +++ b/hoomd/hpmc/pytest/test_pair_expanded_gaussian.py @@ -11,7 +11,7 @@ {}, dict(default_r_cut=2.5), dict(default_r_on=2.0), - dict(mode='shift'), + dict(mode="shift"), ] @@ -21,7 +21,7 @@ def test_valid_construction(device, constructor_args): hoomd.hpmc.pair.ExpandedGaussian(**constructor_args) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def mc_simulation_factory(simulation_factory, two_particle_snapshot_factory): """Make a MC simulation with two particles separate dy by a distance d.""" @@ -30,7 +30,7 @@ def make_simulation(d=1): simulation = simulation_factory(snapshot) sphere = hoomd.hpmc.integrate.Sphere() - sphere.shape['A'] = dict(diameter=0) + sphere.shape["A"] = dict(diameter=0) simulation.operations.integrator = sphere return simulation @@ -42,10 +42,9 @@ def make_simulation(d=1): def test_attaching(mc_simulation_factory): """Test that ExpandedGaussian attaches.""" expanded_gauss = hoomd.hpmc.pair.ExpandedGaussian() - expanded_gauss.params[('A', 'A')] = dict(epsilon=1.0, - sigma=1.0, - delta=1.0, - r_cut=2.5) + expanded_gauss.params[("A", "A")] = dict( + epsilon=1.0, sigma=1.0, delta=1.0, r_cut=2.5 + ) simulation = mc_simulation_factory() simulation.operations.integrator.pair_potentials = [expanded_gauss] @@ -62,8 +61,8 @@ def test_attaching(mc_simulation_factory): {}, dict(epsilon=1.0), dict(epsilon=1.0, sigma=1.0), - dict(epsilon=1.0, sigma=1.0, delta=1.0, r_cut='invalid'), - dict(epsilon=1.0, sigma=1.0, delta=1.0, r_cut=2.5, r_on='invalid'), + dict(epsilon=1.0, sigma=1.0, delta=1.0, r_cut="invalid"), + dict(epsilon=1.0, sigma=1.0, delta=1.0, r_cut=2.5, r_on="invalid"), dict(epsilon=1.0, sigma=1.0, delta=1.0, r_cut=2.5, r_on=2.0, invalid=10), ] @@ -73,22 +72,23 @@ def test_attaching(mc_simulation_factory): def test_invalid_params_on_attach(mc_simulation_factory, parameters): """Test that ExpandedGaussian validates parameters.""" expanded_gauss = hoomd.hpmc.pair.ExpandedGaussian() - expanded_gauss.params[('A', 'A')] = dict(epsilon=1.0, - sigma=1.0, - delta=1.0, - r_cut=2.5) + 
expanded_gauss.params[("A", "A")] = dict( + epsilon=1.0, sigma=1.0, delta=1.0, r_cut=2.5 + ) # Some parameters are validated only after attaching. simulation = mc_simulation_factory() simulation.operations.integrator.pair_potentials = [expanded_gauss] simulation.run(0) - with pytest.raises(( + with pytest.raises( + ( RuntimeError, hoomd.error.TypeConversionError, KeyError, - )): - expanded_gauss.params[('A', 'A')] = parameters + ) + ): + expanded_gauss.params[("A", "A")] = parameters def xplor_factor(r, r_on, r_cut): @@ -96,8 +96,8 @@ def xplor_factor(r, r_on, r_cut): if r < r_on: return 1 if r < r_cut: - denominator = (r_cut**2 - r_on**2)**3 - numerator = (r_cut**2 - r**2)**2 * (r_cut**2 + 2 * r**2 - 3 * r_on**2) + denominator = (r_cut**2 - r_on**2) ** 3 + numerator = (r_cut**2 - r**2) ** 2 * (r_cut**2 + 2 * r**2 - 3 * r_on**2) return numerator / denominator return 0 @@ -105,7 +105,7 @@ def xplor_factor(r, r_on, r_cut): def eg(r, epsilon, sigma, delta): """Compute the eg energy.""" - return epsilon * math.exp(-0.5 * (((r - delta) / sigma)**2)) + return epsilon * math.exp(-0.5 * (((r - delta) / sigma) ** 2)) # (pair params, @@ -115,108 +115,109 @@ def eg(r, epsilon, sigma, delta): expanded_gauss_test_parameters = [ ( dict(epsilon=2.0, sigma=1.5, delta=1.0, r_cut=2.5), - 'none', + "none", 3.0, 0.0, ), ( dict(epsilon=2.0, sigma=1.5, delta=1.0, r_cut=2.5), - 'none', + "none", 1.0, 2.0, ), ( dict(epsilon=5.0, sigma=1.1, delta=1.0, r_cut=2.0), - 'none', + "none", 1.5, eg(1.5, 5, 1.1, 1.0), ), ( dict(epsilon=5.0, sigma=1.1, delta=1.0, r_cut=2.0), - 'shift', + "shift", 1.5, eg(1.5, 5, 1.1, 1.0) - eg(2.0, 5, 1.1, 1.0), ), ( dict(epsilon=5.0, sigma=1.1, delta=1.0, r_cut=2.5, r_on=2.0), - 'xplor', + "xplor", 1.5, eg(1.5, 5.0, 1.1, 1.0) * xplor_factor(1.5, 2.0, 2.5), ), ( dict(epsilon=5.0, sigma=1.1, delta=1.0, r_cut=2.5, r_on=2.0), - 'xplor', + "xplor", 2.3, eg(2.3, 5, 1.1, 1.0) * xplor_factor(2.3, 2.0, 2.5), ), ( dict(epsilon=5.0, sigma=1.1, delta=1.0, r_cut=2.0, r_on=3), - 'xplor', + "xplor", 1.5, eg(1.5, 5, 1.1, 1.0) - eg(2, 5, 1.1, 1.0), ), ( dict(epsilon=1.0, sigma=1, delta=1.0, r_cut=3.0, r_on=4), - 'xplor', + "xplor", 3.2, 0, ), ] -@pytest.mark.parametrize('params, mode, d, expected_energy', - expanded_gauss_test_parameters) +@pytest.mark.parametrize( + "params, mode, d, expected_energy", expanded_gauss_test_parameters +) @pytest.mark.cpu def test_energy(mc_simulation_factory, params, mode, d, expected_energy): """Test that ExpandedGaussian computes the correct energies for 1 pair.""" expanded_gauss = hoomd.hpmc.pair.ExpandedGaussian(mode=mode) - expanded_gauss.params[('A', 'A')] = params + expanded_gauss.params[("A", "A")] = params simulation = mc_simulation_factory(d=d) simulation.operations.integrator.pair_potentials = [expanded_gauss] simulation.run(0) - assert expanded_gauss.energy == pytest.approx(expected=expected_energy, - rel=1e-5) + assert expanded_gauss.energy == pytest.approx(expected=expected_energy, rel=1e-5) @pytest.mark.cpu def test_multiple_pair_potentials(mc_simulation_factory): """Test that energy operates correctly with multiple pair potentials.""" expanded_gauss_1 = hoomd.hpmc.pair.ExpandedGaussian() - expanded_gauss_1.params[('A', 'A')] = dict(epsilon=1.0, - sigma=1.0, - delta=1.0, - r_cut=2.5) + expanded_gauss_1.params[("A", "A")] = dict( + epsilon=1.0, sigma=1.0, delta=1.0, r_cut=2.5 + ) expanded_gauss_2 = hoomd.hpmc.pair.ExpandedGaussian() - expanded_gauss_2.params[('A', 'A')] = dict(epsilon=2.0, - sigma=1.0, - delta=1.0, - r_cut=2.5) + expanded_gauss_2.params[("A", 
"A")] = dict( + epsilon=2.0, sigma=1.0, delta=1.0, r_cut=2.5 + ) expected_1 = eg(1.5, 1.0, 1.0, 1.0) expected_2 = eg(1.5, 2.0, 1.0, 1.0) # Some parameters are validated only after attaching. simulation = mc_simulation_factory(1.5) simulation.operations.integrator.pair_potentials = [ - expanded_gauss_1, expanded_gauss_2 + expanded_gauss_1, + expanded_gauss_2, ] simulation.run(0) - assert expanded_gauss_1.energy == pytest.approx(expected=expected_1, - rel=1e-5) - assert expanded_gauss_2.energy == pytest.approx(expected=expected_2, - rel=1e-5) + assert expanded_gauss_1.energy == pytest.approx(expected=expected_1, rel=1e-5) + assert expanded_gauss_2.energy == pytest.approx(expected=expected_2, rel=1e-5) assert simulation.operations.integrator.pair_energy == pytest.approx( - expected=expected_1 + expected_2, rel=1e-5) + expected=expected_1 + expected_2, rel=1e-5 + ) def test_logging(): hoomd.conftest.logging_check( - hoomd.hpmc.pair.ExpandedGaussian, ('hpmc', 'pair'), { - 'energy': { - 'category': hoomd.logging.LoggerCategories.scalar, - 'default': True + hoomd.hpmc.pair.ExpandedGaussian, + ("hpmc", "pair"), + { + "energy": { + "category": hoomd.logging.LoggerCategories.scalar, + "default": True, } - }) + }, + ) diff --git a/hoomd/hpmc/pytest/test_pair_lennard_jones.py b/hoomd/hpmc/pytest/test_pair_lennard_jones.py index d0e23a8668..8d89428de7 100644 --- a/hoomd/hpmc/pytest/test_pair_lennard_jones.py +++ b/hoomd/hpmc/pytest/test_pair_lennard_jones.py @@ -10,7 +10,7 @@ {}, dict(default_r_cut=2.5), dict(default_r_on=2.0), - dict(mode='shift'), + dict(mode="shift"), ] @@ -20,7 +20,7 @@ def test_valid_construction(device, constructor_args): hoomd.hpmc.pair.LennardJones(**constructor_args) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def mc_simulation_factory(simulation_factory, two_particle_snapshot_factory): """Make a MC simulation with two particles separate dy by a distance d.""" @@ -29,7 +29,7 @@ def make_simulation(d=1): simulation = simulation_factory(snapshot) sphere = hoomd.hpmc.integrate.Sphere() - sphere.shape['A'] = dict(diameter=0) + sphere.shape["A"] = dict(diameter=0) simulation.operations.integrator = sphere return simulation @@ -41,7 +41,7 @@ def make_simulation(d=1): def test_attaching(mc_simulation_factory): """Test that LennardJones attaches.""" lennard_jones = hoomd.hpmc.pair.LennardJones() - lennard_jones.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0, r_cut=2.5) + lennard_jones.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0, r_cut=2.5) simulation = mc_simulation_factory() simulation.operations.integrator.pair_potentials = [lennard_jones] @@ -58,8 +58,8 @@ def test_attaching(mc_simulation_factory): {}, dict(epsilon=1.0), dict(epsilon=1.0, sigma=1.0), - dict(epsilon=1.0, sigma=1.0, r_cut='invalid'), - dict(epsilon=1.0, sigma=1.0, r_cut=2.5, r_on='invalid'), + dict(epsilon=1.0, sigma=1.0, r_cut="invalid"), + dict(epsilon=1.0, sigma=1.0, r_cut=2.5, r_on="invalid"), dict(epsilon=1.0, sigma=1.0, r_cut=2.5, r_on=2.0, invalid=10), ] @@ -69,19 +69,21 @@ def test_attaching(mc_simulation_factory): def test_invalid_params_on_attach(mc_simulation_factory, parameters): """Test that LennardJones validates parameters.""" lennard_jones = hoomd.hpmc.pair.LennardJones() - lennard_jones.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0, r_cut=2.5) + lennard_jones.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0, r_cut=2.5) # Some parameters are validated only after attaching. 
simulation = mc_simulation_factory() simulation.operations.integrator.pair_potentials = [lennard_jones] simulation.run(0) - with pytest.raises(( + with pytest.raises( + ( RuntimeError, hoomd.error.TypeConversionError, KeyError, - )): - lennard_jones.params[('A', 'A')] = parameters + ) + ): + lennard_jones.params[("A", "A")] = parameters def xplor_factor(r, r_on, r_cut): @@ -89,8 +91,8 @@ def xplor_factor(r, r_on, r_cut): if r < r_on: return 1 if r < r_cut: - denominator = (r_cut**2 - r_on**2)**3 - numerator = (r_cut**2 - r**2)**2 * (r_cut**2 + 2 * r**2 - 3 * r_on**2) + denominator = (r_cut**2 - r_on**2) ** 3 + numerator = (r_cut**2 - r**2) ** 2 * (r_cut**2 + 2 * r**2 - 3 * r_on**2) return numerator / denominator return 0 @@ -98,7 +100,7 @@ def xplor_factor(r, r_on, r_cut): def lj(r, r_cut, epsilon, sigma): """Compute the lj energy.""" - return 4 * epsilon * ((sigma / r)**12 - (sigma / r)**6) + return 4 * epsilon * ((sigma / r) ** 12 - (sigma / r) ** 6) # (pair params, @@ -107,117 +109,123 @@ def lj(r, r_cut, epsilon, sigma): lennard_jones_test_parameters = [ ( dict(epsilon=2.0, sigma=1.5, r_cut=2.5), - 'none', + "none", 3.0, 0.0, ), ( dict(epsilon=2.0, sigma=1.5, r_cut=2.5), - 'none', - 1.5 * 2**(1 / 6), + "none", + 1.5 * 2 ** (1 / 6), -2.0, ), ( dict(epsilon=3.0, sigma=0.5, r_cut=2.5), - 'none', + "none", 0.5, 0, ), ( dict(epsilon=5.0, sigma=1.1, r_cut=2), - 'none', + "none", 1.5, lj(1.5, 2, 5, 1.1), ), ( dict(epsilon=5.0, sigma=1.1, r_cut=2), - 'shift', + "shift", 1.5, lj(1.5, 2, 5, 1.1) - lj(2, 2, 5, 1.1), ), ( dict(epsilon=1.0, sigma=1, r_cut=3), - 'shift', + "shift", 3.2, 0, ), ( dict(epsilon=5.0, sigma=1.1, r_cut=2.5, r_on=2.0), - 'xplor', + "xplor", 1.5, lj(1.5, 2.5, 5.0, 1.1) * xplor_factor(1.5, 2.0, 2.5), ), ( dict(epsilon=5.0, sigma=1.1, r_cut=2.5, r_on=2.0), - 'xplor', + "xplor", 2.3, lj(2.3, 2.5, 5, 1.1) * xplor_factor(2.3, 2.0, 2.5), ), ( dict(epsilon=5.0, sigma=1.1, r_cut=2, r_on=3), - 'xplor', + "xplor", 1.5, lj(1.5, 2, 5, 1.1) - lj(2, 2, 5, 1.1), ), ( dict(epsilon=1.0, sigma=1, r_cut=3, r_on=4), - 'xplor', + "xplor", 3.2, 0, ), ] -@pytest.mark.parametrize('params, mode, d, expected_energy', - lennard_jones_test_parameters) +@pytest.mark.parametrize( + "params, mode, d, expected_energy", lennard_jones_test_parameters +) @pytest.mark.cpu def test_energy(mc_simulation_factory, params, mode, d, expected_energy): """Test that LennardJones computes the correct energies for 1 pair.""" lennard_jones = hoomd.hpmc.pair.LennardJones(mode=mode) - lennard_jones.params[('A', 'A')] = params + lennard_jones.params[("A", "A")] = params simulation = mc_simulation_factory(d=d) simulation.operations.integrator.pair_potentials = [lennard_jones] simulation.run(0) - assert lennard_jones.energy == pytest.approx(expected=expected_energy, - rel=1e-5) + assert lennard_jones.energy == pytest.approx(expected=expected_energy, rel=1e-5) @pytest.mark.cpu def test_multiple_pair_potentials(mc_simulation_factory): """Test that energy operates correctly with multiple pair potentials.""" lennard_jones_1 = hoomd.hpmc.pair.LennardJones() - lennard_jones_1.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0, r_cut=2.5) + lennard_jones_1.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0, r_cut=2.5) lennard_jones_2 = hoomd.hpmc.pair.LennardJones() - lennard_jones_2.params[('A', 'A')] = dict(epsilon=2.0, sigma=1.0, r_cut=2.5) + lennard_jones_2.params[("A", "A")] = dict(epsilon=2.0, sigma=1.0, r_cut=2.5) # Some parameters are validated only after attaching. 
- simulation = mc_simulation_factory(2**(1 / 6)) + simulation = mc_simulation_factory(2 ** (1 / 6)) simulation.operations.integrator.pair_potentials = [ - lennard_jones_1, lennard_jones_2 + lennard_jones_1, + lennard_jones_2, ] simulation.run(0) assert lennard_jones_1.energy == pytest.approx(expected=-1.0, rel=1e-5) assert lennard_jones_2.energy == pytest.approx(expected=-2.0, rel=1e-5) assert simulation.operations.integrator.pair_energy == pytest.approx( - expected=-3.0, rel=1e-5) + expected=-3.0, rel=1e-5 + ) # check that individual energies are computed correctly with varying r_cut - lennard_jones_2.params[('A', 'A')] = dict(epsilon=2.0, sigma=1.0, r_cut=0) + lennard_jones_2.params[("A", "A")] = dict(epsilon=2.0, sigma=1.0, r_cut=0) assert simulation.operations.integrator.pair_energy == pytest.approx( - expected=-1.0, rel=1e-5) + expected=-1.0, rel=1e-5 + ) assert lennard_jones_1.energy == pytest.approx(expected=-1.0, rel=1e-5) assert lennard_jones_2.energy == pytest.approx(expected=0.0, abs=1e-5) def test_logging(): hoomd.conftest.logging_check( - hoomd.hpmc.pair.LennardJones, ('hpmc', 'pair'), { - 'energy': { - 'category': hoomd.logging.LoggerCategories.scalar, - 'default': True + hoomd.hpmc.pair.LennardJones, + ("hpmc", "pair"), + { + "energy": { + "category": hoomd.logging.LoggerCategories.scalar, + "default": True, } - }) + }, + ) diff --git a/hoomd/hpmc/pytest/test_pair_lj_gauss.py b/hoomd/hpmc/pytest/test_pair_lj_gauss.py index c2b9a8f0dc..9e84606174 100644 --- a/hoomd/hpmc/pytest/test_pair_lj_gauss.py +++ b/hoomd/hpmc/pytest/test_pair_lj_gauss.py @@ -11,7 +11,7 @@ {}, dict(default_r_cut=2.5), dict(default_r_on=2.0), - dict(mode='shift'), + dict(mode="shift"), ] @@ -21,7 +21,7 @@ def test_valid_construction(device, constructor_args): hoomd.hpmc.pair.LJGauss(**constructor_args) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def mc_simulation_factory(simulation_factory, two_particle_snapshot_factory): """Make a MC simulation with two particles separate dy by a distance d.""" @@ -30,7 +30,7 @@ def make_simulation(d=1): simulation = simulation_factory(snapshot) sphere = hoomd.hpmc.integrate.Sphere() - sphere.shape['A'] = dict(diameter=0) + sphere.shape["A"] = dict(diameter=0) simulation.operations.integrator = sphere return simulation @@ -42,10 +42,7 @@ def make_simulation(d=1): def test_attaching(mc_simulation_factory): """Test that LJGauss attaches.""" lj_gauss = hoomd.hpmc.pair.LJGauss() - lj_gauss.params[('A', 'A')] = dict(epsilon=1.0, - sigma=0.02, - r0=1.5, - r_cut=2.5) + lj_gauss.params[("A", "A")] = dict(epsilon=1.0, sigma=0.02, r0=1.5, r_cut=2.5) simulation = mc_simulation_factory() simulation.operations.integrator.pair_potentials = [lj_gauss] @@ -62,8 +59,8 @@ def test_attaching(mc_simulation_factory): {}, dict(epsilon=1.0), dict(epsilon=1.0, sigma=0.02), - dict(epsilon=1.0, sigma=0.02, r0=1.5, r_cut='invalid'), - dict(epsilon=1.0, sigma=0.02, r0=1.5, r_cut=2.5, r_on='invalid'), + dict(epsilon=1.0, sigma=0.02, r0=1.5, r_cut="invalid"), + dict(epsilon=1.0, sigma=0.02, r0=1.5, r_cut=2.5, r_on="invalid"), dict(epsilon=1.0, sigma=0.02, r0=1.5, r_cut=2.5, r_on=2.0, invalid=10), ] @@ -73,22 +70,21 @@ def test_attaching(mc_simulation_factory): def test_invalid_params_on_attach(mc_simulation_factory, parameters): """Test that LJGauss validates parameters.""" lj_gauss = hoomd.hpmc.pair.LJGauss() - lj_gauss.params[('A', 'A')] = dict(epsilon=1.0, - sigma=0.02, - r0=1.5, - r_cut=2.5) + lj_gauss.params[("A", "A")] = dict(epsilon=1.0, sigma=0.02, r0=1.5, r_cut=2.5) # 
Some parameters are validated only after attaching. simulation = mc_simulation_factory() simulation.operations.integrator.pair_potentials = [lj_gauss] simulation.run(0) - with pytest.raises(( + with pytest.raises( + ( RuntimeError, hoomd.error.TypeConversionError, KeyError, - )): - lj_gauss.params[('A', 'A')] = parameters + ) + ): + lj_gauss.params[("A", "A")] = parameters def xplor_factor(r, r_on, r_cut): @@ -96,8 +92,8 @@ def xplor_factor(r, r_on, r_cut): if r < r_on: return 1 if r < r_cut: - denominator = (r_cut**2 - r_on**2)**3 - numerator = (r_cut**2 - r**2)**2 * (r_cut**2 + 2 * r**2 - 3 * r_on**2) + denominator = (r_cut**2 - r_on**2) ** 3 + numerator = (r_cut**2 - r**2) ** 2 * (r_cut**2 + 2 * r**2 - 3 * r_on**2) return numerator / denominator return 0 @@ -105,8 +101,7 @@ def xplor_factor(r, r_on, r_cut): def ljg(r, epsilon, sigma, r0): """Compute lj-gauss energy.""" - return (1 / r**12 - - 2 / r**6) - epsilon * np.exp(-(r - r0)**2 / 2 / sigma**2) + return (1 / r**12 - 2 / r**6) - epsilon * np.exp(-((r - r0) ** 2) / 2 / sigma**2) # (pair params, @@ -115,74 +110,73 @@ def ljg(r, epsilon, sigma, r0): lj_gauss_test_parameters = [ ( dict(epsilon=0.0, sigma=0.02, r0=1.5, r_cut=2.5), - 'none', + "none", 1.0, -1.0, ), ( dict(epsilon=1.0, sigma=0.02, r0=1.0, r_cut=2.5), - 'none', + "none", 1.0, -2.0, ), ( dict(epsilon=1.0, sigma=0.5, r0=1.5, r_cut=2.5), - 'none', + "none", 1.0, ljg(1.0, 1.0, 0.5, 1.5), ), ( dict(epsilon=1.0, sigma=0.5, r0=1.5, r_cut=2.5), - 'none', + "none", 1.5, ljg(1.5, 1.0, 0.5, 1.5), ), ( dict(epsilon=1.0, sigma=0.5, r0=1.5, r_cut=2.5), - 'shift', + "shift", 2.0, ljg(2.0, 1.0, 0.5, 1.5) - ljg(2.5, 1.0, 0.5, 1.5), ), ( dict(epsilon=1.0, sigma=0.5, r0=1.5, r_cut=2.5), - 'shift', + "shift", 2.7, 0, ), ( dict(epsilon=1.0, sigma=0.5, r0=1.5, r_cut=2.5, r_on=0.5), - 'xplor', + "xplor", 1.0, ljg(1.0, 1.0, 0.5, 1.5) * xplor_factor(1.0, 0.5, 2.5), ), ( dict(epsilon=1.0, sigma=1.0, r0=1.5, r_cut=2.5, r_on=2.0), - 'xplor', + "xplor", 2.3, ljg(2.3, 1.0, 1.0, 1.5) * xplor_factor(2.3, 2.0, 2.5), ), ( dict(epsilon=1.0, sigma=0.5, r0=1.5, r_cut=2.5, r_on=3.0), - 'xplor', + "xplor", 1.5, ljg(1.5, 1.0, 0.5, 1.5) - ljg(2.5, 1.0, 0.5, 1.5), ), ( dict(epsilon=1.0, sigma=0.5, r0=1.5, r_cut=2.5, r_on=3.0), - 'xplor', + "xplor", 2.7, 0, ), ] -@pytest.mark.parametrize('params, mode, d, expected_energy', - lj_gauss_test_parameters) +@pytest.mark.parametrize("params, mode, d, expected_energy", lj_gauss_test_parameters) @pytest.mark.cpu def test_energy(mc_simulation_factory, params, mode, d, expected_energy): """Test that LJGauss computes the correct energies for 1 pair.""" lj_gauss = hoomd.hpmc.pair.LJGauss(mode=mode) - lj_gauss.params[('A', 'A')] = params + lj_gauss.params[("A", "A")] = params simulation = mc_simulation_factory(d=d) simulation.operations.integrator.pair_potentials = [lj_gauss] @@ -195,16 +189,10 @@ def test_energy(mc_simulation_factory, params, mode, d, expected_energy): def test_multiple_pair_potentials(mc_simulation_factory): """Test that energy operates correctly with multiple pair potentials.""" lj_gauss_1 = hoomd.hpmc.pair.LJGauss() - lj_gauss_1.params[('A', 'A')] = dict(epsilon=0.0, - sigma=0.02, - r0=1.5, - r_cut=2.5) + lj_gauss_1.params[("A", "A")] = dict(epsilon=0.0, sigma=0.02, r0=1.5, r_cut=2.5) lj_gauss_2 = hoomd.hpmc.pair.LJGauss() - lj_gauss_2.params[('A', 'A')] = dict(epsilon=1.0, - sigma=0.02, - r0=1.0, - r_cut=2.5) + lj_gauss_2.params[("A", "A")] = dict(epsilon=1.0, sigma=0.02, r0=1.0, r_cut=2.5) # Some parameters are validated only after attaching. 
simulation = mc_simulation_factory(1.0) @@ -214,14 +202,18 @@ def test_multiple_pair_potentials(mc_simulation_factory): assert lj_gauss_1.energy == pytest.approx(expected=-1.0, rel=1e-5) assert lj_gauss_2.energy == pytest.approx(expected=-2.0, rel=1e-5) assert simulation.operations.integrator.pair_energy == pytest.approx( - expected=-3.0, rel=1e-5) + expected=-3.0, rel=1e-5 + ) def test_logging(): hoomd.conftest.logging_check( - hoomd.hpmc.pair.LJGauss, ('hpmc', 'pair'), { - 'energy': { - 'category': hoomd.logging.LoggerCategories.scalar, - 'default': True + hoomd.hpmc.pair.LJGauss, + ("hpmc", "pair"), + { + "energy": { + "category": hoomd.logging.LoggerCategories.scalar, + "default": True, } - }) + }, + ) diff --git a/hoomd/hpmc/pytest/test_pair_opp.py b/hoomd/hpmc/pytest/test_pair_opp.py index 757dd1b923..ecc3c6ce14 100644 --- a/hoomd/hpmc/pytest/test_pair_opp.py +++ b/hoomd/hpmc/pytest/test_pair_opp.py @@ -11,7 +11,7 @@ {}, dict(default_r_cut=3.0), dict(default_r_on=2.0), - dict(mode='shift'), + dict(mode="shift"), ] @@ -21,7 +21,7 @@ def test_valid_construction(device, constructor_args): hoomd.hpmc.pair.OPP(**constructor_args) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def mc_simulation_factory(simulation_factory, two_particle_snapshot_factory): """Make a MC simulation with two particles separate dy by a distance d.""" @@ -30,7 +30,7 @@ def make_simulation(d=1): simulation = simulation_factory(snapshot) sphere = hoomd.hpmc.integrate.Sphere() - sphere.shape['A'] = dict(diameter=0) + sphere.shape["A"] = dict(diameter=0) simulation.operations.integrator = sphere return simulation @@ -42,13 +42,9 @@ def make_simulation(d=1): def test_attaching(mc_simulation_factory): """Test that OPP attaches.""" opp = hoomd.hpmc.pair.OPP() - opp.params[('A', 'A')] = dict(C1=1., - C2=1., - eta1=15, - eta2=3, - k=1.0, - phi=np.pi, - r_cut=3.0) + opp.params[("A", "A")] = dict( + C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0 + ) simulation = mc_simulation_factory() simulation.operations.integrator.pair_potentials = [opp] @@ -61,37 +57,28 @@ def test_attaching(mc_simulation_factory): assert not opp._attached -invalid_parameters = [{}, - dict(C1=1.), - dict(C1=1., C2=1.), - dict(C1=1., C2=1., eta1=15), - dict(C1=1., C2=1., eta1=15, eta2=3), - dict(C1=1., C2=1., eta1=15, eta2=3, k=1.0), - dict(C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi), - dict(C1=1., - C2=1., - eta1=15, - eta2=3, - k=1.0, - phi=np.pi, - r_cut='invalid'), - dict(C1=1., - C2=1., - eta1=15, - eta2=3, - k=1.0, - phi=np.pi, - r_cut=3.0, - r_on='invalid'), - dict(C1=1., - C2=1., - eta1=15, - eta2=3, - k=1.0, - phi=np.pi, - r_cut=3.0, - r_on=2.0, - invalid=10)] +invalid_parameters = [ + {}, + dict(C1=1.0), + dict(C1=1.0, C2=1.0), + dict(C1=1.0, C2=1.0, eta1=15), + dict(C1=1.0, C2=1.0, eta1=15, eta2=3), + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0), + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi), + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut="invalid"), + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0, r_on="invalid"), + dict( + C1=1.0, + C2=1.0, + eta1=15, + eta2=3, + k=1.0, + phi=np.pi, + r_cut=3.0, + r_on=2.0, + invalid=10, + ), +] @pytest.mark.parametrize("parameters", invalid_parameters) @@ -99,25 +86,23 @@ def test_attaching(mc_simulation_factory): def test_invalid_params_on_attach(mc_simulation_factory, parameters): """Test that OPP validates parameters.""" opp = hoomd.hpmc.pair.OPP() - opp.params[('A', 'A')] = dict(C1=1., - C2=1., - eta1=15, - eta2=3, 
- k=1.0, - phi=np.pi, - r_cut=3.0) + opp.params[("A", "A")] = dict( + C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0 + ) # Some parameters are validated only after attaching. simulation = mc_simulation_factory() simulation.operations.integrator.pair_potentials = [opp] simulation.run(0) - with pytest.raises(( + with pytest.raises( + ( RuntimeError, hoomd.error.TypeConversionError, KeyError, - )): - opp.params[('A', 'A')] = parameters + ) + ): + opp.params[("A", "A")] = parameters def xplor_factor(r, r_on, r_cut): @@ -125,8 +110,8 @@ def xplor_factor(r, r_on, r_cut): if r < r_on: return 1 if r < r_cut: - denominator = (r_cut**2 - r_on**2)**3 - numerator = (r_cut**2 - r**2)**2 * (r_cut**2 + 2 * r**2 - 3 * r_on**2) + denominator = (r_cut**2 - r_on**2) ** 3 + numerator = (r_cut**2 - r**2) ** 2 * (r_cut**2 + 2 * r**2 - 3 * r_on**2) return numerator / denominator return 0 @@ -134,7 +119,7 @@ def xplor_factor(r, r_on, r_cut): def vopp(r, C1, C2, eta1, eta2, k, phi): """Compute opp energy.""" - return C1 * r**(-eta1) + C2 * r**(-eta2) * np.cos(k * r - phi) + return C1 * r ** (-eta1) + C2 * r ** (-eta2) * np.cos(k * r - phi) # (pair params, @@ -142,137 +127,108 @@ def vopp(r, C1, C2, eta1, eta2, k, phi): # expected energy) lj_gauss_test_parameters = [ ( - dict(C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), - 'none', + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), + "none", 1.0, - vopp(r=1.0, C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi), + vopp(r=1.0, C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi), ), ( - dict(C1=1., C2=1., eta1=12, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), - 'none', + dict(C1=1.0, C2=1.0, eta1=12, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), + "none", 1.0, - vopp(r=1.0, C1=1., C2=1., eta1=12, eta2=3, k=1.0, phi=np.pi), + vopp(r=1.0, C1=1.0, C2=1.0, eta1=12, eta2=3, k=1.0, phi=np.pi), ), ( - dict(C1=1., C2=1., eta1=15, eta2=5, k=1.0, phi=np.pi, r_cut=3.0), - 'none', + dict(C1=1.0, C2=1.0, eta1=15, eta2=5, k=1.0, phi=np.pi, r_cut=3.0), + "none", 1.0, - vopp(r=1.0, C1=1., C2=1., eta1=15, eta2=5, k=1.0, phi=np.pi), + vopp(r=1.0, C1=1.0, C2=1.0, eta1=15, eta2=5, k=1.0, phi=np.pi), ), ( - dict(C1=1., C2=1., eta1=15, eta2=3, k=3.0, phi=np.pi, r_cut=3.0), - 'none', + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=3.0, phi=np.pi, r_cut=3.0), + "none", 1.0, - vopp(r=1.0, C1=1., C2=1., eta1=15, eta2=3, k=3.0, phi=np.pi), + vopp(r=1.0, C1=1.0, C2=1.0, eta1=15, eta2=3, k=3.0, phi=np.pi), ), ( - dict(C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi / 4, r_cut=3.0), - 'none', + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi / 4, r_cut=3.0), + "none", 1.0, - vopp(r=1.0, C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi / 4), + vopp(r=1.0, C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi / 4), ), ( - dict(C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), - 'none', + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), + "none", 1.5, - vopp(r=1.5, C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi), + vopp(r=1.5, C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi), ), ( - dict(C1=5., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), - 'none', + dict(C1=5.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), + "none", 1.0, - vopp(r=1.0, C1=5., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi), + vopp(r=1.0, C1=5.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi), ), ( - dict(C1=5., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), - 'none', + dict(C1=5.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), + "none", 1.5, - 
vopp(r=1.5, C1=5., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi), + vopp(r=1.5, C1=5.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi), ), ( - dict(C1=5., C2=2.5, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), - 'none', + dict(C1=5.0, C2=2.5, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), + "none", 1.5, - vopp(r=1.5, C1=5., C2=2.5, eta1=15, eta2=3, k=1.0, phi=np.pi), + vopp(r=1.5, C1=5.0, C2=2.5, eta1=15, eta2=3, k=1.0, phi=np.pi), ), ( - dict(C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), - 'shift', + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), + "shift", 2.0, - vopp(r=2.0, C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi) - - vopp(r=3.0, C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi), + vopp(r=2.0, C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi) + - vopp(r=3.0, C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi), ), ( - dict(C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), - 'shift', + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0), + "shift", 3.2, 0, ), ( - dict(C1=1., - C2=1., - eta1=15, - eta2=3, - k=1.0, - phi=np.pi, - r_cut=3.0, - r_on=1.0), - 'xplor', + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0, r_on=1.0), + "xplor", 1.5, - vopp(r=1.5, C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi) + vopp(r=1.5, C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi) * xplor_factor(1.5, 1.0, 3.0), ), ( - dict(C1=1., - C2=1., - eta1=15, - eta2=3, - k=1.0, - phi=np.pi, - r_cut=3.0, - r_on=2.0), - 'xplor', + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0, r_on=2.0), + "xplor", 2.5, - vopp(r=2.5, C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi) + vopp(r=2.5, C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi) * xplor_factor(2.5, 2.0, 3.0), ), ( - dict(C1=1., - C2=1., - eta1=15, - eta2=3, - k=1.0, - phi=np.pi, - r_cut=3.0, - r_on=3.5), - 'xplor', + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0, r_on=3.5), + "xplor", 1.5, - vopp(r=1.5, C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi) - - vopp(r=3.0, C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi), + vopp(r=1.5, C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi) + - vopp(r=3.0, C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi), ), ( - dict(C1=1., - C2=1., - eta1=15, - eta2=3, - k=1.0, - phi=np.pi, - r_cut=3.0, - r_on=3.5), - 'xplor', + dict(C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0, r_on=3.5), + "xplor", 3.2, 0, ), ] -@pytest.mark.parametrize('params, mode, d, expected_energy', - lj_gauss_test_parameters) +@pytest.mark.parametrize("params, mode, d, expected_energy", lj_gauss_test_parameters) @pytest.mark.cpu def test_energy(mc_simulation_factory, params, mode, d, expected_energy): """Test that OPP computes the correct energies for 1 pair.""" opp = hoomd.hpmc.pair.OPP(mode=mode) - opp.params[('A', 'A')] = params + opp.params[("A", "A")] = params simulation = mc_simulation_factory(d=d) simulation.operations.integrator.pair_potentials = [opp] @@ -285,25 +241,17 @@ def test_energy(mc_simulation_factory, params, mode, d, expected_energy): def test_multiple_pair_potentials(mc_simulation_factory): """Test that energy operates correctly with multiple pair potentials.""" opp_1 = hoomd.hpmc.pair.OPP() - opp_1.params[('A', 'A')] = dict(C1=1., - C2=1., - eta1=15, - eta2=3, - k=1.0, - phi=np.pi, - r_cut=3.0) + opp_1.params[("A", "A")] = dict( + C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0 + ) opp_2 = hoomd.hpmc.pair.OPP() - opp_2.params[('A', 'A')] = dict(C1=5., - C2=1., - eta1=15, - eta2=3, - k=1.0, - 
phi=np.pi, - r_cut=3.0) + opp_2.params[("A", "A")] = dict( + C1=5.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi, r_cut=3.0 + ) - expected_1 = vopp(1.0, C1=1., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi) - expected_2 = vopp(1.0, C1=5., C2=1., eta1=15, eta2=3, k=1.0, phi=np.pi) + expected_1 = vopp(1.0, C1=1.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi) + expected_2 = vopp(1.0, C1=5.0, C2=1.0, eta1=15, eta2=3, k=1.0, phi=np.pi) # Some parameters are validated only after attaching. simulation = mc_simulation_factory(1.0) @@ -313,14 +261,18 @@ def test_multiple_pair_potentials(mc_simulation_factory): assert opp_1.energy == pytest.approx(expected=expected_1, rel=1e-5) assert opp_2.energy == pytest.approx(expected=expected_2, rel=1e-5) assert simulation.operations.integrator.pair_energy == pytest.approx( - expected=expected_1 + expected_2, rel=1e-5) + expected=expected_1 + expected_2, rel=1e-5 + ) def test_logging(): hoomd.conftest.logging_check( - hoomd.hpmc.pair.OPP, ('hpmc', 'pair'), { - 'energy': { - 'category': hoomd.logging.LoggerCategories.scalar, - 'default': True + hoomd.hpmc.pair.OPP, + ("hpmc", "pair"), + { + "energy": { + "category": hoomd.logging.LoggerCategories.scalar, + "default": True, } - }) + }, + ) diff --git a/hoomd/hpmc/pytest/test_pair_step.py b/hoomd/hpmc/pytest/test_pair_step.py index 50e824b40e..b86173a57b 100644 --- a/hoomd/hpmc/pytest/test_pair_step.py +++ b/hoomd/hpmc/pytest/test_pair_step.py @@ -12,7 +12,7 @@ def test_valid_construction(device): hoomd.hpmc.pair.Step() -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def mc_simulation_factory(simulation_factory, two_particle_snapshot_factory): """Make a MC simulation with two particles separate dy by a distance d.""" @@ -21,7 +21,7 @@ def make_simulation(d=1): simulation = simulation_factory(snapshot) sphere = hoomd.hpmc.integrate.Sphere() - sphere.shape['A'] = dict(diameter=0) + sphere.shape["A"] = dict(diameter=0) simulation.operations.integrator = sphere return simulation @@ -33,7 +33,7 @@ def make_simulation(d=1): def test_attaching(mc_simulation_factory): """Test that Step attaches.""" step = hoomd.hpmc.pair.Step() - step.params[('A', 'A')] = dict(epsilon=[1.0], r=[1.5]) + step.params[("A", "A")] = dict(epsilon=[1.0], r=[1.5]) simulation = mc_simulation_factory() simulation.operations.integrator.pair_potentials = [step] @@ -50,8 +50,8 @@ def test_attaching(mc_simulation_factory): {}, dict(epsilon=[1.0]), dict(epsilon=[1.0], r=0.5), - dict(epsilon=[1.0], r='invalid'), - dict(epsilon='invalid', r=[1.0]), + dict(epsilon=[1.0], r="invalid"), + dict(epsilon="invalid", r=[1.0]), dict(epsilon=[1.0, 2.0], r=[0.5]), dict(epsilon=[1.0], r=[0.5], invalid=10), dict(epsilon=[1.0, 2.0], r=[1.0, 0.5]), @@ -64,94 +64,112 @@ def test_attaching(mc_simulation_factory): def test_invalid_params_on_attach(mc_simulation_factory, parameters): """Test that Step validates parameters.""" step = hoomd.hpmc.pair.Step() - step.params[('A', 'A')] = dict(epsilon=[1.0], r=[1.5]) + step.params[("A", "A")] = dict(epsilon=[1.0], r=[1.5]) # Some parameters are validated only after attaching. 
simulation = mc_simulation_factory() simulation.operations.integrator.pair_potentials = [step] simulation.run(0) - with pytest.raises(( + with pytest.raises( + ( RuntimeError, hoomd.error.TypeConversionError, KeyError, ValueError, - )): - step.params[('A', 'A')] = parameters + ) + ): + step.params[("A", "A")] = parameters # (pair params, # distance between particles, # expected energy) -step_test_parameters = [( - dict(epsilon=[-1.125], r=[0.5]), - 3.0, - 0.0, -), ( - dict(epsilon=[-1.125], r=[0.5]), - 0.5125, - 0.0, -), ( - dict(epsilon=[-1.125], r=[0.5]), - 0.5, - 0, -), ( - dict(epsilon=[-1.125], r=[0.5]), - 0.25, - -1.125, -), ( - dict(epsilon=[-1.125], r=[0.5]), - 0.0, - -1.125, -), ( - dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), - 2.5, - 0, -), ( - dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), - 2.4, - 3, -), ( - dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), - 1.6, - 3, -), ( - dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), - 1.5, - 3, -), ( - dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), - 1.49, - 2, -), ( - dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), - 0.6, - 2, -), ( - dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), - 0.5, - 2, -), ( - dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), - 0.49, - 1, -), ( - dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), - 0.0, - 1, -), ( - None, - 0.0, - 0.0, -)] - - -@pytest.mark.parametrize('params, d, expected_energy', step_test_parameters) +step_test_parameters = [ + ( + dict(epsilon=[-1.125], r=[0.5]), + 3.0, + 0.0, + ), + ( + dict(epsilon=[-1.125], r=[0.5]), + 0.5125, + 0.0, + ), + ( + dict(epsilon=[-1.125], r=[0.5]), + 0.5, + 0, + ), + ( + dict(epsilon=[-1.125], r=[0.5]), + 0.25, + -1.125, + ), + ( + dict(epsilon=[-1.125], r=[0.5]), + 0.0, + -1.125, + ), + ( + dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), + 2.5, + 0, + ), + ( + dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), + 2.4, + 3, + ), + ( + dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), + 1.6, + 3, + ), + ( + dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), + 1.5, + 3, + ), + ( + dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), + 1.49, + 2, + ), + ( + dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), + 0.6, + 2, + ), + ( + dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), + 0.5, + 2, + ), + ( + dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), + 0.49, + 1, + ), + ( + dict(epsilon=[1, 2, 3], r=[0.5, 1.5, 2.5]), + 0.0, + 1, + ), + ( + None, + 0.0, + 0.0, + ), +] + + +@pytest.mark.parametrize("params, d, expected_energy", step_test_parameters) @pytest.mark.cpu def test_energy(mc_simulation_factory, params, d, expected_energy): """Test that Step computes the correct energies for 1 pair.""" step = hoomd.hpmc.pair.Step() - step.params[('A', 'A')] = params + step.params[("A", "A")] = params simulation = mc_simulation_factory(d=d) simulation.operations.integrator.pair_potentials = [step] @@ -162,9 +180,12 @@ def test_energy(mc_simulation_factory, params, d, expected_energy): def test_logging(): hoomd.conftest.logging_check( - hoomd.hpmc.pair.Step, ('hpmc', 'pair'), { - 'energy': { - 'category': hoomd.logging.LoggerCategories.scalar, - 'default': True + hoomd.hpmc.pair.Step, + ("hpmc", "pair"), + { + "energy": { + "category": hoomd.logging.LoggerCategories.scalar, + "default": True, } - }) + }, + ) diff --git a/hoomd/hpmc/pytest/test_pair_union.py b/hoomd/hpmc/pytest/test_pair_union.py index 30628d1fa8..40c4cc0845 100644 --- a/hoomd/hpmc/pytest/test_pair_union.py +++ b/hoomd/hpmc/pytest/test_pair_union.py @@ -34,7 +34,7 @@ def test_contruction(pair_potential): hpmc.pair.Union(LJ()) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") 
def union_potential(pair_potential): return hpmc.pair.Union(pair_potential) @@ -42,15 +42,19 @@ def union_potential(pair_potential): def _valid_body_dicts(): valid_dicts = [ # numpy arrays - dict(types=["A", "A"], - positions=np.array([[0, 0, 1.0], [0, 0, -1.0]]), - orientations=rowan.random.rand(2), - charges=[-0.5, 0.5]), + dict( + types=["A", "A"], + positions=np.array([[0, 0, 1.0], [0, 0, -1.0]]), + orientations=rowan.random.rand(2), + charges=[-0.5, 0.5], + ), # tuples - dict(types=["A", "A"], - positions=[(0, 0, 1.0), (0, 0, -1.0)], - orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], - charges=[-0.5, 0.5]), + dict( + types=["A", "A"], + positions=[(0, 0, 1.0), (0, 0, -1.0)], + orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], + charges=[-0.5, 0.5], + ), # orientations and charges should have defaults dict(types=["A", "A"], positions=[(0, 0, 1.0), (0, 0, -1.0)]), # No constituents @@ -59,19 +63,19 @@ def _valid_body_dicts(): return valid_dicts -@pytest.fixture(scope='module', params=_valid_body_dicts()) +@pytest.fixture(scope="module", params=_valid_body_dicts()) def valid_body_dict(request): return copy.deepcopy(request.param) -@pytest.fixture(scope='module') -def pair_union_simulation_factory(simulation_factory, - two_particle_snapshot_factory): +@pytest.fixture(scope="module") +def pair_union_simulation_factory(simulation_factory, two_particle_snapshot_factory): """Make two particle sphere simulations with a union potential.""" - def make_union_sim(union_potential, particle_types=['A'], d=1, L=20): + def make_union_sim(union_potential, particle_types=["A"], d=1, L=20): sim = simulation_factory( - two_particle_snapshot_factory(particle_types, d=d, L=L)) + two_particle_snapshot_factory(particle_types, d=d, L=L) + ) sphere = hpmc.integrate.Sphere() sphere.shape["A"] = dict(diameter=2.0) sphere.pair_potentials = [union_potential] @@ -82,8 +86,9 @@ def make_union_sim(union_potential, particle_types=['A'], d=1, L=20): @pytest.mark.cpu -def test_valid_body_params(pair_union_simulation_factory, union_potential, - valid_body_dict): +def test_valid_body_params( + pair_union_simulation_factory, union_potential, valid_body_dict +): """Test we can set and attach with valid body params.""" union_potential.body["A"] = valid_body_dict sim = pair_union_simulation_factory(union_potential) @@ -93,48 +98,62 @@ def test_valid_body_params(pair_union_simulation_factory, union_potential, def _invalid_body_dicts(): invalid_dicts = [ # missing types - dict(positions=[(0, 0, 1.0), (0, 0, -1.0)], - orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], - charges=[-0.5, 0.5]), + dict( + positions=[(0, 0, 1.0), (0, 0, -1.0)], + orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], + charges=[-0.5, 0.5], + ), # missing positions - dict(types=["A", "A"], - orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], - charges=[-0.5, 0.5]), + dict( + types=["A", "A"], + orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], + charges=[-0.5, 0.5], + ), # positions list too short - dict(types=["A", "A"], - positions=[(0, 0, 1.0)], - orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], - charges=[-0.5, 0.5]), + dict( + types=["A", "A"], + positions=[(0, 0, 1.0)], + orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], + charges=[-0.5, 0.5], + ), # one of the orientations tuples is 3 elements - dict(types=["A", "A"], - positions=[(0, 0, 1.0), (0, 0, -1.0)], - orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5)], - charges=[-0.5, 0.5]), + dict( 
+ types=["A", "A"], + positions=[(0, 0, 1.0), (0, 0, -1.0)], + orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5)], + charges=[-0.5, 0.5], + ), # one of the positions tuples is 2 elements - dict(types=["A", "A"], - positions=[(0, 0, 1.0), (0, -1.0)], - orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], - charges=[-0.5, 0.5]), + dict( + types=["A", "A"], + positions=[(0, 0, 1.0), (0, -1.0)], + orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], + charges=[-0.5, 0.5], + ), # set one of the values set to the wrong type - dict(types=["A", "A"], - positions=[(0, 0, 1.0), (0, 0, -1.0)], - orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], - charges='invalid'), + dict( + types=["A", "A"], + positions=[(0, 0, 1.0), (0, 0, -1.0)], + orientations=[(0.5, 0.5, -0.5, -0.5), (-0.5, 0.5, -0.5, 0.5)], + charges="invalid", + ), ] return invalid_dicts -@pytest.fixture(scope='module', params=_invalid_body_dicts()) +@pytest.fixture(scope="module", params=_invalid_body_dicts()) def invalid_body_dict(request): return copy.deepcopy(request.param) @pytest.mark.cpu -def test_invalid_body_params(pair_union_simulation_factory, union_potential, - invalid_body_dict): +def test_invalid_body_params( + pair_union_simulation_factory, union_potential, invalid_body_dict +): """Test that invalid parameter combinations result in errors.""" - with pytest.raises((IncompleteSpecificationError, TypeConversionError, - KeyError, RuntimeError)): + with pytest.raises( + (IncompleteSpecificationError, TypeConversionError, KeyError, RuntimeError) + ): union_potential.body["A"] = invalid_body_dict sim = pair_union_simulation_factory(union_potential) sim.run(0) @@ -143,8 +162,9 @@ def test_invalid_body_params(pair_union_simulation_factory, union_potential, @pytest.mark.cpu def test_default_body_params(pair_union_simulation_factory, union_potential): """Test default values for charges and orientations.""" - union_potential.body["A"] = dict(types=["A", "A"], - positions=[(0, 0, 1.0), (0, 0, -1.0)]) + union_potential.body["A"] = dict( + types=["A", "A"], positions=[(0, 0, 1.0), (0, 0, -1.0)] + ) sim = pair_union_simulation_factory(union_potential) sim.run(0) @@ -157,17 +177,17 @@ def test_default_body_params(pair_union_simulation_factory, union_potential): def test_get_set_body_params(pair_union_simulation_factory, union_potential): """Testing getting/setting in multiple ways, before and after attaching.""" # before attaching, setting as dict - body_dict = dict(types=['A'], positions=[(0, 0, 1)]) + body_dict = dict(types=["A"], positions=[(0, 0, 1)]) union_potential.body["A"] = body_dict assert union_potential.body["A"]["positions"] == body_dict["positions"] assert union_potential.body["A"]["types"] == body_dict["types"] - assert 'orientations' not in union_potential.body["A"] - assert 'charges' not in union_potential.body["A"] + assert "orientations" not in union_potential.body["A"] + assert "charges" not in union_potential.body["A"] # after attaching, setting as dict sim = pair_union_simulation_factory(union_potential) sim.run(0) - new_body_dict = dict(types=['A'], positions=[(0, 1, 0)]) + new_body_dict = dict(types=["A"], positions=[(0, 1, 0)]) union_potential.body["A"] = new_body_dict assert union_potential.body["A"]["positions"] == new_body_dict["positions"] assert union_potential.body["A"]["types"] == new_body_dict["types"] @@ -195,14 +215,11 @@ def test_get_set_properties(pair_union_simulation_factory, union_potential): # assert values are right on construction assert 
union_potential.leaf_capacity == 0 lj = union_potential.constituent_potential - assert lj.params[('A', 'A')] == dict(epsilon=1.0, - sigma=1.0, - r_cut=2.0, - r_on=0.0) + assert lj.params[("A", "A")] == dict(epsilon=1.0, sigma=1.0, r_cut=2.0, r_on=0.0) # try to set params lj2 = hpmc.pair.LennardJones() - lj2.params[('A', 'A')] = dict(epsilon=0.5, sigma=2.0, r_cut=3.0) + lj2.params[("A", "A")] = dict(epsilon=0.5, sigma=2.0, r_cut=3.0) with pytest.raises(AttributeError): union_potential.constituent_potential = lj2 union_potential.leaf_capacity = 3 @@ -220,7 +237,7 @@ def test_get_set_properties(pair_union_simulation_factory, union_potential): @pytest.mark.cpu def test_detach(pair_union_simulation_factory, union_potential): - body_dict = dict(types=['A'], positions=[(0, 0, 1)]) + body_dict = dict(types=["A"], positions=[(0, 0, 1)]) union_potential.body["A"] = body_dict sim = pair_union_simulation_factory(union_potential) sim.run(0) @@ -243,14 +260,15 @@ def test_energy(pair_union_simulation_factory, union_potential, leaf_capacity): lj.params[("B", "B")] = dict(epsilon=3.0, sigma=1.0, r_cut=4.0) lj.params[("A", "B")] = dict(epsilon=2.0, sigma=1.0, r_cut=4.0) lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0, r_cut=4.0) - union_potential.body["A"] = dict(types=['A', 'B'], - positions=[(-1, 0, 0), (1, 0, 0)]) - union_potential.body["B"] = dict(types=['A', 'B'], - positions=[(-1, 0, 0), (1, 0, 0)]) - sim = pair_union_simulation_factory(union_potential, - particle_types=['A', 'B'], - d=3, - L=30) + union_potential.body["A"] = dict( + types=["A", "B"], positions=[(-1, 0, 0), (1, 0, 0)] + ) + union_potential.body["B"] = dict( + types=["A", "B"], positions=[(-1, 0, 0), (1, 0, 0)] + ) + sim = pair_union_simulation_factory( + union_potential, particle_types=["A", "B"], d=3, L=30 + ) sim.operations.integrator.shape["B"] = dict(diameter=2) sim.run(0) @@ -258,6 +276,7 @@ def lj_energy(epsilon, sigma, distance): sdivd = sigma / distance return 4 * epsilon * (sdivd**12 - sdivd**6) - system_energy = lj_energy(1.0, 1.0, 3.0) + lj_energy( - 3.0, 1.0, 3.0) + lj_energy(2.0, 1.0, 1.0) + system_energy = ( + lj_energy(1.0, 1.0, 3.0) + lj_energy(3.0, 1.0, 3.0) + lj_energy(2.0, 1.0, 1.0) + ) npt.assert_allclose(system_energy, union_potential.energy) diff --git a/hoomd/hpmc/pytest/test_quick_compress.py b/hoomd/hpmc/pytest/test_quick_compress.py index a8235f5317..c223db525c 100644 --- a/hoomd/hpmc/pytest/test_quick_compress.py +++ b/hoomd/hpmc/pytest/test_quick_compress.py @@ -13,68 +13,94 @@ # here that require preprocessing valid_constructor_args = [ # 3d box from constant box variant - dict(trigger=hoomd.trigger.Periodic(10), - target_box=hoomd.variant.box.Constant(hoomd.Box.from_box([10, 10, - 10]))), + dict( + trigger=hoomd.trigger.Periodic(10), + target_box=hoomd.variant.box.Constant(hoomd.Box.from_box([10, 10, 10])), + ), # 3d box with box object - dict(trigger=hoomd.trigger.After(100), - target_box=hoomd.Box.from_box([10, 20, 40]), - max_overlaps_per_particle=0.2), + dict( + trigger=hoomd.trigger.After(100), + target_box=hoomd.Box.from_box([10, 20, 40]), + max_overlaps_per_particle=0.2, + ), # 2d box with box object - dict(trigger=hoomd.trigger.Before(100), - target_box=hoomd.Box.from_box([50, 50]), - min_scale=0.75), + dict( + trigger=hoomd.trigger.Before(100), + target_box=hoomd.Box.from_box([50, 50]), + min_scale=0.75, + ), # 2d box with box variant - dict(trigger=hoomd.trigger.Before(100), - target_box=hoomd.variant.box.Constant(hoomd.Box.from_box([50, 50])), - min_scale=0.75), - 
dict(trigger=hoomd.trigger.Periodic(1000), - target_box=hoomd.variant.box.Constant( - hoomd.Box.from_box([80, 50, 40, 0.2, 0.4, 0.5])), - max_overlaps_per_particle=0.2, - min_scale=0.999, - allow_unsafe_resize=True), - dict(trigger=hoomd.trigger.Periodic(1000), - target_box=hoomd.variant.box.Constant( - hoomd.Box.from_box([80, 50, 40, -0.2, 0.4, 0.5])), - max_overlaps_per_particle=0.2, - min_scale=0.999), - dict(trigger=hoomd.trigger.Periodic(1000), - target_box=hoomd.variant.box.Interpolate( - hoomd.Box.from_box([10, 20, 30]), hoomd.Box.from_box([24, 7, 365]), - hoomd.variant.Ramp(0, 1, 0, 100)), - max_overlaps_per_particle=0.2, - min_scale=0.999), - dict(trigger=hoomd.trigger.Periodic(1000), - target_box=hoomd.variant.box.InverseVolumeRamp( - hoomd.Box.from_box([10, 20, 30]), 3000, 10, 100), - max_overlaps_per_particle=0.2, - min_scale=0.999), + dict( + trigger=hoomd.trigger.Before(100), + target_box=hoomd.variant.box.Constant(hoomd.Box.from_box([50, 50])), + min_scale=0.75, + ), + dict( + trigger=hoomd.trigger.Periodic(1000), + target_box=hoomd.variant.box.Constant( + hoomd.Box.from_box([80, 50, 40, 0.2, 0.4, 0.5]) + ), + max_overlaps_per_particle=0.2, + min_scale=0.999, + allow_unsafe_resize=True, + ), + dict( + trigger=hoomd.trigger.Periodic(1000), + target_box=hoomd.variant.box.Constant( + hoomd.Box.from_box([80, 50, 40, -0.2, 0.4, 0.5]) + ), + max_overlaps_per_particle=0.2, + min_scale=0.999, + ), + dict( + trigger=hoomd.trigger.Periodic(1000), + target_box=hoomd.variant.box.Interpolate( + hoomd.Box.from_box([10, 20, 30]), + hoomd.Box.from_box([24, 7, 365]), + hoomd.variant.Ramp(0, 1, 0, 100), + ), + max_overlaps_per_particle=0.2, + min_scale=0.999, + ), + dict( + trigger=hoomd.trigger.Periodic(1000), + target_box=hoomd.variant.box.InverseVolumeRamp( + hoomd.Box.from_box([10, 20, 30]), 3000, 10, 100 + ), + max_overlaps_per_particle=0.2, + min_scale=0.999, + ), ] valid_attrs = [ - ('trigger', hoomd.trigger.Periodic(10000)), - ('trigger', hoomd.trigger.After(100)), - ('trigger', hoomd.trigger.Before(12345)), - ('target_box', hoomd.variant.box.Constant(hoomd.Box.from_box([10, 20, - 30]))), - ('target_box', hoomd.variant.box.Constant(hoomd.Box.from_box([50, 50]))), - ('target_box', hoomd.variant.box.Constant(hoomd.Box.from_box([50, 50]))), - ('target_box', - hoomd.variant.box.Interpolate(hoomd.Box.from_box([10, 20, 30]), - hoomd.Box.from_box([24, 7, 365]), - hoomd.variant.Ramp(0, 1, 0, 100))), - ('target_box', - hoomd.variant.box.InverseVolumeRamp(hoomd.Box.from_box([10, 20, 30]), 3000, - 10, 100)), - ('target_box', hoomd.Box.cube(5)), - ('max_overlaps_per_particle', 0.2), - ('max_overlaps_per_particle', 0.5), - ('max_overlaps_per_particle', 2.5), - ('min_scale', 0.1), - ('min_scale', 0.5), - ('min_scale', 0.9999), - ('allow_unsafe_resize', True), + ("trigger", hoomd.trigger.Periodic(10000)), + ("trigger", hoomd.trigger.After(100)), + ("trigger", hoomd.trigger.Before(12345)), + ("target_box", hoomd.variant.box.Constant(hoomd.Box.from_box([10, 20, 30]))), + ("target_box", hoomd.variant.box.Constant(hoomd.Box.from_box([50, 50]))), + ("target_box", hoomd.variant.box.Constant(hoomd.Box.from_box([50, 50]))), + ( + "target_box", + hoomd.variant.box.Interpolate( + hoomd.Box.from_box([10, 20, 30]), + hoomd.Box.from_box([24, 7, 365]), + hoomd.variant.Ramp(0, 1, 0, 100), + ), + ), + ( + "target_box", + hoomd.variant.box.InverseVolumeRamp( + hoomd.Box.from_box([10, 20, 30]), 3000, 10, 100 + ), + ), + ("target_box", hoomd.Box.cube(5)), + ("max_overlaps_per_particle", 0.2), + 
("max_overlaps_per_particle", 0.5), + ("max_overlaps_per_particle", 2.5), + ("min_scale", 0.1), + ("min_scale", 0.5), + ("min_scale", 0.9999), + ("allow_unsafe_resize", True), ] @@ -92,9 +118,9 @@ def test_valid_construction(constructor_args): @pytest.mark.parametrize("constructor_args", valid_constructor_args) -def test_valid_construction_and_attach(simulation_factory, - two_particle_snapshot_factory, - constructor_args): +def test_valid_construction_and_attach( + simulation_factory, two_particle_snapshot_factory, constructor_args +): """Test that QuickCompress can be attached with valid arguments.""" qc = hoomd.hpmc.update.QuickCompress(**constructor_args) @@ -103,7 +129,7 @@ def test_valid_construction_and_attach(simulation_factory, # QuickCompress requires an HPMC integrator mc = hoomd.hpmc.integrate.Sphere() - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc sim.operations._schedule() @@ -119,9 +145,9 @@ def test_valid_construction_and_attach(simulation_factory, @pytest.mark.parametrize("attr,value", valid_attrs) def test_valid_setattr(attr, value): """Test that QuickCompress can get and set attributes.""" - qc = hoomd.hpmc.update.QuickCompress(trigger=hoomd.trigger.Periodic(10), - target_box=hoomd.Box.from_box( - [10, 10, 10])) + qc = hoomd.hpmc.update.QuickCompress( + trigger=hoomd.trigger.Periodic(10), target_box=hoomd.Box.from_box([10, 10, 10]) + ) setattr(qc, attr, value) if type(value) is hoomd.Box: @@ -130,19 +156,21 @@ def test_valid_setattr(attr, value): @pytest.mark.parametrize("attr,value", valid_attrs) -def test_valid_setattr_attached(attr, value, simulation_factory, - two_particle_snapshot_factory): +def test_valid_setattr_attached( + attr, value, simulation_factory, two_particle_snapshot_factory +): """Test that QuickCompress can get and set attributes while attached.""" - qc = hoomd.hpmc.update.QuickCompress(trigger=hoomd.trigger.Periodic(10), - target_box=hoomd.variant.box.Constant( - hoomd.Box.from_box([10, 10, 10]))) + qc = hoomd.hpmc.update.QuickCompress( + trigger=hoomd.trigger.Periodic(10), + target_box=hoomd.variant.box.Constant(hoomd.Box.from_box([10, 10, 10])), + ) sim = simulation_factory(two_particle_snapshot_factory()) sim.operations.updaters.append(qc) # QuickCompress requires an HPMC integrator mc = hoomd.hpmc.integrate.Sphere() - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc sim.operations._schedule() @@ -159,8 +187,9 @@ def test_valid_setattr_attached(attr, value, simulation_factory, @pytest.mark.parametrize("phi", [0.01, 0.4, 0.6]) @pytest.mark.cpu @pytest.mark.validate -def test_sphere_compression_triclinic(xy, xz, yz, phi, simulation_factory, - lattice_snapshot_factory, device): +def test_sphere_compression_triclinic( + xy, xz, yz, phi, simulation_factory, lattice_snapshot_factory, device +): """Test that QuickCompress can resize and reshape triclinic boxes.""" n = 7 if device.communicator.num_ranks > 1 else 3 if isinstance(device, hoomd.device.GPU): @@ -173,16 +202,17 @@ def test_sphere_compression_triclinic(xy, xz, yz, phi, simulation_factory, tilts = np.random.rand(3) * 2 - 1 target_box = hoomd.Box.from_box([0.95, 1.05, 1, *tilts]) - v_particle = 4 / 3 * math.pi * (0.5)**3 + v_particle = 4 / 3 * math.pi * (0.5) ** 3 target_box.volume = n**3 * v_particle / phi - qc = hoomd.hpmc.update.QuickCompress(trigger=hoomd.trigger.Periodic(25), - target_box=target_box) + qc = hoomd.hpmc.update.QuickCompress( + trigger=hoomd.trigger.Periodic(25), 
target_box=target_box + ) sim = simulation_factory(snap) mc = hoomd.hpmc.integrate.Sphere(default_d=0.05) - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc sim.operations.updaters.append(qc) sim.run(1) @@ -202,31 +232,33 @@ def test_sphere_compression_triclinic(xy, xz, yz, phi, simulation_factory, @pytest.mark.parametrize("phi", [0.2, 0.3, 0.4, 0.5, 0.55, 0.58, 0.6]) @pytest.mark.parametrize("allow_unsafe_resize", [False, True]) @pytest.mark.validate -def test_sphere_compression(phi, allow_unsafe_resize, simulation_factory, - lattice_snapshot_factory): +def test_sphere_compression( + phi, allow_unsafe_resize, simulation_factory, lattice_snapshot_factory +): """Test that QuickCompress can compress (and expand) simulation boxes.""" if allow_unsafe_resize and phi > math.pi / 6: pytest.skip("Skipped impossible compression.") n = 7 snap = lattice_snapshot_factory(n=n, a=1.1) - v_particle = 4 / 3 * math.pi * (0.5)**3 - target_box = hoomd.Box.cube((n * n * n * v_particle / phi)**(1 / 3)) + v_particle = 4 / 3 * math.pi * (0.5) ** 3 + target_box = hoomd.Box.cube((n * n * n * v_particle / phi) ** (1 / 3)) qc = hoomd.hpmc.update.QuickCompress( trigger=hoomd.trigger.Periodic(10), target_box=target_box, - allow_unsafe_resize=allow_unsafe_resize) + allow_unsafe_resize=allow_unsafe_resize, + ) sim = simulation_factory(snap) sim.operations.updaters.append(qc) mc = hoomd.hpmc.integrate.Sphere(default_d=0.05) - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc if allow_unsafe_resize: - mc.d['A'] = 0 + mc.d["A"] = 0 sim.run(1) @@ -249,17 +281,18 @@ def test_disk_compression(phi, simulation_factory, lattice_snapshot_factory): """Test that QuickCompress can compress (and expand) simulation boxes.""" n = 7 snap = lattice_snapshot_factory(dimensions=2, n=n, a=1.1) - v_particle = math.pi * (0.5)**2 - target_box = hoomd.Box.square((n * n * v_particle / phi)**(1 / 2)) + v_particle = math.pi * (0.5) ** 2 + target_box = hoomd.Box.square((n * n * v_particle / phi) ** (1 / 2)) - qc = hoomd.hpmc.update.QuickCompress(trigger=hoomd.trigger.Periodic(10), - target_box=target_box) + qc = hoomd.hpmc.update.QuickCompress( + trigger=hoomd.trigger.Periodic(10), target_box=target_box + ) sim = simulation_factory(snap) sim.operations.updaters.append(qc) mc = hoomd.hpmc.integrate.Sphere(default_d=0.05) - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc sim.run(1) @@ -278,8 +311,9 @@ def test_disk_compression(phi, simulation_factory, lattice_snapshot_factory): @pytest.mark.parametrize("ndim", [2, 3]) @pytest.mark.validate -def test_inverse_volume_slow_compress(ndim, simulation_factory, - lattice_snapshot_factory): +def test_inverse_volume_slow_compress( + ndim, simulation_factory, lattice_snapshot_factory +): """Test that InverseVolumeRamp compresses at an appropriate rate. 
Ensure that the density is no greater than the set point determined by the @@ -300,22 +334,25 @@ def test_inverse_volume_slow_compress(ndim, simulation_factory, final_volume = n**ndim * v_p / final_packing_fraction t_ramp = 10000 target_box_variant = hoomd.variant.box.InverseVolumeRamp( - sim_with_target_box.state.box, final_volume, 0, t_ramp) + sim_with_target_box.state.box, final_volume, 0, t_ramp + ) qc_trigger = 10 target_box = hoomd.Box(*target_box_variant(t_ramp)) qc_with_variant = hoomd.hpmc.update.QuickCompress( - trigger=qc_trigger, target_box=target_box_variant) - qc_with_target_box = hoomd.hpmc.update.QuickCompress(trigger=qc_trigger, - target_box=target_box) + trigger=qc_trigger, target_box=target_box_variant + ) + qc_with_target_box = hoomd.hpmc.update.QuickCompress( + trigger=qc_trigger, target_box=target_box + ) sim_with_variant.operations.updaters.append(qc_with_variant) sim_with_target_box.operations.updaters.append(qc_with_target_box) mc = hoomd.hpmc.integrate.Sphere(default_d=0.01) - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim_with_variant.operations.integrator = mc mc2 = hoomd.hpmc.integrate.Sphere(default_d=0.01) - mc2.shape['A'] = dict(diameter=1) + mc2.shape["A"] = dict(diameter=1) sim_with_target_box.operations.integrator = mc2 sims = (sim_with_variant, sim_with_target_box) @@ -331,7 +368,8 @@ def stop_loop(sims, target_box): # dictates if _sim is sim_with_variant: current_target_volume = hoomd.Box( - *target_box_variant(_sim.timestep)).volume + *target_box_variant(_sim.timestep) + ).volume current_volume = _sim.state.box.volume assert current_volume >= current_target_volume # ensure that the system with the variant took longer to compress than the @@ -341,11 +379,12 @@ def stop_loop(sims, target_box): def test_pickling(simulation_factory, two_particle_snapshot_factory): """Test that QuickCompress objects are picklable.""" - qc = hoomd.hpmc.update.QuickCompress(trigger=hoomd.trigger.Periodic(10), - target_box=hoomd.Box.square(10.)) + qc = hoomd.hpmc.update.QuickCompress( + trigger=hoomd.trigger.Periodic(10), target_box=hoomd.Box.square(10.0) + ) sim = simulation_factory(two_particle_snapshot_factory()) mc = hoomd.hpmc.integrate.Sphere(default_d=0.05) - mc.shape['A'] = dict(diameter=1) + mc.shape["A"] = dict(diameter=1) sim.operations.integrator = mc operation_pickling_check(qc, sim) diff --git a/hoomd/hpmc/pytest/test_scale.py b/hoomd/hpmc/pytest/test_scale.py index eb9361ff70..fa245cfa6a 100644 --- a/hoomd/hpmc/pytest/test_scale.py +++ b/hoomd/hpmc/pytest/test_scale.py @@ -9,12 +9,13 @@ @pytest.mark.parametrize("scale", [1e-9, 1, 1000, 1e9]) -@pytest.mark.parametrize("shape", ['ConvexPolygon', 'SimplePolygon']) +@pytest.mark.parametrize("shape", ["ConvexPolygon", "SimplePolygon"]) @pytest.mark.parametrize("offset", [-100, -10, -1, 1, 10, 100]) @pytest.mark.serial @pytest.mark.cpu -def test_polygon(scale, shape, offset, simulation_factory, - two_particle_snapshot_factory): +def test_polygon( + scale, shape, offset, simulation_factory, two_particle_snapshot_factory +): """Test polygons at a variety of scales.""" # make a many sided polygon to ensure that the overlap check is non-trivial a = 0.5 * scale @@ -31,7 +32,7 @@ def test_polygon(scale, shape, offset, simulation_factory, sim = simulation_factory(initial_snap) mc = getattr(hoomd.hpmc.integrate, shape)(default_d=0) - mc.shape['A'] = dict(vertices=vertices) + mc.shape["A"] = dict(vertices=vertices) sim.operations.integrator = mc sim.run(0) @@ -45,8 +46,9 @@ def test_polygon(scale, 
shape, offset, simulation_factory, @pytest.mark.parametrize("offset", [-100, -10, -1, 1, 10, 100]) @pytest.mark.serial @pytest.mark.cpu -def test_convex_polyhedron(scale, offset, simulation_factory, - two_particle_snapshot_factory): +def test_convex_polyhedron( + scale, offset, simulation_factory, two_particle_snapshot_factory +): """Test convex polyhedrons at a variety of scales.""" # make a many sized prism to ensure that the overlap check is non-trivial a = 0.5 * scale @@ -64,7 +66,7 @@ def test_convex_polyhedron(scale, offset, simulation_factory, sim = simulation_factory(initial_snap) mc = hoomd.hpmc.integrate.ConvexPolyhedron(default_d=0) - mc.shape['A'] = dict(vertices=vertices) + mc.shape["A"] = dict(vertices=vertices) sim.operations.integrator = mc sim.run(0) diff --git a/hoomd/hpmc/pytest/test_shape.py b/hoomd/hpmc/pytest/test_shape.py index 6e37e17463..e79a432b8b 100644 --- a/hoomd/hpmc/pytest/test_shape.py +++ b/hoomd/hpmc/pytest/test_shape.py @@ -4,8 +4,11 @@ from collections.abc import Sequence import hoomd -from hoomd.conftest import (operation_pickling_check, logging_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + operation_pickling_check, + logging_check, + autotuned_kernel_parameter_check, +) from hoomd.error import DataAccessError import hoomd.hpmc import numpy as np @@ -28,17 +31,21 @@ def check_dict(shape_dict, args): Used to test that the dictionary passed in is what gets passed out """ for key, val in args.items(): - if isinstance(shape_dict[key], list) and len(shape_dict[key]) > 0 \ - and key != 'shapes': + if ( + isinstance(shape_dict[key], list) + and len(shape_dict[key]) > 0 + and key != "shapes" + ): np.testing.assert_allclose(shape_dict[key], val) - elif key == 'shapes': + elif key == "shapes": for i in range(len(shape_dict[key])): shape_args = shape_dict[key][i] val_args = val[i] for shape_key in shape_args: if isinstance(shape_args[shape_key], Sequence): - np.testing.assert_allclose(val_args[shape_key], - shape_args[shape_key]) + np.testing.assert_allclose( + val_args[shape_key], shape_args[shape_key] + ) else: assert shape_args[shape_key] == val_args[shape_key] else: @@ -82,27 +89,27 @@ def test_invalid_shape_params(invalid_args): @pytest.mark.parametrize( "cpp_shape", - [hoomd.hpmc._hpmc.EllipsoidParams, hoomd.hpmc._hpmc.FacetedEllipsoidParams]) + [hoomd.hpmc._hpmc.EllipsoidParams, hoomd.hpmc._hpmc.FacetedEllipsoidParams], +) @pytest.mark.parametrize("c", [0.0, -0.5]) def test_semimajor_axis_validity(cpp_shape, c): args = { - 'a': 0.125, - 'b': 0.375, - 'c': c, + "a": 0.125, + "b": 0.375, + "c": c, # These properties are only read for the FacetedEllipsoid - 'normals': [], - 'offsets': [], - 'vertices': [], - 'origin': [] + "normals": [], + "offsets": [], + "vertices": [], + "origin": [], } with pytest.raises(ValueError) as err: cpp_shape({"ignore_statistics": False} | args) - assert ("All semimajor axes must be nonzero!" in str(err)) + assert "All semimajor axes must be nonzero!" 
in str(err) -def test_shape_attached(simulation_factory, two_particle_snapshot_factory, - valid_args): +def test_shape_attached(simulation_factory, two_particle_snapshot_factory, valid_args): integrator = valid_args[0] args = valid_args[1] n_dimensions = valid_args[2] @@ -116,8 +123,7 @@ def test_shape_attached(simulation_factory, two_particle_snapshot_factory, args["shapes"][i] = inner_mc.shape["A"].to_base() mc = integrator() mc.shape["A"] = args - sim = simulation_factory( - two_particle_snapshot_factory(dimensions=n_dimensions)) + sim = simulation_factory(two_particle_snapshot_factory(dimensions=n_dimensions)) assert sim.operations.integrator is None sim.operations.add(mc) sim.operations._schedule() @@ -129,7 +135,7 @@ def test_moves(simulation_factory, lattice_snapshot_factory, test_moves_args): args = test_moves_args[1] n_dimensions = test_moves_args[2] mc = integrator() - mc.shape['A'] = args + mc.shape["A"] = args sim = simulation_factory(lattice_snapshot_factory(dimensions=n_dimensions)) sim.operations.add(mc) @@ -146,13 +152,14 @@ def test_moves(simulation_factory, lattice_snapshot_factory, test_moves_args): sim.run(10) accepted_rejected_trans = sum(sim.operations.integrator.translate_moves) assert accepted_rejected_trans > 0 - if 'sphere' not in str(integrator).lower(): + if "sphere" not in str(integrator).lower(): accepted_rejected_rot = sum(sim.operations.integrator.rotate_moves) assert accepted_rejected_rot > 0 -def test_kernel_parameters(simulation_factory, lattice_snapshot_factory, - test_moves_args): +def test_kernel_parameters( + simulation_factory, lattice_snapshot_factory, test_moves_args +): integrator = test_moves_args[0] if integrator == hoomd.hpmc.integrate.Sphinx: @@ -161,7 +168,7 @@ def test_kernel_parameters(simulation_factory, lattice_snapshot_factory, args = test_moves_args[1] n_dimensions = test_moves_args[2] mc = integrator() - mc.shape['A'] = args + mc.shape["A"] = args sim = simulation_factory(lattice_snapshot_factory(dimensions=n_dimensions)) sim.operations.add(mc) @@ -173,22 +180,18 @@ def test_kernel_parameters(simulation_factory, lattice_snapshot_factory, # An ellipsoid with a = b = c should be a sphere # A spheropolyhedron with a single vertex should be a sphere # A sphinx where the indenting sphere is negligible should also be a sphere -_sphere_shapes = [({ - 'diameter': 1 -}, hoomd.hpmc.integrate.Sphere), - ({ - 'a': 0.5, - 'b': 0.5, - 'c': 0.5 - }, hoomd.hpmc.integrate.Ellipsoid), - ({ - 'vertices': [(0, 0, 0)], - 'sweep_radius': 0.5 - }, hoomd.hpmc.integrate.ConvexSpheropolyhedron), - ({ - 'diameters': [1, -0.0001], - 'centers': [(0, 0, 0), (0, 0, 0.5)] - }, hoomd.hpmc.integrate.Sphinx)] +_sphere_shapes = [ + ({"diameter": 1}, hoomd.hpmc.integrate.Sphere), + ({"a": 0.5, "b": 0.5, "c": 0.5}, hoomd.hpmc.integrate.Ellipsoid), + ( + {"vertices": [(0, 0, 0)], "sweep_radius": 0.5}, + hoomd.hpmc.integrate.ConvexSpheropolyhedron, + ), + ( + {"diameters": [1, -0.0001], "centers": [(0, 0, 0), (0, 0, 0.5)]}, + hoomd.hpmc.integrate.Sphinx, + ), +] @pytest.fixture(scope="function", params=_sphere_shapes) @@ -196,8 +199,9 @@ def sphere_overlap_args(request): return deepcopy(request.param) -def test_overlaps_sphere(device, sphere_overlap_args, simulation_factory, - two_particle_snapshot_factory): +def test_overlaps_sphere( + device, sphere_overlap_args, simulation_factory, two_particle_snapshot_factory +): integrator_args = sphere_overlap_args[0] integrator = sphere_overlap_args[1] @@ -207,7 +211,8 @@ def test_overlaps_sphere(device, sphere_overlap_args, 
simulation_factory, # Should overlap when spheres are less than one diameter apart sim = simulation_factory( - two_particle_snapshot_factory(dimensions=3, d=diameter * 0.9)) + two_particle_snapshot_factory(dimensions=3, d=diameter * 0.9) + ) sim.operations.add(mc) sim.operations._schedule() assert mc.overlaps > 0 @@ -229,13 +234,12 @@ def test_overlaps_sphere(device, sphere_overlap_args, simulation_factory, assert mc.overlaps == 1 -def test_overlaps_ellipsoid(device, simulation_factory, - two_particle_snapshot_factory): +def test_overlaps_ellipsoid(device, simulation_factory, two_particle_snapshot_factory): a = 1 / 4 b = 1 / 2 c = 1 mc = hoomd.hpmc.integrate.Ellipsoid() - mc.shape["A"] = {'a': a, 'b': b, 'c': c} + mc.shape["A"] = {"a": a, "b": b, "c": c} sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=2)) sim.operations.add(mc) @@ -249,8 +253,11 @@ def test_overlaps_ellipsoid(device, simulation_factory, s = sim.state.get_snapshot() if s.communicator.rank == 0: s.particles.position[0] = (0, 0, 0) - s.particles.position[1] = (abc[0] * 0.9 * 2, abc[1] * 0.9 * 2, - abc[2] * 0.9 * 2) + s.particles.position[1] = ( + abc[0] * 0.9 * 2, + abc[1] * 0.9 * 2, + abc[2] * 0.9 * 2, + ) sim.state.set_snapshot(s) assert mc.overlaps == 1 @@ -258,8 +265,11 @@ def test_overlaps_ellipsoid(device, simulation_factory, s = sim.state.get_snapshot() if s.communicator.rank == 0: s.particles.position[0] = (0, 0, 0) - s.particles.position[1] = (abc[0] * 1.15 * 2, abc[1] * 1.15 * 2, - abc[2] * 1.15 * 2) + s.particles.position[1] = ( + abc[0] * 1.15 * 2, + abc[1] * 1.15 * 2, + abc[2] * 1.15 * 2, + ) sim.state.set_snapshot(s) assert mc.overlaps == 0 @@ -269,24 +279,28 @@ def test_overlaps_ellipsoid(device, simulation_factory, if s.communicator.rank == 0: s.particles.position[0] = (0, 0, 0) s.particles.position[1] = (a * 1.1 * 2, 0, 0) - s.particles.orientation[1] = tuple( - np.array([1, 0, 0.45, 0]) / (1.2025**0.5)) + s.particles.orientation[1] = tuple(np.array([1, 0, 0.45, 0]) / (1.2025**0.5)) sim.state.set_snapshot(s) assert mc.overlaps > 0 _triangle = { - 'vertices': [(0, (0.75**0.5) / 2), (-0.5, -(0.75**0.5) / 2), - (0.5, -(0.75**0.5) / 2)] + "vertices": [ + (0, (0.75**0.5) / 2), + (-0.5, -(0.75**0.5) / 2), + (0.5, -(0.75**0.5) / 2), + ] } _square = {"vertices": np.array([(-1, -1), (1, -1), (1, 1), (-1, 1)]) / 2} # Args should work for ConvexPolygon, SimplePolygon, and ConvexSpheropolygon -_polygon_shapes = [(_triangle, hoomd.hpmc.integrate.ConvexPolygon), - (_triangle, hoomd.hpmc.integrate.SimplePolygon), - (_triangle, hoomd.hpmc.integrate.ConvexSpheropolygon), - (_square, hoomd.hpmc.integrate.ConvexPolygon), - (_square, hoomd.hpmc.integrate.SimplePolygon), - (_square, hoomd.hpmc.integrate.ConvexSpheropolygon)] +_polygon_shapes = [ + (_triangle, hoomd.hpmc.integrate.ConvexPolygon), + (_triangle, hoomd.hpmc.integrate.SimplePolygon), + (_triangle, hoomd.hpmc.integrate.ConvexSpheropolygon), + (_square, hoomd.hpmc.integrate.ConvexPolygon), + (_square, hoomd.hpmc.integrate.SimplePolygon), + (_square, hoomd.hpmc.integrate.ConvexSpheropolygon), +] @pytest.fixture(scope="function", params=_polygon_shapes) @@ -294,12 +308,13 @@ def polygon_overlap_args(request): return deepcopy(request.param) -def test_overlaps_polygons(device, polygon_overlap_args, simulation_factory, - two_particle_snapshot_factory): +def test_overlaps_polygons( + device, polygon_overlap_args, simulation_factory, two_particle_snapshot_factory +): integrator_args = polygon_overlap_args[0] integrator = polygon_overlap_args[1] mc = 
integrator() - mc.shape['A'] = integrator_args + mc.shape["A"] = integrator_args sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=2)) sim.operations.add(mc) @@ -325,45 +340,53 @@ def test_overlaps_polygons(device, polygon_overlap_args, simulation_factory, # Rotate one of the shapes so they will overlap s = sim.state.get_snapshot() if s.communicator.rank == 0: - s.particles.orientation[1] = tuple( - np.array([1, 0, 0, 0.45]) / (1.2025**0.5)) + s.particles.orientation[1] = tuple(np.array([1, 0, 0, 0.45]) / (1.2025**0.5)) sim.state.set_snapshot(s) assert mc.overlaps > 0 -_tetrahedron_verts = np.array([(1, 1, 1), (-1, -1, 1), (1, -1, -1), - (-1, 1, -1)]) / 2 +_tetrahedron_verts = np.array([(1, 1, 1), (-1, -1, 1), (1, -1, -1), (-1, 1, -1)]) / 2 _tetrahedron_faces = [[1, 3, 2], [3, 0, 2], [1, 0, 3], [1, 2, 0]] -_cube_verts = [(-0.5, -0.5, -0.5), (-0.5, -0.5, 0.5), (-0.5, 0.5, -0.5), - (-0.5, 0.5, 0.5), (0.5, -0.5, -0.5), (0.5, -0.5, 0.5), - (0.5, 0.5, -0.5), (0.5, 0.5, 0.5)] +_cube_verts = [ + (-0.5, -0.5, -0.5), + (-0.5, -0.5, 0.5), + (-0.5, 0.5, -0.5), + (-0.5, 0.5, 0.5), + (0.5, -0.5, -0.5), + (0.5, -0.5, 0.5), + (0.5, 0.5, -0.5), + (0.5, 0.5, 0.5), +] -_cube_faces = [[0, 2, 6], [6, 4, 0], [5, 0, 4], [5, 1, 0], [5, 4, 6], [5, 6, 7], - [3, 2, 0], [3, 0, 1], [3, 6, 2], [3, 7, 6], [3, 1, 5], [3, 5, 7]] +_cube_faces = [ + [0, 2, 6], + [6, 4, 0], + [5, 0, 4], + [5, 1, 0], + [5, 4, 6], + [5, 6, 7], + [3, 2, 0], + [3, 0, 1], + [3, 6, 2], + [3, 7, 6], + [3, 1, 5], + [3, 5, 7], +] # Test args with ConvexPolyhedron, ConvexSpheropolyhedron, and Polyhedron -_polyhedron_shapes = [({ - "vertices": _tetrahedron_verts -}, hoomd.hpmc.integrate.ConvexPolyhedron), - ({ - "vertices": _tetrahedron_verts - }, hoomd.hpmc.integrate.ConvexSpheropolyhedron), - ({ - "vertices": _tetrahedron_verts, - "faces": _tetrahedron_faces - }, hoomd.hpmc.integrate.Polyhedron), - ({ - "vertices": _cube_verts - }, hoomd.hpmc.integrate.ConvexPolyhedron), - ({ - "vertices": _cube_verts - }, hoomd.hpmc.integrate.ConvexSpheropolyhedron), - ({ - "vertices": _cube_verts, - "faces": _cube_faces - }, hoomd.hpmc.integrate.Polyhedron)] +_polyhedron_shapes = [ + ({"vertices": _tetrahedron_verts}, hoomd.hpmc.integrate.ConvexPolyhedron), + ({"vertices": _tetrahedron_verts}, hoomd.hpmc.integrate.ConvexSpheropolyhedron), + ( + {"vertices": _tetrahedron_verts, "faces": _tetrahedron_faces}, + hoomd.hpmc.integrate.Polyhedron, + ), + ({"vertices": _cube_verts}, hoomd.hpmc.integrate.ConvexPolyhedron), + ({"vertices": _cube_verts}, hoomd.hpmc.integrate.ConvexSpheropolyhedron), + ({"vertices": _cube_verts, "faces": _cube_faces}, hoomd.hpmc.integrate.Polyhedron), +] @pytest.fixture(scope="function", params=_polyhedron_shapes) @@ -371,13 +394,14 @@ def polyhedron_overlap_args(request): return deepcopy(request.param) -def test_overlaps_polyhedra(device, polyhedron_overlap_args, simulation_factory, - two_particle_snapshot_factory): +def test_overlaps_polyhedra( + device, polyhedron_overlap_args, simulation_factory, two_particle_snapshot_factory +): integrator_args = polyhedron_overlap_args[0] integrator = polyhedron_overlap_args[1] mc = integrator() - mc.shape['A'] = integrator_args + mc.shape["A"] = integrator_args sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=2)) sim.operations.add(mc) sim.operations._schedule() @@ -414,13 +438,10 @@ def test_overlaps_polyhedra(device, polyhedron_overlap_args, simulation_factory, assert mc.overlaps > 0 -_spheropolygon_shapes = [{ - 'vertices': _triangle['vertices'], - 
'sweep_radius': 0.2 -}, { - 'vertices': _square['vertices'], - 'sweep_radius': 0.1 -}] +_spheropolygon_shapes = [ + {"vertices": _triangle["vertices"], "sweep_radius": 0.2}, + {"vertices": _square["vertices"], "sweep_radius": 0.1}, +] @pytest.fixture(scope="function", params=_spheropolygon_shapes) @@ -428,11 +449,14 @@ def spheropolygon_overlap_args(request): return deepcopy(request.param) -def test_overlaps_spheropolygon(device, spheropolygon_overlap_args, - simulation_factory, - two_particle_snapshot_factory): +def test_overlaps_spheropolygon( + device, + spheropolygon_overlap_args, + simulation_factory, + two_particle_snapshot_factory, +): mc = hoomd.hpmc.integrate.ConvexSpheropolygon() - mc.shape['A'] = spheropolygon_overlap_args + mc.shape["A"] = spheropolygon_overlap_args sim = simulation_factory(two_particle_snapshot_factory(dimensions=2, d=2)) sim.operations.add(mc) @@ -466,19 +490,15 @@ def test_overlaps_spheropolygon(device, spheropolygon_overlap_args, # Rotate one of the shapes so they will overlap s = sim.state.get_snapshot() if s.communicator.rank == 0: - s.particles.orientation[1] = tuple( - np.array([1, 0, 0, 0.45]) / (1.2025**0.5)) + s.particles.orientation[1] = tuple(np.array([1, 0, 0, 0.45]) / (1.2025**0.5)) sim.state.set_snapshot(s) assert mc.overlaps > 0 -_spheropolyhedron_shapes = [{ - 'vertices': _tetrahedron_verts, - 'sweep_radius': 0.2 -}, { - 'vertices': _cube_verts, - 'sweep_radius': 0.2 -}] +_spheropolyhedron_shapes = [ + {"vertices": _tetrahedron_verts, "sweep_radius": 0.2}, + {"vertices": _cube_verts, "sweep_radius": 0.2}, +] @pytest.fixture(scope="function", params=_spheropolyhedron_shapes) @@ -486,11 +506,14 @@ def spheropolyhedron_overlap_args(request): return deepcopy(request.param) -def test_overlaps_spheropolyhedron(device, spheropolyhedron_overlap_args, - simulation_factory, - two_particle_snapshot_factory): +def test_overlaps_spheropolyhedron( + device, + spheropolyhedron_overlap_args, + simulation_factory, + two_particle_snapshot_factory, +): mc = hoomd.hpmc.integrate.ConvexSpheropolyhedron() - mc.shape['A'] = spheropolyhedron_overlap_args + mc.shape["A"] = spheropolyhedron_overlap_args sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=2)) sim.operations.add(mc) @@ -529,21 +552,23 @@ def test_overlaps_spheropolyhedron(device, spheropolyhedron_overlap_args, _union_shapes = [ - ({ - 'diameter': 1 - }, hoomd.hpmc.integrate.SphereUnion), - ({ - "vertices": _tetrahedron_verts - }, hoomd.hpmc.integrate.ConvexSpheropolyhedronUnion), - ({ - "normals": [(0, 0, 1)], - "a": 0.5, - "b": 0.5, - "c": 1, - "vertices": [], - "origin": (0, 0, 0), - "offsets": [0] - }, hoomd.hpmc.integrate.FacetedEllipsoidUnion), + ({"diameter": 1}, hoomd.hpmc.integrate.SphereUnion), + ( + {"vertices": _tetrahedron_verts}, + hoomd.hpmc.integrate.ConvexSpheropolyhedronUnion, + ), + ( + { + "normals": [(0, 0, 1)], + "a": 0.5, + "b": 0.5, + "c": 1, + "vertices": [], + "origin": (0, 0, 0), + "offsets": [0], + }, + hoomd.hpmc.integrate.FacetedEllipsoidUnion, + ), ] @@ -552,17 +577,18 @@ def union_overlap_args(request): return deepcopy(request.param) -def test_overlaps_union(device, union_overlap_args, simulation_factory, - two_particle_snapshot_factory): +def test_overlaps_union( + device, union_overlap_args, simulation_factory, two_particle_snapshot_factory +): inner_args = union_overlap_args[0] integrator = union_overlap_args[1] union_args = { - 'shapes': [inner_args, inner_args], - 'positions': [(0, 0, 0), (0, 0, 1)] + "shapes": [inner_args, inner_args], + 
"positions": [(0, 0, 0), (0, 0, 1)], } mc = integrator() - mc.shape['A'] = union_args + mc.shape["A"] = union_args sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=2)) sim.operations.add(mc) @@ -571,8 +597,7 @@ def test_overlaps_union(device, union_overlap_args, simulation_factory, assert mc.overlaps == 0 test_positions = [(1.1, 0, 0), (0, 1.1, 0)] test_orientations = np.array([[1, 0, -0.06, 0], [1, 0.06, 0, 0]]) - test_orientations = test_orientations.T / np.linalg.norm(test_orientations, - axis=1) + test_orientations = test_orientations.T / np.linalg.norm(test_orientations, axis=1) test_orientations = test_orientations.T # Shapes are stacked in z direction for i in range(len(test_positions)): @@ -600,20 +625,21 @@ def test_overlaps_union(device, union_overlap_args, simulation_factory, assert mc.overlaps > 0 -def test_overlaps_faceted_ellipsoid(device, simulation_factory, - two_particle_snapshot_factory): +def test_overlaps_faceted_ellipsoid( + device, simulation_factory, two_particle_snapshot_factory +): a = 1 / 2 b = 1 / 2 c = 1 mc = hoomd.hpmc.integrate.FacetedEllipsoid() - mc.shape['A'] = { + mc.shape["A"] = { "normals": [(0, 0, 1)], "a": 0.5, "b": 0.5, "c": 1, "vertices": [], "origin": (0, 0, 0), - "offsets": [0] + "offsets": [0], } sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=2)) @@ -628,8 +654,11 @@ def test_overlaps_faceted_ellipsoid(device, simulation_factory, s = sim.state.get_snapshot() if s.communicator.rank == 0: s.particles.position[0] = (0, 0, 0) - s.particles.position[1] = (abc[0] * 0.9 * 2, abc[1] * 0.9 * 2, - abc[2] * 0.9 * 2) + s.particles.position[1] = ( + abc[0] * 0.9 * 2, + abc[1] * 0.9 * 2, + abc[2] * 0.9 * 2, + ) sim.state.set_snapshot(s) assert mc.overlaps == 1 @@ -637,8 +666,11 @@ def test_overlaps_faceted_ellipsoid(device, simulation_factory, s = sim.state.get_snapshot() if s.communicator.rank == 0: s.particles.position[0] = (0, 0, 0) - s.particles.position[1] = (abc[0] * 1.15 * 2, abc[1] * 1.15 * 2, - abc[2] * 1.15 * 2) + s.particles.position[1] = ( + abc[0] * 1.15 * 2, + abc[1] * 1.15 * 2, + abc[2] * 1.15 * 2, + ) sim.state.set_snapshot(s) assert mc.overlaps == 0 @@ -648,17 +680,14 @@ def test_overlaps_faceted_ellipsoid(device, simulation_factory, if s.communicator.rank == 0: s.particles.position[0] = (0, 0, 0) s.particles.position[1] = (a * 1.1 * 2, 0, 0) - s.particles.orientation[1] = tuple( - np.array([1, 0, 0.45, 0]) / (1.2025**0.5)) + s.particles.orientation[1] = tuple(np.array([1, 0, 0.45, 0]) / (1.2025**0.5)) sim.state.set_snapshot(s) assert mc.overlaps > 0 -def test_overlaps_sphinx(device, simulation_factory, - two_particle_snapshot_factory): - +def test_overlaps_sphinx(device, simulation_factory, two_particle_snapshot_factory): mc = hoomd.hpmc.integrate.Sphinx() - mc.shape["A"] = {'diameters': [1, -1], 'centers': [(0, 0, 0), (0.75, 0, 0)]} + mc.shape["A"] = {"diameters": [1, -1], "centers": [(0, 0, 0), (0.75, 0, 0)]} sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=2)) sim.operations.add(mc) @@ -680,8 +709,7 @@ def test_overlaps_sphinx(device, simulation_factory, assert mc.overlaps == 0 -def test_pickling(valid_args, simulation_factory, - two_particle_snapshot_factory): +def test_pickling(valid_args, simulation_factory, two_particle_snapshot_factory): integrator = valid_args[0] args = valid_args[1] n_dimensions = valid_args[2] @@ -700,50 +728,38 @@ def test_pickling(valid_args, simulation_factory, # decomposition of some of the shapes definitions in valid_args which have # shapes 
with large extent in at least one dimension. sim = simulation_factory( - two_particle_snapshot_factory(L=1000, dimensions=n_dimensions)) + two_particle_snapshot_factory(L=1000, dimensions=n_dimensions) + ) operation_pickling_check(mc, sim) def test_logging(): logging_check( - hoomd.hpmc.integrate.HPMCIntegrator, ('hpmc', 'integrate'), { - 'map_overlaps': { - 'category': LoggerCategories.sequence, - 'default': True - }, - 'mps': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'overlaps': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'rotate_moves': { - 'category': LoggerCategories.sequence, - 'default': True - }, - 'translate_moves': { - 'category': LoggerCategories.sequence, - 'default': True - } - }) - - integrators = (hoomd.hpmc.integrate.Sphere, - hoomd.hpmc.integrate.ConvexPolygon, - hoomd.hpmc.integrate.ConvexSpheropolygon, - hoomd.hpmc.integrate.Polyhedron, - hoomd.hpmc.integrate.ConvexPolyhedron, - hoomd.hpmc.integrate.ConvexSpheropolyhedron, - hoomd.hpmc.integrate.Ellipsoid, - hoomd.hpmc.integrate.SphereUnion) + hoomd.hpmc.integrate.HPMCIntegrator, + ("hpmc", "integrate"), + { + "map_overlaps": {"category": LoggerCategories.sequence, "default": True}, + "mps": {"category": LoggerCategories.scalar, "default": True}, + "overlaps": {"category": LoggerCategories.scalar, "default": True}, + "rotate_moves": {"category": LoggerCategories.sequence, "default": True}, + "translate_moves": {"category": LoggerCategories.sequence, "default": True}, + }, + ) + + integrators = ( + hoomd.hpmc.integrate.Sphere, + hoomd.hpmc.integrate.ConvexPolygon, + hoomd.hpmc.integrate.ConvexSpheropolygon, + hoomd.hpmc.integrate.Polyhedron, + hoomd.hpmc.integrate.ConvexPolyhedron, + hoomd.hpmc.integrate.ConvexSpheropolyhedron, + hoomd.hpmc.integrate.Ellipsoid, + hoomd.hpmc.integrate.SphereUnion, + ) type_shapes_check = { - 'type_shapes': { - 'category': LoggerCategories.object, - 'default': True - } + "type_shapes": {"category": LoggerCategories.object, "default": True} } for integrator in integrators: - logging_check(integrator, ('hpmc', 'integrate'), type_shapes_check) + logging_check(integrator, ("hpmc", "integrate"), type_shapes_check) diff --git a/hoomd/hpmc/pytest/test_shape_updater.py b/hoomd/hpmc/pytest/test_shape_updater.py index 9488827a36..b2684443be 100644 --- a/hoomd/hpmc/pytest/test_shape_updater.py +++ b/hoomd/hpmc/pytest/test_shape_updater.py @@ -16,80 +16,109 @@ def _test_callback(typeid, param_list): # vertices of a regular cube: used for testing vertex and elastic shape moves -verts = np.asarray([[-0.5, -0.5, -0.5], [-0.5, -0.5, 0.5], [-0.5, 0.5, -0.5], - [0.5, -0.5, -0.5], [-0.5, 0.5, 0.5], [0.5, -0.5, 0.5], - [0.5, 0.5, -0.5], [0.5, 0.5, 0.5]]) +verts = np.asarray( + [ + [-0.5, -0.5, -0.5], + [-0.5, -0.5, 0.5], + [-0.5, 0.5, -0.5], + [0.5, -0.5, -0.5], + [-0.5, 0.5, 0.5], + [0.5, -0.5, 0.5], + [0.5, 0.5, -0.5], + [0.5, 0.5, 0.5], + ] +) shape_move_constructor_args = [ (Vertex, dict(vertex_move_probability=0.7)), (ShapeSpace, dict(callback=_test_callback, param_move_probability=1)), - (Elastic, - dict(stiffness=hoomd.variant.Constant(10), - mc=hpmc.integrate.ConvexPolyhedron, - normal_shear_ratio=0.5)), + ( + Elastic, + dict( + stiffness=hoomd.variant.Constant(10), + mc=hpmc.integrate.ConvexPolyhedron, + normal_shear_ratio=0.5, + ), + ), ] shape_move_valid_attrs = [ (Vertex(), "vertex_move_probability", 0.1), (ShapeSpace(callback=_test_callback), "param_move_probability", 0.1), - (ShapeSpace(callback=_test_callback), "callback", - lambda type, param_list: 
{}), + (ShapeSpace(callback=_test_callback), "callback", lambda type, param_list: {}), (Elastic(1, hpmc.integrate.ConvexPolyhedron), "normal_shear_ratio", 0.5), - (Elastic(1, hpmc.integrate.ConvexPolyhedron), "stiffness", - hoomd.variant.Constant(10)), - (Elastic(1, hpmc.integrate.ConvexPolyhedron), "stiffness", - hoomd.variant.Ramp(1, 5, 0, 100)), - (Elastic(1, hpmc.integrate.ConvexPolyhedron), "stiffness", - hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15)), - (Elastic(1, hpmc.integrate.ConvexPolyhedron), "stiffness", - hoomd.variant.Power(1, 5, 3, 0, 100)) + ( + Elastic(1, hpmc.integrate.ConvexPolyhedron), + "stiffness", + hoomd.variant.Constant(10), + ), + ( + Elastic(1, hpmc.integrate.ConvexPolyhedron), + "stiffness", + hoomd.variant.Ramp(1, 5, 0, 100), + ), + ( + Elastic(1, hpmc.integrate.ConvexPolyhedron), + "stiffness", + hoomd.variant.Cycle(1, 5, 0, 10, 20, 10, 15), + ), + ( + Elastic(1, hpmc.integrate.ConvexPolyhedron), + "stiffness", + hoomd.variant.Power(1, 5, 3, 0, 100), + ), ] -shape_updater_valid_attrs = [("trigger", hoomd.trigger.Periodic(10)), - ("trigger", hoomd.trigger.After(100)), - ("trigger", hoomd.trigger.Before(100)), - ("type_select", 2), ("nweeps", 4), - ("shape_move", Vertex()), - ("shape_move", - ShapeSpace(callback=_test_callback)), - ("shape_move", - Elastic(stiffness=10, - mc=hpmc.integrate.ConvexPolyhedron))] +shape_updater_valid_attrs = [ + ("trigger", hoomd.trigger.Periodic(10)), + ("trigger", hoomd.trigger.After(100)), + ("trigger", hoomd.trigger.Before(100)), + ("type_select", 2), + ("nweeps", 4), + ("shape_move", Vertex()), + ("shape_move", ShapeSpace(callback=_test_callback)), + ("shape_move", Elastic(stiffness=10, mc=hpmc.integrate.ConvexPolyhedron)), +] updater_constructor_args = [ dict(trigger=hoomd.trigger.Periodic(10), shape_move=ShapeMove()), - dict(trigger=hoomd.trigger.After(100), - shape_move=Vertex(), - type_select=4, - nsweeps=2), - dict(trigger=hoomd.trigger.Before(100), - shape_move=ShapeSpace(callback=_test_callback), - nsweeps=4), - dict(trigger=hoomd.trigger.Periodic(1000), - shape_move=ShapeSpace(callback=_test_callback), - type_select=1), - dict(trigger=hoomd.trigger.Periodic(10), - shape_move=Elastic(stiffness=10, mc=hpmc.integrate.ConvexPolyhedron), - type_select=3, - pretend=True) + dict( + trigger=hoomd.trigger.After(100), shape_move=Vertex(), type_select=4, nsweeps=2 + ), + dict( + trigger=hoomd.trigger.Before(100), + shape_move=ShapeSpace(callback=_test_callback), + nsweeps=4, + ), + dict( + trigger=hoomd.trigger.Periodic(1000), + shape_move=ShapeSpace(callback=_test_callback), + type_select=1, + ), + dict( + trigger=hoomd.trigger.Periodic(10), + shape_move=Elastic(stiffness=10, mc=hpmc.integrate.ConvexPolyhedron), + type_select=3, + pretend=True, + ), ] type_parameters = [ (ShapeSpace(callback=_test_callback), "params", [0.1, 0.3, 0.4]), (ShapeSpace(callback=_test_callback), "step_size", 0.4), - (Vertex(), "volume", 1.2), (Vertex(), "step_size", 0.1), - (Elastic(stiffness=10.0, - mc=hpmc.integrate.ConvexPolyhedron), "reference_shape", { - "vertices": verts - }), - (Elastic(stiffness=10.0, - mc=hpmc.integrate.ConvexPolyhedron), "step_size", 0.2) + (Vertex(), "volume", 1.2), + (Vertex(), "step_size", 0.1), + ( + Elastic(stiffness=10.0, mc=hpmc.integrate.ConvexPolyhedron), + "reference_shape", + {"vertices": verts}, + ), + (Elastic(stiffness=10.0, mc=hpmc.integrate.ConvexPolyhedron), "step_size", 0.2), ] @pytest.mark.parametrize("shape_move_class,params", shape_move_constructor_args) def 
test_valid_construction_shape_moves(shape_move_class, params): - move = shape_move_class(**params) # validate the params were set properly @@ -100,7 +129,6 @@ def test_valid_construction_shape_moves(shape_move_class, params): @pytest.mark.parametrize("updater_constructor_args", updater_constructor_args) def test_valid_construction_shape_updater(updater_constructor_args): - updater = hpmc.update.Shape(**updater_constructor_args) # validate the params were set properly @@ -131,7 +159,6 @@ def test_type_parameters(obj, attr, value): def test_vertex_shape_move(simulation_factory, two_particle_snapshot_factory): - move = Vertex(default_step_size=0.2) move.volume["A"] = 1 @@ -173,13 +200,13 @@ def test_vertex_shape_move(simulation_factory, two_particle_snapshot_factory): assert np.isclose(updater.particle_volumes[0], 1) -def test_python_callback_shape_move_ellipsoid(simulation_factory, - lattice_snapshot_factory): +def test_python_callback_shape_move_ellipsoid( + simulation_factory, lattice_snapshot_factory +): """Test ShapeSpace with a toy class that randomly squashes spheres \ into oblate ellipsoids with constant volume.""" class ScaleEllipsoid: - def __init__(self, a, b, c): self.vol_factor = 4 * np.pi / 3 self.volume = self.vol_factor * a * b * c @@ -187,14 +214,13 @@ def __init__(self, a, b, c): def __call__(self, type_id, param_list): x = param_list[0] - b = (self.volume / x / (self.vol_factor))**(1 / 3) + b = (self.volume / x / (self.vol_factor)) ** (1 / 3) ret = dict(a=x * b, b=b, c=b, **self.default_dict) return ret ellipsoid = dict(a=1, b=1, c=1) - move = ShapeSpace(callback=ScaleEllipsoid(**ellipsoid), - default_step_size=0.2) + move = ShapeSpace(callback=ScaleEllipsoid(**ellipsoid), default_step_size=0.2) move.params["A"] = [1] updater = hpmc.update.Shape(trigger=1, shape_move=move, nsweeps=2) @@ -244,17 +270,21 @@ def __call__(self, type_id, param_list): assert not np.allclose(move.params["A"], [1]) # Check that the shape parameters map back to the correct geometry - assert np.allclose(move.params["A"], [ - mc.shape["A"]["a"] / mc.shape["A"]["b"], - mc.shape["A"]["a"] / mc.shape["A"]["c"] - ]) + assert np.allclose( + move.params["A"], + [ + mc.shape["A"]["a"] / mc.shape["A"]["b"], + mc.shape["A"]["a"] / mc.shape["A"]["c"], + ], + ) # Check that the callback is conserving volume properly assert np.allclose(updater.particle_volumes, 4 * np.pi / 3) -def test_python_callback_shape_move_pyramid(simulation_factory, - two_particle_snapshot_factory): +def test_python_callback_shape_move_pyramid( + simulation_factory, two_particle_snapshot_factory +): """Test ShapeSpace with a toy class that randomly stretches square \ pyramids.""" @@ -262,13 +292,12 @@ def square_pyramid_factory(h): """Generate a square pyramid with unit volume.""" theta = np.arange(0, 2 * np.pi, np.pi / 2) base_vertices = np.array( - [np.cos(theta), np.sin(theta), - np.zeros_like(theta)]).T * np.sqrt(3 / 2) + [np.cos(theta), np.sin(theta), np.zeros_like(theta)] + ).T * np.sqrt(3 / 2) vertices = np.vstack([base_vertices, [0, 0, h]]) return vertices / np.cbrt(h), base_vertices class ScalePyramid: - def __init__(self, h=1.1): _, self.base_vertices = square_pyramid_factory(h=1.1) self.default_dict = dict(sweep_radius=0, ignore_statistics=True) @@ -331,14 +360,14 @@ def __call__(self, type_id, param_list): # Check that the shape parameters map back to the correct geometry assert np.allclose( - square_pyramid_factory(current_h + 0.1)[0], mc.shape["A"]["vertices"]) + square_pyramid_factory(current_h + 0.1)[0], 
mc.shape["A"]["vertices"] + ) # Check that the callback is conserving volume properly assert np.allclose(updater.particle_volumes, 1) def test_elastic_shape_move(simulation_factory, two_particle_snapshot_factory): - mc = hoomd.hpmc.integrate.ConvexPolyhedron() mc.d["A"] = 0 mc.a["A"] = 0 diff --git a/hoomd/hpmc/pytest/test_shape_utils.py b/hoomd/hpmc/pytest/test_shape_utils.py index 0c41056e56..944a2d229d 100644 --- a/hoomd/hpmc/pytest/test_shape_utils.py +++ b/hoomd/hpmc/pytest/test_shape_utils.py @@ -4,9 +4,11 @@ import numpy as np import pytest from hoomd.hpmc._hpmc import PolyhedronVertices, EllipsoidParams -from hoomd.hpmc._hpmc import (MassPropertiesConvexPolyhedron, - MassPropertiesConvexSpheropolyhedron, - MassPropertiesEllipsoid) +from hoomd.hpmc._hpmc import ( + MassPropertiesConvexPolyhedron, + MassPropertiesConvexSpheropolyhedron, + MassPropertiesEllipsoid, +) # list of tuples with shape definitions and precomputed volume and # determinant of the moment of inertia tensor (det_moi) @@ -14,96 +16,154 @@ # - volume and det_moi precomputed externally with the coxeter package shape_list = [ # cube - ("ConvexPolyhedron", { - "vertices": [[-1, -1, -1], [-1, -1, 1], [-1, 1, 1], [-1, 1, -1], - [1, -1, -1], [1, -1, 1], [1, 1, 1], [1, 1, -1]], - "sweep_radius": 0, - "ignore_statistics": True - }, 8.0, 151.70370370370372), + ( + "ConvexPolyhedron", + { + "vertices": [ + [-1, -1, -1], + [-1, -1, 1], + [-1, 1, 1], + [-1, 1, -1], + [1, -1, -1], + [1, -1, 1], + [1, 1, 1], + [1, 1, -1], + ], + "sweep_radius": 0, + "ignore_statistics": True, + }, + 8.0, + 151.70370370370372, + ), # deformed cubed - ("ConvexPolyhedron", { - "vertices": [[-1.00010558, -1.05498028, -1.02785711], - [-0.60306277, -0.94357813, 1.15216173], - [-0.93980368, 1.10805945, 1.07469946], - [-0.92443732, 0.98195736, -1.46118865], - [1.228552, -1.30713086, -1.1727504], - [1.20330899, -0.79320722, 0.83510155], - [1.00378097, 0.92333129, 0.93916291], - [1.1994583, 0.97454523, -1.29517718]], - "sweep_radius": 0, - "ignore_statistics": True - }, 9.906613237811571, 433.6939933469258), + ( + "ConvexPolyhedron", + { + "vertices": [ + [-1.00010558, -1.05498028, -1.02785711], + [-0.60306277, -0.94357813, 1.15216173], + [-0.93980368, 1.10805945, 1.07469946], + [-0.92443732, 0.98195736, -1.46118865], + [1.228552, -1.30713086, -1.1727504], + [1.20330899, -0.79320722, 0.83510155], + [1.00378097, 0.92333129, 0.93916291], + [1.1994583, 0.97454523, -1.29517718], + ], + "sweep_radius": 0, + "ignore_statistics": True, + }, + 9.906613237811571, + 433.6939933469258, + ), # truncated tetrahedron with truncation = 0.4 - ("ConvexPolyhedron", { - "vertices": [[-1., -0.6, 0.6], [-1., 0.6, -0.6], [-0.6, -1., 0.6], - [-0.6, -0.6, 1.], [-0.6, 0.6, -1.], [-0.6, 1., -0.6], - [0.6, -1., -0.6], [0.6, -0.6, -1.], [0.6, 0.6, 1.], - [0.6, 1., 0.6], [1., -0.6, -0.6], [1., 0.6, 0.6]], - "sweep_radius": 0, - "ignore_statistics": True - }, 2.5813333333333337, 0.873927553653835), + ( + "ConvexPolyhedron", + { + "vertices": [ + [-1.0, -0.6, 0.6], + [-1.0, 0.6, -0.6], + [-0.6, -1.0, 0.6], + [-0.6, -0.6, 1.0], + [-0.6, 0.6, -1.0], + [-0.6, 1.0, -0.6], + [0.6, -1.0, -0.6], + [0.6, -0.6, -1.0], + [0.6, 0.6, 1.0], + [0.6, 1.0, 0.6], + [1.0, -0.6, -0.6], + [1.0, 0.6, 0.6], + ], + "sweep_radius": 0, + "ignore_statistics": True, + }, + 2.5813333333333337, + 0.873927553653835, + ), # deformed truncated tetrahedron - ("ConvexPolyhedron", { - "vertices": [[-1.38818127, -0.43327022, 0.48655642], - [-0.76510261, 0.66381377, -0.56182591], - [-0.52614596, -1.02022957, 0.4116381], - 
[-0.88082834, -0.1838706, 0.97593675], - [-0.44804162, 0.96548643, -1.13214542], - [-0.76156125, 1.17756002, -0.64348949], - [0.4120951, -0.88009234, -0.15537746], - [0.80000109, -0.37006509, -1.03111526], - [0.26988483, 0.30897717, 1.06415232], - [0.76226993, 0.95183908, 0.63302422], - [0.99329989, -0.58242708, -0.39317069], - [0.78783469, 0.39728311, 0.51595356]], - "sweep_radius": 0, - "ignore_statistics": True - }, 3.406051603090999, 2.754115599932528), - ("Ellipsoid", { - "a": 1, - "b": 1, - "c": 1, - "ignore_statistics": True - }, 4.1887902047863905, 4.70376701046326), - ("Ellipsoid", { - "a": 1, - "b": 1, - "c": 2, - "ignore_statistics": True - }, 8.377580409572781, 235.18835052316288), - ("Ellipsoid", { - "a": 1.5, - "b": 1, - "c": 1, - "ignore_statistics": True - }, 6.283185307179586, 41.920486071765346), - ("Ellipsoid", { - "a": 1, - "b": 0.8, - "c": 1, - "ignore_statistics": True - }, 3.3510321638291125, 1.6193602241717742), - ("Ellipsoid", { - "a": 1.3, - "b": 2.7, - "c": 0.7, - "ignore_statistics": True - }, 10.291857533160162, 1328.2618881466828), + ( + "ConvexPolyhedron", + { + "vertices": [ + [-1.38818127, -0.43327022, 0.48655642], + [-0.76510261, 0.66381377, -0.56182591], + [-0.52614596, -1.02022957, 0.4116381], + [-0.88082834, -0.1838706, 0.97593675], + [-0.44804162, 0.96548643, -1.13214542], + [-0.76156125, 1.17756002, -0.64348949], + [0.4120951, -0.88009234, -0.15537746], + [0.80000109, -0.37006509, -1.03111526], + [0.26988483, 0.30897717, 1.06415232], + [0.76226993, 0.95183908, 0.63302422], + [0.99329989, -0.58242708, -0.39317069], + [0.78783469, 0.39728311, 0.51595356], + ], + "sweep_radius": 0, + "ignore_statistics": True, + }, + 3.406051603090999, + 2.754115599932528, + ), + ( + "Ellipsoid", + {"a": 1, "b": 1, "c": 1, "ignore_statistics": True}, + 4.1887902047863905, + 4.70376701046326, + ), + ( + "Ellipsoid", + {"a": 1, "b": 1, "c": 2, "ignore_statistics": True}, + 8.377580409572781, + 235.18835052316288, + ), + ( + "Ellipsoid", + {"a": 1.5, "b": 1, "c": 1, "ignore_statistics": True}, + 6.283185307179586, + 41.920486071765346, + ), + ( + "Ellipsoid", + {"a": 1, "b": 0.8, "c": 1, "ignore_statistics": True}, + 3.3510321638291125, + 1.6193602241717742, + ), + ( + "Ellipsoid", + {"a": 1.3, "b": 2.7, "c": 0.7, "ignore_statistics": True}, + 10.291857533160162, + 1328.2618881466828, + ), # sphere as convex spheropolyhedron - ("ConvexSpheropolyhedron", { - "vertices": [[0, 0, 0]], - "sweep_radius": 1, - "ignore_statistics": True - }, 4.1887902047863905, 4.70376701046326), + ( + "ConvexSpheropolyhedron", + {"vertices": [[0, 0, 0]], "sweep_radius": 1, "ignore_statistics": True}, + 4.1887902047863905, + 4.70376701046326, + ), # truncated tetrahedra (trunc = 0.1) as convex spheropolyhedron - ("ConvexSpheropolyhedron", { - "vertices": [[-1., -0.9, 0.9], [-1., 0.9, -0.9], [-0.9, -1., 0.9], - [-0.9, -0.9, 1.], [-0.9, 0.9, -1.], [-0.9, 1., -0.9], - [0.9, -1., -0.9], [0.9, -0.9, -1.], [0.9, 0.9, 1.], - [0.9, 1., 0.9], [1., -0.9, -0.9], [1., 0.9, 0.9]], - "sweep_radius": 0, - "ignore_statistics": True - }, 2.6653333333333333, 1.2054288640850612), + ( + "ConvexSpheropolyhedron", + { + "vertices": [ + [-1.0, -0.9, 0.9], + [-1.0, 0.9, -0.9], + [-0.9, -1.0, 0.9], + [-0.9, -0.9, 1.0], + [-0.9, 0.9, -1.0], + [-0.9, 1.0, -0.9], + [0.9, -1.0, -0.9], + [0.9, -0.9, -1.0], + [0.9, 0.9, 1.0], + [0.9, 1.0, 0.9], + [1.0, -0.9, -0.9], + [1.0, 0.9, 0.9], + ], + "sweep_radius": 0, + "ignore_statistics": True, + }, + 2.6653333333333333, + 1.2054288640850612, + ), ] diff --git 
a/hoomd/hpmc/pytest/test_small_box_2d.py b/hoomd/hpmc/pytest/test_small_box_2d.py index 1bdb5c485a..826ffac9e8 100644 --- a/hoomd/hpmc/pytest/test_small_box_2d.py +++ b/hoomd/hpmc/pytest/test_small_box_2d.py @@ -17,18 +17,20 @@ def one_square_simulation(simulation_factory): """ snap = hoomd.Snapshot() snap.particles.N = 1 - snap.particles.types = ['A'] + snap.particles.types = ["A"] snap.particles.position[:] = [[0, 0, 0]] snap.configuration.box = [1.2, 1.2, 0, 0, 0, 0] sim = simulation_factory(snap) mc = hoomd.hpmc.integrate.ConvexPolygon() - mc.shape['A'] = dict(vertices=[ - (-0.5, -0.5), - (0.5, -0.5), - (0.5, 0.5), - (-0.5, 0.5), - ]) + mc.shape["A"] = dict( + vertices=[ + (-0.5, -0.5), + (0.5, -0.5), + (0.5, 0.5), + (-0.5, 0.5), + ] + ) sim.operations.integrator = mc return sim @@ -63,7 +65,10 @@ def test_self_interaction_overlap(one_square_simulation): with sim.state.cpu_local_snapshot as data: data.particles.orientation[0, :] = [ - 0.9238795325112867, 0, 0, 0.3826834323650898 + 0.9238795325112867, + 0, + 0, + 0.3826834323650898, ] sim.operations._schedule() @@ -113,15 +118,17 @@ def test_large_moves(simulation_factory, lattice_snapshot_factory): snap = lattice_snapshot_factory(dimensions=2, a=2, n=16) sim = simulation_factory(snap) - mc = hoomd.hpmc.integrate.ConvexPolygon(translation_move_probability=1.0, - nselect=4, - default_d=100) - mc.shape['A'] = dict(vertices=[ - (-0.5, -0.5), - (0.5, -0.5), - (0.5, 0.5), - (-0.5, 0.5), - ]) + mc = hoomd.hpmc.integrate.ConvexPolygon( + translation_move_probability=1.0, nselect=4, default_d=100 + ) + mc.shape["A"] = dict( + vertices=[ + (-0.5, -0.5), + (0.5, -0.5), + (0.5, 0.5), + (-0.5, 0.5), + ] + ) sim.operations.integrator = mc translate_moves = numpy.zeros(2, dtype=numpy.uint64) diff --git a/hoomd/hpmc/pytest/test_small_box_3d.py b/hoomd/hpmc/pytest/test_small_box_3d.py index 99117147dc..edc26bfc0a 100644 --- a/hoomd/hpmc/pytest/test_small_box_3d.py +++ b/hoomd/hpmc/pytest/test_small_box_3d.py @@ -17,22 +17,24 @@ def one_cube_simulation(simulation_factory): """ snap = hoomd.Snapshot() snap.particles.N = 1 - snap.particles.types = ['A'] + snap.particles.types = ["A"] snap.particles.position[:] = [[0, 0, 0]] snap.configuration.box = [1.2, 1.2, 1.2, 0, 0, 0] sim = simulation_factory(snap) mc = hoomd.hpmc.integrate.ConvexPolyhedron() - mc.shape['A'] = dict(vertices=[ - (-0.5, -0.5, -0.5), - (-0.5, -0.5, 0.5), - (-0.5, 0.5, -0.5), - (-0.5, 0.5, 0.5), - (0.5, -0.5, -0.5), - (0.5, -0.5, 0.5), - (0.5, 0.5, -0.5), - (0.5, 0.5, 0.5), - ]) + mc.shape["A"] = dict( + vertices=[ + (-0.5, -0.5, -0.5), + (-0.5, -0.5, 0.5), + (-0.5, 0.5, -0.5), + (-0.5, 0.5, 0.5), + (0.5, -0.5, -0.5), + (0.5, -0.5, 0.5), + (0.5, 0.5, -0.5), + (0.5, 0.5, 0.5), + ] + ) sim.operations.integrator = mc return sim @@ -67,7 +69,10 @@ def test_self_interaction_overlap(one_cube_simulation): with sim.state.cpu_local_snapshot as data: data.particles.orientation[0, :] = [ - 0.9238795325112867, 0, 0, 0.3826834323650898 + 0.9238795325112867, + 0, + 0, + 0.3826834323650898, ] sim.operations._schedule() @@ -117,20 +122,22 @@ def test_large_moves(simulation_factory, lattice_snapshot_factory): snap = lattice_snapshot_factory(dimensions=3, a=2, n=16) sim = simulation_factory(snap) - mc = hoomd.hpmc.integrate.ConvexPolygon(translation_move_probability=1.0, - nselect=4, - default_d=100) + mc = hoomd.hpmc.integrate.ConvexPolygon( + translation_move_probability=1.0, nselect=4, default_d=100 + ) mc = hoomd.hpmc.integrate.ConvexPolyhedron() - mc.shape['A'] = dict(vertices=[ - (-0.5, -0.5, -0.5), 
- (-0.5, -0.5, 0.5), - (-0.5, 0.5, -0.5), - (-0.5, 0.5, 0.5), - (0.5, -0.5, -0.5), - (0.5, -0.5, 0.5), - (0.5, 0.5, -0.5), - (0.5, 0.5, 0.5), - ]) + mc.shape["A"] = dict( + vertices=[ + (-0.5, -0.5, -0.5), + (-0.5, -0.5, 0.5), + (-0.5, 0.5, -0.5), + (-0.5, 0.5, 0.5), + (0.5, -0.5, -0.5), + (0.5, -0.5, 0.5), + (0.5, 0.5, -0.5), + (0.5, 0.5, 0.5), + ] + ) sim.operations.integrator = mc diff --git a/hoomd/hpmc/shape_move.py b/hoomd/hpmc/shape_move.py index 43e20adfb3..3f6e0b8332 100644 --- a/hoomd/hpmc/shape_move.py +++ b/hoomd/hpmc/shape_move.py @@ -57,10 +57,11 @@ def __init__(self, default_step_size=None): step_size = float else: step_size = float(default_step_size) - typeparam_step_size = TypeParameter('step_size', - type_kind='particle_types', - param_dict=TypeParameterDict( - step_size, len_keys=1)) + typeparam_step_size = TypeParameter( + "step_size", + type_kind="particle_types", + param_dict=TypeParameterDict(step_size, len_keys=1), + ) self._add_typeparam(typeparam_step_size) def _attach_hook(self): @@ -72,13 +73,13 @@ def _attach_hook(self): integrator_name = integrator.__class__.__name__ if integrator_name in self._supported_shapes: - self._move_cls = getattr(_hpmc, - self.__class__.__name__ + integrator_name) + self._move_cls = getattr(_hpmc, self.__class__.__name__ + integrator_name) else: raise RuntimeError("Integrator not supported") self._cpp_obj = self._move_cls( self._simulation.state._cpp_sys_def, - self._simulation.operations.integrator._cpp_obj) + self._simulation.operations.integrator._cpp_obj, + ) class Elastic(ShapeMove): @@ -148,17 +149,15 @@ class Elastic(ShapeMove): deformation trial moves (**default**: 0.5). """ - _supported_shapes = {'ConvexPolyhedron'} + _supported_shapes = {"ConvexPolyhedron"} __doc__ = __doc__.replace("{inherited}", ShapeMove._doc_inherited) - def __init__(self, - stiffness, - mc, - default_step_size=None, - normal_shear_ratio=0.5): + def __init__(self, stiffness, mc, default_step_size=None, normal_shear_ratio=0.5): super().__init__(default_step_size) - param_dict = ParameterDict(normal_shear_ratio=float(normal_shear_ratio), - stiffness=hoomd.variant.Variant) + param_dict = ParameterDict( + normal_shear_ratio=float(normal_shear_ratio), + stiffness=hoomd.variant.Variant, + ) param_dict["stiffness"] = stiffness self._param_dict.update(param_dict) self._add_typeparam(self._get_shape_param(mc)) @@ -169,8 +168,10 @@ def _get_shape_param(self, mc): else: cls = mc if cls.__name__ not in self._supported_shapes: - raise ValueError(f"Unsupported integrator type {cls}. Supported " - f"types are {self._supported_shapes}") + raise ValueError( + f"Unsupported integrator type {cls}. Supported " + f"types are {self._supported_shapes}" + ) # Class can only be used for this type of integrator now. self._supported_shapes = {cls.__name__} shape = cls().shape @@ -181,8 +182,9 @@ def _attach_hook(self): integrator = self._simulation.operations.integrator if isinstance(integrator, integrate.Ellipsoid): for shape in integrator.shape.values(): - is_sphere = numpy.allclose((shape["a"], shape["b"], shape["c"]), - shape["a"]) + is_sphere = numpy.allclose( + (shape["a"], shape["b"], shape["c"]), shape["a"] + ) if not is_sphere: raise ValueError("This updater only works when a=b=c.") super()._attach_hook() @@ -261,26 +263,22 @@ def __call__(self, type, param_list): parameters to change each timestep (**default**: 1). 
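For reference, a minimal sketch of the kind of user callback `ShapeSpace` drives (illustrative only, not a hunk in this changeset). It assumes, per the HOOMD documentation, that the callable receives the particle type name plus the tuned parameter list and returns a shape-definition dict for the attached integrator; the class name and scaling scheme are hypothetical.

    import hoomd
    import numpy

    class ScaledTetrahedron:
        """Map one tuned parameter onto scaled tetrahedron vertices."""

        def __call__(self, type, param_list):
            # param_list holds the floats stored in ShapeSpace.params for this
            # type; treat the first entry as an edge-scaling factor.
            s = param_list[0]
            verts = s * numpy.array(
                [(1, 1, 1), (-1, -1, 1), (1, -1, -1), (-1, 1, -1)], dtype=float
            )
            return dict(vertices=verts.tolist())

    shape_space = hoomd.hpmc.shape_move.ShapeSpace(
        callback=ScaledTetrahedron(), default_step_size=0.05
    )
    shape_space.params["A"] = [1.0]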
""" - _supported_shapes = { - 'ConvexPolyhedron', 'ConvexSpheropolyhedron', 'Ellipsoid' - } + _supported_shapes = {"ConvexPolyhedron", "ConvexSpheropolyhedron", "Ellipsoid"} __doc__ = __doc__.replace("{inherited}", ShapeMove._doc_inherited) - def __init__(self, - callback, - default_step_size=None, - param_move_probability=1): + def __init__(self, callback, default_step_size=None, param_move_probability=1): super().__init__(default_step_size) param_dict = ParameterDict( - param_move_probability=float(param_move_probability), - callback=object) + param_move_probability=float(param_move_probability), callback=object + ) param_dict["callback"] = callback self._param_dict.update(param_dict) - typeparam_shapeparams = TypeParameter('params', - type_kind='particle_types', - param_dict=TypeParameterDict( - [float], len_keys=1)) + typeparam_shapeparams = TypeParameter( + "params", + type_kind="particle_types", + param_dict=TypeParameterDict([float], len_keys=1), + ) self._add_typeparam(typeparam_shapeparams) @@ -340,24 +338,26 @@ class Vertex(ShapeMove): maintain this volume. """ - _supported_shapes = {'ConvexPolyhedron'} + _supported_shapes = {"ConvexPolyhedron"} __doc__ = __doc__.replace("{inherited}", ShapeMove._doc_inherited) def __init__(self, default_step_size=None, vertex_move_probability=1): super().__init__(default_step_size) param_dict = ParameterDict( - vertex_move_probability=float(vertex_move_probability)) + vertex_move_probability=float(vertex_move_probability) + ) self._param_dict.update(param_dict) - typeparam_volume = TypeParameter('volume', - type_kind='particle_types', - param_dict=TypeParameterDict( - float, len_keys=1)) + typeparam_volume = TypeParameter( + "volume", + type_kind="particle_types", + param_dict=TypeParameterDict(float, len_keys=1), + ) self._add_typeparam(typeparam_volume) __all__ = [ - 'Elastic', - 'ShapeMove', - 'ShapeSpace', - 'Vertex', + "Elastic", + "ShapeMove", + "ShapeSpace", + "Vertex", ] diff --git a/hoomd/hpmc/tune/__init__.py b/hoomd/hpmc/tune/__init__.py index 26bedc1fdd..346e8d3ade 100644 --- a/hoomd/hpmc/tune/__init__.py +++ b/hoomd/hpmc/tune/__init__.py @@ -7,6 +7,6 @@ from hoomd.hpmc.tune.boxmc_move_size import BoxMCMoveSize __all__ = [ - 'BoxMCMoveSize', - 'MoveSize', + "BoxMCMoveSize", + "MoveSize", ] diff --git a/hoomd/hpmc/tune/boxmc_move_size.py b/hoomd/hpmc/tune/boxmc_move_size.py index 3bd1b0900f..2d2c05ea7b 100644 --- a/hoomd/hpmc/tune/boxmc_move_size.py +++ b/hoomd/hpmc/tune/boxmc_move_size.py @@ -5,8 +5,7 @@ import hoomd from hoomd.data.parameterdicts import ParameterDict -from hoomd.data.typeconverter import (OnlyFrom, OnlyTypes, OnlyIf, - to_type_converter) +from hoomd.data.typeconverter import OnlyFrom, OnlyTypes, OnlyIf, to_type_converter from hoomd.tune.custom_tuner import _InternalCustomTuner from hoomd.tune import ScaleSolver, SecantSolver @@ -23,15 +22,24 @@ class _MoveSizeTuneDefinition(mc_move_tune._MCTuneDefinition): move sizes. For this class 'x' is the move size and 'y' is the acceptance rate. """ + acceptable_attrs = { - "volume", "aspect", "shear_x", "shear_y", "shear_z", "length_x", - "length_y", "length_z" + "volume", + "aspect", + "shear_x", + "shear_y", + "shear_z", + "length_x", + "length_y", + "length_z", } def __init__(self, boxmc, attr, target, domain=None): if attr not in self.acceptable_attrs: - raise ValueError(f"Only {self.acceptable_attrs} are allowed as " - f"tunable attributes not {attr}.") + raise ValueError( + f"Only {self.acceptable_attrs} are allowed as " + f"tunable attributes not {attr}." 
+ ) splits = attr.split("_") self.attr = splits[0] if len(splits) > 1: @@ -67,8 +75,11 @@ def __hash__(self): return hash((self.attr, self._target, self._domain)) def __eq__(self, other): - return (self.attr == other.attr and self._target == other._target - and self._domain == other._domain) + return ( + self.attr == other.attr + and self._target == other._target + and self._domain == other._domain + ) class _InternalBoxMCMoveSize(mc_move_tune._TuneMCMove): @@ -93,17 +104,24 @@ def __init__( params = ParameterDict( boxmc=hoomd.hpmc.update.BoxMC, moves=[ - OnlyFrom(_MoveSizeTuneDefinition.acceptable_attrs, - postprocess=self._flag_new_tunables) + OnlyFrom( + _MoveSizeTuneDefinition.acceptable_attrs, + postprocess=self._flag_new_tunables, + ) ], max_move_size=OnlyIf( - to_type_converter({ - attr: - OnlyTypes(float, - allow_none=True, - postprocess=self._flag_move_size_update) - for attr in _MoveSizeTuneDefinition.acceptable_attrs - }),)) + to_type_converter( + { + attr: OnlyTypes( + float, + allow_none=True, + postprocess=self._flag_move_size_update, + ) + for attr in _MoveSizeTuneDefinition.acceptable_attrs + } + ), + ), + ) params["boxmc"] = boxmc params["moves"] = moves if max_move_size is None: @@ -117,8 +135,7 @@ def __init__( def attach(self, simulation): if not isinstance(simulation.operations.integrator, HPMCIntegrator): - raise RuntimeError( - "MoveSizeTuner can only be used in HPMC simulations.") + raise RuntimeError("MoveSizeTuner can only be used in HPMC simulations.") super().attach(simulation) def act(self, timestep=None): @@ -157,8 +174,8 @@ def filter_tunables(tunable): continue max_move_size = self.max_move_size[move] move_definition = _MoveSizeTuneDefinition( - self.boxmc, move, self.target, - (self._min_move_size, max_move_size)) + self.boxmc, move, self.target, (self._min_move_size, max_move_size) + ) self._tunables.append(move_definition) self._should_update_tunables = False self._tuned = 0 @@ -239,20 +256,23 @@ class BoxMCMoveSize(_InternalCustomTuner): to attempt for each move time. See the available moves in the `moves` attribute documentation. """ + _internal_class = _InternalBoxMCMoveSize _wrap_methods = ("tuned",) __doc__ = __doc__.replace("{inherited}", Tuner._doc_inherited) @classmethod - def scale_solver(cls, - trigger, - boxmc, - moves, - target, - max_move_size=None, - max_scale=2., - gamma=1., - tol=1e-2): + def scale_solver( + cls, + trigger, + boxmc, + moves, + target, + max_move_size=None, + max_scale=2.0, + gamma=1.0, + tol=1e-2, + ): """Create a `BoxMCMoveSize` tuner with a `hoomd.tune.ScaleSolver`. Args: @@ -279,18 +299,13 @@ def scale_solver(cls, than the default of 0.01 as acceptance rates can vary significantly at typical tuning rates. """ - solver = ScaleSolver(max_scale, gamma, 'negative', tol) + solver = ScaleSolver(max_scale, gamma, "negative", tol) return cls(trigger, boxmc, moves, target, solver, max_move_size) @classmethod - def secant_solver(cls, - trigger, - boxmc, - moves, - target, - max_move_size=None, - gamma=0.8, - tol=1e-2): + def secant_solver( + cls, trigger, boxmc, moves, target, max_move_size=None, gamma=0.8, tol=1e-2 + ): """Create a `BoxMCMoveSize` tuner with a `hoomd.tune.SecantSolver`. This solver can be faster than `hoomd.tune.ScaleSolver`, but depending diff --git a/hoomd/hpmc/tune/mc_move_tune.py b/hoomd/hpmc/tune/mc_move_tune.py index 365733aeda..1a0aa7d495 100644 --- a/hoomd/hpmc/tune/mc_move_tune.py +++ b/hoomd/hpmc/tune/mc_move_tune.py @@ -2,6 +2,7 @@ # Part of HOOMD-blue, released under the BSD 3-Clause License. 
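For orientation, a hedged usage sketch of the box move-size tuner reformatted above (illustrative, not part of the changeset); it assumes an existing HPMC simulation `sim` with an attached integrator, and the pressure and move-size values are arbitrary.

    import hoomd

    boxmc = hoomd.hpmc.update.BoxMC(trigger=1, P=hoomd.variant.Constant(2.0))
    boxmc.volume = dict(mode="standard", weight=1.0, delta=1.0)
    tuner = hoomd.hpmc.tune.BoxMCMoveSize.scale_solver(
        trigger=hoomd.trigger.Periodic(100),
        boxmc=boxmc,
        moves=["volume"],
        target=0.3,
    )
    sim.operations.updaters.append(boxmc)
    sim.operations.tuners.append(tuner)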
"""Implement generic classes for move acceptance ratio tuning.""" + import abc from hoomd.data.parameterdicts import ParameterDict @@ -48,19 +49,24 @@ def _get_y(self): return None # Called twice in same step return computed value - elif (self.previous_total == total_moves - and self.previous_accepted_moves == accepted_moves): + elif ( + self.previous_total == total_moves + and self.previous_accepted_moves == accepted_moves + ): return self.previous_acceptance_rate # If we have recorded a previous total then this condition implies a new # run call. We should be able to tune here as we have no other # indication the system has changed. - elif (self.previous_total >= total_moves - or self.previous_accepted_moves >= accepted_moves): + elif ( + self.previous_total >= total_moves + or self.previous_accepted_moves >= accepted_moves + ): acceptance_rate = accepted_moves / total_moves else: - acceptance_rate = (accepted_moves - self.previous_accepted_moves) \ - / (total_moves - self.previous_total) + acceptance_rate = (accepted_moves - self.previous_accepted_moves) / ( + total_moves - self.previous_total + ) # We store the previous information becuase this lets us find the # acceptance rate since this has last been called which allows for us to @@ -73,6 +79,7 @@ def _get_y(self): class _TuneMCMove(_InternalAction): """Internal class for the MoveSize tuner.""" + _min_move_size = 1e-7 def __init__(self, target, solver): @@ -90,9 +97,10 @@ def __init__(self, target, solver): # This is a bit complicated because we are having to ensure that we keep # the list of tunables and the solver updated with the changes to # attributes. However, these are simply forwarding a change along. - param_dict = ParameterDict(target=OnlyTypes( - float, postprocess=self._target_postprocess), - solver=RootSolver) + param_dict = ParameterDict( + target=OnlyTypes(float, postprocess=self._target_postprocess), + solver=RootSolver, + ) self._param_dict.update(param_dict) self.target = target @@ -137,6 +145,6 @@ def _target_postprocess(self, target): if not (0 <= target <= 1): raise ValueError(f"target {target} should be between 0 and 1.") - self._update_tunables_attr('target', target) + self._update_tunables_attr("target", target) self._tuned = 0 return target diff --git a/hoomd/hpmc/tune/move_size.py b/hoomd/hpmc/tune/move_size.py index 9545f84c9f..db06c5ec68 100644 --- a/hoomd/hpmc/tune/move_size.py +++ b/hoomd/hpmc/tune/move_size.py @@ -5,8 +5,7 @@ from hoomd.data.parameterdicts import ParameterDict, TypeParameterDict from hoomd.data.typeparam import TypeParameter -from hoomd.data.typeconverter import (OnlyFrom, OnlyTypes, OnlyIf, - to_type_converter) +from hoomd.data.typeconverter import OnlyFrom, OnlyTypes, OnlyIf, to_type_converter from hoomd.tune import ScaleSolver, SecantSolver from hoomd.hpmc.integrate import HPMCIntegrator from hoomd.hpmc.tune import mc_move_tune @@ -21,12 +20,16 @@ class _MoveSizeTuneDefinition(mc_move_tune._MCTuneDefinition): move sizes. For this class 'x' is the move size and 'y' is the acceptance rate. 
""" - _attr_acceptance = {'a': 'rotate_moves', 'd': 'translate_moves'} + + _attr_acceptance = {"a": "rotate_moves", "d": "translate_moves"} def __init__(self, attr, type, target, domain=None): if attr not in self._attr_acceptance: - raise ValueError("Only {} are allowed as tunable " - "attributes.".format(self._available_attrs)) + raise ValueError( + "Only {} are allowed as tunable " "attributes.".format( + self._available_attrs + ) + ) self.attr = attr self.type = type self.integrator = None @@ -45,52 +48,69 @@ def __hash__(self): return hash((self.attr, self.type, self._target, self._domain)) def __eq__(self, other): - return (self.attr == other.attr and self.type == other.type - and self._target == other._target - and self._domain == other._domain) + return ( + self.attr == other.attr + and self.type == other.type + and self._target == other._target + and self._domain == other._domain + ) class _InternalMoveSize(mc_move_tune._TuneMCMove): """Internal class for the MoveSize tuner.""" - def __init__(self, - moves, - target, - solver, - types=None, - max_translation_move=None, - max_rotation_move=None): + def __init__( + self, + moves, + target, + solver, + types=None, + max_translation_move=None, + max_rotation_move=None, + ): super().__init__(target, solver) # A flag for knowing when to update the maximum move sizes self._should_update_move_sizes = False # set up maximum trial move sizes t_moves = TypeParameter( - 'max_translation_move', 'particle_type', - TypeParameterDict(OnlyTypes(float, - postprocess=self._flag_move_size_update, - allow_none=True), - len_keys=1)) + "max_translation_move", + "particle_type", + TypeParameterDict( + OnlyTypes( + float, postprocess=self._flag_move_size_update, allow_none=True + ), + len_keys=1, + ), + ) r_moves = TypeParameter( - 'max_rotation_move', 'particle_type', - TypeParameterDict(OnlyTypes(float, - postprocess=self._flag_move_size_update, - allow_none=True), - len_keys=1)) + "max_rotation_move", + "particle_type", + TypeParameterDict( + OnlyTypes( + float, postprocess=self._flag_move_size_update, allow_none=True + ), + len_keys=1, + ), + ) self._typeparam_dict = { - 'max_translation_move': t_moves, - 'max_rotation_move': r_moves + "max_translation_move": t_moves, + "max_rotation_move": r_moves, } # This is a bit complicated because we are having to ensure that we keep # the list of tunables and the solver updated with the changes to # attributes. However, these are simply forwarding a change along. param_dict = ParameterDict( - moves=OnlyIf(to_type_converter([OnlyFrom(['a', 'd'])]), - postprocess=self._update_moves), - types=OnlyIf(to_type_converter([str]), - postprocess=self._update_types, - allow_none=True), + moves=OnlyIf( + to_type_converter([OnlyFrom(["a", "d"])]), + postprocess=self._update_moves, + ), + types=OnlyIf( + to_type_converter([str]), + postprocess=self._update_types, + allow_none=True, + ), ) self._param_dict.update(param_dict) @@ -106,21 +126,20 @@ def __init__(self, def attach(self, simulation): if not isinstance(simulation.operations.integrator, HPMCIntegrator): - raise RuntimeError( - "MoveSizeTuner can only be used in HPMC simulations.") + raise RuntimeError("MoveSizeTuner can only be used in HPMC simulations.") particle_types = simulation.state.particle_types if self.types is None: self.types = particle_types if not all(t in particle_types for t in self.types): raise RuntimeError( - "Invalid particle type found specified types for tuning.") + "Invalid particle type found specified types for tuning." 
+ ) self._update_tunables(new_moves=self.moves, new_types=self.types) - self._update_tunables_attr('integrator', - simulation.operations.integrator) + self._update_tunables_attr("integrator", simulation.operations.integrator) super().attach(simulation) def detach(self): - self._update_tunables_attr('integrator', None) + self._update_tunables_attr("integrator", None) super().detach() def act(self, timestep=None): @@ -140,8 +159,9 @@ def _update_tunables(self, *, new_moves=tuple(), new_types=tuple()): # First filter out any move size tune definitions that don't match # the new specification. def filter_tunables(tunable): - return ((new_moves is None or tunable.attr in new_moves) - and (new_types is None or tunable.type in new_types)) + return (new_moves is None or tunable.attr in new_moves) and ( + new_types is None or tunable.type in new_types + ) self._tunables = list(filter(filter_tunables, tunables)) tune_definitions = set(self._tunables) @@ -150,13 +170,13 @@ def filter_tunables(tunable): # specification. for move in new_moves: for new_type in new_types: - if move == 'a': + if move == "a": max_move_size = self.max_rotation_move[new_type] else: max_move_size = self.max_translation_move[new_type] move_definition = _MoveSizeTuneDefinition( - move, new_type, self.target, - (self._min_move_size, max_move_size)) + move, new_type, self.target, (self._min_move_size, max_move_size) + ) if move_definition not in tune_definitions: self._tunables.append(move_definition) @@ -176,7 +196,7 @@ def _flag_move_size_update(self, value): def _update_move_sizes(self): for tunable in self._tunables: - if tunable.attr == 'a': + if tunable.attr == "a": max_move_size = self.max_rotation_move[tunable.type] else: max_move_size = self.max_translation_move[tunable.type] @@ -251,21 +271,24 @@ class methods to create a `MoveSize` tuner with built-in solvers; see max_rotation_move (float): The maximum value of a rotational move size to attempt. """ + _internal_class = _InternalMoveSize _wrap_methods = ("tuned",) __doc__ = __doc__.replace("{inherited}", Tuner._doc_inherited) @classmethod - def scale_solver(cls, - trigger, - moves, - target, - types=None, - max_translation_move=None, - max_rotation_move=None, - max_scale=2., - gamma=1., - tol=1e-2): + def scale_solver( + cls, + trigger, + moves, + target, + types=None, + max_translation_move=None, + max_rotation_move=None, + max_scale=2.0, + gamma=1.0, + tol=1e-2, + ): """Create a `MoveSize` tuner with a `hoomd.tune.ScaleSolver`. Args: @@ -293,20 +316,29 @@ def scale_solver(cls, than the default of 0.01 as acceptance rates can vary significantly at typical tuning rates. """ - solver = ScaleSolver(max_scale, gamma, 'negative', tol) - return cls(trigger, moves, target, solver, types, max_translation_move, - max_rotation_move) + solver = ScaleSolver(max_scale, gamma, "negative", tol) + return cls( + trigger, + moves, + target, + solver, + types, + max_translation_move, + max_rotation_move, + ) @classmethod - def secant_solver(cls, - trigger, - moves, - target, - types=None, - max_translation_move=None, - max_rotation_move=None, - gamma=0.8, - tol=1e-2): + def secant_solver( + cls, + trigger, + moves, + target, + types=None, + max_translation_move=None, + max_rotation_move=None, + gamma=0.8, + tol=1e-2, + ): """Create a `MoveSize` tuner with a `hoomd.tune.SecantSolver`. This solver can be faster than `hoomd.tune.ScaleSolver`, but depending @@ -346,5 +378,12 @@ def secant_solver(cls, frequently. 
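A short usage sketch of the classmethod reformatted here (illustrative, not a hunk); it assumes `sim` is an attached HPMC simulation, and the move names follow the `"a"`/`"d"` convention shown above.

    import hoomd

    move_size_tuner = hoomd.hpmc.tune.MoveSize.secant_solver(
        trigger=hoomd.trigger.Periodic(100),
        moves=["a", "d"],
        target=0.2,
    )
    sim.operations.tuners.append(move_size_tuner)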
""" solver = SecantSolver(gamma, tol) - return cls(trigger, moves, target, solver, types, max_translation_move, - max_rotation_move) + return cls( + trigger, + moves, + target, + solver, + types, + max_translation_move, + max_rotation_move, + ) diff --git a/hoomd/hpmc/update.py b/hoomd/hpmc/update.py index d712f53000..de46d59d58 100644 --- a/hoomd/hpmc/update.py +++ b/hoomd/hpmc/update.py @@ -285,8 +285,8 @@ def __init__(self, trigger, P): _default_dict = dict(weight=0.0, delta=0.0) param_dict = ParameterDict( volume={ - "mode": hoomd.data.typeconverter.OnlyFrom(['standard', 'ln']), - **_default_dict + "mode": hoomd.data.typeconverter.OnlyFrom(["standard", "ln"]), + **_default_dict, }, aspect=_default_dict, length=dict(weight=0.0, delta=(0.0,) * 3), @@ -309,9 +309,12 @@ def _attach_hook(self): if not integrator._attached: raise RuntimeError("Integrator is not attached yet.") - self._cpp_obj = _hpmc.UpdaterBoxMC(self._simulation.state._cpp_sys_def, - self.trigger, integrator._cpp_obj, - self.P) + self._cpp_obj = _hpmc.UpdaterBoxMC( + self._simulation.state._cpp_sys_def, + self.trigger, + integrator._cpp_obj, + self.P, + ) @property def counter(self): @@ -430,12 +433,14 @@ class MuVT(Updater): __doc__ = __doc__.replace("{inherited}", Updater._doc_inherited) - def __init__(self, - transfer_types, - ngibbs=1, - max_volume_rescale=0.1, - volume_move_probability=0.5, - trigger=1): + def __init__( + self, + transfer_types, + ngibbs=1, + max_volume_rescale=0.1, + volume_move_probability=0.5, + trigger=1, + ): super().__init__(trigger) self.ngibbs = int(ngibbs) @@ -448,11 +453,12 @@ def __init__(self, self._param_dict.update(param_dict) typeparam_fugacity = TypeParameter( - 'fugacity', - type_kind='particle_types', - param_dict=TypeParameterDict(hoomd.variant.Variant, - len_keys=1, - _defaults=hoomd.variant.Constant(0.0))) + "fugacity", + type_kind="particle_types", + param_dict=TypeParameterDict( + hoomd.variant.Variant, len_keys=1, _defaults=hoomd.variant.Constant(0.0) + ), + ) self._append_typeparam(typeparam_fugacity) def _attach_hook(self): @@ -464,10 +470,14 @@ def _attach_hook(self): cpp_cls_name += integrator.__class__.__name__ cpp_cls = getattr(_hpmc, cpp_cls_name) - self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, - self.trigger, integrator._cpp_obj, self.ngibbs) + self._cpp_obj = cpp_cls( + self._simulation.state._cpp_sys_def, + self.trigger, + integrator._cpp_obj, + self.ngibbs, + ) - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def insert_moves(self): """tuple[int, int]: Count of the accepted and rejected particle \ insertion moves. @@ -477,7 +487,7 @@ def insert_moves(self): counter = self._cpp_obj.getCounters(1) return counter.insert - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def remove_moves(self): """tuple[int, int]: Count of the accepted and rejected particle \ removal moves. @@ -487,7 +497,7 @@ def remove_moves(self): counter = self._cpp_obj.getCounters(1) return counter.remove - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def exchange_moves(self): """tuple[int, int]: Count of the accepted and rejected particle \ exchange moves. 
@@ -497,7 +507,7 @@ def exchange_moves(self): counter = self._cpp_obj.getCounters(1) return counter.exchange - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def volume_moves(self): """tuple[int, int]: Count of the accepted and rejected particle volume \ moves. @@ -507,7 +517,7 @@ def volume_moves(self): counter = self._cpp_obj.getCounters(1) return counter.volume - @log(category='object') + @log(category="object") def N(self): # noqa: N802 - allow N as a function name """dict: Map of number of particles per type. @@ -551,15 +561,25 @@ class Shape(Updater): Example:: mc = hoomd.hpmc.integrate.ConvexPolyhedron() - mc.shape["A"] = dict(vertices=numpy.asarray([(1, 1, 1), (-1, -1, 1), - (1, -1, -1), (-1, 1, -1)])) - vertex_move = hoomd.hpmc.shape_move.Vertex(stepsize={'A': 0.01}, - param_ratio=0.2, - volume=1.0) - updater = hoomd.hpmc.update.Shape(shape_move=vertex_move, - trigger=hoomd.trigger.Periodic(1), - type_select=1, - nsweeps=1) + mc.shape["A"] = dict( + vertices=numpy.asarray( + [ + (1, 1, 1), + (-1, -1, 1), + (1, -1, -1), + (-1, 1, -1), + ] + ) + ) + vertex_move = hoomd.hpmc.shape_move.Vertex( + stepsize={"A": 0.01}, param_ratio=0.2, volume=1.0 + ) + updater = hoomd.hpmc.update.Shape( + shape_move=vertex_move, + trigger=hoomd.trigger.Periodic(1), + type_select=1, + nsweeps=1, + ) {inherited} @@ -584,17 +604,14 @@ class Shape(Updater): __doc__ = __doc__.replace("{inherited}", Updater._doc_inherited) - def __init__(self, - trigger, - shape_move, - pretend=False, - type_select=1, - nsweeps=1): + def __init__(self, trigger, shape_move, pretend=False, type_select=1, nsweeps=1): super().__init__(trigger) - param_dict = ParameterDict(shape_move=hoomd.hpmc.shape_move.ShapeMove, - pretend=bool(pretend), - type_select=int(type_select), - nsweeps=int(nsweeps)) + param_dict = ParameterDict( + shape_move=hoomd.hpmc.shape_move.ShapeMove, + pretend=bool(pretend), + type_select=int(type_select), + nsweeps=int(nsweeps), + ) param_dict["shape_move"] = shape_move self._param_dict.update(param_dict) @@ -632,19 +649,22 @@ def _attach_hook(self): # check for supported shapes is done in the shape move classes integrator_name = integrator.__class__.__name__ - updater_cls = getattr(_hpmc, 'UpdaterShape' + integrator_name) + updater_cls = getattr(_hpmc, "UpdaterShape" + integrator_name) self.shape_move._attach(self._simulation) - self._cpp_obj = updater_cls(self._simulation.state._cpp_sys_def, - self.trigger, integrator._cpp_obj, - self.shape_move._cpp_obj) + self._cpp_obj = updater_cls( + self._simulation.state._cpp_sys_def, + self.trigger, + integrator._cpp_obj, + self.shape_move._cpp_obj, + ) - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def shape_moves(self): """tuple[int, int]: Count of the accepted and rejected shape moves.""" return self._cpp_obj.getShapeMovesCount() - @log(category='scalar', requires_run=True) + @log(category="scalar", requires_run=True) def particle_volumes(self): """list[float]: Volume of a single particle for each type.""" return self._cpp_obj.particle_volumes @@ -698,19 +718,18 @@ class GCA(Updater): pivot_move_probability (float): Set the probability for attempting a pivot move. 
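For reference, a minimal sketch constructing the GCA cluster updater with the keyword arguments listed above (illustrative, not part of the changeset); it assumes an existing HPMC simulation `sim`.

    import hoomd

    gca = hoomd.hpmc.update.GCA(
        trigger=hoomd.trigger.Periodic(1),
        pivot_move_probability=0.2,
        flip_probability=0.5,
    )
    sim.operations.updaters.append(gca)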
""" + _remove_for_pickling = (*Updater._remove_for_pickling, "_cpp_cell") - _skip_for_equality = Updater._skip_for_equality | {'_cpp_cell'} + _skip_for_equality = Updater._skip_for_equality | {"_cpp_cell"} __doc__ = __doc__.replace("{inherited}", Updater._doc_inherited) - def __init__(self, - pivot_move_probability=0.5, - flip_probability=0.5, - trigger=1): + def __init__(self, pivot_move_probability=0.5, flip_probability=0.5, trigger=1): super().__init__(trigger) param_dict = ParameterDict( pivot_move_probability=float(pivot_move_probability), - flip_probability=float(flip_probability)) + flip_probability=float(flip_probability), + ) self._param_dict.update(param_dict) self.instance = 0 @@ -725,8 +744,10 @@ def _attach_hook(self): cpp_cls_name = "UpdaterGCA" cpp_cls_name += integrator.__class__.__name__ cpp_cls = getattr(_hpmc, cpp_cls_name) - use_gpu = (isinstance(self._simulation.device, hoomd.device.GPU) - and (cpp_cls_name + 'GPU') in _hpmc.__dict__) + use_gpu = ( + isinstance(self._simulation.device, hoomd.device.GPU) + and (cpp_cls_name + "GPU") in _hpmc.__dict__ + ) if use_gpu: cpp_cls_name += "GPU" cpp_cls = getattr(_hpmc, cpp_cls_name) @@ -737,12 +758,16 @@ def _attach_hook(self): if use_gpu: sys_def = self._simulation.state._cpp_sys_def self._cpp_cell = _hoomd.CellListGPU(sys_def) - self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, - self.trigger, integrator._cpp_obj, - self._cpp_cell) + self._cpp_obj = cpp_cls( + self._simulation.state._cpp_sys_def, + self.trigger, + integrator._cpp_obj, + self._cpp_cell, + ) else: - self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, - self.trigger, integrator._cpp_obj) + self._cpp_obj = cpp_cls( + self._simulation.state._cpp_sys_def, self.trigger, integrator._cpp_obj + ) @log(requires_run=True) def avg_cluster_size(self): @@ -936,25 +961,29 @@ class QuickCompress(Updater): __doc__ = __doc__.replace("{inherited}", Updater._doc_inherited) - def __init__(self, - trigger, - target_box, - max_overlaps_per_particle=0.25, - min_scale=0.99, - allow_unsafe_resize=False): + def __init__( + self, + trigger, + target_box, + max_overlaps_per_particle=0.25, + min_scale=0.99, + allow_unsafe_resize=False, + ): super().__init__(trigger) - param_dict = ParameterDict(max_overlaps_per_particle=float, - min_scale=float, - target_box=hoomd.variant.box.BoxVariant, - instance=int, - allow_unsafe_resize=bool) + param_dict = ParameterDict( + max_overlaps_per_particle=float, + min_scale=float, + target_box=hoomd.variant.box.BoxVariant, + instance=int, + allow_unsafe_resize=bool, + ) if isinstance(target_box, hoomd.Box): target_box = hoomd.variant.box.Constant(target_box) - param_dict['max_overlaps_per_particle'] = max_overlaps_per_particle - param_dict['min_scale'] = min_scale - param_dict['target_box'] = target_box - param_dict['allow_unsafe_resize'] = allow_unsafe_resize + param_dict["max_overlaps_per_particle"] = max_overlaps_per_particle + param_dict["min_scale"] = min_scale + param_dict["target_box"] = target_box + param_dict["allow_unsafe_resize"] = allow_unsafe_resize self._param_dict.update(param_dict) @@ -971,9 +1000,13 @@ def _attach_hook(self): raise RuntimeError("Integrator is not attached yet.") self._cpp_obj = _hpmc.UpdaterQuickCompress( - self._simulation.state._cpp_sys_def, self.trigger, - integrator._cpp_obj, self.max_overlaps_per_particle, self.min_scale, - self.target_box) + self._simulation.state._cpp_sys_def, + self.trigger, + integrator._cpp_obj, + self.max_overlaps_per_particle, + self.min_scale, + self.target_box, + ) 
@property def complete(self): @@ -985,9 +1018,9 @@ def complete(self): __all__ = [ - 'GCA', - 'BoxMC', - 'MuVT', - 'QuickCompress', - 'Shape', + "GCA", + "BoxMC", + "MuVT", + "QuickCompress", + "Shape", ] diff --git a/hoomd/logging.py b/hoomd/logging.py index 8cd038b604..345aed5ef1 100644 --- a/hoomd/logging.py +++ b/hoomd/logging.py @@ -65,6 +65,7 @@ class LoggerCategories(Flag): NONE: Represents no category. """ + NONE = 0 scalar = auto() sequence = auto() @@ -92,8 +93,7 @@ def any(cls, categories=None): `LoggerCategories`: the `LoggerCategories` object that represents any of the given categories. """ - categories = cls.__members__.values( - ) if categories is None else categories + categories = cls.__members__.values() if categories is None else categories return reduce(cls._combine_flags, categories, LoggerCategories.NONE) @@ -124,8 +124,7 @@ def _get_string_list(cls, category): def _loggables(self): """dict[str, str]: Name, category mapping of loggable quantities.""" return { - name: quantity.category.name - for name, quantity in self._export_dict.items() + name: quantity.category.name for name, quantity in self._export_dict.items() } @@ -158,11 +157,13 @@ class _NamespaceFilter: pattern means that the inner module is imported into its parent. """ - def __init__(self, - remove_names=None, - base_names=None, - non_native_remove=None, - skip_duplicates=True): + def __init__( + self, + remove_names=None, + base_names=None, + non_native_remove=None, + skip_duplicates=True, + ): self.remove_names = set() if remove_names is None else remove_names if non_native_remove is None: self.non_native_remove = set() @@ -228,15 +229,16 @@ class _LoggerQuantity: namespace_filter = _NamespaceFilter( # Names that are imported directly into the hoomd namespace - remove_names={"hoomd", 'simulation', 'state', 'operations', 'snapshot'}, + remove_names={"hoomd", "simulation", "state", "operations", "snapshot"}, # Names that have their submodules' classes directly imported into them # (e.g. `hoomd.update.box_resize.BoxResize` gets used as # `hoomd.update.BoxResize`) - base_names={'update', 'tune', 'write'}, + base_names={"update", "tune", "write"}, non_native_remove={"__main__"}, - skip_duplicates=True) + skip_duplicates=True, + ) - def __init__(self, name, cls, category='scalar', default=True): + def __init__(self, name, cls, category="scalar", default=True): self.name = name self.update_cls(cls) if isinstance(category, str): @@ -244,8 +246,10 @@ def __init__(self, name, cls, category='scalar', default=True): elif isinstance(category, LoggerCategories): self.category = category else: - raise ValueError("Flag must be a string convertible into " - "LoggerCategories or a LoggerCategories object.") + raise ValueError( + "Flag must be a string convertible into " + "LoggerCategories or a LoggerCategories object." + ) self.default = bool(default) def yield_names(self, user_name=None): @@ -264,7 +268,7 @@ def yield_names(self, user_name=None): namespace = self.namespace[:-1] + (user_name,) yield (*namespace, self.name) for i in count(start=1, step=1): - yield namespace[:-1] + (namespace[-1] + '_' + str(i), self.name) + yield namespace[:-1] + (namespace[-1] + "_" + str(i), self.name) def update_cls(self, cls): """Allow updating the class/namespace of the object. 
@@ -288,7 +292,7 @@ def __eq__(self, other): @classmethod def _generate_namespace(cls, loggable_cls): """Generate the namespace of a class given its module hierarchy.""" - ns = tuple(loggable_cls.__module__.split('.')) + ns = tuple(loggable_cls.__module__.split(".")) cls_name = loggable_cls.__name__ # Only filter namespaces of objects in the hoomd package return (*tuple(cls.namespace_filter(ns, ns[0] == "hoomd")), cls_name) @@ -296,6 +300,7 @@ def _generate_namespace(cls, loggable_cls): class Loggable(type): """Loggable quantity.""" + _meta_export_dict = dict() def __init__(cls, name, bases, dct): @@ -321,7 +326,8 @@ def __init__(cls, name, bases, dct): # overwriting the property every time, but lose the ability to error on # improper class definitions. if log_dict == {} and not any( - issubclass(type(c), Loggable) for c in cls.__mro__[1:]): + issubclass(type(c), Loggable) for c in cls.__mro__[1:] + ): Loggable._add_property_for_displaying_loggables(cls) # grab the current class's loggable quantities @@ -331,9 +337,11 @@ def __init__(cls, name, bases, dct): @staticmethod def _add_property_for_displaying_loggables(new_cls): - if hasattr(new_cls, 'loggables'): - raise ValueError("classes of type Loggable cannot implement a " - "loggables method, property, or attribute.") + if hasattr(new_cls, "loggables"): + raise ValueError( + "classes of type Loggable cannot implement a " + "loggables method, property, or attribute." + ) else: new_cls.loggables = property(_loggables) @@ -351,10 +359,12 @@ def _get_inherited_loggables(cls, new_cls): # new_cls has a metaclass (or type) which is a subclass of Loggable # or one of its subclasses. if issubclass(type(base_cls), Loggable): - inherited_loggables.update({ - name: copy.deepcopy(quantity).update_cls(new_cls) - for name, quantity in base_cls._export_dict.items() - }) + inherited_loggables.update( + { + name: copy.deepcopy(quantity).update_cls(new_cls) + for name, quantity in base_cls._export_dict.items() + } + ) return inherited_loggables @classmethod @@ -362,11 +372,12 @@ def _get_current_cls_loggables(cls, new_cls): """Gets the current class's new loggables (not inherited).""" current_loggables = {} for name, entry in cls._meta_export_dict.items(): - current_loggables[name] = _LoggerQuantity(name, new_cls, - entry.category, - entry.default) - cls._add_loggable_docstring_info(new_cls, name, entry.category, - entry.default) + current_loggables[name] = _LoggerQuantity( + name, new_cls, entry.category, entry.default + ) + cls._add_loggable_docstring_info( + new_cls, name, entry.category, entry.default + ) return current_loggables @classmethod @@ -377,31 +388,28 @@ def _add_loggable_docstring_info(cls, new_cls, attr, category, default): # the rendering of invalid docs since we need a non-empty docstring. 
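A small sketch of the `loggables` property that this metaclass attaches (illustrative, not part of the changeset); the constructed pair force and the printed mapping are assumptions for demonstration.

    import hoomd

    lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(buffer=0.4))
    # Maps loggable names to category names, e.g. {"energy": "scalar", ...}
    print(lj.loggables)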
if __doc__ == "": return - str_msg = '\n\n{}(`Loggable `: ' + str_msg = "\n\n{}(`Loggable `: " str_msg += f'category="{str(category)[17:]}"' if default: - str_msg += ')' + str_msg += ")" else: - str_msg += ', default=False)' + str_msg += ", default=False)" if doc is None: - getattr(new_cls, attr).__doc__ = str_msg.format('') + getattr(new_cls, attr).__doc__ = str_msg.format("") else: indent = 0 - lines = doc.split('\n') + lines = doc.split("\n") if len(lines) >= 3: cnt = 2 - while lines[cnt] == '': + while lines[cnt] == "": cnt += 1 indent = len(lines[cnt]) - len(lines[cnt].lstrip()) - getattr(new_cls, attr).__doc__ += str_msg.format(' ' * indent) + getattr(new_cls, attr).__doc__ += str_msg.format(" " * indent) -def log(func=None, - *, - is_property=True, - category='scalar', - default=True, - requires_run=False): +def log( + func=None, *, is_property=True, category="scalar", default=True, requires_run=False +): """Creates loggable quantities for classes of type Loggable. Use :py:func:`log` with `hoomd.custom.Action` to expose loggable quantities from a @@ -450,14 +458,13 @@ def loggable(self): def helper(func): name = func.__name__ if name in Loggable._meta_export_dict: - raise KeyError( - "Multiple loggable quantities named {}.".format(name)) + raise KeyError("Multiple loggable quantities named {}.".format(name)) Loggable._meta_export_dict[name] = _LoggableEntry( - LoggerCategories[category], default) + LoggerCategories[category], default + ) if requires_run: def wrap_with_exception(func): - @wraps(func) def wrapped_func(self, *args, **kwargs): if not self._attached: @@ -478,7 +485,7 @@ def wrapped_func(self, *args, **kwargs): return helper(func) -class _InvalidLogEntryType(): +class _InvalidLogEntryType: pass @@ -512,8 +519,7 @@ def from_tuple(cls, entry): err_msg = "Expected either (callable, category) or \ (obj, method/property, category)." - if (not isinstance(entry, Sequence) or len(entry) <= 1 - or len(entry) > 3): + if not isinstance(entry, Sequence) or len(entry) <= 1 or len(entry) > 3: raise ValueError(err_msg) # Get the method and category from the passed entry. Also perform some @@ -522,14 +528,13 @@ def from_tuple(cls, entry): if not callable(entry[0]): raise ValueError(err_msg) category = entry[1] - method = '__call__' + method = "__call__" elif len(entry) == 3: if not isinstance(entry[1], str): raise ValueError(err_msg) method = entry[1] if not hasattr(entry[0], method): - raise ValueError( - "Provided method/property must exist in given object.") + raise ValueError("Provided method/property must exist in given object.") category = entry[2] # Ensure category is valid and converted to LoggerCategories enum. @@ -537,8 +542,8 @@ def from_tuple(cls, entry): category = LoggerCategories[category] elif not isinstance(category, LoggerCategories): raise ValueError( - "category must be a string or hoomd.logging.LoggerCategories " - "object.") + "category must be a string or hoomd.logging.LoggerCategories " "object." 
+ ) return cls(entry[0], method, category) @property @@ -552,8 +557,7 @@ def obj(self): @obj.setter def obj(self, new_obj): - if not isinstance(new_obj, - (hoomd.operation.Operation, hoomd.Simulation)): + if not isinstance(new_obj, (hoomd.operation.Operation, hoomd.Simulation)): self._obj = new_obj return try: @@ -576,11 +580,15 @@ def __call__(self): return (attr, self.category.name) def __eq__(self, other): - return (self.obj == other.obj and self.attr == other.attr - and self.category == other.category) + return ( + self.obj == other.obj + and self.attr == other.attr + and self.category == other.category + ) return all( getattr(self, attr) == getattr(other, attr) - for attr in ['obj', 'attr', 'category']) + for attr in ["obj", "attr", "category"] + ) def __getstate__(self): state = copy.copy(self.__dict__) @@ -701,10 +709,11 @@ def _get_loggables_by_name(self, obj, quantities): # ensure all keys are valid if bad_keys != []: raise ValueError( - "object {} has not loggable quantities {}.".format( - obj, bad_keys)) + "object {} has not loggable quantities {}.".format(obj, bad_keys) + ) yield from self._filter_quantities( - map(lambda q: obj._export_dict[q], quantities), True) + map(lambda q: obj._export_dict[q], quantities), True + ) def add(self, obj, quantities=None, user_name=None): """Add loggables. @@ -727,7 +736,7 @@ def add(self, obj, quantities=None, user_name=None): .. code-block:: python - logger.add(obj=lj, quantities=['energy']) + logger.add(obj=lj, quantities=["energy"]) """ for quantity in self._get_loggables_by_name(obj, quantities): self._add_single_quantity(obj, quantity, user_name) @@ -756,11 +765,10 @@ def remove(self, obj=None, quantities=None, user_name=None): .. code-block:: python - logger.remove(obj=lj, quantities=['energy']) + logger.remove(obj=lj, quantities=["energy"]) """ if obj is None and quantities is None: - raise ValueError( - "Either obj, quantities, or both must be specified.") + raise ValueError("Either obj, quantities, or both must be specified.") if obj is None: for quantity in self._wrap_quantity(quantities): @@ -787,8 +795,7 @@ def _add_single_quantity(self, obj, quantity, user_name): if self._contains_obj(namespace, obj): return None else: - self[namespace] = _LoggerEntry.from_logger_quantity( - obj, quantity) + self[namespace] = _LoggerEntry.from_logger_quantity(obj, quantity) return None def __setitem__(self, namespace, value): @@ -812,13 +819,12 @@ def __setitem__(self, namespace, value): .. 
code-block:: python - logger[('custom', 'name')] = (lambda: 42, 'scalar') + logger[("custom", "name")] = (lambda: 42, "scalar") """ if not isinstance(value, _LoggerEntry): value = _LoggerEntry.from_tuple(value) if value.category not in self.categories: - raise ValueError( - "User specified loggable is not of an accepted category.") + raise ValueError("User specified loggable is not of an accepted category.") super().__setitem__(namespace, value) def __iadd__(self, obj): @@ -834,7 +840,7 @@ def __iadd__(self, obj): logger += lj """ - if hasattr(obj, '__iter__'): + if hasattr(obj, "__iter__"): for o in obj: self.add(o) else: @@ -854,7 +860,7 @@ def __isub__(self, value): """ if isinstance(value, str) or isinstance(value, tuple): self.remove(quantities=value) - elif hasattr(value, '__iter__'): + elif hasattr(value, "__iter__"): for v in value: self.__isub__(v) else: @@ -914,9 +920,11 @@ def __eq__(self, other): """Check for equality.""" if not isinstance(other, type(self)): return NotImplemented - return (self.categories == other.categories - and self.only_default == other.only_default - and self._dict == other._dict) + return ( + self.categories == other.categories + and self.only_default == other.only_default + and self._dict == other._dict + ) def modify_namespace(cls, namespace=None): @@ -949,8 +957,8 @@ def modify(cls): __all__ = [ - 'Logger', - 'LoggerCategories', - 'log', - 'modify_namespace', + "Logger", + "LoggerCategories", + "log", + "modify_namespace", ] diff --git a/hoomd/md/__init__.py b/hoomd/md/__init__.py index f5bdcb9c58..c9f318b4b4 100644 --- a/hoomd/md/__init__.py +++ b/hoomd/md/__init__.py @@ -42,27 +42,27 @@ from hoomd.md.half_step_hook import HalfStepHook __all__ = [ - 'HalfStepHook', - 'Integrator', - 'alchemy', - 'angle', - 'bond', - 'compute', - 'constrain', - 'data', - 'dihedral', - 'external', - 'force', - 'improper', - 'long_range', - 'manifold', - 'many_body', - 'mesh', - 'methods', - 'minimize', - 'nlist', - 'pair', - 'special_pair', - 'tune', - 'update', + "HalfStepHook", + "Integrator", + "alchemy", + "angle", + "bond", + "compute", + "constrain", + "data", + "dihedral", + "external", + "force", + "improper", + "long_range", + "manifold", + "many_body", + "mesh", + "methods", + "minimize", + "nlist", + "pair", + "special_pair", + "tune", + "update", ] diff --git a/hoomd/md/alchemy/__init__.py b/hoomd/md/alchemy/__init__.py index 5164b06ab6..054776cab9 100644 --- a/hoomd/md/alchemy/__init__.py +++ b/hoomd/md/alchemy/__init__.py @@ -11,7 +11,7 @@ integrator.methods.append(nvt) ljg = hoomd.md.alchemy.pair.LJGauss(...) integrator.forces.append(ljg) - r0_alchemical_dof = ljg.r0[('A', 'A')] + r0_alchemical_dof = ljg.r0[("A", "A")] alchemostat = hoomd.md.alchemy.methods.NVT( period=period, alchemical_dof=[r0_alchemical_dof], @@ -25,6 +25,6 @@ from . import pair __all__ = [ - 'methods', - 'pair', + "methods", + "pair", ] diff --git a/hoomd/md/alchemy/methods.py b/hoomd/md/alchemy/methods.py index 63e3df4ff7..519af4cd7b 100644 --- a/hoomd/md/alchemy/methods.py +++ b/hoomd/md/alchemy/methods.py @@ -28,7 +28,9 @@ class Alchemostat(Method): __doc__ = __doc__.replace("{inherited}", Method._doc_inherited) - _doc_inherited = Method._doc_inherited + """ + _doc_inherited = ( + Method._doc_inherited + + """ ---------- **Members inherited from** @@ -39,16 +41,17 @@ class Alchemostat(Method): List of alchemical degrees of freedom. `Read more... 
` """ + ) def __init__(self, alchemical_dof): self._alchemical_dof = syncedlist.SyncedList( - AlchemicalDOF, syncedlist._PartialGetAttr("_cpp_obj")) + AlchemicalDOF, syncedlist._PartialGetAttr("_cpp_obj") + ) if alchemical_dof is not None: self._alchemical_dof.extend(alchemical_dof) def _attach_hook(self): - self._alchemical_dof._sync(self._simulation, - self._cpp_obj.alchemical_dof) + self._alchemical_dof._sync(self._simulation, self._cpp_obj.alchemical_dof) def _detach_hook(self): self._alchemical_dof._unsync() @@ -114,7 +117,6 @@ class NVT(Alchemostat): __doc__ = __doc__.replace("{inherited}", Alchemostat._doc_inherited) def __init__(self, alchemical_kT, alchemical_dof, period=1): - # store metadata param_dict = ParameterDict(alchemical_kT=Variant, period=int) param_dict.update(dict(alchemical_kT=alchemical_kT, period=period)) @@ -131,6 +133,6 @@ def _attach_hook(self): __all__ = [ - 'NVT', - 'Alchemostat', + "NVT", + "Alchemostat", ] diff --git a/hoomd/md/alchemy/pair.py b/hoomd/md/alchemy/pair.py index bac65bfc7f..7fb0fc2b7e 100644 --- a/hoomd/md/alchemy/pair.py +++ b/hoomd/md/alchemy/pair.py @@ -2,6 +2,7 @@ # Part of HOOMD-blue, released under the BSD 3-Clause License. """Alchemical pair forces.""" + from collections.abc import Mapping import hoomd.data @@ -19,17 +20,15 @@ def _modify_pair_cls_to_alchemical(cls): ``_accepted_modes``, and ``_reserved_default_attrs``, and sets ``normalize = False`` if not set. """ - new_cpp_name = [ - 'PotentialPair', 'Alchemical', cls.__mro__[0]._cpp_class_name[13:] - ] - if getattr(cls, 'normalized', False): - new_cpp_name.insert(2, 'Normalized') + new_cpp_name = ["PotentialPair", "Alchemical", cls.__mro__[0]._cpp_class_name[13:]] + if getattr(cls, "normalized", False): + new_cpp_name.insert(2, "Normalized") cls._dof_cls = _AlchemicalNormalizedDOF else: cls.normalized = False cls._dof_cls = AlchemicalDOF - cls._cpp_class_name = ''.join(new_cpp_name) - cls._accepted_modes = ('none', 'shift') + cls._cpp_class_name = "".join(new_cpp_name) + cls._accepted_modes = ("none", "shift") return cls @@ -57,8 +56,7 @@ def __getitem__(self, key): items = {} for k in self._indexer(key): if k not in self._data: - self._data[k] = self._dof_cls(self._pair_instance, self._name, - k) + self._data[k] = self._dof_cls(self._pair_instance, self._name, k) items[k] = self._data[k] if len(items) == 1: return items.popitem()[1] @@ -85,7 +83,8 @@ def _attach(self, types): if not self._indexer.are_valid_types(key): raise RuntimeError( f"Alchemical DOF ({self._name}) for non-existent type pair " - f"{key} was accessed.") + f"{key} was accessed." + ) def _detach(self): self._indexer.valid_types = None @@ -104,6 +103,7 @@ class _AlchemicalPairForce(_HOOMDBaseObject): _dof_cls (AlchemicalDOF): The correct DOF class. Automatically set via `_modify_pair_cls_to_alchemical`. """ + _alchemical_dofs = [] _dof_cls = None @@ -114,7 +114,8 @@ def _set_alchemical_parameters(self): self._alchemical_params = {} for dof in self._alchemical_dofs: self._alchemical_params[dof] = AlchemicalDOFStore( - name=dof, pair_instance=self, dof_cls=self._dof_cls) + name=dof, pair_instance=self, dof_cls=self._dof_cls + ) def _setattr_hook(self, attr, value): if attr in self._alchemical_dofs: @@ -148,13 +149,15 @@ class AlchemicalDOF(_HOOMDBaseObject): alchemical_momentum (float): The momentum of the alchemical parameter. 
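A hedged end-to-end sketch tying the alchemostat to an alchemical degree of freedom, following the module docstring above (illustrative, not a hunk); `integrator` is assumed to be an existing `hoomd.md.Integrator`, and the parameter values are arbitrary.

    import hoomd

    cell = hoomd.md.nlist.Cell(buffer=0.4)
    ljg = hoomd.md.alchemy.pair.LJGauss(nlist=cell, default_r_cut=3.0)
    ljg.params[("A", "A")] = dict(epsilon=1.0, sigma=0.02, r0=1.8)
    r0_dof = ljg.r0[("A", "A")]
    alchemostat = hoomd.md.alchemy.methods.NVT(
        alchemical_kT=hoomd.variant.Constant(0.1),
        alchemical_dof=[r0_dof],
        period=100,
    )
    integrator.forces.append(ljg)
    integrator.methods.insert(0, alchemostat)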
""" - def __init__(self, - force, - name: str = '', - typepair: Optional[tuple] = None, - alpha: float = 1.0, - mass: float = 1.0, - mu: float = 0.0): + def __init__( + self, + force, + name: str = "", + typepair: Optional[tuple] = None, + alpha: float = 1.0, + mass: float = 1.0, + mu: float = 0.0, + ): """Cache existing instances of AlchemicalDOF. Args: @@ -174,24 +177,26 @@ def __init__(self, if self._force._attached: self._attach(force._simulation) # store metadata - param_dict = ParameterDict(mass=float, - mu=float, - alpha=float, - alchemical_momentum=float) - param_dict['mass'] = mass - param_dict['mu'] = mu - param_dict['alpha'] = alpha - param_dict['alchemical_momentum'] = 0.0 + param_dict = ParameterDict( + mass=float, mu=float, alpha=float, alchemical_momentum=float + ) + param_dict["mass"] = mass + param_dict["mu"] = mu + param_dict["alpha"] = alpha + param_dict["alchemical_momentum"] = 0.0 # set defaults self._param_dict.update(param_dict) def _attach_hook(self): if not self._force._attached: - raise RuntimeError("Call Simulation.run(0) before attaching " - "alchemical degrees of freedom.") + raise RuntimeError( + "Call Simulation.run(0) before attaching " + "alchemical degrees of freedom." + ) self._cpp_obj = self._force._cpp_obj.getAlchemicalPairParticle( - self.typepair, self.name) + self.typepair, self.name + ) self._force._cpp_obj.enableAlchemicalPairParticle(self._cpp_obj) def _detach_hook(self): @@ -202,10 +207,9 @@ def _detach_hook(self): @log(requires_run=True) def value(self): """Current value of alpha multiplied by its corresponding parameter.""" - return self._force.params[self.typepair][self.name] * ( - self._cpp_obj.alpha) + return self._force.params[self.typepair][self.name] * (self._cpp_obj.alpha) - @log(default=False, requires_run=True, category='particle') + @log(default=False, requires_run=True, category="particle") def alchemical_forces(self): r"""Per particle forces in alchemical alpha space. 
@@ -232,18 +236,20 @@ def net_alchemical_force(self): class _AlchemicalNormalizedDOF(AlchemicalDOF): """Alchemical normalized degree of freedom.""" - def __init__(self, - force: _AlchemicalPairForce, - name: str = '', - typepair: Optional[tuple] = None, - alpha: float = 1.0, - norm_value: float = 0.0, - mass: float = 1.0, - mu: float = 0.0): + def __init__( + self, + force: _AlchemicalPairForce, + name: str = "", + typepair: Optional[tuple] = None, + alpha: float = 1.0, + norm_value: float = 0.0, + mass: float = 1.0, + mu: float = 0.0, + ): super().__init__(force, name, typepair, alpha, mass, mu) self._param_dict.update(dict(norm_value=norm_value)) - @log(default=False, requires_run=True, category='particle') + @log(default=False, requires_run=True, category="particle") def alchemical_forces(self): """Per particle forces in alchemical alpha space.""" return self._cpp_obj._forces * self._cpp_obj.norm_value @@ -327,14 +333,11 @@ class LJGauss(BaseLJGauss, _AlchemicalPairForce): Type: `AlchemicalDOFStore` [`tuple` [``particle_type``, ``particle_type``], `AlchemicalDOF`]) """ - _alchemical_dofs = ['epsilon', 'sigma', 'r0'] + + _alchemical_dofs = ["epsilon", "sigma", "r0"] __doc__ = __doc__.replace("{inherited}", BaseLJGauss._doc_inherited) - def __init__(self, - nlist, - default_r_cut=None, - default_r_on=0.0, - mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): _AlchemicalPairForce.__init__(self) super().__init__(nlist, default_r_cut, default_r_on, mode) @@ -356,20 +359,17 @@ class _NLJGauss(BaseLJGauss, _AlchemicalPairForce): a single particle type with a single pair force. """ - _alchemical_dofs = ['epsilon', 'sigma', 'r0'] + + _alchemical_dofs = ["epsilon", "sigma", "r0"] normalized = True - def __init__(self, - nlist, - default_r_cut=None, - default_r_on=0.0, - mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): _AlchemicalPairForce.__init__(self) super().__init__(nlist, default_r_cut, default_r_on, mode) __all__ = [ - 'AlchemicalDOF', - 'AlchemicalDOFStore', - 'LJGauss', + "AlchemicalDOF", + "AlchemicalDOFStore", + "LJGauss", ] diff --git a/hoomd/md/angle.py b/hoomd/md/angle.py index 76a3a7efa3..d88f508eb3 100644 --- a/hoomd/md/angle.py +++ b/hoomd/md/angle.py @@ -85,8 +85,8 @@ class Harmonic(Angle): Examples:: harmonic = angle.Harmonic() - harmonic.params['A-A-A'] = dict(k=3.0, t0=0.7851) - harmonic.params['A-B-A'] = dict(k=100.0, t0=1.0) + harmonic.params["A-A-A"] = dict(k=3.0, t0=0.7851) + harmonic.params["A-B-A"] = dict(k=100.0, t0=1.0) {inherited} @@ -106,13 +106,14 @@ class Harmonic(Angle): :math:`[\mathrm{radians}]` """ - _cpp_class_name = 'HarmonicAngleForceCompute' + _cpp_class_name = "HarmonicAngleForceCompute" __doc__ = __doc__.replace("{inherited}", Angle._doc_inherited) def __init__(self): super().__init__() - params = TypeParameter('params', 'angle_types', - TypeParameterDict(t0=float, k=float, len_keys=1)) + params = TypeParameter( + "params", "angle_types", TypeParameterDict(t0=float, k=float, len_keys=1) + ) self._add_typeparam(params) @@ -131,8 +132,8 @@ class CosineSquared(Angle): Examples:: cosinesq = angle.CosineSquared() - cosinesq.params['A-A-A'] = dict(k=3.0, t0=0.7851) - cosinesq.params['A-B-A'] = dict(k=100.0, t0=1.0) + cosinesq.params["A-A-A"] = dict(k=3.0, t0=0.7851) + cosinesq.params["A-B-A"] = dict(k=100.0, t0=1.0) {inherited} @@ -152,13 +153,14 @@ class CosineSquared(Angle): :math:`[\mathrm{radians}]` """ - _cpp_class_name = 'CosineSqAngleForceCompute' + _cpp_class_name = 
"CosineSqAngleForceCompute" __doc__ = __doc__.replace("{inherited}", Angle._doc_inherited) def __init__(self): super().__init__() - params = TypeParameter('params', 'angle_types', - TypeParameterDict(t0=float, k=float, len_keys=1)) + params = TypeParameter( + "params", "angle_types", TypeParameterDict(t0=float, k=float, len_keys=1) + ) self._add_typeparam(params) @@ -214,15 +216,18 @@ class Table(Angle): def __init__(self, width): super().__init__() param_dict = hoomd.data.parameterdicts.ParameterDict(width=int) - param_dict['width'] = width + param_dict["width"] = width self._param_dict = param_dict params = TypeParameter( - "params", "angle_types", + "params", + "angle_types", TypeParameterDict( U=hoomd.data.typeconverter.NDArrayValidator(numpy.float64), tau=hoomd.data.typeconverter.NDArrayValidator(numpy.float64), - len_keys=1)) + len_keys=1, + ), + ) self._add_typeparam(params) def _attach_hook(self): @@ -236,8 +241,8 @@ def _attach_hook(self): __all__ = [ - 'Angle', - 'CosineSquared', - 'Harmonic', - 'Table', + "Angle", + "CosineSquared", + "Harmonic", + "Table", ] diff --git a/hoomd/md/bond.py b/hoomd/md/bond.py index e1a5d5079a..fccaf9c11e 100644 --- a/hoomd/md/bond.py +++ b/hoomd/md/bond.py @@ -82,8 +82,8 @@ class Harmonic(Bond): Examples:: harmonic = bond.Harmonic() - harmonic.params['A-A'] = dict(k=3.0, r0=2.38) - harmonic.params['A-B'] = dict(k=10.0, r0=1.0) + harmonic.params["A-A"] = dict(k=3.0, r0=2.38) + harmonic.params["A-B"] = dict(k=10.0, r0=1.0) {inherited} @@ -102,13 +102,15 @@ class Harmonic(Bond): * ``r0`` (`float`, **required**) - rest length :math:`[\mathrm{length}]` """ + _cpp_class_name = "PotentialBondHarmonic" __doc__ = __doc__.replace("{inherited}", Bond._doc_inherited) def __init__(self): super().__init__() - params = TypeParameter("params", "bond_types", - TypeParameterDict(k=float, r0=float, len_keys=1)) + params = TypeParameter( + "params", "bond_types", TypeParameterDict(k=float, r0=float, len_keys=1) + ) self._add_typeparam(params) @@ -172,19 +174,19 @@ class FENEWCA(Bond): * ``delta`` (`float`, **required**) - radial shift :math:`\Delta` :math:`[\mathrm{length}]`. 
""" + _cpp_class_name = "PotentialBondFENE" __doc__ = __doc__.replace("{inherited}", Bond._doc_inherited) def __init__(self): super().__init__() params = TypeParameter( - "params", "bond_types", - TypeParameterDict(k=float, - r0=float, - epsilon=float, - sigma=float, - delta=float, - len_keys=1)) + "params", + "bond_types", + TypeParameterDict( + k=float, r0=float, epsilon=float, sigma=float, delta=float, len_keys=1 + ), + ) self._add_typeparam(params) @@ -271,17 +273,20 @@ class Table(Bond): def __init__(self, width): super().__init__() param_dict = hoomd.data.parameterdicts.ParameterDict(width=int) - param_dict['width'] = width + param_dict["width"] = width self._param_dict = param_dict params = TypeParameter( - "params", "bond_types", + "params", + "bond_types", TypeParameterDict( r_min=float, r_max=float, U=hoomd.data.typeconverter.NDArrayValidator(numpy.float64), F=hoomd.data.typeconverter.NDArrayValidator(numpy.float64), - len_keys=1)) + len_keys=1, + ), + ) self._add_typeparam(params) def _attach_hook(self): @@ -362,26 +367,26 @@ class Tether(Bond): * ``l_max`` (`float`, **required**) - maximum bond length :math:`[\mathrm{length}]` """ + _cpp_class_name = "PotentialBondTether" __doc__ = __doc__.replace("{inherited}", Bond._doc_inherited) def __init__(self): super().__init__() params = TypeParameter( - "params", "bond_types", - TypeParameterDict(k_b=float, - l_min=float, - l_c1=float, - l_c0=float, - l_max=float, - len_keys=1)) + "params", + "bond_types", + TypeParameterDict( + k_b=float, l_min=float, l_c1=float, l_c0=float, l_max=float, len_keys=1 + ), + ) self._add_typeparam(params) __all__ = [ - 'FENEWCA', - 'Bond', - 'Harmonic', - 'Table', - 'Tether', + "FENEWCA", + "Bond", + "Harmonic", + "Table", + "Tether", ] diff --git a/hoomd/md/compute.py b/hoomd/md/compute.py index 742339a920..24b2d36d61 100644 --- a/hoomd/md/compute.py +++ b/hoomd/md/compute.py @@ -34,7 +34,7 @@ class ThermodynamicQuantities(Compute): Examples:: - f = filter.Type('A') + f = filter.Type("A") compute.ThermodynamicQuantities(filter=f) {inherited} @@ -103,7 +103,7 @@ def pressure(self): self._cpp_obj.compute(self._simulation.timestep) return self._cpp_obj.pressure - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def pressure_tensor(self): """Instantaneous pressure tensor of the subset \ :math:`[\\mathrm{pressure}]`. 
@@ -342,7 +342,8 @@ class HarmonicAveragedThermodynamicQuantities(Compute): Examples:: hma = hoomd.compute.HarmonicAveragedThermodynamicQuantities( - filter=hoomd.filter.Type('A'), kT=1.0) + filter=hoomd.filter.Type("A"), kT=1.0 + ) {inherited} @@ -364,10 +365,10 @@ class HarmonicAveragedThermodynamicQuantities(Compute): __doc__ = __doc__.replace("{inherited}", Compute._doc_inherited) def __init__(self, filter, kT, harmonic_pressure=0): - # store metadata - param_dict = ParameterDict(kT=float(kT), - harmonic_pressure=float(harmonic_pressure)) + param_dict = ParameterDict( + kT=float(kT), harmonic_pressure=float(harmonic_pressure) + ) # set defaults self._param_dict.update(param_dict) @@ -381,8 +382,9 @@ def _attach_hook(self): else: thermoHMA_cls = _md.ComputeThermoHMAGPU group = self._simulation.state._get_group(self._filter) - self._cpp_obj = thermoHMA_cls(self._simulation.state._cpp_sys_def, - group, self.kT, self.harmonic_pressure) + self._cpp_obj = thermoHMA_cls( + self._simulation.state._cpp_sys_def, group, self.kT, self.harmonic_pressure + ) @log(requires_run=True) def potential_energy(self): @@ -398,6 +400,6 @@ def pressure(self): __all__ = [ - 'HarmonicAveragedThermodynamicQuantities', - 'ThermodynamicQuantities', + "HarmonicAveragedThermodynamicQuantities", + "ThermodynamicQuantities", ] diff --git a/hoomd/md/constrain.py b/hoomd/md/constrain.py index c1ec36c821..d116256e83 100644 --- a/hoomd/md/constrain.py +++ b/hoomd/md/constrain.py @@ -303,14 +303,22 @@ class Rigid(Constraint): def __init__(self): body = TypeParameter( - "body", "particle_types", - TypeParameterDict(OnlyIf(to_type_converter({ - 'constituent_types': [str], - 'positions': [(float,) * 3], - 'orientations': [(float,) * 4], - }), - allow_none=True), - len_keys=1)) + "body", + "particle_types", + TypeParameterDict( + OnlyIf( + to_type_converter( + { + "constituent_types": [str], + "positions": [(float,) * 3], + "orientations": [(float,) * 4], + } + ), + allow_none=True, + ), + len_keys=1, + ), + ) self._add_typeparam(body) self.body.default = None @@ -328,8 +336,7 @@ def create_bodies(self, state, charges=None): particle ``body`` tags in the state. 
""" if self._attached: - raise RuntimeError( - "Cannot call create_bodies after running simulation.") + raise RuntimeError("Cannot call create_bodies after running simulation.") super()._attach(state._simulation) self._cpp_obj.createRigidBodies({} if charges is None else charges) # Restore previous state @@ -344,7 +351,7 @@ def _attach_hook(self): __all__ = [ - 'Constraint', - 'Distance', - 'Rigid', + "Constraint", + "Distance", + "Rigid", ] diff --git a/hoomd/md/data/__init__.py b/hoomd/md/data/__init__.py index c7c4c584ad..4f41505631 100644 --- a/hoomd/md/data/__init__.py +++ b/hoomd/md/data/__init__.py @@ -16,8 +16,8 @@ from .local_access_gpu import ForceLocalAccessGPU, NeighborListLocalAccessGPU __all__ = [ - 'ForceLocalAccess', - 'ForceLocalAccessGPU', - 'NeighborListLocalAccess', - 'NeighborListLocalAccessGPU', + "ForceLocalAccess", + "ForceLocalAccessGPU", + "NeighborListLocalAccess", + "NeighborListLocalAccessGPU", ] diff --git a/hoomd/md/data/local_access.py b/hoomd/md/data/local_access.py index c7072c6cf7..45f6b5bc30 100644 --- a/hoomd/md/data/local_access.py +++ b/hoomd/md/data/local_access.py @@ -8,7 +8,7 @@ class _ForceLocalAccessBase(hoomd.data.local_access._LocalAccess): - __slots__ = ('_accessed_fields', '_cpp_obj', '_entered', '_force_obj') + __slots__ = ("_accessed_fields", "_cpp_obj", "_entered", "_force_obj") @property @abstractmethod @@ -16,17 +16,18 @@ def _cpp_cls(self): pass _fields = { - 'force': 'getForce', - 'potential_energy': 'getPotentialEnergy', - 'torque': 'getTorque', - 'virial': 'getVirial' + "force": "getForce", + "potential_energy": "getPotentialEnergy", + "torque": "getTorque", + "virial": "getVirial", } def __init__(self, force_obj, state): super().__init__() self._force_obj = force_obj - self._cpp_obj = self._cpp_cls(force_obj._cpp_obj, - state._cpp_sys_def.getParticleData()) + self._cpp_obj = self._cpp_cls( + force_obj._cpp_obj, state._cpp_sys_def.getParticleData() + ) def __enter__(self): self._force_obj._in_context_manager = True @@ -39,7 +40,7 @@ def __exit__(self, type, value, traceback): class _NeighborListLocalAccessBase(hoomd.data.local_access._LocalAccess): - __slots__ = ('_accessed_fields', '_cpp_obj', '_entered', '_nlist_obj') + __slots__ = ("_accessed_fields", "_cpp_obj", "_entered", "_nlist_obj") @property @abstractmethod @@ -50,9 +51,9 @@ def _cpp_cls(self): # Prevents the usage of extensions _global_fields = { - 'head_list': 'getHeadList', - 'n_neigh': 'getNNeigh', - 'nlist': 'getNList' + "head_list": "getHeadList", + "n_neigh": "getNNeigh", + "nlist": "getNList", } @property @@ -63,8 +64,8 @@ def __init__(self, nlist_obj, state): super().__init__() self._nlist_obj = nlist_obj self._cpp_obj = self._cpp_cls( - nlist_obj._cpp_obj, - state._cpp_sys_def.getParticleData().getN()) + nlist_obj._cpp_obj, state._cpp_sys_def.getParticleData().getN() + ) def __enter__(self): self._nlist_obj._in_context_manager = True diff --git a/hoomd/md/data/local_access_cpu.py b/hoomd/md/data/local_access_cpu.py index a587b0f350..1b3d4fd0ac 100644 --- a/hoomd/md/data/local_access_cpu.py +++ b/hoomd/md/data/local_access_cpu.py @@ -3,8 +3,10 @@ """Implement local access classes for the CPU.""" -from hoomd.md.data.local_access import _ForceLocalAccessBase, \ - _NeighborListLocalAccessBase +from hoomd.md.data.local_access import ( + _ForceLocalAccessBase, + _NeighborListLocalAccessBase, +) from hoomd.data.array import HOOMDArray from hoomd import _hoomd from hoomd.md import _md diff --git a/hoomd/md/data/local_access_gpu.py b/hoomd/md/data/local_access_gpu.py index 
5856a58c42..c7091e80c1 100644 --- a/hoomd/md/data/local_access_gpu.py +++ b/hoomd/md/data/local_access_gpu.py @@ -7,18 +7,22 @@ from hoomd import _hoomd from hoomd.md import _md from hoomd.data.array import HOOMDGPUArray -from hoomd.md.data.local_access import _ForceLocalAccessBase, \ - _NeighborListLocalAccessBase +from hoomd.md.data.local_access import ( + _ForceLocalAccessBase, + _NeighborListLocalAccessBase, +) if hoomd.version.gpu_enabled: class ForceLocalAccessGPU(_ForceLocalAccessBase): """Access force array data on the GPU.""" + _cpp_cls = _hoomd.LocalForceComputeDataDevice _array_cls = HOOMDGPUArray class NeighborListLocalAccessGPU(_NeighborListLocalAccessBase): """Access neighbor list array data on the GPU.""" + _cpp_cls = _md.LocalNeighborListDataDevice _array_cls = HOOMDGPUArray @@ -27,10 +31,12 @@ class NeighborListLocalAccessGPU(_NeighborListLocalAccessBase): class ForceLocalAccessGPU(_NoGPU): """GPU data access is not available in CPU builds.""" + pass class NeighborListLocalAccessGPU(_NoGPU): """GPU data access is not available in CPU builds.""" + pass diff --git a/hoomd/md/dihedral.py b/hoomd/md/dihedral.py index 5e999c85df..f3c018f4e6 100644 --- a/hoomd/md/dihedral.py +++ b/hoomd/md/dihedral.py @@ -69,10 +69,8 @@ def __init__(self): def _attach_hook(self): # check that some dihedrals are defined - if self._simulation.state._cpp_sys_def.getDihedralData().getNGlobal( - ) == 0: - self._simulation.device._cpp_msg.warning( - "No dihedrals are defined.\n") + if self._simulation.state._cpp_sys_def.getDihedralData().getNGlobal() == 0: + self._simulation.device._cpp_msg.warning("No dihedrals are defined.\n") # create the c++ mirror class if isinstance(self._simulation.device, hoomd.device.CPU): @@ -97,8 +95,8 @@ class Periodic(Dihedral): Examples:: harmonic = dihedral.Periodic() - harmonic.params['A-A-A-A'] = dict(k=3.0, d=-1, n=3, phi0=0) - harmonic.params['A-B-C-D'] = dict(k=100.0, d=1, n=4, phi0=math.pi/2) + harmonic.params["A-A-A-A"] = dict(k=3.0, d=-1, n=3, phi0=0) + harmonic.params["A-B-C-D"] = dict(k=100.0, d=1, n=4, phi0=math.pi / 2) {inherited} @@ -118,14 +116,17 @@ class Periodic(Dihedral): * ``phi0`` (`float`, **required**) - phase shift :math:`\phi_0` :math:`[\mathrm{radians}]` """ + _cpp_class_name = "HarmonicDihedralForceCompute" __doc__ = __doc__.replace("{inherited}", Dihedral._doc_inherited) def __init__(self): super().__init__() params = TypeParameter( - 'params', 'dihedral_types', - TypeParameterDict(k=float, d=float, n=int, phi0=float, len_keys=1)) + "params", + "dihedral_types", + TypeParameterDict(k=float, d=float, n=int, phi0=float, len_keys=1), + ) self._add_typeparam(params) @@ -181,15 +182,18 @@ class Table(Dihedral): def __init__(self, width): super().__init__() param_dict = hoomd.data.parameterdicts.ParameterDict(width=int) - param_dict['width'] = width + param_dict["width"] = width self._param_dict = param_dict params = TypeParameter( - "params", "dihedral_types", + "params", + "dihedral_types", TypeParameterDict( U=hoomd.data.typeconverter.NDArrayValidator(numpy.float64), tau=hoomd.data.typeconverter.NDArrayValidator(numpy.float64), - len_keys=1)) + len_keys=1, + ), + ) self._add_typeparam(params) def _attach_hook(self): @@ -220,7 +224,7 @@ class OPLS(Dihedral): Examples:: opls = dihedral.OPLS() - opls.params['A-A-A-A'] = dict(k1=1.0, k2=1.0, k3=1.0, k4=1.0) + opls.params["A-A-A-A"] = dict(k1=1.0, k2=1.0, k3=1.0, k4=1.0) {inherited} @@ -245,6 +249,7 @@ class OPLS(Dihedral): * ``k4`` (`float`, **required**) - force constant of the fourth term 
:math:`[\mathrm{energy}]` """ + _cpp_class_name = "OPLSDihedralForceCompute" __doc__ = __doc__.replace("{inherited}", Dihedral._doc_inherited) @@ -252,18 +257,16 @@ def __init__(self): super().__init__() # check that some dihedrals are defined params = TypeParameter( - 'params', 'dihedral_types', - TypeParameterDict(k1=float, - k2=float, - k3=float, - k4=float, - len_keys=1)) + "params", + "dihedral_types", + TypeParameterDict(k1=float, k2=float, k3=float, k4=float, len_keys=1), + ) self._add_typeparam(params) __all__ = [ - 'OPLS', - 'Dihedral', - 'Periodic', - 'Table', + "OPLS", + "Dihedral", + "Periodic", + "Table", ] diff --git a/hoomd/md/external/__init__.py b/hoomd/md/external/__init__.py index 9bf59a2178..e29bf899d6 100644 --- a/hoomd/md/external/__init__.py +++ b/hoomd/md/external/__init__.py @@ -13,6 +13,6 @@ from . import wall __all__ = [ - 'field', - 'wall', + "field", + "wall", ] diff --git a/hoomd/md/external/field.py b/hoomd/md/external/field.py index f8357a890d..a61d34f8d8 100644 --- a/hoomd/md/external/field.py +++ b/hoomd/md/external/field.py @@ -94,13 +94,16 @@ class Periodic(Field): Type: `TypeParameter` [``particle_type``, `dict`] """ + _cpp_class_name = "PotentialExternalPeriodic" __doc__ = __doc__.replace("{inherited}", Field._doc_inherited) def __init__(self): params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(i=int, A=float, w=float, p=int, len_keys=1)) + "params", + "particle_types", + TypeParameterDict(i=int, A=float, w=float, p=int, len_keys=1), + ) self._add_typeparam(params) @@ -124,7 +127,7 @@ class Electric(Field): .. code-block:: python electric = hoomd.md.external.field.Electric() - electric.E['A'] = (1, 0, 0) + electric.E["A"] = (1, 0, 0) simulation.operations.integrator.forces = [electric] {inherited} @@ -142,13 +145,14 @@ class Electric(Field): Type: `TypeParameter` [``particle_type``, `tuple` [`float`, `float`, `float`]] """ + _cpp_class_name = "PotentialExternalElectricField" __doc__ = __doc__.replace("{inherited}", Field._doc_inherited) def __init__(self): params = TypeParameter( - 'E', 'particle_types', - TypeParameterDict((float, float, float), len_keys=1)) + "E", "particle_types", TypeParameterDict((float, float, float), len_keys=1) + ) self._add_typeparam(params) @@ -171,7 +175,7 @@ class Magnetic(Field): .. 
code-block:: python magnetic = hoomd.md.external.field.Magnetic() - magnetic.params['A'] = dict(B=(1.0,0.0,0.0), mu=(1.0,0.0,0.0)) + magnetic.params["A"] = dict(B=(1.0, 0.0, 0.0), mu=(1.0, 0.0, 0.0)) simulation.operations.integrator.forces = [magnetic] {inherited} @@ -196,21 +200,24 @@ class Magnetic(Field): Type: `TypeParameter` [``particle_type``, `dict`] """ + _cpp_class_name = "PotentialExternalMagneticField" __doc__ = __doc__.replace("{inherited}", Field._doc_inherited) def __init__(self): params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(B=(float, float, float), - mu=(float, float, float), - len_keys=1)) + "params", + "particle_types", + TypeParameterDict( + B=(float, float, float), mu=(float, float, float), len_keys=1 + ), + ) self._add_typeparam(params) __all__ = [ - 'Electric', - 'Field', - 'Magnetic', - 'Periodic', + "Electric", + "Field", + "Magnetic", + "Periodic", ] diff --git a/hoomd/md/external/wall.py b/hoomd/md/external/wall.py index b539efc1b9..0c47e7968e 100644 --- a/hoomd/md/external/wall.py +++ b/hoomd/md/external/wall.py @@ -144,25 +144,29 @@ def _to_md_cpp_wall(wall): if isinstance(wall, hoomd.wall.Sphere): - return _md.SphereWall(wall.radius, wall.origin.to_base(), wall.inside, - wall.open) + return _md.SphereWall( + wall.radius, wall.origin.to_base(), wall.inside, wall.open + ) if isinstance(wall, hoomd.wall.Cylinder): - return _md.CylinderWall(wall.radius, wall.origin.to_base(), - wall.axis.to_base(), wall.inside, wall.open) + return _md.CylinderWall( + wall.radius, + wall.origin.to_base(), + wall.axis.to_base(), + wall.inside, + wall.open, + ) if isinstance(wall, hoomd.wall.Plane): - return _md.PlaneWall(wall.origin.to_base(), wall.normal.to_base(), - wall.open) + return _md.PlaneWall(wall.origin.to_base(), wall.normal.to_base(), wall.open) raise TypeError(f"Unknown wall type encountered {type(wall)}.") class _WallArrayViewFactory: - def __init__(self, cpp_wall_potential, wall_type): self.cpp_obj = cpp_wall_potential self.func_name = { hoomd.wall.Sphere: "get_sphere_list", hoomd.wall.Cylinder: "get_cylinder_list", - hoomd.wall.Plane: "get_plane_list" + hoomd.wall.Plane: "get_plane_list", }[wall_type] def __call__(self): @@ -186,7 +190,9 @@ class WallPotential(force.Force): __doc__ = __doc__.replace("{inherited}", force.Force._doc_inherited) - _doc_inherited = force.Force._doc_inherited + """ + _doc_inherited = ( + force.Force._doc_inherited + + """ ---------- **Members inherited from** @@ -197,6 +203,7 @@ class WallPotential(force.Force): A list of wall definitions to use for the force. `Read more... ` """ + ) # Module where the C++ class is defined. Reassign this when developing an # external plugin. 
@@ -212,17 +219,19 @@ def _attach_hook(self): else: cls = getattr(self._ext_module, self._cpp_class_name + "GPU") self._cpp_obj = cls(self._simulation.state._cpp_sys_def) - self._walls._sync({ - hoomd.wall.Sphere: - _ArrayViewWrapper( - _WallArrayViewFactory(self._cpp_obj, hoomd.wall.Sphere)), - hoomd.wall.Cylinder: - _ArrayViewWrapper( - _WallArrayViewFactory(self._cpp_obj, hoomd.wall.Cylinder)), - hoomd.wall.Plane: - _ArrayViewWrapper( - _WallArrayViewFactory(self._cpp_obj, hoomd.wall.Plane)), - }) + self._walls._sync( + { + hoomd.wall.Sphere: _ArrayViewWrapper( + _WallArrayViewFactory(self._cpp_obj, hoomd.wall.Sphere) + ), + hoomd.wall.Cylinder: _ArrayViewWrapper( + _WallArrayViewFactory(self._cpp_obj, hoomd.wall.Cylinder) + ), + hoomd.wall.Plane: _ArrayViewWrapper( + _WallArrayViewFactory(self._cpp_obj, hoomd.wall.Plane) + ), + } + ) @property def walls(self): @@ -237,19 +246,19 @@ def walls(self, wall_list): return self._walls = hoomd.wall._WallsMetaList(wall_list, _to_md_cpp_wall) if self._attached: - self._walls._sync({ - hoomd.wall.Sphere: - _ArrayViewWrapper( - _WallArrayViewFactory(self._cpp_obj, - hoomd.wall.Sphere)), - hoomd.wall.Cylinder: - _ArrayViewWrapper( - _WallArrayViewFactory(self._cpp_obj, - hoomd.wall.Cylinder)), - hoomd.wall.Plane: - _ArrayViewWrapper( - _WallArrayViewFactory(self._cpp_obj, hoomd.wall.Plane)), - }) + self._walls._sync( + { + hoomd.wall.Sphere: _ArrayViewWrapper( + _WallArrayViewFactory(self._cpp_obj, hoomd.wall.Sphere) + ), + hoomd.wall.Cylinder: _ArrayViewWrapper( + _WallArrayViewFactory(self._cpp_obj, hoomd.wall.Cylinder) + ), + hoomd.wall.Plane: _ArrayViewWrapper( + _WallArrayViewFactory(self._cpp_obj, hoomd.wall.Plane) + ), + } + ) class LJ(WallPotential): @@ -266,8 +275,16 @@ class LJ(WallPotential): walls = [hoomd.wall.Sphere(radius=4.0)] lj = hoomd.md.external.wall.LJ(walls=walls) - lj.params['A'] = {"sigma": 1.0, "epsilon": 1.0, "r_cut": 2.5} - lj.params[['A','B']] = {"epsilon": 2.0, "sigma": 1.0, "r_cut": 2.8} + lj.params["A"] = { + "sigma": 1.0, + "epsilon": 1.0, + "r_cut": 2.5, + } + lj.params[["A", "B"]] = { + "epsilon": 2.0, + "sigma": 1.0, + "r_cut": 2.8, + } lj.params["A"] = {"r_extrap": 1.1} {inherited} @@ -298,17 +315,16 @@ class LJ(WallPotential): __doc__ = __doc__.replace("{inherited}", WallPotential._doc_inherited) def __init__(self, walls): - # initialize the base class super().__init__(walls) params = hoomd.data.typeparam.TypeParameter( - "params", "particle_types", - hoomd.data.parameterdicts.TypeParameterDict(epsilon=float, - sigma=float, - r_cut=float, - r_extrap=0.0, - len_keys=1)) + "params", + "particle_types", + hoomd.data.parameterdicts.TypeParameterDict( + epsilon=float, sigma=float, r_cut=float, r_extrap=0.0, len_keys=1 + ), + ) self._add_typeparam(params) @@ -326,9 +342,16 @@ class Gaussian(WallPotential): walls = [hoomd.wall.Sphere(radius=4.0)] gaussian_wall = hoomd.md.external.wall.Gaussian(walls=walls) - gaussian_wall.params['A'] = {"epsilon": 1.0, "sigma": 1.0, "r_cut": 2.5} - gaussian_wall.params[['A','B']] = { - "epsilon": 2.0, "sigma": 1.0, "r_cut": 1.0} + gaussian_wall.params["A"] = { + "epsilon": 1.0, + "sigma": 1.0, + "r_cut": 2.5, + } + gaussian_wall.params[["A", "B"]] = { + "epsilon": 2.0, + "sigma": 1.0, + "r_cut": 1.0, + } {inherited} @@ -358,17 +381,16 @@ class Gaussian(WallPotential): __doc__ = __doc__.replace("{inherited}", WallPotential._doc_inherited) def __init__(self, walls): - # initialize the base class super().__init__(walls) params = hoomd.data.typeparam.TypeParameter( - "params", 
"particle_types", - hoomd.data.parameterdicts.TypeParameterDict(epsilon=float, - sigma=float, - r_cut=float, - r_extrap=0.0, - len_keys=1)) + "params", + "particle_types", + hoomd.data.parameterdicts.TypeParameterDict( + epsilon=float, sigma=float, r_cut=float, r_extrap=0.0, len_keys=1 + ), + ) self._add_typeparam(params) @@ -386,10 +408,16 @@ class Yukawa(WallPotential): walls = [hoomd.wall.Sphere(radius=4.0)] yukawa_wall = hoomd.md.external.wall.Yukawa(walls=walls) - yukawa_wall.params['A'] = { - "epsilon": 1.0, "kappa": 1.0, "r_cut": 3.0} - yukawa_wall.params[['A','B']] = { - "epsilon": 0.5, "kappa": 3.0, "r_cut": 3.2} + yukawa_wall.params["A"] = { + "epsilon": 1.0, + "kappa": 1.0, + "r_cut": 3.0, + } + yukawa_wall.params[["A", "B"]] = { + "epsilon": 0.5, + "kappa": 3.0, + "r_cut": 3.2, + } {inherited} @@ -419,17 +447,16 @@ class Yukawa(WallPotential): __doc__ = __doc__.replace("{inherited}", WallPotential._doc_inherited) def __init__(self, walls): - # initialize the base class super().__init__(walls) params = hoomd.data.typeparam.TypeParameter( - "params", "particle_types", - hoomd.data.parameterdicts.TypeParameterDict(epsilon=float, - kappa=float, - r_cut=float, - r_extrap=0.0, - len_keys=1)) + "params", + "particle_types", + hoomd.data.parameterdicts.TypeParameterDict( + epsilon=float, kappa=float, r_cut=float, r_extrap=0.0, len_keys=1 + ), + ) self._add_typeparam(params) @@ -448,10 +475,18 @@ class Morse(WallPotential): walls = [hoomd.wall.Sphere(radius=4.0)] morse_wall = hoomd.md.external.wall.Morse(walls=walls) - morse_wall.params['A'] = { - "D0": 1.0, "alpha": 1.0, "r0": 1.0, "r_cut": 3.0} - morse_wall.params[['A','B']] = { - "D0": 0.5, "alpha": 3.0, "r0": 1.0, "r_cut": 3.2} + morse_wall.params["A"] = { + "D0": 1.0, + "alpha": 1.0, + "r0": 1.0, + "r_cut": 3.0, + } + morse_wall.params[["A", "B"]] = { + "D0": 0.5, + "alpha": 3.0, + "r0": 1.0, + "r_cut": 3.2, + } {inherited} @@ -481,18 +516,16 @@ class Morse(WallPotential): __doc__ = __doc__.replace("{inherited}", WallPotential._doc_inherited) def __init__(self, walls): - # initialize the base class super().__init__(walls) params = hoomd.data.typeparam.TypeParameter( - "params", "particle_types", - hoomd.data.parameterdicts.TypeParameterDict(D0=float, - r0=float, - alpha=float, - r_cut=float, - r_extrap=0.0, - len_keys=1)) + "params", + "particle_types", + hoomd.data.parameterdicts.TypeParameterDict( + D0=float, r0=float, alpha=float, r_cut=float, r_extrap=0.0, len_keys=1 + ), + ) self._add_typeparam(params) @@ -510,12 +543,17 @@ class ForceShiftedLJ(WallPotential): Example:: walls = [hoomd.wall.Sphere(radius=4.0)] - shifted_lj_wall = hoomd.md.external.wall.ForceShiftedLJ( - walls=walls) - shifted_lj_wall.params['A'] = { - "epsilon": 1.0, "sigma": 1.0, "r_cut": 3.0} - shifted_lj_wall.params[['A','B']] = { - "epsilon": 0.5, "sigma": 3.0, "r_cut": 3.2} + shifted_lj_wall = hoomd.md.external.wall.ForceShiftedLJ(walls=walls) + shifted_lj_wall.params["A"] = { + "epsilon": 1.0, + "sigma": 1.0, + "r_cut": 3.0, + } + shifted_lj_wall.params[["A", "B"]] = { + "epsilon": 0.5, + "sigma": 3.0, + "r_cut": 3.2, + } {inherited} @@ -545,17 +583,16 @@ class ForceShiftedLJ(WallPotential): __doc__ = __doc__.replace("{inherited}", WallPotential._doc_inherited) def __init__(self, walls): - # initialize the base class super().__init__(walls) params = hoomd.data.typeparam.TypeParameter( - "params", "particle_types", - hoomd.data.parameterdicts.TypeParameterDict(epsilon=float, - sigma=float, - r_cut=float, - r_extrap=0.0, - len_keys=1)) + "params", + 
"particle_types", + hoomd.data.parameterdicts.TypeParameterDict( + epsilon=float, sigma=float, r_cut=float, r_extrap=0.0, len_keys=1 + ), + ) self._add_typeparam(params) @@ -573,10 +610,20 @@ class Mie(WallPotential): walls = [hoomd.wall.Sphere(radius=4.0)] mie_wall = hoomd.md.external.wall.Mie(walls=walls) - mie_wall.params['A'] = { - "epsilon": 1.0, "sigma": 1.0, "n": 12, "m": 6, "r_cut": 3.0} - mie_wall.params[['A','B']] = { - "epsilon": 0.5, "sigma": 3.0, "n": 49, "m": 50, "r_cut": 3.2} + mie_wall.params["A"] = { + "epsilon": 1.0, + "sigma": 1.0, + "n": 12, + "m": 6, + "r_cut": 3.0, + } + mie_wall.params[["A", "B"]] = { + "epsilon": 0.5, + "sigma": 3.0, + "n": 49, + "m": 50, + "r_cut": 3.2, + } {inherited} @@ -606,28 +653,31 @@ class Mie(WallPotential): __doc__ = __doc__.replace("{inherited}", WallPotential._doc_inherited) def __init__(self, walls): - # initialize the base class super().__init__(walls) params = hoomd.data.typeparam.TypeParameter( - "params", "particle_types", - hoomd.data.parameterdicts.TypeParameterDict(epsilon=float, - sigma=float, - m=float, - n=float, - r_cut=float, - r_extrap=0.0, - len_keys=1)) + "params", + "particle_types", + hoomd.data.parameterdicts.TypeParameterDict( + epsilon=float, + sigma=float, + m=float, + n=float, + r_cut=float, + r_extrap=0.0, + len_keys=1, + ), + ) self._add_typeparam(params) __all__ = [ - 'LJ', - 'ForceShiftedLJ', - 'Gaussian', - 'Mie', - 'Morse', - 'WallPotential', - 'Yukawa', + "LJ", + "ForceShiftedLJ", + "Gaussian", + "Mie", + "Morse", + "WallPotential", + "Yukawa", ] diff --git a/hoomd/md/force.py b/hoomd/md/force.py index 9cefb7e886..c1b2a4de00 100644 --- a/hoomd/md/force.py +++ b/hoomd/md/force.py @@ -79,7 +79,9 @@ class Force(Compute): __doc__ = __doc__.replace("{inherited}", Compute._doc_inherited) - _doc_inherited = Compute._doc_inherited + """ + _doc_inherited = ( + Compute._doc_inherited + + """ ---------- **Members inherited from** @@ -130,6 +132,7 @@ class Force(Compute): Virial tensor contribution :math:`W_i` from each particle. `Read more... ` """ + ) def __init__(self): self._in_context_manager = False @@ -243,9 +246,11 @@ def cpu_local_force_arrays(self): arrays.virial[:] = ... 
""" if self._in_context_manager: - raise RuntimeError("Cannot enter cpu_local_force_arrays context " - "manager inside another local_force_arrays " - "context manager") + raise RuntimeError( + "Cannot enter cpu_local_force_arrays context " + "manager inside another local_force_arrays " + "context manager" + ) if not self._attached: raise hoomd.error.DataAccessError("cpu_local_force_arrays") return hoomd.md.data.ForceLocalAccess(self, self._simulation.state) @@ -279,11 +284,13 @@ def gpu_local_force_arrays(self): """ if not isinstance(self._simulation.device, hoomd.device.GPU): raise RuntimeError( - "Cannot access gpu_local_force_arrays without a GPU device") + "Cannot access gpu_local_force_arrays without a GPU device" + ) if self._in_context_manager: raise RuntimeError( "Cannot enter gpu_local_force_arrays context manager inside " - "another local_force_arrays context manager") + "another local_force_arrays context manager" + ) if not self._attached: raise hoomd.error.DataAccessError("gpu_local_force_arrays") return hoomd.md.data.ForceLocalAccessGPU(self, self._simulation.state) @@ -373,8 +380,9 @@ def __init__(self, aniso=False): def _attach_hook(self): self._state = self._simulation.state - self._cpp_obj = _md.CustomForceCompute(self._state._cpp_sys_def, - self.set_forces, self._aniso) + self._cpp_obj = _md.CustomForceCompute( + self._state._cpp_sys_def, self.set_forces, self._aniso + ) @abstractmethod def set_forces(self, timestep): @@ -488,7 +496,6 @@ def _attach_hook(self): self._set_cpp_obj() def _set_cpp_obj(self): - # initialize the reflected c++ class sim = self._simulation @@ -497,8 +504,9 @@ def _set_cpp_obj(self): else: my_class = _md.ActiveForceComputeGPU - self._cpp_obj = my_class(sim.state._cpp_sys_def, - sim.state._get_group(self.filter)) + self._cpp_obj = my_class( + sim.state._cpp_sys_def, sim.state._get_group(self.filter) + ) def create_diffusion_updater(self, trigger, rotational_diffusion): """Create a rotational diffusion updater for this active force. @@ -514,7 +522,8 @@ def create_diffusion_updater(self, trigger, rotational_diffusion): The rotational diffusion updater. 
""" return hoomd.md.update.ActiveRotationalDiffusion( - trigger, self, rotational_diffusion) + trigger, self, rotational_diffusion + ) class ActiveOnManifold(Active): @@ -548,11 +557,12 @@ class ActiveOnManifold(Active): all = filter.All() sphere = hoomd.md.manifold.Sphere(r=10) active = hoomd.md.force.ActiveOnManifold( - filter=hoomd.filter.All(), rotation_diff=0.01, - manifold_constraint = sphere - ) - active.active_force['A','B'] = (1,0,0) - active.active_torque['A','B'] = (0,0,0) + filter=hoomd.filter.All(), + rotation_diff=0.01, + manifold_constraint=sphere, + ) + active.active_force["A", "B"] = (1, 0, 0) + active.active_torque["A", "B"] = (0, 0, 0) {inherited} @@ -590,32 +600,32 @@ def __init__(self, filter, manifold_constraint): # store metadata super().__init__(filter) param_dict = ParameterDict( - manifold_constraint=OnlyTypes(Manifold, allow_none=False)) + manifold_constraint=OnlyTypes(Manifold, allow_none=False) + ) param_dict["manifold_constraint"] = manifold_constraint self._param_dict.update(param_dict) def _setattr_param(self, attr, value): if attr == "manifold_constraint": - raise AttributeError( - "Cannot set manifold_constraint after construction.") + raise AttributeError("Cannot set manifold_constraint after construction.") super()._setattr_param(attr, value) def _set_cpp_obj(self): - # initialize the reflected c++ class sim = self._simulation if not self.manifold_constraint._attached: self.manifold_constraint._attach(sim) - base_class_str = 'ActiveForceConstraintCompute' + base_class_str = "ActiveForceConstraintCompute" base_class_str += self.manifold_constraint.__class__.__name__ if isinstance(sim.device, hoomd.device.GPU): base_class_str += "GPU" - self._cpp_obj = getattr( - _md, base_class_str)(sim.state._cpp_sys_def, - sim.state._get_group(self.filter), - self.manifold_constraint._cpp_obj) + self._cpp_obj = getattr(_md, base_class_str)( + sim.state._cpp_sys_def, + sim.state._get_group(self.filter), + self.manifold_constraint._cpp_obj, + ) class Constant(Force): @@ -631,11 +641,9 @@ class Constant(Force): Examples:: - constant = hoomd.md.force.Constant( - filter=hoomd.filter.All() - ) - constant.constant_force['A'] = (1,0,0) - constant.constant_torque['A'] = (0,0,0) + constant = hoomd.md.force.Constant(filter=hoomd.filter.All()) + constant.constant_force["A"] = (1, 0, 0) + constant.constant_torque["A"] = (0, 0, 0) Note: The energy and virial associated with the constant force are 0. @@ -701,14 +709,15 @@ def _attach_hook(self): else: my_class = _md.ConstantForceComputeGPU - self._cpp_obj = my_class(sim.state._cpp_sys_def, - sim.state._get_group(self.filter)) + self._cpp_obj = my_class( + sim.state._cpp_sys_def, sim.state._get_group(self.filter) + ) __all__ = [ - 'Active', - 'ActiveOnManifold', - 'Constant', - 'Custom', - 'Force', + "Active", + "ActiveOnManifold", + "Constant", + "Custom", + "Force", ] diff --git a/hoomd/md/half_step_hook.py b/hoomd/md/half_step_hook.py index d79566977a..db205c0de9 100644 --- a/hoomd/md/half_step_hook.py +++ b/hoomd/md/half_step_hook.py @@ -28,4 +28,5 @@ def update(self, timestep): """ raise TypeError( "Use a hoomd.md.HalfStepHook derived class implementing the " - "corresponding update method.") + "corresponding update method." 
+ ) diff --git a/hoomd/md/improper.py b/hoomd/md/improper.py index c9f7fdd779..23e524779e 100644 --- a/hoomd/md/improper.py +++ b/hoomd/md/improper.py @@ -68,10 +68,8 @@ def __init__(self): def _attach_hook(self): # check that some impropers are defined - if self._simulation.state._cpp_sys_def.getImproperData().getNGlobal( - ) == 0: - self._simulation.device._cpp_msg.warning( - "No impropers are defined.\n") + if self._simulation.state._cpp_sys_def.getImproperData().getNGlobal() == 0: + self._simulation.device._cpp_msg.warning("No impropers are defined.\n") # Instantiate the c++ implementation. if isinstance(self._simulation.device, hoomd.device.CPU): @@ -95,7 +93,7 @@ class Harmonic(Improper): Example:: harmonic = hoomd.md.improper.Harmonic() - harmonic.params['A-B-C-D'] = dict(k=1.0, chi0=0) + harmonic.params["A-B-C-D"] = dict(k=1.0, chi0=0) {inherited} @@ -113,17 +111,19 @@ class Harmonic(Improper): * ``chi0`` (`float`, **required**), equilibrium angle :math:`\\chi_0` :math:`[\\mathrm{radian}]`. """ + _cpp_class_name = "HarmonicImproperForceCompute" __doc__ = __doc__.replace("{inherited}", Improper._doc_inherited) def __init__(self): super().__init__() params = hoomd.data.typeparam.TypeParameter( - 'params', 'improper_types', + "params", + "improper_types", hoomd.data.parameterdicts.TypeParameterDict( - k=float, - chi0=hoomd.data.typeconverter.nonnegative_real, - len_keys=1)) + k=float, chi0=hoomd.data.typeconverter.nonnegative_real, len_keys=1 + ), + ) self._add_typeparam(params) @@ -142,7 +142,7 @@ class Periodic(Improper): .. code-block:: python periodic = hoomd.md.improper.Periodic() - periodic.params['A-B-C-D'] = dict(k=1.0, n = 1, chi0=0, d=1.0) + periodic.params["A-B-C-D"] = dict(k=1.0, n=1, chi0=0, d=1.0) {inherited} @@ -164,24 +164,28 @@ class Periodic(Improper): * ``d`` (`float`, **required**), sign factor :math:`d` :math:`[\\mathrm{dimensionless}]`. 
""" + _cpp_class_name = "PeriodicImproperForceCompute" __doc__ = __doc__.replace("{inherited}", Improper._doc_inherited) def __init__(self): super().__init__() params = hoomd.data.typeparam.TypeParameter( - 'params', 'improper_types', + "params", + "improper_types", hoomd.data.parameterdicts.TypeParameterDict( k=float, n=int, d=int, chi0=hoomd.data.typeconverter.nonnegative_real, - len_keys=1)) + len_keys=1, + ), + ) self._add_typeparam(params) __all__ = [ - 'Harmonic', - 'Improper', - 'Periodic', + "Harmonic", + "Improper", + "Periodic", ] diff --git a/hoomd/md/integrate.py b/hoomd/md/integrate.py index 6fc2dba59f..46cc878c08 100644 --- a/hoomd/md/integrate.py +++ b/hoomd/md/integrate.py @@ -20,21 +20,23 @@ def _set_synced_list(old_list, new_list): class _DynamicIntegrator(BaseIntegrator): - def __init__(self, forces, constraints, methods, rigid): forces = [] if forces is None else forces constraints = [] if constraints is None else constraints methods = [] if methods is None else methods self._forces = syncedlist.SyncedList( - Force, syncedlist._PartialGetAttr('_cpp_obj'), iterable=forces) + Force, syncedlist._PartialGetAttr("_cpp_obj"), iterable=forces + ) self._constraints = syncedlist.SyncedList( OnlyTypes(Constraint, disallow_types=(Rigid,)), - syncedlist._PartialGetAttr('_cpp_obj'), - iterable=constraints) + syncedlist._PartialGetAttr("_cpp_obj"), + iterable=constraints, + ) self._methods = syncedlist.SyncedList( - Method, syncedlist._PartialGetAttr('_cpp_obj'), iterable=methods) + Method, syncedlist._PartialGetAttr("_cpp_obj"), iterable=methods + ) param_dict = ParameterDict(rigid=OnlyTypes(Rigid, allow_none=True)) if rigid is not None and rigid._attached: @@ -270,9 +272,10 @@ class Integrator(_DynamicIntegrator): simulation associated with the integrator. """ - __doc__ = __doc__.replace("{inherited}", - hoomd.operation.Integrator._doc_inherited) - _doc_inherited = hoomd.operation.Integrator._doc_inherited + """ + __doc__ = __doc__.replace("{inherited}", hoomd.operation.Integrator._doc_inherited) + _doc_inherited = ( + hoomd.operation.Integrator._doc_inherited + + """ ---------- **Members inherited from** `Integrator `: @@ -318,31 +321,35 @@ class Integrator(_DynamicIntegrator): `Read more... ` """ - - def __init__(self, - dt, - integrate_rotational_dof=False, - forces=None, - constraints=None, - methods=None, - rigid=None, - half_step_hook=None): - + ) + + def __init__( + self, + dt, + integrate_rotational_dof=False, + forces=None, + constraints=None, + methods=None, + rigid=None, + half_step_hook=None, + ): super().__init__(forces, constraints, methods, rigid) self._param_dict.update( ParameterDict( dt=float(dt), integrate_rotational_dof=bool(integrate_rotational_dof), - half_step_hook=OnlyTypes(hoomd.md.HalfStepHook, - allow_none=True))) + half_step_hook=OnlyTypes(hoomd.md.HalfStepHook, allow_none=True), + ) + ) self.half_step_hook = half_step_hook def _attach_hook(self): # initialize the reflected c++ class self._cpp_obj = _md.IntegratorTwoStep( - self._simulation.state._cpp_sys_def, self.dt) + self._simulation.state._cpp_sys_def, self.dt + ) # Call attach from DynamicIntegrator which attaches forces, # constraint_forces, and methods, and calls super()._attach() itself. 
super()._attach_hook() @@ -350,8 +357,11 @@ def _attach_hook(self): def __setattr__(self, attr, value): """Handle group DOF update when setting integrate_rotational_dof.""" super().__setattr__(attr, value) - if (attr == 'integrate_rotational_dof' and self._simulation is not None - and self._simulation.state is not None): + if ( + attr == "integrate_rotational_dof" + and self._simulation is not None + and self._simulation.state is not None + ): self._simulation.state.update_group_dof() @hoomd.logging.log(category="sequence", requires_run=True) diff --git a/hoomd/md/long_range/__init__.py b/hoomd/md/long_range/__init__.py index 5828676adf..d65cd0ed0a 100644 --- a/hoomd/md/long_range/__init__.py +++ b/hoomd/md/long_range/__init__.py @@ -6,5 +6,5 @@ from . import pppm __all__ = [ - 'pppm', + "pppm", ] diff --git a/hoomd/md/long_range/pppm.py b/hoomd/md/long_range/pppm.py index b5b60aee18..338e29fa6c 100644 --- a/hoomd/md/long_range/pppm.py +++ b/hoomd/md/long_range/pppm.py @@ -128,12 +128,14 @@ def make_pppm_coulomb_forces(nlist, resolution, order, r_cut, alpha=0): real_space_force.params.default = dict(kappa=0, alpha=0) real_space_force.r_cut.default = r_cut - reciprocal_space_force = Coulomb(nlist=nlist, - resolution=resolution, - order=order, - r_cut=r_cut, - alpha=0, - pair_force=real_space_force) + reciprocal_space_force = Coulomb( + nlist=nlist, + resolution=resolution, + order=order, + r_cut=r_cut, + alpha=0, + pair_force=real_space_force, + ) return real_space_force, reciprocal_space_force @@ -167,13 +169,14 @@ class Coulomb(Force): def __init__(self, nlist, resolution, order, r_cut, alpha, pair_force): super().__init__() - self._nlist = hoomd.data.typeconverter.OnlyTypes( - hoomd.md.nlist.NeighborList)(nlist) + self._nlist = hoomd.data.typeconverter.OnlyTypes(hoomd.md.nlist.NeighborList)( + nlist + ) self._param_dict.update( - hoomd.data.parameterdicts.ParameterDict(resolution=(int, int, int), - order=int, - r_cut=float, - alpha=float)) + hoomd.data.parameterdicts.ParameterDict( + resolution=(int, int, int), order=int, r_cut=float, alpha=float + ) + ) self.resolution = resolution self.order = order @@ -198,8 +201,9 @@ def _attach_hook(self): alpha = self.alpha group = self._simulation.state._get_group(hoomd.filter.All()) - self._cpp_obj = cls(self._simulation.state._cpp_sys_def, - self.nlist._cpp_obj, group) + self._cpp_obj = cls( + self._simulation.state._cpp_sys_def, self.nlist._cpp_obj, group + ) # compute the kappa parameter q2 = self._cpp_obj.getQ2Sum() @@ -222,8 +226,7 @@ def _attach_hook(self): fmid = _diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut) if f * fmid >= 0.0: - raise RuntimeError("Cannot compute PPPM Coloumb forces,\n" - "f*fmid >= 0.0") + raise RuntimeError("Cannot compute PPPM Coloumb forces,\n" "f*fmid >= 0.0") if f < 0.0: dgew = gew2 - gew1 @@ -243,8 +246,7 @@ def _attach_hook(self): rtb = kappa ncount += 1 if ncount > 10000.0: - raise RuntimeError("Cannot compute PPPM\n" - "kappa is not converging") + raise RuntimeError("Cannot compute PPPM\n" "kappa is not converging") # set parameters particle_types = self._simulation.state.particle_types @@ -274,7 +276,8 @@ def nlist(self, value): raise RuntimeError("nlist cannot be set after scheduling.") else: self._nlist = hoomd.data.typeconverter.OnlyTypes( - hoomd.md.nlist.NeighborList)(value) + hoomd.md.nlist.NeighborList + )(value) # ensure that the pair force uses the same neighbor list self._pair_force.nlist = value @@ -285,10 +288,13 @@ def _diffpr(hx, hy, hz, xprd, yprd, zprd, N, order, kappa, q2, rcut): 
lprx = _rms(hx, xprd, N, order, kappa, q2) lpry = _rms(hy, yprd, N, order, kappa, q2) lprz = _rms(hz, zprd, N, order, kappa, q2) - kspace_prec = math.sqrt(lprx * lprx + lpry * lpry - + lprz * lprz) / math.sqrt(3.0) - real_prec = 2.0 * q2 * math.exp(-kappa * kappa * rcut * rcut) / math.sqrt( - N * rcut * xprd * yprd * zprd) + kspace_prec = math.sqrt(lprx * lprx + lpry * lpry + lprz * lprz) / math.sqrt(3.0) + real_prec = ( + 2.0 + * q2 + * math.exp(-kappa * kappa * rcut * rcut) + / math.sqrt(N * rcut * xprd * yprd * zprd) + ) value = kspace_prec - real_prec return value @@ -329,12 +335,17 @@ def _rms(h, prd, N, order, kappa, q2): sum = 0.0 for m in range(0, order): sum += acons[order][m] * pow(h * kappa, 2.0 * m) - value = q2 * pow(h * kappa, order) * math.sqrt( - kappa * prd * math.sqrt(2.0 * math.pi) * sum / N) / prd / prd + value = ( + q2 + * pow(h * kappa, order) + * math.sqrt(kappa * prd * math.sqrt(2.0 * math.pi) * sum / N) + / prd + / prd + ) return value __all__ = [ - 'Coulomb', - 'make_pppm_coulomb_forces', + "Coulomb", + "make_pppm_coulomb_forces", ] diff --git a/hoomd/md/manifold.py b/hoomd/md/manifold.py index 381f34bafb..2eb1f01247 100644 --- a/hoomd/md/manifold.py +++ b/hoomd/md/manifold.py @@ -36,8 +36,7 @@ class Manifold(_HOOMDBaseObject): def _preprocess_unitcell(value): if isinstance(value, Sequence): if len(value) != 3: - raise ValueError( - "Expected a single int or a sequence of three ints.") + raise ValueError("Expected a single int or a sequence of three ints.") return tuple(value) else: return (value, value, value) @@ -47,8 +46,8 @@ def __eq__(self, other): if not isinstance(other, type(self)): return NotImplemented return all( - getattr(self, attr) == getattr(other, attr) - for attr in self._param_dict) + getattr(self, attr) == getattr(other, attr) for attr in self._param_dict + ) def _setattr_param(self, attr, value): raise MutabilityError(attr) @@ -71,7 +70,7 @@ class Cylinder(Manifold): Example:: cylinder1 = manifold.Cylinder(r=10) - cylinder2 = manifold.Cylinder(r=5,P=(1,1,1)) + cylinder2 = manifold.Cylinder(r=5, P=(1, 1, 1)) """ def __init__(self, r, P=(0, 0, 0)): @@ -80,13 +79,14 @@ def __init__(self, r, P=(0, 0, 0)): r=float(r), P=(float, float, float), ) - param_dict['P'] = P + param_dict["P"] = P self._param_dict.update(param_dict) def _attach_hook(self): self._cpp_obj = _md.ManifoldZCylinder( - self.r, _hoomd.make_scalar3(self.P[0], self.P[1], self.P[2])) + self.r, _hoomd.make_scalar3(self.P[0], self.P[1], self.P[2]) + ) super()._attach(self._simulation) @@ -126,23 +126,24 @@ class Diamond(Manifold): Example:: diamond1 = manifold.Diamond(N=1) - diamond2 = manifold.Diamond(N=(1,2,2)) + diamond2 = manifold.Diamond(N=(1, 2, 2)) """ def __init__(self, N, epsilon=0): - # store metadata param_dict = ParameterDict( - N=OnlyIf(to_type_converter((int,) * 3), - preprocess=self._preprocess_unitcell), + N=OnlyIf( + to_type_converter((int,) * 3), preprocess=self._preprocess_unitcell + ), epsilon=float(epsilon), ) - param_dict['N'] = N + param_dict["N"] = N self._param_dict.update(param_dict) def _attach_hook(self): self._cpp_obj = _md.ManifoldDiamond( - _hoomd.make_int3(self.N[0], self.N[1], self.N[2]), self.epsilon) + _hoomd.make_int3(self.N[0], self.N[1], self.N[2]), self.epsilon + ) super()._attach(self._simulation) @@ -171,8 +172,8 @@ class Ellipsoid(Manifold): Example:: - ellipsoid1 = manifold.Ellipsoid(a=10,b=5,c=5) - ellipsoid2 = manifold.Ellipsoid(a=5,b=10,c=10,P=(1,0.5,1)) + ellipsoid1 = manifold.Ellipsoid(a=10, b=5, c=5) + ellipsoid2 = manifold.Ellipsoid(a=5, 
b=10, c=10, P=(1, 0.5, 1)) """ def __init__(self, a, b, c, P=(0, 0, 0)): @@ -183,14 +184,14 @@ def __init__(self, a, b, c, P=(0, 0, 0)): c=float(c), P=(float, float, float), ) - param_dict['P'] = P + param_dict["P"] = P self._param_dict.update(param_dict) def _attach_hook(self): self._cpp_obj = _md.ManifoldEllipsoid( - self.a, self.b, self.c, - _hoomd.make_scalar3(self.P[0], self.P[1], self.P[2])) + self.a, self.b, self.c, _hoomd.make_scalar3(self.P[0], self.P[1], self.P[2]) + ) super()._attach(self._simulation) @@ -229,25 +230,26 @@ class Gyroid(Manifold): Example:: gyroid1 = manifold.Gyroid(N=1) - gyroid2 = manifold.Gyroid(N=(1,2,2)) + gyroid2 = manifold.Gyroid(N=(1, 2, 2)) """ def __init__(self, N, epsilon=0): - # initialize the base class super().__init__() # store metadata param_dict = ParameterDict( - N=OnlyIf(to_type_converter((int,) * 3), - preprocess=self._preprocess_unitcell), + N=OnlyIf( + to_type_converter((int,) * 3), preprocess=self._preprocess_unitcell + ), epsilon=float(epsilon), ) - param_dict['N'] = N + param_dict["N"] = N self._param_dict.update(param_dict) def _attach_hook(self): self._cpp_obj = _md.ManifoldGyroid( - _hoomd.make_int3(self.N[0], self.N[1], self.N[2]), self.epsilon) + _hoomd.make_int3(self.N[0], self.N[1], self.N[2]), self.epsilon + ) super()._attach(self._simulation) @@ -270,7 +272,9 @@ class Plane(Manifold): """ def __init__(self, shift=0): - param_dict = ParameterDict(shift=float(shift),) + param_dict = ParameterDict( + shift=float(shift), + ) self._param_dict.update(param_dict) @@ -313,23 +317,24 @@ class Primitive(Manifold): Example:: primitive1 = manifold.Primitive(N=1) - primitive2 = manifold.Primitive(N=(1,2,2)) + primitive2 = manifold.Primitive(N=(1, 2, 2)) """ def __init__(self, N, epsilon=0): - # store metadata param_dict = ParameterDict( - N=OnlyIf(to_type_converter((int,) * 3), - preprocess=self._preprocess_unitcell), + N=OnlyIf( + to_type_converter((int,) * 3), preprocess=self._preprocess_unitcell + ), epsilon=float(epsilon), ) - param_dict['N'] = N + param_dict["N"] = N self._param_dict.update(param_dict) def _attach_hook(self): self._cpp_obj = _md.ManifoldPrimitive( - _hoomd.make_int3(self.N[0], self.N[1], self.N[2]), self.epsilon) + _hoomd.make_int3(self.N[0], self.N[1], self.N[2]), self.epsilon + ) super()._attach(self._simulation) @@ -351,7 +356,7 @@ class Sphere(Manifold): Example:: sphere1 = manifold.Sphere(r=10) - sphere2 = manifold.Sphere(r=5,P=(1,0,1.5)) + sphere2 = manifold.Sphere(r=5, P=(1, 0, 1.5)) """ def __init__(self, r, P=(0, 0, 0)): @@ -361,24 +366,25 @@ def __init__(self, r, P=(0, 0, 0)): r=float(r), P=(float, float, float), ) - param_dict['P'] = P + param_dict["P"] = P self._param_dict.update(param_dict) def _attach_hook(self): self._cpp_obj = _md.ManifoldSphere( - self.r, _hoomd.make_scalar3(self.P[0], self.P[1], self.P[2])) + self.r, _hoomd.make_scalar3(self.P[0], self.P[1], self.P[2]) + ) super()._attach(self._simulation) __all__ = [ - 'Cylinder', - 'Diamond', - 'Ellipsoid', - 'Gyroid', - 'Manifold', - 'Plane', - 'Primitive', - 'Sphere', + "Cylinder", + "Diamond", + "Ellipsoid", + "Gyroid", + "Manifold", + "Plane", + "Primitive", + "Sphere", ] diff --git a/hoomd/md/many_body.py b/hoomd/md/many_body.py index 3dcc638cfc..4b48ff5984 100644 --- a/hoomd/md/many_body.py +++ b/hoomd/md/many_body.py @@ -83,7 +83,9 @@ class Triplet(Force): """ __doc__ = __doc__.replace("{inherited}", Force._doc_inherited) - _doc_inherited = Force._doc_inherited + """ + _doc_inherited = ( + Force._doc_inherited + + """ ---------- **Members 
inherited from** @@ -99,17 +101,17 @@ class Triplet(Force): Neighbor list used to compute the triplet potential. `Read more... ` """ + ) def __init__(self, nlist, default_r_cut=None): super().__init__() r_cut_param = TypeParameter( - 'r_cut', 'particle_types', - TypeParameterDict(positive_real, len_keys=2)) + "r_cut", "particle_types", TypeParameterDict(positive_real, len_keys=2) + ) if default_r_cut is not None: r_cut_param.default = default_r_cut self._add_typeparam(r_cut_param) - self._param_dict.update( - ParameterDict(nlist=hoomd.md.nlist.NeighborList)) + self._param_dict.update(ParameterDict(nlist=hoomd.md.nlist.NeighborList)) self.nlist = nlist def _setattr_param(self, attr, value): @@ -129,7 +131,8 @@ def _attach_hook(self): f"{self} object is creating a new equivalent neighbor list." f" This is happending since the force is moving to a new " f"simulation. Explicitly set the nlist to hide this warning.", - RuntimeWarning) + RuntimeWarning, + ) self.nlist = copy.deepcopy(self.nlist) self.nlist._attach(self._simulation) self.nlist._cpp_obj.setStorageMode(_md.NeighborList.storageMode.full) @@ -138,8 +141,7 @@ def _attach_hook(self): else: cls = getattr(_md, self._cpp_class_name + "GPU") - self._cpp_obj = cls(self._simulation.state._cpp_sys_def, - self.nlist._cpp_obj) + self._cpp_obj = cls(self._simulation.state._cpp_sys_def, self.nlist._cpp_obj) class Tersoff(Triplet): @@ -260,25 +262,30 @@ class Tersoff(Triplet): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialTersoff" __doc__ = __doc__.replace("{inherited}", Triplet._doc_inherited) def __init__(self, nlist, default_r_cut=None): super().__init__(nlist, default_r_cut) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(cutoff_thickness=0.2, - magnitudes=(1.0, 1.0), - exp_factors=(2.0, 1.0), - lambda3=0.0, - dimer_r=1.5, - n=0.0, - gamma=0.0, - c=0.0, - d=1.0, - m=0.0, - alpha=3.0, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict( + cutoff_thickness=0.2, + magnitudes=(1.0, 1.0), + exp_factors=(2.0, 1.0), + lambda3=0.0, + dimer_r=1.5, + n=0.0, + gamma=0.0, + c=0.0, + d=1.0, + m=0.0, + alpha=3.0, + len_keys=2, + ), + ) self._add_typeparam(params) @@ -398,18 +405,17 @@ class RevCross(Triplet): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialRevCross" __doc__ = __doc__.replace("{inherited}", Triplet._doc_inherited) def __init__(self, nlist, default_r_cut=None): super().__init__(nlist, default_r_cut) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(sigma=2.0, - n=1.0, - epsilon=1.0, - lambda3=1.0, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict(sigma=2.0, n=1.0, epsilon=1.0, lambda3=1.0, len_keys=2), + ) self._add_typeparam(params) @@ -454,8 +460,8 @@ class SquareDensity(Triplet): nl = nlist.Cell() sqd = md.many_body.SquareDensity(nl, default_r_cut=3.0) - sqd.params[('A', 'B')] = dict(A=1.0, B=2.0) - sqd.params[('B', 'B')] = dict(A=2.0, B=2.0, default_r_on=1.0) + sqd.params[("A", "B")] = dict(A=1.0, B=2.0) + sqd.params[("B", "B")] = dict(A=2.0, B=2.0, default_r_on=1.0) For further details regarding this multibody potential, see @@ -485,19 +491,21 @@ class SquareDensity(Triplet): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialSquareDensity" __doc__ = __doc__.replace("{inherited}", Triplet._doc_inherited) def __init__(self, nlist, default_r_cut=None): 
super().__init__(nlist, default_r_cut) - params = TypeParameter('params', 'particle_types', - TypeParameterDict(A=0.0, B=float, len_keys=2)) + params = TypeParameter( + "params", "particle_types", TypeParameterDict(A=0.0, B=float, len_keys=2) + ) self._add_typeparam(params) __all__ = [ - 'RevCross', - 'SquareDensity', - 'Tersoff', - 'Triplet', + "RevCross", + "SquareDensity", + "Tersoff", + "Triplet", ] diff --git a/hoomd/md/mesh/__init__.py b/hoomd/md/mesh/__init__.py index 63e27e70d4..9cbdba33cf 100644 --- a/hoomd/md/mesh/__init__.py +++ b/hoomd/md/mesh/__init__.py @@ -7,8 +7,8 @@ from . import bending, bond, conservation __all__ = [ - 'MeshPotential', - 'bending', - 'bond', - 'conservation', + "MeshPotential", + "bending", + "bond", + "conservation", ] diff --git a/hoomd/md/mesh/bending.py b/hoomd/md/mesh/bending.py index 1423373583..c5a831b340 100644 --- a/hoomd/md/mesh/bending.py +++ b/hoomd/md/mesh/bending.py @@ -69,12 +69,14 @@ class BendingRigidity(MeshPotential): * ``k`` (`float`, **required**) - bending stiffness :math:`[\mathrm{energy}]` """ + _cpp_class_name = "BendingRigidityMeshForceCompute" __doc__ = __doc__.replace("{inherited}", MeshPotential._doc_inherited) def __init__(self, mesh): - params = TypeParameter("params", "types", - TypeParameterDict(k=float, len_keys=1)) + params = TypeParameter( + "params", "types", TypeParameterDict(k=float, len_keys=1) + ) self._add_typeparam(params) super().__init__(mesh) @@ -134,19 +136,19 @@ class Helfrich(MeshPotential): :math:`[\mathrm{energy}]` """ + _cpp_class_name = "HelfrichMeshForceCompute" __doc__ = __doc__.replace("{inherited}", MeshPotential._doc_inherited) def __init__(self, mesh): - - params = TypeParameter("params", "types", - TypeParameterDict(k=float, len_keys=1)) + params = TypeParameter( + "params", "types", TypeParameterDict(k=float, len_keys=1) + ) self._add_typeparam(params) super().__init__(mesh) def _attach_hook(self): - if self._simulation.device.communicator.num_ranks == 1: super()._attach_hook() else: @@ -154,6 +156,6 @@ def _attach_hook(self): __all__ = [ - 'BendingRigidity', - 'Helfrich', + "BendingRigidity", + "Helfrich", ] diff --git a/hoomd/md/mesh/bond.py b/hoomd/md/mesh/bond.py index ec359cc832..30d3c616c2 100644 --- a/hoomd/md/mesh/bond.py +++ b/hoomd/md/mesh/bond.py @@ -86,12 +86,14 @@ class Harmonic(MeshPotential): * ``r0`` (`float`, **required**) - rest length :math:`[\mathrm{length}]` """ + _cpp_class_name = "PotentialMeshBondHarmonic" __doc__ = __doc__.replace("{inherited}", MeshPotential._doc_inherited) def __init__(self, mesh): - params = TypeParameter("params", "types", - TypeParameterDict(k=float, r0=float, len_keys=1)) + params = TypeParameter( + "params", "types", TypeParameterDict(k=float, r0=float, len_keys=1) + ) self._add_typeparam(params) super().__init__(mesh) @@ -111,8 +113,9 @@ class FENEWCA(MeshPotential): .. code-block:: python bond_potential = hoomd.md.mesh.bond.FENEWCA(mesh) - bond_potential.params["mesh"] = dict(k=10.0, r0=1.0, - epsilon=0.8, sigma=1.2, delta=0.0) + bond_potential.params["mesh"] = dict( + k=10.0, r0=1.0, epsilon=0.8, sigma=1.2, delta=0.0 + ) {inherited} @@ -141,18 +144,18 @@ class FENEWCA(MeshPotential): * ``delta`` (`float`, **required**) - radial shift :math:`\Delta` :math:`[\mathrm{length}]`. 
""" + _cpp_class_name = "PotentialMeshBondFENE" __doc__ = __doc__.replace("{inherited}", MeshPotential._doc_inherited) def __init__(self, mesh): params = TypeParameter( - "params", "types", - TypeParameterDict(k=float, - r0=float, - epsilon=float, - sigma=float, - delta=float, - len_keys=1)) + "params", + "types", + TypeParameterDict( + k=float, r0=float, epsilon=float, sigma=float, delta=float, len_keys=1 + ), + ) self._add_typeparam(params) super().__init__(mesh) @@ -172,8 +175,9 @@ class Tether(MeshPotential): .. code-block:: python bond_potential = hoomd.md.mesh.bond.Tether(mesh) - bond_potential.params["mesh"] = dict(k_b=10.0, l_min=0.9, l_c1=1.2, - l_c0=1.8, l_max=2.1) + bond_potential.params["mesh"] = dict( + k_b=10.0, l_min=0.9, l_c1=1.2, l_c0=1.8, l_max=2.1 + ) {inherited} @@ -202,25 +206,25 @@ class Tether(MeshPotential): * ``l_max`` (`float`, **required**) - maximum bond length :math:`[\mathrm{length}]` """ + _cpp_class_name = "PotentialMeshBondTether" __doc__ = __doc__.replace("{inherited}", MeshPotential._doc_inherited) def __init__(self, mesh): params = TypeParameter( - "params", "types", - TypeParameterDict(k_b=float, - l_min=float, - l_c1=float, - l_c0=float, - l_max=float, - len_keys=1)) + "params", + "types", + TypeParameterDict( + k_b=float, l_min=float, l_c1=float, l_c0=float, l_max=float, len_keys=1 + ), + ) self._add_typeparam(params) super().__init__(mesh) __all__ = [ - 'FENEWCA', - 'Harmonic', - 'Tether', + "FENEWCA", + "Harmonic", + "Tether", ] diff --git a/hoomd/md/mesh/conservation.py b/hoomd/md/mesh/conservation.py index f68aecffee..4b5d92bdd2 100644 --- a/hoomd/md/mesh/conservation.py +++ b/hoomd/md/mesh/conservation.py @@ -70,13 +70,14 @@ class Area(MeshConservationPotential): * ``A0`` (`float`, **required**) - targeted global surface area :math:`[\mathrm{length}]^2]` """ + _cpp_class_name = "AreaConservationMeshForceCompute" - __doc__ = __doc__.replace("{inherited}", - MeshConservationPotential._doc_inherited) + __doc__ = __doc__.replace("{inherited}", MeshConservationPotential._doc_inherited) def __init__(self, mesh, ignore_type=False): - params = TypeParameter("params", "types", - TypeParameterDict(k=float, A0=float, len_keys=1)) + params = TypeParameter( + "params", "types", TypeParameterDict(k=float, A0=float, len_keys=1) + ) self._add_typeparam(params) super().__init__(mesh, ignore_type) @@ -133,13 +134,14 @@ class TriangleArea(MeshPotential): of a single triangle in the mesh :math:`[\mathrm{length}]^2` """ + _cpp_class_name = "TriangleAreaConservationMeshForceCompute" - __doc__ = __doc__.replace("{inherited}", - MeshConservationPotential._doc_inherited) + __doc__ = __doc__.replace("{inherited}", MeshConservationPotential._doc_inherited) def __init__(self, mesh): - params = TypeParameter("params", "types", - TypeParameterDict(k=float, A0=float, len_keys=1)) + params = TypeParameter( + "params", "types", TypeParameterDict(k=float, A0=float, len_keys=1) + ) self._add_typeparam(params) super().__init__(mesh) @@ -190,13 +192,14 @@ class Volume(MeshConservationPotential): * ``V0`` (`float`, **required**) - target volume :math:`[\mathrm{length}^{3}]` """ + _cpp_class_name = "VolumeConservationMeshForceCompute" - __doc__ = __doc__.replace("{inherited}", - MeshConservationPotential._doc_inherited) + __doc__ = __doc__.replace("{inherited}", MeshConservationPotential._doc_inherited) def __init__(self, mesh, ignore_type=False): - params = TypeParameter("params", "types", - TypeParameterDict(k=float, V0=float, len_keys=1)) + params = TypeParameter( + "params", 
"types", TypeParameterDict(k=float, V0=float, len_keys=1) + ) self._add_typeparam(params) super().__init__(mesh, ignore_type) @@ -208,8 +211,8 @@ def volume(self): __all__ = [ - 'Area', - 'MeshConservationPotential', - 'TriangleArea', - 'Volume', + "Area", + "MeshConservationPotential", + "TriangleArea", + "Volume", ] diff --git a/hoomd/md/mesh/potential.py b/hoomd/md/mesh/potential.py index ae08c9ebb8..84558bf55a 100644 --- a/hoomd/md/mesh/potential.py +++ b/hoomd/md/mesh/potential.py @@ -31,7 +31,9 @@ class MeshPotential(Force): """ __doc__ = __doc__.replace("{inherited}", Force._doc_inherited) - _doc_inherited = Force._doc_inherited + """ + _doc_inherited = ( + Force._doc_inherited + + """ ---------- **Members inherited from** @@ -42,6 +44,7 @@ class MeshPotential(Force): Mesh data structure used to compute the bond potential. `Read more... ` """ + ) def __init__(self, mesh): self._mesh = validate_mesh(mesh) @@ -53,7 +56,8 @@ def _attach_hook(self): f"{self} object is creating a new equivalent mesh structure." f" This is happending since the force is moving to a new " f"simulation. To suppress the warning explicitly set new mesh.", - RuntimeWarning) + RuntimeWarning, + ) self._mesh = copy.deepcopy(self._mesh) self.mesh._attach(self._simulation) @@ -62,8 +66,9 @@ def _attach_hook(self): else: cpp_cls = getattr(_md, self._cpp_class_name + "GPU") - self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, - self._mesh._cpp_obj) + self._cpp_obj = cpp_cls( + self._simulation.state._cpp_sys_def, self._mesh._cpp_obj + ) def _detach_hook(self): self._mesh._detach() @@ -74,8 +79,8 @@ def _apply_typeparam_dict(self, cpp_obj, simulation): typeparam._attach(cpp_obj, self.mesh) except ValueError as err: raise err.__class__( - f"For {type(self)} in TypeParameter {typeparam.name} " - f"{err!s}") + f"For {type(self)} in TypeParameter {typeparam.name} " f"{err!s}" + ) @property def mesh(self): @@ -85,8 +90,7 @@ def mesh(self): @mesh.setter def mesh(self, value): if self._attached: - raise RuntimeError( - "mesh cannot be set after calling Simulation.run().") + raise RuntimeError("mesh cannot be set after calling Simulation.run().") mesh = validate_mesh(value) self._mesh = mesh @@ -115,7 +119,8 @@ def _attach_hook(self): f"{self} object is creating a new equivalent mesh structure." f" This is happending since the force is moving to a new " f"simulation. To suppress the warning explicitly set new mesh.", - RuntimeWarning) + RuntimeWarning, + ) self._mesh = copy.deepcopy(self._mesh) self.mesh._attach(self._simulation) @@ -124,5 +129,6 @@ def _attach_hook(self): else: cpp_cls = getattr(_md, self._cpp_class_name + "GPU") - self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, - self._mesh._cpp_obj, self._ignore_type) + self._cpp_obj = cpp_cls( + self._simulation.state._cpp_sys_def, self._mesh._cpp_obj, self._ignore_type + ) diff --git a/hoomd/md/methods/__init__.py b/hoomd/md/methods/__init__.py index 4b32a9ee25..99627dc2b8 100644 --- a/hoomd/md/methods/__init__.py +++ b/hoomd/md/methods/__init__.py @@ -16,19 +16,27 @@ """ from . import rattle -from .methods import (Method, Langevin, Brownian, Thermostatted, ConstantVolume, - ConstantPressure, DisplacementCapped, OverdampedViscous) +from .methods import ( + Method, + Langevin, + Brownian, + Thermostatted, + ConstantVolume, + ConstantPressure, + DisplacementCapped, + OverdampedViscous, +) from . 
import thermostats __all__ = [ - 'Brownian', - 'ConstantPressure', - 'ConstantVolume', - 'DisplacementCapped', - 'Langevin', - 'Method', - 'OverdampedViscous', - 'Thermostatted', - 'rattle', - 'thermostats', + "Brownian", + "ConstantPressure", + "ConstantVolume", + "DisplacementCapped", + "Langevin", + "Method", + "OverdampedViscous", + "Thermostatted", + "rattle", + "thermostats", ] diff --git a/hoomd/md/methods/methods.py b/hoomd/md/methods/methods.py index 3efde7155c..297c31fc1b 100644 --- a/hoomd/md/methods/methods.py +++ b/hoomd/md/methods/methods.py @@ -80,13 +80,16 @@ class Thermostatted(Method): nvt.thermostat = hoomd.md.methods.thermostats.Bussi(kT=0.5) """ + _remove_for_pickling = (*AutotunedObject._remove_for_pickling, "_thermo") _skip_for_equality = AutotunedObject._skip_for_equality | { "_thermo", } __doc__ = __doc__.replace("{inherited}", Method._doc_inherited) - _doc_inherited = Method._doc_inherited + """ + _doc_inherited = ( + Method._doc_inherited + + """ ---------- **Members inherited from** @@ -97,6 +100,7 @@ class Thermostatted(Method): Temperature control for the integrator. `Read more... ` """ + ) def _setattr_param(self, attr, value): if attr == "thermostat": @@ -115,8 +119,7 @@ def _thermostat_setter(self, new_thermostat): return if new_thermostat._attached: - raise RuntimeError("Trying to set a thermostat that is " - "already attached") + raise RuntimeError("Trying to set a thermostat that is " "already attached") if self._attached: new_thermostat._set_thermo(self.filter, self._thermo) new_thermostat._attach(self._simulation) @@ -161,7 +164,8 @@ class ConstantVolume(Thermostatted): nvt = hoomd.md.methods.ConstantVolume( filter=hoomd.filter.All(), - thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5)) + thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5), + ) simulation.operations.integrator.methods = [nvt] {inherited} @@ -179,7 +183,9 @@ class ConstantVolume(Thermostatted): __doc__ = __doc__.replace("{inherited}", Thermostatted._doc_inherited) - _doc_inherited = Thermostatted._doc_inherited + """ + _doc_inherited = ( + Thermostatted._doc_inherited + + """ ---------- **Members inherited from** @@ -190,13 +196,14 @@ class ConstantVolume(Thermostatted): Subset of particles on which to apply this method. `Read more... ` """ + ) def __init__(self, filter, thermostat=None): super().__init__() # store metadata - param_dict = ParameterDict(filter=ParticleFilter, - thermostat=OnlyTypes(Thermostat, - allow_none=True)) + param_dict = ParameterDict( + filter=ParticleFilter, thermostat=OnlyTypes(Thermostat, allow_none=True) + ) param_dict.update(dict(filter=filter, thermostat=thermostat)) # set defaults self._param_dict.update(param_dict) @@ -357,10 +364,12 @@ class ConstantPressure(Thermostatted): .. 
code-block:: python - nph = hoomd.md.methods.ConstantPressure(filter=hoomd.filter.All(), - tauS=1.0, - S=2.0, - couple="xyz") + nph = hoomd.md.methods.ConstantPressure( + filter=hoomd.filter.All(), + tauS=1.0, + S=2.0, + couple="xyz", + ) simulation.operations.integrator.methods = [nph] NPT integrator with cubic symmetry: @@ -372,7 +381,8 @@ class ConstantPressure(Thermostatted): tauS=1.0, S=2.0, couple="xyz", - thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5)) + thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5), + ) simulation.operations.integrator.methods = [npt] NPT integrator with tetragonal symmetry: @@ -381,10 +391,11 @@ class ConstantPressure(Thermostatted): npt = hoomd.md.methods.ConstantPressure( filter=hoomd.filter.All(), - tauS = 1.0, + tauS=1.0, S=2.0, couple="xy", - thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5)) + thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5), + ) simulation.operations.integrator.methods = [npt] NPT integrator with orthorhombic symmetry: @@ -393,10 +404,11 @@ class ConstantPressure(Thermostatted): npt = hoomd.md.methods.ConstantPressure( filter=hoomd.filter.All(), - tauS = 1.0, + tauS=1.0, S=2.0, couple="none", - thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5)) + thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5), + ) simulation.operations.integrator.methods = [npt] @@ -406,11 +418,12 @@ class ConstantPressure(Thermostatted): npt = hoomd.md.methods.ConstantPressure( filter=hoomd.filter.All(), - tauS = 1.0, + tauS=1.0, S=2.0, couple="none", box_dof=[True, True, True, True, True, True], - thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5)) + thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5), + ) simulation.operations.integrator.methods = [npt] {inherited} @@ -437,10 +450,7 @@ class ConstantPressure(Thermostatted): .. code-block:: python - npt.S = hoomd.variant.Ramp(A=1.0, - B=2.0, - t_start=0, - t_ramp=1_000_000) + npt.S = hoomd.variant.Ramp(A=1.0, B=2.0, t_start=0, t_ramp=1_000_000) tauS (float): Coupling constant for the barostat :math:`[\mathrm{time}]`. @@ -458,7 +468,7 @@ class ConstantPressure(Thermostatted): .. code-block:: python - npt.couple = 'none' + npt.couple = "none" box_dof(list[bool]): Box degrees of freedom with six boolean elements in the order [x, y, z, xy, xz, yz]. @@ -495,8 +505,7 @@ class ConstantPressure(Thermostatted): .. 
code-block:: python - numpy.save(file=path / 'barostat_dof.npy', - arr=npt.barostat_dof) + numpy.save(file=path / "barostat_dof.npy", arr=npt.barostat_dof) Load when continuing: @@ -507,46 +516,54 @@ class ConstantPressure(Thermostatted): tauS=1.0, S=2.0, couple="xyz", - thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5)) + thermostat=hoomd.md.methods.thermostats.Bussi(kT=1.5), + ) simulation.operations.integrator.methods = [npt] - npt.barostat_dof = numpy.load(file=path / 'barostat_dof.npy') + npt.barostat_dof = numpy.load(file=path / "barostat_dof.npy") """ __doc__ = __doc__.replace("{inherited}", Thermostatted._doc_inherited) - def __init__(self, - filter, - S, - tauS, - couple, - thermostat=None, - box_dof=[True, True, True, False, False, False], - rescale_all=False, - gamma=0.0): + def __init__( + self, + filter, + S, + tauS, + couple, + thermostat=None, + box_dof=[True, True, True, False, False, False], + rescale_all=False, + gamma=0.0, + ): super().__init__() # store metadata - param_dict = ParameterDict(filter=ParticleFilter, - thermostat=OnlyTypes(Thermostat, - allow_none=True), - S=OnlyIf(to_type_converter((Variant,) * 6), - preprocess=self._preprocess_stress), - tauS=float(tauS), - couple=str(couple), - box_dof=[ - bool, - ] * 6, - rescale_all=bool(rescale_all), - gamma=float(gamma), - barostat_dof=(float, float, float, float, - float, float)) + param_dict = ParameterDict( + filter=ParticleFilter, + thermostat=OnlyTypes(Thermostat, allow_none=True), + S=OnlyIf( + to_type_converter((Variant,) * 6), preprocess=self._preprocess_stress + ), + tauS=float(tauS), + couple=str(couple), + box_dof=[ + bool, + ] + * 6, + rescale_all=bool(rescale_all), + gamma=float(gamma), + barostat_dof=(float, float, float, float, float, float), + ) param_dict.update( - dict(filter=filter, - thermostat=thermostat, - S=S, - couple=couple, - box_dof=box_dof, - barostat_dof=(0, 0, 0, 0, 0, 0))) + dict( + filter=filter, + thermostat=thermostat, + S=S, + couple=couple, + box_dof=box_dof, + barostat_dof=(0, 0, 0, 0, 0, 0), + ) + ) # set defaults self._param_dict.update(param_dict) @@ -568,17 +585,32 @@ def _attach_hook(self): thermo_full_step = thermo_cls(cpp_sys_def, thermo_group) if self.thermostat is None: - self._cpp_obj = cpp_cls(cpp_sys_def, thermo_group, thermo_full_step, - self.tauS, self.S, self.couple, - self.box_dof, None, self.gamma) + self._cpp_obj = cpp_cls( + cpp_sys_def, + thermo_group, + thermo_full_step, + self.tauS, + self.S, + self.couple, + self.box_dof, + None, + self.gamma, + ) else: self.thermostat._set_thermo(self.filter, self._thermo) self.thermostat._attach(self._simulation) - self._cpp_obj = cpp_cls(cpp_sys_def, thermo_group, thermo_full_step, - self.tauS, self.S, self.couple, - self.box_dof, self.thermostat._cpp_obj, - self.gamma) + self._cpp_obj = cpp_cls( + cpp_sys_def, + thermo_group, + thermo_full_step, + self.tauS, + self.S, + self.couple, + self.box_dof, + self.thermostat._cpp_obj, + self.gamma, + ) # Attach param_dict and typeparam_dict super()._attach_hook() @@ -586,8 +618,7 @@ def _attach_hook(self): def _preprocess_stress(self, value): if isinstance(value, Sequence): if len(value) != 6: - raise ValueError( - "Expected a single hoomd.variant.variant_like or six.") + raise ValueError("Expected a single hoomd.variant.variant_like or six.") return tuple(value) else: return (value, value, value, 0, 0, 0) @@ -613,8 +644,9 @@ def thermalize_barostat_dof(self): `hoomd.md.methods.thermostats.MTTK.thermalize_dof` """ if not self._attached: - raise RuntimeError("Call 
Simulation.run(0) before" - "thermalize_barostat_dof") + raise RuntimeError( + "Call Simulation.run(0) before" "thermalize_barostat_dof" + ) self._simulation._warn_if_seed_unset() self._cpp_obj.thermalizeBarostatDOF(self._simulation.timestep) @@ -659,7 +691,8 @@ class DisplacementCapped(ConstantVolume): displacement_capped = hoomd.md.methods.DisplacementCapped( filter=hoomd.filter.All(), - maximum_displacement=1e-3) + maximum_displacement=1e-3, + ) simulation.operations.integrator.methods = [displacement_capped] {inherited} @@ -680,9 +713,7 @@ class DisplacementCapped(ConstantVolume): __doc__ = __doc__.replace("{inherited}", ConstantVolume._doc_inherited) - def __init__(self, filter, - maximum_displacement: hoomd.variant.variant_like): - + def __init__(self, filter, maximum_displacement: hoomd.variant.variant_like): # store metadata super().__init__(filter) param_dict = ParameterDict(maximum_displacement=hoomd.variant.Variant) @@ -792,10 +823,9 @@ class Langevin(Method): .. code-block:: python - langevin.kT = hoomd.variant.Ramp(A=2.0, - B=1.0, - t_start=0, - t_ramp=1_000_000) + langevin.kT = hoomd.variant.Ramp( + A=2.0, B=1.0, t_start=0, t_ramp=1_000_000 + ) tally_reservoir_energy (bool): When True, track the energy exchange between the thermal reservoir and the particles. @@ -814,7 +844,7 @@ class Langevin(Method): .. code-block:: python - langevin.gamma['A'] = 0.5 + langevin.gamma["A"] = 0.5 gamma_r (TypeParameter[``particle type``,[`float`, `float` , `float`]]): The rotational drag coefficient tensor for each particle type @@ -825,20 +855,19 @@ class Langevin(Method): .. code-block:: python - langevin.gamma_r['A'] = [1.0, 2.0, 3.0] + langevin.gamma_r["A"] = [1.0, 2.0, 3.0] """ __doc__ = __doc__.replace("{inherited}", Method._doc_inherited) def __init__( - self, - filter, - kT, - tally_reservoir_energy=False, - default_gamma=1.0, - default_gamma_r=(1.0, 1.0, 1.0), + self, + filter, + kT, + tally_reservoir_energy=False, + default_gamma=1.0, + default_gamma_r=(1.0, 1.0, 1.0), ): - # store metadata param_dict = ParameterDict( filter=ParticleFilter, @@ -849,15 +878,18 @@ def __init__( # set defaults self._param_dict.update(param_dict) - gamma = TypeParameter('gamma', - type_kind='particle_types', - param_dict=TypeParameterDict(float, len_keys=1)) + gamma = TypeParameter( + "gamma", + type_kind="particle_types", + param_dict=TypeParameterDict(float, len_keys=1), + ) gamma.default = default_gamma - gamma_r = TypeParameter('gamma_r', - type_kind='particle_types', - param_dict=TypeParameterDict( - (float, float, float), len_keys=1)) + gamma_r = TypeParameter( + "gamma_r", + type_kind="particle_types", + param_dict=TypeParameterDict((float, float, float), len_keys=1), + ) gamma_r.default = default_gamma_r @@ -872,8 +904,9 @@ def _attach_hook(self): else: cls = _md.TwoStepLangevinGPU - self._cpp_obj = cls(sim.state._cpp_sys_def, - sim.state._get_group(self.filter), self.kT) + self._cpp_obj = cls( + sim.state._cpp_sys_def, sim.state._get_group(self.filter), self.kT + ) # Attach param_dict and typeparam_dict super()._attach_hook() @@ -889,7 +922,7 @@ def reservoir_energy(self): .. code-block:: python langevin.tally_reservoir_energy = True - logger.add(obj=langevin, quantities=['reservoir_energy']) + logger.add(obj=langevin, quantities=["reservoir_energy"]) Warning: When continuing a simulation, the energy of the reservoir will be @@ -1019,10 +1052,9 @@ class Brownian(Method): .. 
code-block:: python - brownian.kT = hoomd.variant.Ramp(A=2.0, - B=1.0, - t_start=0, - t_ramp=1_000_000) + brownian.kT = hoomd.variant.Ramp( + A=2.0, B=1.0, t_start=0, t_ramp=1_000_000 + ) gamma (TypeParameter[ ``particle type``, `float` ]): The drag coefficient for each particle type @@ -1032,7 +1064,7 @@ class Brownian(Method): .. code-block:: python - brownian.gamma['A'] = 0.5 + brownian.gamma["A"] = 0.5 gamma_r (TypeParameter[``particle type``,[`float`, `float` , `float`]]): The rotational drag coefficient tensor for each particle type @@ -1042,19 +1074,18 @@ class Brownian(Method): .. code-block:: python - brownian.gamma_r['A'] = [1.0, 2.0, 3.0] + brownian.gamma_r["A"] = [1.0, 2.0, 3.0] """ __doc__ = __doc__.replace("{inherited}", Method._doc_inherited) def __init__( - self, - filter, - kT, - default_gamma=1.0, - default_gamma_r=(1.0, 1.0, 1.0), + self, + filter, + kT, + default_gamma=1.0, + default_gamma_r=(1.0, 1.0, 1.0), ): - # store metadata param_dict = ParameterDict( filter=ParticleFilter, @@ -1065,15 +1096,18 @@ def __init__( # set defaults self._param_dict.update(param_dict) - gamma = TypeParameter('gamma', - type_kind='particle_types', - param_dict=TypeParameterDict(float, len_keys=1)) + gamma = TypeParameter( + "gamma", + type_kind="particle_types", + param_dict=TypeParameterDict(float, len_keys=1), + ) gamma.default = default_gamma - gamma_r = TypeParameter('gamma_r', - type_kind='particle_types', - param_dict=TypeParameterDict( - (float, float, float), len_keys=1)) + gamma_r = TypeParameter( + "gamma_r", + type_kind="particle_types", + param_dict=TypeParameterDict((float, float, float), len_keys=1), + ) gamma_r.default = default_gamma_r self._extend_typeparam([gamma, gamma_r]) @@ -1083,13 +1117,21 @@ def _attach_hook(self): self._simulation._warn_if_seed_unset() sim = self._simulation if isinstance(sim.device, hoomd.device.CPU): - self._cpp_obj = _md.TwoStepBD(sim.state._cpp_sys_def, - sim.state._get_group(self.filter), - self.kT, False, False) + self._cpp_obj = _md.TwoStepBD( + sim.state._cpp_sys_def, + sim.state._get_group(self.filter), + self.kT, + False, + False, + ) else: - self._cpp_obj = _md.TwoStepBDGPU(sim.state._cpp_sys_def, - sim.state._get_group(self.filter), - self.kT, False, False) + self._cpp_obj = _md.TwoStepBDGPU( + sim.state._cpp_sys_def, + sim.state._get_group(self.filter), + self.kT, + False, + False, + ) # Attach param_dict and typeparam_dict super()._attach_hook() @@ -1156,7 +1198,8 @@ class OverdampedViscous(Method): .. code-block:: python overdamped_viscous = hoomd.md.methods.OverdampedViscous( - filter=hoomd.filter.All()) + filter=hoomd.filter.All() + ) simulation.operations.integrator.methods = [overdamped_viscous] {inherited} @@ -1177,7 +1220,7 @@ class OverdampedViscous(Method): .. code-block:: python - overdamped_viscous.gamma['A'] = 0.5 + overdamped_viscous.gamma["A"] = 0.5 gamma_r (TypeParameter[``particle type``,[`float`, `float` , `float`]]): @@ -1188,34 +1231,38 @@ class OverdampedViscous(Method): .. 
code-block:: python - overdamped_viscous.gamma_r['A'] = [1.0, 2.0, 3.0] + overdamped_viscous.gamma_r["A"] = [1.0, 2.0, 3.0] """ __doc__ = __doc__.replace("{inherited}", Method._doc_inherited) def __init__( - self, - filter, - default_gamma=1.0, - default_gamma_r=(1.0, 1.0, 1.0), + self, + filter, + default_gamma=1.0, + default_gamma_r=(1.0, 1.0, 1.0), ): - # store metadata - param_dict = ParameterDict(filter=ParticleFilter,) + param_dict = ParameterDict( + filter=ParticleFilter, + ) param_dict.update(dict(filter=filter)) # set defaults self._param_dict.update(param_dict) - gamma = TypeParameter('gamma', - type_kind='particle_types', - param_dict=TypeParameterDict(float, len_keys=1)) + gamma = TypeParameter( + "gamma", + type_kind="particle_types", + param_dict=TypeParameterDict(float, len_keys=1), + ) gamma.default = default_gamma - gamma_r = TypeParameter('gamma_r', - type_kind='particle_types', - param_dict=TypeParameterDict( - (float, float, float), len_keys=1)) + gamma_r = TypeParameter( + "gamma_r", + type_kind="particle_types", + param_dict=TypeParameterDict((float, float, float), len_keys=1), + ) gamma_r.default = default_gamma_r self._extend_typeparam([gamma, gamma_r]) @@ -1225,15 +1272,21 @@ def _attach_hook(self): self._simulation._warn_if_seed_unset() sim = self._simulation if isinstance(sim.device, hoomd.device.CPU): - self._cpp_obj = _md.TwoStepBD(sim.state._cpp_sys_def, - sim.state._get_group(self.filter), - hoomd.variant.Constant(0.0), True, - True) + self._cpp_obj = _md.TwoStepBD( + sim.state._cpp_sys_def, + sim.state._get_group(self.filter), + hoomd.variant.Constant(0.0), + True, + True, + ) else: - self._cpp_obj = _md.TwoStepBDGPU(sim.state._cpp_sys_def, - sim.state._get_group(self.filter), - hoomd.variant.Constant(1.0), True, - True) + self._cpp_obj = _md.TwoStepBDGPU( + sim.state._cpp_sys_def, + sim.state._get_group(self.filter), + hoomd.variant.Constant(1.0), + True, + True, + ) # Attach param_dict and typeparam_dict super()._attach_hook() diff --git a/hoomd/md/methods/rattle.py b/hoomd/md/methods/rattle.py index 68c7c231f4..297175e5d5 100644 --- a/hoomd/md/methods/rattle.py +++ b/hoomd/md/methods/rattle.py @@ -66,7 +66,9 @@ class MethodRATTLE(Method): __doc__ = __doc__.replace("{inherited}", Method._doc_inherited) - _doc_inherited = Method._doc_inherited + """ + _doc_inherited = ( + Method._doc_inherited + + """ ---------- **Members inherited from** @@ -83,13 +85,14 @@ class MethodRATTLE(Method): deviate from the manifold in terms of the implicit function. `Read more... 
` """ + ) def __init__(self, manifold_constraint, tolerance): - - param_dict = ParameterDict(manifold_constraint=OnlyTypes( - Manifold, allow_none=False), - tolerance=float(tolerance)) - param_dict['manifold_constraint'] = manifold_constraint + param_dict = ParameterDict( + manifold_constraint=OnlyTypes(Manifold, allow_none=False), + tolerance=float(tolerance), + ) + param_dict["manifold_constraint"] = manifold_constraint # set defaults self._param_dict.update(param_dict) @@ -98,8 +101,7 @@ def _attach_constraint(self, sim): def _setattr_param(self, attr, value): if attr == "manifold_constraint": - raise AttributeError( - "Cannot set manifold_constraint after construction.") + raise AttributeError("Cannot set manifold_constraint after construction.") super()._setattr_param(attr, value) @@ -129,7 +131,8 @@ class NVE(MethodRATTLE): sphere = hoomd.md.manifold.Sphere(r=5) nve_rattle = hoomd.md.methods.rattle.NVE( filter=hoomd.filter.All(), - manifold_constraint=sphere) + manifold_constraint=sphere, + ) simulation.operations.integrator.methods = [nve_rattle] {inherited} @@ -144,7 +147,9 @@ class NVE(MethodRATTLE): """ __doc__ = __doc__.replace("{inherited}", MethodRATTLE._doc_inherited) - _doc_inherited = MethodRATTLE._doc_inherited + """ + _doc_inherited = ( + MethodRATTLE._doc_inherited + + """ ---------- **Members inherited from** @@ -155,9 +160,9 @@ class NVE(MethodRATTLE): Subset of particles on which to apply this method. `Read more... ` """ + ) def __init__(self, filter, manifold_constraint, tolerance=0.000001): - # store metadata param_dict = ParameterDict( filter=ParticleFilter, @@ -176,17 +181,22 @@ def _attach_hook(self): # initialize the reflected c++ class if isinstance(self._simulation.device, hoomd.device.CPU): my_class = getattr( - _md, 'TwoStepRATTLENVE' - + self.manifold_constraint.__class__.__name__) + _md, "TwoStepRATTLENVE" + self.manifold_constraint.__class__.__name__ + ) else: my_class = getattr( - _md, 'TwoStepRATTLENVE' - + self.manifold_constraint.__class__.__name__ + 'GPU') - - self._cpp_obj = my_class(self._simulation.state._cpp_sys_def, - self._simulation.state._get_group(self.filter), - self.manifold_constraint._cpp_obj, - self.tolerance) + _md, + "TwoStepRATTLENVE" + + self.manifold_constraint.__class__.__name__ + + "GPU", + ) + + self._cpp_obj = my_class( + self._simulation.state._cpp_sys_def, + self._simulation.state._get_group(self.filter), + self.manifold_constraint._cpp_obj, + self.tolerance, + ) class DisplacementCapped(NVE): @@ -223,7 +233,8 @@ class DisplacementCapped(NVE): relax_rattle = hoomd.md.methods.rattle.DisplacementCapped( filter=hoomd.filter.All(), maximum_displacement=0.01, - manifold_constraint=sphere) + manifold_constraint=sphere, + ) simulation.operations.integrator.methods = [relax_rattle] {inherited} @@ -240,12 +251,13 @@ class DisplacementCapped(NVE): __doc__ = __doc__.replace("{inherited}", NVE._doc_inherited) - def __init__(self, - filter: hoomd.filter.filter_like, - maximum_displacement: hoomd.variant.variant_like, - manifold_constraint: "hoomd.md.manifold.Manifold", - tolerance: float = 1e-6): - + def __init__( + self, + filter: hoomd.filter.filter_like, + maximum_displacement: hoomd.variant.variant_like, + manifold_constraint: "hoomd.md.manifold.Manifold", + tolerance: float = 1e-6, + ): # store metadata super().__init__(filter, manifold_constraint, tolerance) param_dict = ParameterDict(maximum_displacement=hoomd.variant.Variant) @@ -306,7 +318,8 @@ class Langevin(MethodRATTLE): kT=1.5, manifold_constraint=sphere, default_gamma=1.0, - 
default_gamma_r=(1.0, 1.0, 1.0)) + default_gamma_r=(1.0, 1.0, 1.0), + ) simulation.operations.integrator.methods = [langevin_rattle] {inherited} @@ -334,16 +347,15 @@ class Langevin(MethodRATTLE): __doc__ = __doc__.replace("{inherited}", MethodRATTLE._doc_inherited) def __init__( - self, - filter, - kT, - manifold_constraint, - tally_reservoir_energy=False, - tolerance=0.000001, - default_gamma=1.0, - default_gamma_r=(1.0, 1.0, 1.0), + self, + filter, + kT, + manifold_constraint, + tally_reservoir_energy=False, + tolerance=0.000001, + default_gamma=1.0, + default_gamma_r=(1.0, 1.0, 1.0), ): - # store metadata param_dict = ParameterDict( filter=ParticleFilter, @@ -354,15 +366,18 @@ def __init__( # set defaults self._param_dict.update(param_dict) - gamma = TypeParameter('gamma', - type_kind='particle_types', - param_dict=TypeParameterDict(1., len_keys=1)) + gamma = TypeParameter( + "gamma", + type_kind="particle_types", + param_dict=TypeParameterDict(1.0, len_keys=1), + ) gamma.default = default_gamma - gamma_r = TypeParameter('gamma_r', - type_kind='particle_types', - param_dict=TypeParameterDict((1., 1., 1.), - len_keys=1)) + gamma_r = TypeParameter( + "gamma_r", + type_kind="particle_types", + param_dict=TypeParameterDict((1.0, 1.0, 1.0), len_keys=1), + ) gamma_r.default = default_gamma_r self._extend_typeparam([gamma, gamma_r]) @@ -377,17 +392,24 @@ def _attach_hook(self): if isinstance(sim.device, hoomd.device.CPU): my_class = getattr( - _md, 'TwoStepRATTLELangevin' - + self.manifold_constraint.__class__.__name__) + _md, + "TwoStepRATTLELangevin" + self.manifold_constraint.__class__.__name__, + ) else: my_class = getattr( - _md, 'TwoStepRATTLELangevin' - + self.manifold_constraint.__class__.__name__ + 'GPU') - - self._cpp_obj = my_class(sim.state._cpp_sys_def, - sim.state._get_group(self.filter), - self.manifold_constraint._cpp_obj, self.kT, - self.tolerance) + _md, + "TwoStepRATTLELangevin" + + self.manifold_constraint.__class__.__name__ + + "GPU", + ) + + self._cpp_obj = my_class( + sim.state._cpp_sys_def, + sim.state._get_group(self.filter), + self.manifold_constraint._cpp_obj, + self.kT, + self.tolerance, + ) class Brownian(MethodRATTLE): @@ -432,7 +454,8 @@ class Brownian(MethodRATTLE): kT=1.5, manifold_constraint=sphere, default_gamma=1.0, - default_gamma_r=(1.0, 1.0, 1.0)) + default_gamma_r=(1.0, 1.0, 1.0), + ) simulation.operations.integrator.methods = [brownian_rattle] {inherited} @@ -459,14 +482,15 @@ class Brownian(MethodRATTLE): __doc__ = __doc__.replace("{inherited}", MethodRATTLE._doc_inherited) - def __init__(self, - filter, - kT, - manifold_constraint, - tolerance=1e-6, - default_gamma=1.0, - default_gamma_r=(1.0, 1.0, 1.0)): - + def __init__( + self, + filter, + kT, + manifold_constraint, + tolerance=1e-6, + default_gamma=1.0, + default_gamma_r=(1.0, 1.0, 1.0), + ): # store metadata param_dict = ParameterDict( filter=ParticleFilter, @@ -477,15 +501,18 @@ def __init__(self, # set defaults self._param_dict.update(param_dict) - gamma = TypeParameter('gamma', - type_kind='particle_types', - param_dict=TypeParameterDict(1., len_keys=1)) + gamma = TypeParameter( + "gamma", + type_kind="particle_types", + param_dict=TypeParameterDict(1.0, len_keys=1), + ) gamma.default = default_gamma - gamma_r = TypeParameter('gamma_r', - type_kind='particle_types', - param_dict=TypeParameterDict((1., 1., 1.), - len_keys=1)) + gamma_r = TypeParameter( + "gamma_r", + type_kind="particle_types", + param_dict=TypeParameterDict((1.0, 1.0, 1.0), len_keys=1), + ) gamma_r.default = default_gamma_r 
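Note: the ``gamma`` and ``gamma_r`` type parameters registered above take per-type overrides after construction, following the same pattern the non-RATTLE ``Brownian`` docstring shows earlier in this patch. A minimal sketch, assuming a particle type named "A" and the ``brownian_rattle`` object from the docstring example above:

    # Override the translational and rotational drag coefficients for type "A".
    brownian_rattle.gamma["A"] = 0.5
    brownian_rattle.gamma_r["A"] = [1.0, 2.0, 3.0]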
self._extend_typeparam([gamma, gamma_r]) @@ -500,17 +527,23 @@ def _attach_hook(self): if isinstance(sim.device, hoomd.device.CPU): my_class = getattr( - _md, - 'TwoStepRATTLEBD' + self.manifold_constraint.__class__.__name__) + _md, "TwoStepRATTLEBD" + self.manifold_constraint.__class__.__name__ + ) else: my_class = getattr( - _md, 'TwoStepRATTLEBD' - + self.manifold_constraint.__class__.__name__ + 'GPU') - - self._cpp_obj = my_class(sim.state._cpp_sys_def, - sim.state._get_group(self.filter), - self.manifold_constraint._cpp_obj, self.kT, - False, False, self.tolerance) + _md, + "TwoStepRATTLEBD" + self.manifold_constraint.__class__.__name__ + "GPU", + ) + + self._cpp_obj = my_class( + sim.state._cpp_sys_def, + sim.state._get_group(self.filter), + self.manifold_constraint._cpp_obj, + self.kT, + False, + False, + self.tolerance, + ) class OverdampedViscous(MethodRATTLE): @@ -550,7 +583,8 @@ class OverdampedViscous(MethodRATTLE): filter=hoomd.filter.All(), manifold_constraint=sphere, default_gamma=1.0, - default_gamma_r=(1.0, 1.0, 1.0)) + default_gamma_r=(1.0, 1.0, 1.0), + ) simulation.operations.integrator.methods = [odv_rattle] {inherited} @@ -574,28 +608,35 @@ class OverdampedViscous(MethodRATTLE): __doc__ = __doc__.replace("{inherited}", MethodRATTLE._doc_inherited) - def __init__(self, - filter, - manifold_constraint, - tolerance=1e-6, - default_gamma=1.0, - default_gamma_r=(1.0, 1.0, 1.0)): + def __init__( + self, + filter, + manifold_constraint, + tolerance=1e-6, + default_gamma=1.0, + default_gamma_r=(1.0, 1.0, 1.0), + ): # store metadata - param_dict = ParameterDict(filter=ParticleFilter,) + param_dict = ParameterDict( + filter=ParticleFilter, + ) param_dict.update(dict(filter=filter)) # set defaults self._param_dict.update(param_dict) - gamma = TypeParameter('gamma', - type_kind='particle_types', - param_dict=TypeParameterDict(1., len_keys=1)) + gamma = TypeParameter( + "gamma", + type_kind="particle_types", + param_dict=TypeParameterDict(1.0, len_keys=1), + ) gamma.default = default_gamma - gamma_r = TypeParameter('gamma_r', - type_kind='particle_types', - param_dict=TypeParameterDict((1., 1., 1.), - len_keys=1)) + gamma_r = TypeParameter( + "gamma_r", + type_kind="particle_types", + param_dict=TypeParameterDict((1.0, 1.0, 1.0), len_keys=1), + ) gamma_r.default = default_gamma_r self._extend_typeparam([gamma, gamma_r]) @@ -611,25 +652,30 @@ def _attach_hook(self): if isinstance(sim.device, hoomd.device.CPU): my_class = getattr( - _md, - 'TwoStepRATTLEBD' + self.manifold_constraint.__class__.__name__) + _md, "TwoStepRATTLEBD" + self.manifold_constraint.__class__.__name__ + ) else: my_class = getattr( - _md, 'TwoStepRATTLEBD' - + self.manifold_constraint.__class__.__name__ + 'GPU') - - self._cpp_obj = my_class(sim.state._cpp_sys_def, - sim.state._get_group(self.filter), - self.manifold_constraint._cpp_obj, - hoomd.variant.Constant(0.0), True, True, - self.tolerance) + _md, + "TwoStepRATTLEBD" + self.manifold_constraint.__class__.__name__ + "GPU", + ) + + self._cpp_obj = my_class( + sim.state._cpp_sys_def, + sim.state._get_group(self.filter), + self.manifold_constraint._cpp_obj, + hoomd.variant.Constant(0.0), + True, + True, + self.tolerance, + ) __all__ = [ - 'NVE', - 'Brownian', - 'DisplacementCapped', - 'Langevin', - 'MethodRATTLE', - 'OverdampedViscous', + "NVE", + "Brownian", + "DisplacementCapped", + "Langevin", + "MethodRATTLE", + "OverdampedViscous", ] diff --git a/hoomd/md/methods/thermostats.py b/hoomd/md/methods/thermostats.py index d6f43518d6..3aafbcf38b 100644 --- 
a/hoomd/md/methods/thermostats.py +++ b/hoomd/md/methods/thermostats.py @@ -24,8 +24,8 @@ .. code-block:: python simulation.state.thermalize_particle_momenta( - filter=hoomd.filter.All(), - kT=1.5) + filter=hoomd.filter.All(), kT=1.5 + ) .. invisible-code-block: python @@ -49,11 +49,13 @@ class Thermostat(_HOOMDBaseObject): Users should use the subclasses and not instantiate `Thermostat` directly. """ - _remove_for_pickling = (*_HOOMDBaseObject._remove_for_pickling, "_thermo", - "_filter") - _skip_for_equality = _HOOMDBaseObject._skip_for_equality | { - "_thermo", "_filter" - } + + _remove_for_pickling = ( + *_HOOMDBaseObject._remove_for_pickling, + "_thermo", + "_filter", + ) + _skip_for_equality = _HOOMDBaseObject._skip_for_equality | {"_thermo", "_filter"} def __init__(self, kT): param_dict = ParameterDict(kT=Variant) @@ -98,8 +100,10 @@ class MTTK(Thermostat): .. code-block:: python - mttk = hoomd.md.methods.thermostats.MTTK(kT=1.5, - tau=simulation.operations.integrator.dt*100) + mttk = hoomd.md.methods.thermostats.MTTK( + kT=1.5, + tau=simulation.operations.integrator.dt * 100, + ) simulation.operations.integrator.methods[0].thermostat = mttk Attributes: @@ -114,10 +118,7 @@ class MTTK(Thermostat): .. code-block:: python - mttk.kT = hoomd.variant.Ramp(A=1.0, - B=2.0, - t_start=0, - t_ramp=1_000_000) + mttk.kT = hoomd.variant.Ramp(A=1.0, B=2.0, t_start=0, t_ramp=1_000_000) tau (float): Coupling constant for the thermostat :math:`[\mathrm{time}]` @@ -141,19 +142,22 @@ class MTTK(Thermostat): .. code-block:: python - numpy.save(file=path / 'translational_dof.npy', - arr=mttk.translational_dof) + numpy.save( + file=path / "translational_dof.npy", + arr=mttk.translational_dof, + ) Load when continuing: .. code-block:: python - mttk = hoomd.md.methods.thermostats.MTTK(kT=1.5, - tau=simulation.operations.integrator.dt*100) + mttk = hoomd.md.methods.thermostats.MTTK( + kT=1.5, + tau=simulation.operations.integrator.dt * 100, + ) simulation.operations.integrator.methods[0].thermostat = mttk - mttk.translational_dof = numpy.load( - file=path / 'translational_dof.npy') + mttk.translational_dof = numpy.load(file=path / "translational_dof.npy") rotational_dof (tuple[float, float]): Additional degrees @@ -169,34 +173,39 @@ class MTTK(Thermostat): .. code-block:: python - numpy.save(file=path / 'rotational_dof.npy', - arr=mttk.rotational_dof) + numpy.save( + file=path / "rotational_dof.npy", + arr=mttk.rotational_dof, + ) Load when continuing: .. 
code-block:: python - mttk = hoomd.md.methods.thermostats.MTTK(kT=1.5, - tau=simulation.operations.integrator.dt*100) + mttk = hoomd.md.methods.thermostats.MTTK( + kT=1.5, + tau=simulation.operations.integrator.dt * 100, + ) simulation.operations.integrator.methods[0].thermostat = mttk - mttk.rotational_dof = numpy.load( - file=path / 'rotational_dof.npy') + mttk.rotational_dof = numpy.load(file=path / "rotational_dof.npy") """ def __init__(self, kT, tau): super().__init__(kT) - param_dict = ParameterDict(tau=float(tau), - translational_dof=(float, float), - rotational_dof=(float, float)) + param_dict = ParameterDict( + tau=float(tau), + translational_dof=(float, float), + rotational_dof=(float, float), + ) param_dict.update(dict(translational_dof=(0, 0), rotational_dof=(0, 0))) self._param_dict.update(param_dict) def _attach_hook(self): group = self._simulation.state._get_group(self._filter) - self._cpp_obj = _md.MTTKThermostat(self.kT, group, self._thermo, - self._simulation.state._cpp_sys_def, - self.tau) + self._cpp_obj = _md.MTTKThermostat( + self.kT, group, self._thermo, self._simulation.state._cpp_sys_def, self.tau + ) @hoomd.logging.log(requires_run=True) def energy(self): @@ -234,7 +243,8 @@ def thermalize_dof(self): if not self._attached: raise RuntimeError( "Call Simulation.run(0) before attempting to thermalize the " - "MTTK thermostat.") + "MTTK thermostat." + ) self._simulation._warn_if_seed_unset() self._cpp_obj.thermalizeThermostat(self._simulation.timestep) @@ -286,8 +296,9 @@ class Bussi(Thermostat): .. code-block:: python - bussi = hoomd.md.methods.thermostats.Bussi(kT=1.5, - tau=simulation.operations.integrator.dt*20) + bussi = hoomd.md.methods.thermostats.Bussi( + kT=1.5, tau=simulation.operations.integrator.dt * 20 + ) simulation.operations.integrator.methods[0].thermostat = bussi Attributes: @@ -302,10 +313,7 @@ class Bussi(Thermostat): .. code-block:: python - bussi.kT = hoomd.variant.Ramp(A=1.0, - B=2.0, - t_start=0, - t_ramp=1_000_000) + bussi.kT = hoomd.variant.Ramp(A=1.0, B=2.0, t_start=0, t_ramp=1_000_000) tau (float): Thermostat time constant :math:`[\mathrm{time}].` @@ -324,9 +332,9 @@ def __init__(self, kT, tau=0.0): def _attach_hook(self): group = self._simulation.state._get_group(self._filter) - self._cpp_obj = _md.BussiThermostat(self.kT, group, self._thermo, - self._simulation.state._cpp_sys_def, - self.tau) + self._cpp_obj = _md.BussiThermostat( + self.kT, group, self._thermo, self._simulation.state._cpp_sys_def, self.tau + ) self._simulation._warn_if_seed_unset() @@ -358,8 +366,10 @@ class Berendsen(Thermostat): .. code-block:: python - berendsen = hoomd.md.methods.thermostats.Berendsen(kT=1.5, - tau=simulation.operations.integrator.dt * 10_000) + berendsen = hoomd.md.methods.thermostats.Berendsen( + kT=1.5, + tau=simulation.operations.integrator.dt * 10_000, + ) simulation.operations.integrator.methods[0].thermostat = berendsen Attributes: @@ -374,10 +384,9 @@ class Berendsen(Thermostat): .. code-block:: python - berendsen.kT = hoomd.variant.Ramp(A=1.0, - B=2.0, - t_start=0, - t_ramp=1_000_000) + berendsen.kT = hoomd.variant.Ramp( + A=1.0, B=2.0, t_start=0, t_ramp=1_000_000 + ) tau (float): Time constant of thermostat. 
:math:`[time]` """ @@ -391,13 +400,13 @@ def __init__(self, kT, tau): def _attach_hook(self): group = self._simulation.state._get_group(self._filter) self._cpp_obj = _md.BerendsenThermostat( - self.kT, group, self._thermo, self._simulation.state._cpp_sys_def, - self.tau) + self.kT, group, self._thermo, self._simulation.state._cpp_sys_def, self.tau + ) __all__ = [ - 'MTTK', - 'Berendsen', - 'Bussi', - 'Thermostat', + "MTTK", + "Berendsen", + "Bussi", + "Thermostat", ] diff --git a/hoomd/md/minimize/__init__.py b/hoomd/md/minimize/__init__.py index 46cbbb0d91..d22e58446c 100644 --- a/hoomd/md/minimize/__init__.py +++ b/hoomd/md/minimize/__init__.py @@ -6,5 +6,5 @@ from hoomd.md.minimize.fire import FIRE __all__ = [ - 'FIRE', + "FIRE", ] diff --git a/hoomd/md/minimize/fire.py b/hoomd/md/minimize/fire.py index 7b5ce9b997..ebb8ef1b5e 100644 --- a/hoomd/md/minimize/fire.py +++ b/hoomd/md/minimize/fire.py @@ -112,21 +112,26 @@ class FIRE(_DynamicIntegrator): Examples:: - fire = md.minimize.FIRE(dt=0.05, - force_tol=1e-2, - angmom_tol=1e-2, - energy_tol=1e-7) + fire = md.minimize.FIRE( + dt=0.05, + force_tol=1e-2, + angmom_tol=1e-2, + energy_tol=1e-7, + ) fire.methods.append(md.methods.ConstantVolume(hoomd.filter.All())) sim.operations.integrator = fire - while not(fire.converged): - sim.run(100) + while not (fire.converged): + sim.run(100) fire = md.minimize.FIRE(dt=0.05) - fire.methods.append(md.methods.ConstantPressure( - hoomd.filter.All(), S=1, tauS=1, couple='none')) + fire.methods.append( + md.methods.ConstantPressure( + hoomd.filter.All(), S=1, tauS=1, couple="none" + ) + ) sim.operations.integrator = fire - while not(fire.converged): - sim.run(100) + while not (fire.converged): + sim.run(100) Note: To use `FIRE`, set it as the simulation's integrator in place of the @@ -196,25 +201,27 @@ class FIRE(_DynamicIntegrator): considered. 
""" - _cpp_class_name = "FIREEnergyMinimizer" - def __init__(self, - dt, - force_tol, - angmom_tol, - energy_tol, - integrate_rotational_dof=False, - forces=None, - constraints=None, - methods=None, - rigid=None, - min_steps_adapt=5, - finc_dt=1.1, - fdec_dt=0.5, - alpha_start=0.1, - fdec_alpha=0.99, - min_steps_conv=10): + _cpp_class_name = "FIREEnergyMinimizer" + def __init__( + self, + dt, + force_tol, + angmom_tol, + energy_tol, + integrate_rotational_dof=False, + forces=None, + constraints=None, + methods=None, + rigid=None, + min_steps_adapt=5, + finc_dt=1.1, + fdec_dt=0.5, + alpha_start=0.1, + fdec_alpha=0.99, + min_steps_conv=10, + ): super().__init__(forces, constraints, methods, rigid) pdict = ParameterDict( @@ -229,10 +236,8 @@ def __init__(self, angmom_tol=float(angmom_tol), energy_tol=float(energy_tol), min_steps_conv=OnlyTypes(int, preprocess=positive_real), - _defaults={ - 'min_steps_adapt': 5, - 'min_steps_conv': 10 - }) + _defaults={"min_steps_adapt": 5, "min_steps_conv": 10}, + ) self._param_dict.update(pdict) @@ -245,11 +250,16 @@ def __init__(self, self._methods.clear() methods_list = syncedlist.SyncedList( - OnlyTypes((hoomd.md.methods.ConstantVolume, - hoomd.md.methods.ConstantPressure, - hoomd.md.methods.rattle.NVE)), + OnlyTypes( + ( + hoomd.md.methods.ConstantVolume, + hoomd.md.methods.ConstantPressure, + hoomd.md.methods.rattle.NVE, + ) + ), syncedlist._PartialGetAttr("_cpp_obj"), - iterable=methods) + iterable=methods, + ) self._methods = methods_list def _attach_hook(self): diff --git a/hoomd/md/nlist.py b/hoomd/md/nlist.py index f554859898..04d523317b 100644 --- a/hoomd/md/nlist.py +++ b/hoomd/md/nlist.py @@ -120,7 +120,9 @@ class NeighborList(Compute): __doc__ = __doc__.replace("{inherited}", Compute._doc_inherited) - _doc_inherited = Compute._doc_inherited + """ + _doc_inherited = ( + Compute._doc_inherited + + """ ---------- **Members inherited from** @@ -187,28 +189,40 @@ class NeighborList(Compute): `Read more... 
` """ - - def __init__(self, buffer, exclusions, rebuild_check_delay, check_dist, - mesh, default_r_cut): - - validate_exclusions = OnlyFrom([ - 'bond', 'angle', 'constraint', 'dihedral', 'special_pair', 'body', - '1-3', '1-4', 'meshbond' - ]) + ) + + def __init__( + self, buffer, exclusions, rebuild_check_delay, check_dist, mesh, default_r_cut + ): + validate_exclusions = OnlyFrom( + [ + "bond", + "angle", + "constraint", + "dihedral", + "special_pair", + "body", + "1-3", + "1-4", + "meshbond", + ] + ) validate_mesh = OnlyTypes(Mesh, allow_none=True) tp_r_cut = TypeParameter( - 'r_cut', 'particle_types', - TypeParameterDict(nonnegative_real, len_keys=2)) + "r_cut", "particle_types", TypeParameterDict(nonnegative_real, len_keys=2) + ) tp_r_cut.default = default_r_cut self._add_typeparam(tp_r_cut) # default exclusions - params = ParameterDict(exclusions=[validate_exclusions], - buffer=float(buffer), - rebuild_check_delay=int(rebuild_check_delay), - check_dist=bool(check_dist)) + params = ParameterDict( + exclusions=[validate_exclusions], + buffer=float(buffer), + rebuild_check_delay=int(rebuild_check_delay), + check_dist=bool(check_dist), + ) params["exclusions"] = exclusions self._param_dict.update(params) @@ -250,12 +264,13 @@ def cpu_local_nlist_arrays(self): if not self._attached: raise hoomd.error.DataAccessError("cpu_local_nlist_arrays") if self._in_context_manager: - raise RuntimeError("Cannot enter cpu_local_nlist_arrays context " - "manager inside another local_nlist_arrays " - "context manager") + raise RuntimeError( + "Cannot enter cpu_local_nlist_arrays context " + "manager inside another local_nlist_arrays " + "context manager" + ) self._cpp_obj.compute(self._simulation.timestep) - return hoomd.md.data.NeighborListLocalAccess(self, - self._simulation.state) + return hoomd.md.data.NeighborListLocalAccess(self, self._simulation.state) @property def gpu_local_nlist_arrays(self): @@ -341,16 +356,17 @@ def gpu_local_nlist_arrays(self): """ if not isinstance(self._simulation.device, hoomd.device.GPU): raise RuntimeError( - "Cannot access gpu_local_nlist_arrays without a GPU device") + "Cannot access gpu_local_nlist_arrays without a GPU device" + ) if not self._attached: raise hoomd.error.DataAccessError("gpu_local_nlist_arrays") if self._in_context_manager: raise RuntimeError( "Cannot enter gpu_local_nlist_arrays context manager inside " - "another local_nlist_arrays context manager") + "another local_nlist_arrays context manager" + ) self._cpp_obj.compute(self._simulation.timestep) - return hoomd.md.data.NeighborListLocalAccessGPU(self, - self._simulation.state) + return hoomd.md.data.NeighborListLocalAccessGPU(self, self._simulation.state) @property def local_pair_list(self): @@ -454,31 +470,31 @@ class Cell(NeighborList): __doc__ = __doc__.replace("{inherited}", NeighborList._doc_inherited) - def __init__(self, - buffer, - exclusions=('bond',), - rebuild_check_delay=1, - check_dist=True, - deterministic=False, - mesh=None, - default_r_cut=0.0): - - super().__init__(buffer, exclusions, rebuild_check_delay, check_dist, - mesh, default_r_cut) - - self._param_dict.update( - ParameterDict(deterministic=bool(deterministic))) + def __init__( + self, + buffer, + exclusions=("bond",), + rebuild_check_delay=1, + check_dist=True, + deterministic=False, + mesh=None, + default_r_cut=0.0, + ): + super().__init__( + buffer, exclusions, rebuild_check_delay, check_dist, mesh, default_r_cut + ) + + self._param_dict.update(ParameterDict(deterministic=bool(deterministic))) def _attach_hook(self): if 
isinstance(self._simulation.device, hoomd.device.CPU): nlist_cls = _md.NeighborListBinned else: nlist_cls = _md.NeighborListGPUBinned - self._cpp_obj = nlist_cls(self._simulation.state._cpp_sys_def, - self.buffer) + self._cpp_obj = nlist_cls(self._simulation.state._cpp_sys_def, self.buffer) super()._attach_hook() - @log(requires_run=True, default=False, category='sequence') + @log(requires_run=True, default=False, category="sequence") def dimensions(self): """tuple[int, int, int]: Cell list dimensions. @@ -569,21 +585,24 @@ class Stencil(NeighborList): __doc__ = __doc__.replace("{inherited}", NeighborList._doc_inherited) - def __init__(self, - cell_width, - buffer, - exclusions=('bond',), - rebuild_check_delay=1, - check_dist=True, - deterministic=False, - mesh=None, - default_r_cut=0.0): - - super().__init__(buffer, exclusions, rebuild_check_delay, check_dist, - mesh, default_r_cut) - - params = ParameterDict(deterministic=bool(deterministic), - cell_width=float(cell_width)) + def __init__( + self, + cell_width, + buffer, + exclusions=("bond",), + rebuild_check_delay=1, + check_dist=True, + deterministic=False, + mesh=None, + default_r_cut=0.0, + ): + super().__init__( + buffer, exclusions, rebuild_check_delay, check_dist, mesh, default_r_cut + ) + + params = ParameterDict( + deterministic=bool(deterministic), cell_width=float(cell_width) + ) self._param_dict.update(params) @@ -592,8 +611,7 @@ def _attach_hook(self): nlist_cls = _md.NeighborListStencil else: nlist_cls = _md.NeighborListGPUStencil - self._cpp_obj = nlist_cls(self._simulation.state._cpp_sys_def, - self.buffer) + self._cpp_obj = nlist_cls(self._simulation.state._cpp_sys_def, self.buffer) super()._attach_hook() @@ -646,30 +664,31 @@ class Tree(NeighborList): __doc__ += NeighborList._doc_inherited - def __init__(self, - buffer, - exclusions=('bond',), - rebuild_check_delay=1, - check_dist=True, - mesh=None, - default_r_cut=0.0): - - super().__init__(buffer, exclusions, rebuild_check_delay, check_dist, - mesh, default_r_cut) + def __init__( + self, + buffer, + exclusions=("bond",), + rebuild_check_delay=1, + check_dist=True, + mesh=None, + default_r_cut=0.0, + ): + super().__init__( + buffer, exclusions, rebuild_check_delay, check_dist, mesh, default_r_cut + ) def _attach_hook(self): if isinstance(self._simulation.device, hoomd.device.CPU): nlist_cls = _md.NeighborListTree else: nlist_cls = _md.NeighborListGPUTree - self._cpp_obj = nlist_cls(self._simulation.state._cpp_sys_def, - self.buffer) + self._cpp_obj = nlist_cls(self._simulation.state._cpp_sys_def, self.buffer) super()._attach_hook() __all__ = [ - 'Cell', - 'NeighborList', - 'Stencil', - 'Tree', + "Cell", + "NeighborList", + "Stencil", + "Tree", ] diff --git a/hoomd/md/pair/__init__.py b/hoomd/md/pair/__init__.py index bc973d4e7e..c5de16f215 100644 --- a/hoomd/md/pair/__init__.py +++ b/hoomd/md/pair/__init__.py @@ -161,31 +161,31 @@ ) __all__ = [ - 'DLVO', - 'DPD', - 'DPDLJ', - 'LJ', - 'LJ0804', - 'LJ1208', - 'OPP', - 'TWF', - 'ZBL', - 'Buckingham', - 'DPDConservative', - 'Ewald', - 'ExpandedGaussian', - 'ExpandedLJ', - 'ExpandedMie', - 'ForceShiftedLJ', - 'Fourier', - 'Gaussian', - 'LJGauss', - 'Mie', - 'Moliere', - 'Morse', - 'Pair', - 'ReactionField', - 'Table', - 'Yukawa', - 'aniso', + "DLVO", + "DPD", + "DPDLJ", + "LJ", + "LJ0804", + "LJ1208", + "OPP", + "TWF", + "ZBL", + "Buckingham", + "DPDConservative", + "Ewald", + "ExpandedGaussian", + "ExpandedLJ", + "ExpandedMie", + "ForceShiftedLJ", + "Fourier", + "Gaussian", + "LJGauss", + "Mie", + "Moliere", + "Morse", + 
"Pair", + "ReactionField", + "Table", + "Yukawa", + "aniso", ] diff --git a/hoomd/md/pair/aniso.py b/hoomd/md/pair/aniso.py index 3e5de8b852..69421a49b6 100644 --- a/hoomd/md/pair/aniso.py +++ b/hoomd/md/pair/aniso.py @@ -45,6 +45,7 @@ class AnisotropicPair(Pair): This class should not be instantiated by users. The class can be used for `isinstance` or `issubclass` checks. """ + __doc__ += Pair._doc_inherited _accepted_modes = ("none", "shift") @@ -93,8 +94,8 @@ class Dipole(AnisotropicPair): nl = nlist.Cell() dipole = md.pair.ansio.Dipole(nl, default_r_cut=3.0) - dipole.params[('A', 'B')] = dict(A=1.0, kappa=4.0) - dipole.mu['A'] = (4.0, 1.0, 0.0) + dipole.params[("A", "B")] = dict(A=1.0, kappa=4.0) + dipole.mu["A"] = (4.0, 1.0, 0.0) {inherited} @@ -126,16 +127,20 @@ class Dipole(AnisotropicPair): Type: `TypeParameter` [``particle_type``, `tuple` [`float`, `float`, `float` ]] """ + _cpp_class_name = "AnisoPotentialPairDipole" __doc__ = __doc__.replace("{inherited}", AnisotropicPair._doc_inherited) def __init__(self, nlist, default_r_cut=None): - super().__init__(nlist, default_r_cut, 'none') + super().__init__(nlist, default_r_cut, "none") params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(A=float, kappa=float, len_keys=2)) - mu = TypeParameter('mu', 'particle_types', - TypeParameterDict((float, float, float), len_keys=1)) + "params", + "particle_types", + TypeParameterDict(A=float, kappa=float, len_keys=2), + ) + mu = TypeParameter( + "mu", "particle_types", TypeParameterDict((float, float, float), len_keys=1) + ) self._extend_typeparam((params, mu)) @@ -218,17 +223,17 @@ class GayBerne(AnisotropicPair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "AnisoPotentialPairGB" __doc__ = __doc__.replace("{inherited}", AnisotropicPair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, mode='none'): + def __init__(self, nlist, default_r_cut=None, mode="none"): super().__init__(nlist, default_r_cut, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, - lperp=float, - lpar=float, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict(epsilon=float, lperp=float, lpar=float, len_keys=2), + ) self._add_typeparam(params) @log(category="object") @@ -535,33 +540,43 @@ class ALJ(AnisotropicPair): # created in _attach based on the dimension of the associated simulation. 
def __init__(self, nlist, default_r_cut=None): - super().__init__(nlist, default_r_cut, 'none') + super().__init__(nlist, default_r_cut, "none") params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, - sigma_i=float, - sigma_j=float, - alpha=int, - contact_ratio_i=0.15, - contact_ratio_j=0.15, - average_simplices=True, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict( + epsilon=float, + sigma_i=float, + sigma_j=float, + alpha=int, + contact_ratio_i=0.15, + contact_ratio_j=0.15, + average_simplices=True, + len_keys=2, + ), + ) shape = TypeParameter( - 'shape', 'particle_types', - TypeParameterDict(vertices=[(float, float, float)], - faces=[[int]], - rounding_radii=OnlyIf( - to_type_converter((float, float, float)), - preprocess=self._to_three_tuple), - len_keys=1, - _defaults={'rounding_radii': (0.0, 0.0, 0.0)})) + "shape", + "particle_types", + TypeParameterDict( + vertices=[(float, float, float)], + faces=[[int]], + rounding_radii=OnlyIf( + to_type_converter((float, float, float)), + preprocess=self._to_three_tuple, + ), + len_keys=1, + _defaults={"rounding_radii": (0.0, 0.0, 0.0)}, + ), + ) self._extend_typeparam((params, shape)) def _attach_hook(self): self._cpp_class_name = "AnisoPotentialPairALJ{}".format( - "2D" if self._simulation.state.box.is2D else "3D") + "2D" if self._simulation.state.box.is2D else "3D" + ) super()._attach_hook() @@ -730,8 +745,11 @@ class Patchy(AnisotropicPair): patchy.directors['A'] = [] """ + __doc__ = __doc__.replace("{inherited}", AnisotropicPair._doc_inherited) - _doc_inherited = AnisotropicPair._doc_inherited + r""" + _doc_inherited = ( + AnisotropicPair._doc_inherited + + r""" ---------- **Members inherited from** `Patchy `: @@ -742,23 +760,29 @@ class Patchy(AnisotropicPair): `Read more... ` """ + ) - def __init__(self, nlist, default_r_cut=None, mode='none'): + def __init__(self, nlist, default_r_cut=None, mode="none"): super().__init__(nlist, default_r_cut, mode) params = TypeParameter( - 'params', 'particle_types', + "params", + "particle_types", TypeParameterDict( { "pair_params": self._pair_params, "envelope_params": { "alpha": OnlyTypes(float, postprocess=self._check_0_pi), "omega": float, - } + }, }, - len_keys=2)) + len_keys=2, + ), + ) envelope = TypeParameter( - 'directors', 'particle_types', - TypeParameterDict([(float, float, float)], len_keys=1)) + "directors", + "particle_types", + TypeParameterDict([(float, float, float)], len_keys=1), + ) self._extend_typeparam((params, envelope)) @staticmethod @@ -783,13 +807,16 @@ class PatchyLJ(Patchy): .. code-block:: python lj_params = dict(epsilon=1, sigma=1) - envelope_params=dict(alpha=math.pi/2, omega=20) - - patchylj = hoomd.md.pair.aniso.PatchyLJ(nlist=neighbor_list, - default_r_cut=3.0) - patchylj.params[('A', 'A')] = dict(pair_params=lj_params, - envelope_params=envelope_params) - patchylj.directors['A'] = [(1,0,0)] + envelope_params = dict(alpha=math.pi / 2, omega=20) + + patchylj = hoomd.md.pair.aniso.PatchyLJ( + nlist=neighbor_list, default_r_cut=3.0 + ) + patchylj.params[("A", "A")] = dict( + pair_params=lj_params, + envelope_params=envelope_params, + ) + patchylj.directors["A"] = [(1, 0, 0)] simulation.operations.integrator.forces = [patchylj] {inherited} @@ -835,16 +862,20 @@ class PatchyExpandedGaussian(Patchy): .. 
code-block:: python - gauss_params=dict(epsilon=1, sigma=1, delta=0.5) - envelope_params=dict(alpha=math.pi/2, omega=40) + gauss_params = dict(epsilon=1, sigma=1, delta=0.5) + envelope_params = dict(alpha=math.pi / 2, omega=40) patchy_expanded_gaussian = hoomd.md.pair.aniso.PatchyExpandedGaussian( - nlist=neighbor_list, - default_r_cut=3.0) - patchy_expanded_gaussian.params[('A', 'A')] = dict( + nlist=neighbor_list, default_r_cut=3.0 + ) + patchy_expanded_gaussian.params[("A", "A")] = dict( pair_params=gauss_params, - envelope_params=envelope_params) - patchy_expanded_gaussian.directors['A'] = [(1,0,0), (1,1,1)] + envelope_params=envelope_params, + ) + patchy_expanded_gaussian.directors["A"] = [ + (1, 0, 0), + (1, 1, 1), + ] simulation.operations.integrator.forces = [patchy_expanded_gaussian] {inherited} @@ -875,6 +906,7 @@ class PatchyExpandedGaussian(Patchy): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + __doc__ = __doc__.replace("{inherited}", Patchy._doc_inherited) _cpp_class_name = "AnisoPotentialPairPatchyExpandedGaussian" _pair_params = {"epsilon": float, "sigma": float, "delta": float} @@ -892,14 +924,17 @@ class PatchyExpandedLJ(Patchy): .. code-block:: python - lj_params=dict(epsilon=1, sigma=1) - envelope_params=dict(alpha=math.pi/2, omega=20) - - patchylj = hoomd.md.pair.aniso.PatchyLJ(nlist=neighbor_list, - default_r_cut=3.0) - patchylj.params[('A', 'A')] = dict(pair_params=lj_params, - envelope_params=envelope_params) - patchylj.directors['A'] = [(1,0,0)] + lj_params = dict(epsilon=1, sigma=1) + envelope_params = dict(alpha=math.pi / 2, omega=20) + + patchylj = hoomd.md.pair.aniso.PatchyLJ( + nlist=neighbor_list, default_r_cut=3.0 + ) + patchylj.params[("A", "A")] = dict( + pair_params=lj_params, + envelope_params=envelope_params, + ) + patchylj.directors["A"] = [(1, 0, 0)] simulation.operations.integrator.forces = [patchylj] {inherited} @@ -930,6 +965,7 @@ class PatchyExpandedLJ(Patchy): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + __doc__ = __doc__.replace("{inherited}", Patchy._doc_inherited) _cpp_class_name = "AnisoPotentialPairPatchyExpandedLJ" _pair_params = {"epsilon": float, "sigma": float, "delta": float} @@ -947,16 +983,17 @@ class PatchyExpandedMie(Patchy): .. 
code-block:: python - expanded_mie_params = dict(epsilon=1, sigma=1, - n=15, m=10, delta=1) - envelope_params = dict(alpha=math.pi/3, omega=20) + expanded_mie_params = dict(epsilon=1, sigma=1, n=15, m=10, delta=1) + envelope_params = dict(alpha=math.pi / 3, omega=20) patchy_expanded_mie = hoomd.md.pair.aniso.PatchyExpandedMie( - nlist=neighbor_list, default_r_cut=3.0) - patchy_expanded_mie.params[('A', 'A')] = dict( + nlist=neighbor_list, default_r_cut=3.0 + ) + patchy_expanded_mie.params[("A", "A")] = dict( pair_params=expanded_mie_params, - envelope_params=envelope_params) - patchy_expanded_mie.directors['A'] = [(1,0,0)] + envelope_params=envelope_params, + ) + patchy_expanded_mie.directors["A"] = [(1, 0, 0)] simulation.operations.integrator.forces = [patchy_expanded_mie] {inherited} @@ -991,6 +1028,7 @@ class PatchyExpandedMie(Patchy): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + __doc__ = __doc__.replace("{inherited}", Patchy._doc_inherited) _cpp_class_name = "AnisoPotentialPairPatchyExpandedMie" _pair_params = { @@ -998,7 +1036,7 @@ class PatchyExpandedMie(Patchy): "sigma": float, "n": float, "m": float, - "delta": float + "delta": float, } @@ -1014,14 +1052,17 @@ class PatchyGaussian(Patchy): .. code-block:: python - gauss_params=dict(epsilon=1, sigma=1) - envelope_params=dict(alpha=math.pi/4, omega=30) + gauss_params = dict(epsilon=1, sigma=1) + envelope_params = dict(alpha=math.pi / 4, omega=30) - patchy_gaussian = hoomd.md.pair.aniso.PatchyGaussian(nlist=neighbor_list, - default_r_cut=3.0) - patchy_gaussian.params[('A', 'A')] = dict(pair_params=gauss_params, - envelope_params=envelope_params) - patchy_gaussian.directors['A'] = [(1,0,0)] + patchy_gaussian = hoomd.md.pair.aniso.PatchyGaussian( + nlist=neighbor_list, default_r_cut=3.0 + ) + patchy_gaussian.params[("A", "A")] = dict( + pair_params=gauss_params, + envelope_params=envelope_params, + ) + patchy_gaussian.directors["A"] = [(1, 0, 0)] simulation.operations.integrator.forces = [patchy_gaussian] {inherited} @@ -1056,6 +1097,7 @@ class PatchyGaussian(Patchy): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + __doc__ = __doc__.replace("{inherited}", Patchy._doc_inherited) _cpp_class_name = "AnisoPotentialPairPatchyGauss" _pair_params = {"epsilon": float, "sigma": float} @@ -1074,13 +1116,16 @@ class PatchyMie(Patchy): .. code-block:: python mie_params = dict(epsilon=1, sigma=1, n=15, m=10) - envelope_params = dict(alpha=math.pi/3, omega=20) - - patchy_mie = hoomd.md.pair.aniso.PatchyMie(nlist=neighbor_list, - default_r_cut=3.0) - patchy_mie.params[('A', 'A')] = dict(pair_params=mie_params, - envelope_params = envelope_params) - patchy_mie.directors['A'] = [(1,0,0)] + envelope_params = dict(alpha=math.pi / 3, omega=20) + + patchy_mie = hoomd.md.pair.aniso.PatchyMie( + nlist=neighbor_list, default_r_cut=3.0 + ) + patchy_mie.params[("A", "A")] = dict( + pair_params=mie_params, + envelope_params=envelope_params, + ) + patchy_mie.directors["A"] = [(1, 0, 0)] simulation.operations.integrator.forces = [patchy_mie] {inherited} @@ -1131,13 +1176,16 @@ class PatchyYukawa(Patchy): .. 
code-block:: python yukawa_params = dict(epsilon=1, kappa=10) - envelope_params = dict(alpha=math.pi/4, omega=25) - - patchy_yukawa = hoomd.md.pair.aniso.PatchyYukawa(nlist=neighbor_list, - default_r_cut=5.0) - patchy_yukawa.params[('A', 'A')] = dict(pair_params=yukawa_params, - envelope_params=envelope_params) - patchy_yukawa.directors['A'] = [(1,0,0)] + envelope_params = dict(alpha=math.pi / 4, omega=25) + + patchy_yukawa = hoomd.md.pair.aniso.PatchyYukawa( + nlist=neighbor_list, default_r_cut=5.0 + ) + patchy_yukawa.params[("A", "A")] = dict( + pair_params=yukawa_params, + envelope_params=envelope_params, + ) + patchy_yukawa.directors["A"] = [(1, 0, 0)] simulation.operations.integrator.forces = [patchy_yukawa] {inherited} @@ -1172,16 +1220,16 @@ class PatchyYukawa(Patchy): __all__ = [ - 'ALJ', - 'AnisotropicPair', - 'Dipole', - 'GayBerne', - 'Patchy', - 'PatchyExpandedGaussian', - 'PatchyExpandedLJ', - 'PatchyExpandedMie', - 'PatchyGaussian', - 'PatchyLJ', - 'PatchyMie', - 'PatchyYukawa', + "ALJ", + "AnisotropicPair", + "Dipole", + "GayBerne", + "Patchy", + "PatchyExpandedGaussian", + "PatchyExpandedLJ", + "PatchyExpandedMie", + "PatchyGaussian", + "PatchyLJ", + "PatchyMie", + "PatchyYukawa", ] diff --git a/hoomd/md/pair/pair.py b/hoomd/md/pair/pair.py index 2473b763b2..8b7ce9e33c 100644 --- a/hoomd/md/pair/pair.py +++ b/hoomd/md/pair/pair.py @@ -63,7 +63,9 @@ class Pair(force.Force): """ __doc__ = __doc__.replace("{inherited}", force.Force._doc_inherited) - _doc_inherited = force.Force._doc_inherited + """ + _doc_inherited = ( + force.Force._doc_inherited + + """ ---------- **Members inherited from** @@ -94,6 +96,7 @@ class Pair(force.Force): Compute the energy between two sets of particles. `Read more... ` """ + ) # The accepted modes for the potential. Should be reset by subclasses with # restricted modes. @@ -103,26 +106,29 @@ class Pair(force.Force): # external plugin. 
_ext_module = _md - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__() tp_r_cut = TypeParameter( - 'r_cut', 'particle_types', - TypeParameterDict(nonnegative_real, len_keys=2)) + "r_cut", "particle_types", TypeParameterDict(nonnegative_real, len_keys=2) + ) if default_r_cut is not None: tp_r_cut.default = default_r_cut - tp_r_on = TypeParameter('r_on', 'particle_types', - TypeParameterDict(nonnegative_real, len_keys=2)) + tp_r_on = TypeParameter( + "r_on", "particle_types", TypeParameterDict(nonnegative_real, len_keys=2) + ) if default_r_on is not None: tp_r_on.default = default_r_on type_params = [tp_r_cut] - if 'xplor' in self._accepted_modes: + if "xplor" in self._accepted_modes: type_params.append(tp_r_on) self._extend_typeparam(type_params) self._param_dict.update( - ParameterDict(mode=OnlyFrom(self._accepted_modes), - nlist=hoomd.md.nlist.NeighborList)) + ParameterDict( + mode=OnlyFrom(self._accepted_modes), nlist=hoomd.md.nlist.NeighborList + ) + ) self.mode = mode self.nlist = nlist @@ -152,10 +158,12 @@ def compute_energy(self, tags1, tags2): Examples:: - tags=numpy.linspace(0,N-1,1, dtype=numpy.int32) + tags = numpy.linspace(0, N - 1, 1, dtype=numpy.int32) # computes the energy between even and odd particles - U = mypair.compute_energy(tags1=numpy.array(tags[0:N:2]), - tags2=numpy.array(tags[1:N:2])) + U = mypair.compute_energy( + tags1=numpy.array(tags[0:N:2]), + tags2=numpy.array(tags[1:N:2]), + ) """ # TODO future versions could use np functions to test the assumptions @@ -168,19 +176,17 @@ def _attach_hook(self): f"{self} object is creating a new equivalent neighbor list." f" This is happending since the force is moving to a new " f"simulation. 
Set a new nlist to suppress this warning.", - RuntimeWarning) + RuntimeWarning, + ) self.nlist = copy.deepcopy(self.nlist) self.nlist._attach(self._simulation) if isinstance(self._simulation.device, hoomd.device.CPU): cls = getattr(self._ext_module, self._cpp_class_name) - self.nlist._cpp_obj.setStorageMode( - _md.NeighborList.storageMode.half) + self.nlist._cpp_obj.setStorageMode(_md.NeighborList.storageMode.half) else: cls = getattr(self._ext_module, self._cpp_class_name + "GPU") - self.nlist._cpp_obj.setStorageMode( - _md.NeighborList.storageMode.full) - self._cpp_obj = cls(self._simulation.state._cpp_sys_def, - self.nlist._cpp_obj) + self.nlist._cpp_obj.setStorageMode(_md.NeighborList.storageMode.full) + self._cpp_obj = cls(self._simulation.state._cpp_sys_def, self.nlist._cpp_obj) def _detach_hook(self): self.nlist._detach() @@ -222,8 +228,8 @@ class LJ(Pair): nl = nlist.Cell() lj = pair.LJ(nl, default_r_cut=3.0) - lj.params[('A', 'A')] = {'sigma': 1.0, 'epsilon': 1.0} - lj.r_cut[('A', 'B')] = 3.0 + lj.params[("A", "A")] = {"sigma": 1.0, "epsilon": 1.0} + lj.r_cut[("A", "B")] = 3.0 {inherited} @@ -249,22 +255,26 @@ class LJ(Pair): Type: `bool` """ + _cpp_class_name = "PotentialPairLJ" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, - nlist, - default_r_cut=None, - default_r_on=0., - mode='none', - tail_correction=False): + def __init__( + self, + nlist, + default_r_cut=None, + default_r_on=0.0, + mode="none", + tail_correction=False, + ): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, sigma=float, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(epsilon=float, sigma=float, len_keys=2), + ) self._add_typeparam(params) - self._param_dict.update( - ParameterDict(tail_correction=bool(tail_correction))) + self._param_dict.update(ParameterDict(tail_correction=bool(tail_correction))) class Gaussian(Pair): @@ -287,8 +297,8 @@ class Gaussian(Pair): nl = nlist.Cell() gauss = pair.Gaussian(default_r_cut=3.0, nlist=nl) - gauss.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) - gauss.r_cut[('A', 'B')] = 3.0 + gauss.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) + gauss.r_cut[("A", "B")] = 3.0 {inherited} @@ -309,14 +319,17 @@ class Gaussian(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairGauss" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, sigma=positive_real, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(epsilon=float, sigma=positive_real, len_keys=2), + ) self._add_typeparam(params) @@ -340,9 +353,10 @@ class ExpandedGaussian(Pair): nl = nlist.Cell() expanded_gauss = pair.ExpandedGaussian(default_r_cut=3.0, nlist=nl) - expanded_gauss.params[('A', 'A')] = dict(epsilon=1.0, - sigma=1.0, delta=0.5) - expanded_gauss.r_cut[('A', 'B')] = 3.0 + expanded_gauss.params[("A", "A")] = dict( + epsilon=1.0, sigma=1.0, delta=0.5 + ) + expanded_gauss.r_cut[("A", "B")] = 3.0 {inherited} @@ -365,17 +379,19 @@ class ExpandedGaussian(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = 
"PotentialPairExpandedGaussian" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, - sigma=positive_real, - delta=float, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict( + epsilon=float, sigma=positive_real, delta=float, len_keys=2 + ), + ) self._add_typeparam(params) @@ -402,12 +418,11 @@ class ExpandedLJ(Pair): nl = nlist.Cell() expanded_lj = pair.ExpandedLJ(default_r_cut=3.0, nlist=nl) - expanded_lj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0, delta=1.0) - expanded_lj.params[('A', 'B')] = dict( - epsilon=2.0, - sigma=1.0, - delta=0.75) - expanded_lj.params[('B', 'B')] = dict(epsilon=1.0, sigma=1.0, delta=0.5) + expanded_lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0, delta=1.0) + expanded_lj.params[("A", "B")] = dict( + epsilon=2.0, sigma=1.0, delta=0.75 + ) + expanded_lj.params[("B", "B")] = dict(epsilon=1.0, sigma=1.0, delta=0.5) {inherited} @@ -429,17 +444,17 @@ class ExpandedLJ(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ - _cpp_class_name = 'PotentialPairExpandedLJ' + + _cpp_class_name = "PotentialPairExpandedLJ" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, - sigma=float, - delta=float, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict(epsilon=float, sigma=float, delta=float, len_keys=2), + ) self._add_typeparam(params) self.mode = mode @@ -465,8 +480,8 @@ class Yukawa(Pair): nl = nlist.Cell() yukawa = pair.Yukawa(default_r_cut=3.0, nlist=nl) - yukawa.params[('A', 'A')] = dict(epsilon=1.0, kappa=1.0) - yukawa.r_cut[('A', 'B')] = 3.0 + yukawa.params[("A", "A")] = dict(epsilon=1.0, kappa=1.0) + yukawa.r_cut[("A", "B")] = 3.0 {inherited} @@ -487,14 +502,17 @@ class Yukawa(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairYukawa" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(kappa=float, epsilon=float, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(kappa=float, epsilon=float, len_keys=2), + ) self._add_typeparam(params) @@ -525,8 +543,8 @@ class Ewald(Pair): nl = nlist.Cell() ewald = pair.Ewald(default_r_cut=3.0, nlist=nl) - ewald.params[('A', 'A')] = dict(kappa=1.0, alpha=1.5) - ewald.r_cut[('A', 'B')] = 3.0 + ewald.params[("A", "A")] = dict(kappa=1.0, alpha=1.5) + ewald.r_cut[("A", "B")] = 3.0 {inherited} @@ -546,18 +564,20 @@ class Ewald(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairEwald" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) _accepted_modes = ("none",) def 
__init__(self, nlist, default_r_cut=None): - super().__init__(nlist=nlist, - default_r_cut=default_r_cut, - default_r_on=0, - mode='none') + super().__init__( + nlist=nlist, default_r_cut=default_r_cut, default_r_on=0, mode="none" + ) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(kappa=float, alpha=0.0, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(kappa=float, alpha=0.0, len_keys=2), + ) self._add_typeparam(params) @@ -649,22 +669,25 @@ class Table(Pair): the tabulated force values :math:`[\\mathrm{force}]`. Must have the same length as ``U``. """ + _cpp_class_name = "PotentialPairTable" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) _accepted_modes = ("none",) def __init__(self, nlist, default_r_cut=None): - super().__init__(nlist, - default_r_cut=default_r_cut, - default_r_on=0, - mode='none') + super().__init__( + nlist, default_r_cut=default_r_cut, default_r_on=0, mode="none" + ) params = TypeParameter( - 'params', 'particle_types', + "params", + "particle_types", TypeParameterDict( r_min=float, U=hoomd.data.typeconverter.NDArrayValidator(np.float64), F=hoomd.data.typeconverter.NDArrayValidator(np.float64), - len_keys=2)) + len_keys=2, + ), + ) self._add_typeparam(params) @@ -689,8 +712,8 @@ class Morse(Pair): nl = nlist.Cell() morse = pair.Morse(default_r_cut=3.0, nlist=nl) - morse.params[('A', 'A')] = dict(D0=1.0, alpha=3.0, r0=1.0) - morse.r_cut[('A', 'B')] = 3.0 + morse.params[("A", "A")] = dict(D0=1.0, alpha=3.0, r0=1.0) + morse.r_cut[("A", "B")] = 3.0 {inherited} @@ -716,11 +739,13 @@ class Morse(Pair): _cpp_class_name = "PotentialPairMorse" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(D0=float, alpha=float, r0=float, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(D0=float, alpha=float, r0=float, len_keys=2), + ) self._add_typeparam(params) @@ -803,6 +828,7 @@ class DPD(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairDPDThermoDPD" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) _accepted_modes = ("none",) @@ -813,13 +839,14 @@ def __init__( kT, default_r_cut=None, ): - super().__init__(nlist=nlist, - default_r_cut=default_r_cut, - default_r_on=0, - mode='none') + super().__init__( + nlist=nlist, default_r_cut=default_r_cut, default_r_on=0, mode="none" + ) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(A=float, gamma=float, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(A=float, gamma=float, len_keys=2), + ) self._add_typeparam(params) param_dict = ParameterDict(kT=hoomd.variant.Variant) param_dict["kT"] = kT @@ -853,9 +880,9 @@ class DPDConservative(Pair): nl = nlist.Cell() dpdc = pair.DPDConservative(nlist=nl, default_r_cut=3.0) - dpdc.params[('A', 'A')] = dict(A=1.0) - dpdc.params[('A', 'B')] = dict(A=2.0, r_cut = 1.0) - dpdc.params[(['A', 'B'], ['C', 'D'])] = dict(A=3.0) + dpdc.params[("A", "A")] = dict(A=1.0) + dpdc.params[("A", "B")] = dict(A=2.0, r_cut=1.0) + dpdc.params[(["A", "B"], ["C", "D"])] = dict(A=3.0) {inherited} @@ -872,18 +899,19 @@ class DPDConservative(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + 
_cpp_class_name = "PotentialPairConservativeDPD" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) _accepted_modes = ("none",) def __init__(self, nlist, default_r_cut=None): # initialize the base class - super().__init__(nlist=nlist, - default_r_cut=default_r_cut, - default_r_on=0, - mode='none') - params = TypeParameter('params', 'particle_types', - TypeParameterDict(A=float, len_keys=2)) + super().__init__( + nlist=nlist, default_r_cut=default_r_cut, default_r_on=0, mode="none" + ) + params = TypeParameter( + "params", "particle_types", TypeParameterDict(A=float, len_keys=2) + ) self._add_typeparam(params) @@ -960,22 +988,20 @@ class DPDLJ(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairDPDThermoLJ" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) _accepted_modes = ("none", "shift") - def __init__(self, nlist, kT, default_r_cut=None, mode='none'): - - super().__init__(nlist=nlist, - default_r_cut=default_r_cut, - default_r_on=0, - mode=mode) + def __init__(self, nlist, kT, default_r_cut=None, mode="none"): + super().__init__( + nlist=nlist, default_r_cut=default_r_cut, default_r_on=0, mode=mode + ) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, - sigma=float, - gamma=float, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict(epsilon=float, sigma=float, gamma=float, len_keys=2), + ) self._add_typeparam(params) d = ParameterDict(kT=hoomd.variant.Variant) @@ -1020,7 +1046,7 @@ class ForceShiftedLJ(Pair): nl = nlist.Cell() fslj = pair.ForceShiftedLJ(nlist=nl, default_r_cut=1.5) - fslj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) + fslj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) {inherited} @@ -1040,19 +1066,21 @@ class ForceShiftedLJ(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairForceShiftedLJ" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) _accepted_modes = ("none",) def __init__(self, nlist, default_r_cut=None): - super().__init__(nlist=nlist, - default_r_cut=default_r_cut, - default_r_on=0, - mode='none') + super().__init__( + nlist=nlist, default_r_cut=default_r_cut, default_r_on=0, mode="none" + ) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, sigma=float, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(epsilon=float, sigma=float, len_keys=2), + ) self._add_typeparam(params) @@ -1123,14 +1151,17 @@ class Moliere(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairMoliere" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(qi=float, qj=float, aF=float, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(qi=float, qj=float, aF=float, len_keys=2), + ) self._add_typeparam(params) @@ -1199,16 +1230,18 @@ class ZBL(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairZBL" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) _accepted_modes = ("none",) - def __init__(self, nlist, default_r_cut=None, default_r_on=0.): - 
- super().__init__(nlist, default_r_cut, default_r_on, 'none') + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0): + super().__init__(nlist, default_r_cut, default_r_on, "none") params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(qi=float, qj=float, aF=float, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(qi=float, qj=float, aF=float, len_keys=2), + ) self._add_typeparam(params) @@ -1233,10 +1266,10 @@ class Mie(Pair): nl = nlist.Cell() mie = pair.Mie(nlist=nl, default_r_cut=3.0) - mie.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0, n=12, m=6) - mie.r_cut[('A', 'A')] = 2**(1.0/6.0) - mie.r_on[('A', 'A')] = 2.0 - mie.params[(['A', 'B'], ['C', 'D'])] = dict(epsilon=1.5, sigma=2.0) + mie.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0, n=12, m=6) + mie.r_cut[("A", "A")] = 2 ** (1.0 / 6.0) + mie.r_on[("A", "A")] = 2.0 + mie.params[(["A", "B"], ["C", "D"])] = dict(epsilon=1.5, sigma=2.0) {inherited} @@ -1260,19 +1293,17 @@ class Mie(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairMie" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): - + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, - sigma=float, - n=float, - m=float, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict(epsilon=float, sigma=float, n=float, m=float, len_keys=2), + ) self._add_typeparam(params) @@ -1300,13 +1331,21 @@ class ExpandedMie(Pair): nl = nlist.Cell() expanded_mie = pair.ExpandedMie(nlist=nl, default_r_cut=3.0) - mie.params[('A', 'B')] = { - "epsilon": 1.0, "sigma": 1.0, "n": 12, "m": 6, - "delta": 0.5} - expanded_mie.r_cut[('A', 'B')] = 2**(1.0 / 6.0) - expanded_mie.params[(['A', 'B'], ['C', 'D'])] = { - "epsilon": 1.5, "sigma": 2.0, "n": 12, "m": 6, - "delta": 0.5} + mie.params[("A", "B")] = { + "epsilon": 1.0, + "sigma": 1.0, + "n": 12, + "m": 6, + "delta": 0.5, + } + expanded_mie.r_cut[("A", "B")] = 2 ** (1.0 / 6.0) + expanded_mie.params[(["A", "B"], ["C", "D"])] = { + "epsilon": 1.5, + "sigma": 2.0, + "n": 12, + "m": 6, + "delta": 0.5, + } {inherited} @@ -1333,20 +1372,19 @@ class ExpandedMie(Pair): Type: `TypeParameter` [ `tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairExpandedMie" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): - + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, - sigma=float, - n=float, - m=float, - delta=float, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict( + epsilon=float, sigma=float, n=float, m=float, delta=float, len_keys=2 + ), + ) self._add_typeparam(params) @@ -1393,9 +1431,10 @@ class ReactionField(Pair): nl = nlist.Cell() reaction_field = pair.reaction_field(nl, default_r_cut=3.0) - reaction_field.params[('A', 'B')] = dict(epsilon=1.0, eps_rf=1.0) - reaction_field.params[('B', 'B')] = dict( - epsilon=1.0, eps_rf=0.0, use_charge=True) + reaction_field.params[("A", "B")] = dict(epsilon=1.0, eps_rf=1.0) + reaction_field.params[("B", "B")] = dict( 
+ epsilon=1.0, eps_rf=0.0, use_charge=True + ) {inherited} @@ -1417,17 +1456,19 @@ class ReactionField(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairReactionField" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, - eps_rf=float, - use_charge=False, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict( + epsilon=float, eps_rf=float, use_charge=False, len_keys=2 + ), + ) self._add_typeparam(params) @@ -1495,17 +1536,19 @@ class DLVO(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairDLVO" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) _accepted_modes = ("none", "shift") - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): - if mode == 'xplor': + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): + if mode == "xplor": raise ValueError("xplor is not a valid mode for the DLVO potential") super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', + "params", + "particle_types", TypeParameterDict( kappa=float, Z=float, @@ -1513,7 +1556,8 @@ def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): a1=float, a2=float, len_keys=2, - )) + ), + ) self._add_typeparam(params) @@ -1562,11 +1606,13 @@ class Buckingham(Pair): _cpp_class_name = "PotentialPairBuckingham" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(A=float, rho=float, C=float, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(A=float, rho=float, C=float, len_keys=2), + ) self._add_typeparam(params) @@ -1586,8 +1632,11 @@ class LJ1208(Pair): nl = nlist.Cell() lj1208 = pair.LJ1208(nl, default_r_cut=3.0) - lj1208.params[('A', 'A')] = {'sigma': 1.0, 'epsilon': 1.0} - lj1208.params[('A', 'B')] = dict(epsilon=2.0, sigma=1.0) + lj1208.params[("A", "A")] = { + "sigma": 1.0, + "epsilon": 1.0, + } + lj1208.params[("A", "B")] = dict(epsilon=2.0, sigma=1.0) .. 
math:: U(r) = 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{12} - @@ -1611,14 +1660,17 @@ class LJ1208(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairLJ1208" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, sigma=float, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(epsilon=float, sigma=float, len_keys=2), + ) self._add_typeparam(params) @@ -1642,9 +1694,12 @@ class LJ0804(Pair): nl = nlist.Cell() lj0804 = pair.LJ0804(nl, default_r_cut=3.0) - lj0804.params[('A', 'A')] = {'sigma': 1.0, 'epsilon': 1.0} - lj0804.params[('A', 'B')] = dict(epsilon=2.0, sigma=1.0) - lj0804.r_cut[('A', 'B')] = 3.0 + lj0804.params[("A", "A")] = { + "sigma": 1.0, + "epsilon": 1.0, + } + lj0804.params[("A", "B")] = dict(epsilon=2.0, sigma=1.0) + lj0804.r_cut[("A", "B")] = 3.0 {inherited} @@ -1664,14 +1719,17 @@ class LJ0804(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairLJ0804" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, sigma=float, len_keys=2)) + "params", + "particle_types", + TypeParameterDict(epsilon=float, sigma=float, len_keys=2), + ) self._add_typeparam(params) @@ -1726,17 +1784,20 @@ class Fourier(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairFourier" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) _accepted_modes = ("none", "xplor") - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(a=(float, float, float), - b=(float, float, float), - len_keys=2)) + "params", + "particle_types", + TypeParameterDict( + a=(float, float, float), b=(float, float, float), len_keys=2 + ), + ) self._add_typeparam(params) @@ -1765,10 +1826,15 @@ class OPP(Pair): nl = nlist.Cell() opp = pair.OPP(nl, default_r_cut=3.0) - opp.params[('A', 'A')] = { - 'C1': 1., 'C2': 1., 'eta1': 15, - 'eta2': 3, 'k': 1.0, 'phi': 3.14} - opp.r_cut[('A', 'B')] = 3.0 + opp.params[("A", "A")] = { + "C1": 1.0, + "C2": 1.0, + "eta1": 15, + "eta2": 3, + "k": 1.0, + "phi": 3.14, + } + opp.r_cut[("A", "B")] = 3.0 {inherited} @@ -1800,20 +1866,25 @@ class OPP(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairOPP" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, nlist, default_r_cut=None, default_r_on=0., mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - 
TypeParameterDict(C1=float, - C2=float, - eta1=float, - eta2=float, - k=float, - phi=float, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict( + C1=float, + C2=float, + eta1=float, + eta2=float, + k=float, + phi=float, + len_keys=2, + ), + ) self._add_typeparam(params) @@ -1844,8 +1915,12 @@ class TWF(Pair): nl = nlist.Cell() twf = hoomd.md.pair.TWF(nl, default_r_cut=3.0) - twf.params[('A', 'A')] = {'sigma': 1.0, 'epsilon': 1.0, 'alpha': 50.0} - twf.r_cut[('A', 'B')] = 3.0 + twf.params[("A", "A")] = { + "sigma": 1.0, + "epsilon": 1.0, + "alpha": 50.0, + } + twf.r_cut[("A", "B")] = 3.0 {inherited} @@ -1867,21 +1942,17 @@ class TWF(Pair): Type: `TypeParameter` [`tuple` [``particle_type``, ``particle_type``], `dict`] """ + _cpp_class_name = "PotentialPairTWF" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, - nlist, - default_r_cut=None, - default_r_on=0.0, - mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, - sigma=float, - alpha=float, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict(epsilon=float, sigma=float, alpha=float, len_keys=2), + ) self._add_typeparam(params) @@ -1908,9 +1979,17 @@ class LJGauss(Pair): nl = hoomd.md.nlist.Cell() ljg = pair.LJGauss(nl) - ljg.params[('A', 'A')] = dict(epsilon=1.0, sigma=0.02, r0=1.6) - ljg.params[('A', 'B')] = {'epsilon' : 2.0, 'sigma' : 0.02, 'r0' : 1.6} - ljg.params[('A', 'B')] = {'epsilon' : 2.0, 'sigma' : 0.02, 'r0' : 1.6} + ljg.params[("A", "A")] = dict(epsilon=1.0, sigma=0.02, r0=1.6) + ljg.params[("A", "B")] = { + "epsilon": 2.0, + "sigma": 0.02, + "r0": 1.6, + } + ljg.params[("A", "B")] = { + "epsilon": 2.0, + "sigma": 0.02, + "r0": 1.6, + } {inherited} @@ -1929,19 +2008,15 @@ class LJGauss(Pair): * ``r0`` (`float`, **required**) - Gaussian center :math:`r_0` :math:`[\mathrm{length}]` """ + _cpp_class_name = "PotentialPairLJGauss" __doc__ = __doc__.replace("{inherited}", Pair._doc_inherited) - def __init__(self, - nlist, - default_r_cut=None, - default_r_on=0.0, - mode='none'): + def __init__(self, nlist, default_r_cut=None, default_r_on=0.0, mode="none"): super().__init__(nlist, default_r_cut, default_r_on, mode) params = TypeParameter( - 'params', 'particle_types', - TypeParameterDict(epsilon=float, - sigma=positive_real, - r0=float, - len_keys=2)) + "params", + "particle_types", + TypeParameterDict(epsilon=float, sigma=positive_real, r0=float, len_keys=2), + ) self._add_typeparam(params) diff --git a/hoomd/md/pytest/test_active.py b/hoomd/md/pytest/test_active.py index d1244657f5..ccb74cdd25 100644 --- a/hoomd/md/pytest/test_active.py +++ b/hoomd/md/pytest/test_active.py @@ -9,28 +9,29 @@ def test_attributes(): active = hoomd.md.force.Active(filter=hoomd.filter.All()) - assert active.active_force['A'] == (1.0, 0.0, 0.0) - assert active.active_torque['A'] == (0.0, 0.0, 0.0) + assert active.active_force["A"] == (1.0, 0.0, 0.0) + assert active.active_torque["A"] == (0.0, 0.0, 0.0) - active.active_force['A'] = (0.5, 0.0, 0.0) - assert active.active_force['A'] == (0.5, 0.0, 0.0) - active.active_force['A'] = (0.0, 0.0, 1.0) - assert active.active_force['A'] == (0.0, 0.0, 1.0) + active.active_force["A"] = (0.5, 0.0, 0.0) + assert active.active_force["A"] == (0.5, 0.0, 0.0) + active.active_force["A"] = (0.0, 0.0, 1.0) + assert active.active_force["A"] == (0.0, 0.0, 1.0) def 
test_attributes_constraints(): plane = hoomd.md.manifold.Plane() - active = hoomd.md.force.ActiveOnManifold(filter=hoomd.filter.All(), - manifold_constraint=plane) + active = hoomd.md.force.ActiveOnManifold( + filter=hoomd.filter.All(), manifold_constraint=plane + ) - assert active.active_force['A'] == (1.0, 0.0, 0.0) - assert active.active_torque['A'] == (0.0, 0.0, 0.0) + assert active.active_force["A"] == (1.0, 0.0, 0.0) + assert active.active_torque["A"] == (0.0, 0.0, 0.0) assert active.manifold_constraint == plane - active.active_force['A'] = (0.5, 0.0, 0.0) - assert active.active_force['A'] == (0.5, 0.0, 0.0) - active.active_force['A'] = (0.0, 0.0, 1.0) - assert active.active_force['A'] == (0.0, 0.0, 1.0) + active.active_force["A"] = (0.5, 0.0, 0.0) + assert active.active_force["A"] == (0.5, 0.0, 0.0) + active.active_force["A"] = (0.0, 0.0, 1.0) + assert active.active_force["A"] == (0.0, 0.0, 1.0) sphere = hoomd.md.manifold.Sphere(r=5) with pytest.raises(AttributeError): @@ -42,80 +43,75 @@ def test_attach(simulation_factory, two_particle_snapshot_factory): active = hoomd.md.force.Active(filter=hoomd.filter.All()) sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=8)) - integrator = hoomd.md.Integrator(.05) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)) + integrator = hoomd.md.Integrator(0.05) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)) integrator.forces.append(active) sim.operations.integrator = integrator sim.run(0) - assert active.active_force['A'] == (1.0, 0.0, 0.0) - assert active.active_torque['A'] == (0.0, 0.0, 0.0) + assert active.active_force["A"] == (1.0, 0.0, 0.0) + assert active.active_torque["A"] == (0.0, 0.0, 0.0) - active.active_force['A'] = (0.5, 0.0, 0.0) - assert active.active_force['A'] == (0.5, 0.0, 0.0) - active.active_force['A'] = (0.0, 0.0, 1.0) - assert active.active_force['A'] == (0.0, 0.0, 1.0) + active.active_force["A"] = (0.5, 0.0, 0.0) + assert active.active_force["A"] == (0.5, 0.0, 0.0) + active.active_force["A"] = (0.0, 0.0, 1.0) + assert active.active_force["A"] == (0.0, 0.0, 1.0) def test_kernel_parameters(simulation_factory, two_particle_snapshot_factory): active = hoomd.md.force.Active(filter=hoomd.filter.All()) sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=8)) - integrator = hoomd.md.Integrator(.05) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)) + integrator = hoomd.md.Integrator(0.05) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)) integrator.forces.append(active) sim.operations.integrator = integrator sim.run(0) - autotuned_kernel_parameter_check(instance=active, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=active, activate=lambda: sim.run(1)) def test_attach_manifold(simulation_factory, two_particle_snapshot_factory): plane = hoomd.md.manifold.Plane() - active = hoomd.md.force.ActiveOnManifold(filter=hoomd.filter.All(), - manifold_constraint=plane) + active = hoomd.md.force.ActiveOnManifold( + filter=hoomd.filter.All(), manifold_constraint=plane + ) sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=8)) - integrator = hoomd.md.Integrator(.05) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)) + integrator = hoomd.md.Integrator(0.05) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)) integrator.forces.append(active) sim.operations.integrator = 
integrator sim.run(0) - assert active.active_force['A'] == (1.0, 0.0, 0.0) - assert active.active_torque['A'] == (0.0, 0.0, 0.0) + assert active.active_force["A"] == (1.0, 0.0, 0.0) + assert active.active_torque["A"] == (0.0, 0.0, 0.0) assert active.manifold_constraint == plane - active.active_force['A'] = (0.5, 0.0, 0.0) - assert active.active_force['A'] == (0.5, 0.0, 0.0) - active.active_force['A'] = (0.0, 0.0, 1.0) - assert active.active_force['A'] == (0.0, 0.0, 1.0) + active.active_force["A"] = (0.5, 0.0, 0.0) + assert active.active_force["A"] == (0.5, 0.0, 0.0) + active.active_force["A"] = (0.0, 0.0, 1.0) + assert active.active_force["A"] == (0.0, 0.0, 1.0) sphere = hoomd.md.manifold.Sphere(r=2) with pytest.raises(AttributeError): active.manifold_constraint = sphere assert active.manifold_constraint == plane -def test_kernel_parameters_manifold(simulation_factory, - two_particle_snapshot_factory): +def test_kernel_parameters_manifold(simulation_factory, two_particle_snapshot_factory): plane = hoomd.md.manifold.Plane() - active = hoomd.md.force.ActiveOnManifold(filter=hoomd.filter.All(), - manifold_constraint=plane) + active = hoomd.md.force.ActiveOnManifold( + filter=hoomd.filter.All(), manifold_constraint=plane + ) sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=8)) - integrator = hoomd.md.Integrator(.05) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)) + integrator = hoomd.md.Integrator(0.05) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)) integrator.forces.append(active) sim.operations.integrator = integrator sim.run(0) - autotuned_kernel_parameter_check(instance=active, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=active, activate=lambda: sim.run(1)) def test_pickling(simulation_factory, two_particle_snapshot_factory): @@ -123,9 +119,10 @@ def test_pickling(simulation_factory, two_particle_snapshot_factory): active = hoomd.md.force.Active(filter=hoomd.filter.All()) pickling_check(active) integrator = hoomd.md.Integrator( - .05, + 0.05, methods=[hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)], - forces=[active]) + forces=[active], + ) sim.operations.integrator = integrator sim.run(0) pickling_check(active) @@ -134,13 +131,14 @@ def test_pickling(simulation_factory, two_particle_snapshot_factory): def test_pickling_constraint(simulation_factory, two_particle_snapshot_factory): sim = simulation_factory(two_particle_snapshot_factory()) active = hoomd.md.force.ActiveOnManifold( - filter=hoomd.filter.All(), - manifold_constraint=hoomd.md.manifold.Plane()) + filter=hoomd.filter.All(), manifold_constraint=hoomd.md.manifold.Plane() + ) pickling_check(active) integrator = hoomd.md.Integrator( - .05, + 0.05, methods=[hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)], - forces=[active]) + forces=[active], + ) sim.operations.integrator = integrator sim.run(0) pickling_check(active) diff --git a/hoomd/md/pytest/test_active_rotational_diffusion.py b/hoomd/md/pytest/test_active_rotational_diffusion.py index 903e39a537..e5c06bcf4a 100644 --- a/hoomd/md/pytest/test_active_rotational_diffusion.py +++ b/hoomd/md/pytest/test_active_rotational_diffusion.py @@ -10,26 +10,25 @@ @pytest.fixture( params=[ - (hoomd.md.force.Active, { - "filter": hoomd.filter.All() - }), + (hoomd.md.force.Active, {"filter": hoomd.filter.All()}), ( hoomd.md.force.ActiveOnManifold, { "filter": hoomd.filter.All(), # this is the shift used by two_particle_snapshot_factory - "manifold_constraint": 
hoomd.md.manifold.Plane(shift=0.1) - }) + "manifold_constraint": hoomd.md.manifold.Plane(shift=0.1), + }, + ), ], - ids=lambda x: x[0].__name__) + ids=lambda x: x[0].__name__, +) def active_force(request): cls, kwargs = request.param yield cls(**kwargs) def test_construction(active_force): - rd_updater = hoomd.md.update.ActiveRotationalDiffusion( - 10, active_force, 0.1) + rd_updater = hoomd.md.update.ActiveRotationalDiffusion(10, active_force, 0.1) # We want to test identity for active force since the two are linked. assert rd_updater.active_force is active_force @@ -37,9 +36,10 @@ def test_construction(active_force): assert rd_updater.rotational_diffusion == hoomd.variant.Constant(0.1) after_trigger = hoomd.trigger.After(100) - ramp_variant = hoomd.variant.Ramp(0.1, 1., 100, 1_000) + ramp_variant = hoomd.variant.Ramp(0.1, 1.0, 100, 1_000) rd_updater = hoomd.md.update.ActiveRotationalDiffusion( - after_trigger, active_force, ramp_variant) + after_trigger, active_force, ramp_variant + ) assert rd_updater.active_force is active_force assert rd_updater.trigger == after_trigger @@ -64,8 +64,7 @@ def check_setting(active_force, rd_updater): def test_setting(active_force): - rd_updater = hoomd.md.update.ActiveRotationalDiffusion( - 10, active_force, 0.1) + rd_updater = hoomd.md.update.ActiveRotationalDiffusion(10, active_force, 0.1) check_setting(active_force, rd_updater) @@ -84,10 +83,10 @@ def sim_constructor(active_force=None, rd_updater=None): if isinstance(active_force, hoomd.md.force.Active): method = hoomd.md.methods.ConstantVolume(hoomd.filter.All()) else: - method = hoomd.md.methods.rattle.NVE(hoomd.filter.All(), - hoomd.md.manifold.Plane(0.1)) - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - methods=[method]) + method = hoomd.md.methods.rattle.NVE( + hoomd.filter.All(), hoomd.md.manifold.Plane(0.1) + ) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, methods=[method]) if active_force is not None: sim.operations.integrator.forces.append(active_force) if rd_updater is not None: @@ -99,8 +98,7 @@ def sim_constructor(active_force=None, rd_updater=None): def test_attaching(active_force, local_simulation_factory): - rd_updater = hoomd.md.update.ActiveRotationalDiffusion( - 10, active_force, 0.1) + rd_updater = hoomd.md.update.ActiveRotationalDiffusion(10, active_force, 0.1) sim = local_simulation_factory(active_force, rd_updater) sim.run(0) check_setting(active_force, rd_updater) @@ -126,9 +124,9 @@ def test_attaching(active_force, local_simulation_factory): def test_update(active_force, local_simulation_factory): - active_force.active_force.default = (1., 0., 0.) + active_force.active_force.default = (1.0, 0.0, 0.0) # Set torque to zero so no angular momentum exists to change orientations. - active_force.active_torque.default = (0., 0., 0.) 
+ active_force.active_torque.default = (0.0, 0.0, 0.0) rd_updater = hoomd.md.update.ActiveRotationalDiffusion(1, active_force, 0.1) sim = local_simulation_factory(active_force, rd_updater) snapshot = sim.state.get_snapshot() diff --git a/hoomd/md/pytest/test_alchemostat.py b/hoomd/md/pytest/test_alchemostat.py index 94b78234ec..7f53374360 100644 --- a/hoomd/md/pytest/test_alchemostat.py +++ b/hoomd/md/pytest/test_alchemostat.py @@ -6,11 +6,11 @@ import hoomd.md.alchemy import pytest -_NVT_args = (hoomd.md.alchemy.methods.NVT, { - 'alchemical_kT': hoomd.variant.Constant(1) -}, { - 'alchemical_kT': hoomd.variant.Constant(0.5) -}) +_NVT_args = ( + hoomd.md.alchemy.methods.NVT, + {"alchemical_kT": hoomd.variant.Constant(1)}, + {"alchemical_kT": hoomd.variant.Constant(0.5)}, +) def get_alchemostat(): @@ -19,24 +19,29 @@ def get_alchemostat(): @pytest.mark.parametrize( "alchemostat_cls, extra_property_1st_value, extra_property_2nd_value", - get_alchemostat()) + get_alchemostat(), +) @pytest.mark.cpu @pytest.mark.serial -def test_before_attaching(simulation_factory, two_particle_snapshot_factory, - alchemostat_cls, extra_property_1st_value, - extra_property_2nd_value): +def test_before_attaching( + simulation_factory, + two_particle_snapshot_factory, + alchemostat_cls, + extra_property_1st_value, + extra_property_2nd_value, +): sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=1)) nlist = hoomd.md.nlist.Cell(buffer=0.4) ljg = hoomd.md.alchemy.pair.LJGauss(nlist, default_r_cut=3.0) - ljg.params[('A', 'A')] = dict(epsilon=1., sigma=0.02, r0=1.8) + ljg.params[("A", "A")] = dict(epsilon=1.0, sigma=0.02, r0=1.8) integrator = hoomd.md.Integrator(dt=0.005) integrator.forces.append(ljg) sim.operations.integrator = integrator - r0_alchemical_dof = ljg.r0[('A', 'A')] + r0_alchemical_dof = ljg.r0[("A", "A")] period = 10 - alchemostat = alchemostat_cls(period=period, - alchemical_dof=[r0_alchemical_dof], - **extra_property_1st_value) + alchemostat = alchemostat_cls( + period=period, alchemical_dof=[r0_alchemical_dof], **extra_property_1st_value + ) assert alchemostat.period == period period = 5 @@ -60,24 +65,29 @@ def test_before_attaching(simulation_factory, two_particle_snapshot_factory, @pytest.mark.parametrize( "alchemostat_cls, extra_property_1st_value, extra_property_2nd_value", - get_alchemostat()) + get_alchemostat(), +) @pytest.mark.cpu @pytest.mark.serial -def test_after_attaching(simulation_factory, two_particle_snapshot_factory, - alchemostat_cls, extra_property_1st_value, - extra_property_2nd_value): +def test_after_attaching( + simulation_factory, + two_particle_snapshot_factory, + alchemostat_cls, + extra_property_1st_value, + extra_property_2nd_value, +): sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=1)) nlist = hoomd.md.nlist.Cell(buffer=0.4) ljg = hoomd.md.alchemy.pair.LJGauss(nlist, default_r_cut=3.0) - ljg.params[('A', 'A')] = dict(epsilon=1., sigma=0.02, r0=1.8) + ljg.params[("A", "A")] = dict(epsilon=1.0, sigma=0.02, r0=1.8) integrator = hoomd.md.Integrator(dt=0.005) integrator.forces.append(ljg) sim.operations.integrator = integrator - r0_alchemical_dof = ljg.r0[('A', 'A')] + r0_alchemical_dof = ljg.r0[("A", "A")] period = 10 - alchemostat = alchemostat_cls(period=period, - alchemical_dof=[r0_alchemical_dof], - **extra_property_1st_value) + alchemostat = alchemostat_cls( + period=period, alchemical_dof=[r0_alchemical_dof], **extra_property_1st_value + ) sim.operations.integrator.methods.insert(0, alchemostat) assert alchemostat.period == 
period assert len(alchemostat.alchemical_dof) == 1 @@ -99,15 +109,15 @@ def test_after_attaching(simulation_factory, two_particle_snapshot_factory, @pytest.mark.cpu @pytest.mark.serial -@pytest.mark.parametrize("alchemical_potential", - [hoomd.md.alchemy.pair.LJGauss]) -def test_pickling_potential(simulation_factory, two_particle_snapshot_factory, - alchemical_potential): +@pytest.mark.parametrize("alchemical_potential", [hoomd.md.alchemy.pair.LJGauss]) +def test_pickling_potential( + simulation_factory, two_particle_snapshot_factory, alchemical_potential +): """Test that md.constrain.Distance can be pickled and unpickled.""" # detached nlist = hoomd.md.nlist.Cell(buffer=0.4) ljg = alchemical_potential(nlist, default_r_cut=3.0) - ljg.params[('A', 'A')] = dict(epsilon=1., sigma=0.02, r0=1.8) + ljg.params[("A", "A")] = dict(epsilon=1.0, sigma=0.02, r0=1.8) pickling_check(ljg) # attached diff --git a/hoomd/md/pytest/test_alj.py b/hoomd/md/pytest/test_alj.py index a2dd1bd18f..115dbd0522 100644 --- a/hoomd/md/pytest/test_alj.py +++ b/hoomd/md/pytest/test_alj.py @@ -15,15 +15,29 @@ def test_type_shapes(simulation_factory, two_particle_snapshot_factory): sim.operations.integrator = md.Integrator(0.005, forces=[alj]) alj.r_cut.default = 2.5 - octahedron = [(0.5, 0, 0), (-0.5, 0, 0), (0, 0.5, 0), (0, -0.5, 0), - (0, 0, 0.5), (0, 0, -0.5)] - faces = [[5, 3, 1], [0, 3, 5], [1, 3, 4], [4, 3, 0], [5, 2, 0], [1, 2, 5], - [0, 2, 4], [4, 2, 1]] + octahedron = [ + (0.5, 0, 0), + (-0.5, 0, 0), + (0, 0.5, 0), + (0, -0.5, 0), + (0, 0, 0.5), + (0, 0, -0.5), + ] + faces = [ + [5, 3, 1], + [0, 3, 5], + [1, 3, 4], + [4, 3, 0], + [5, 2, 0], + [1, 2, 5], + [0, 2, 4], + [4, 2, 1], + ] rounding_radius = 0.1 alj.shape["A"] = { "vertices": octahedron, "faces": faces, - "rounding_radii": rounding_radius + "rounding_radii": rounding_radius, } # We use a non-zero sigma_i to ensure that it is added appropriately to the # rounding radius. 
@@ -31,7 +45,7 @@ def test_type_shapes(simulation_factory, two_particle_snapshot_factory): "epsilon": 1.0, "sigma_i": 0.1, "sigma_j": 0.1, - "alpha": 1 + "alpha": 1, } with pytest.raises(hoomd.error.DataAccessError): alj.type_shapes @@ -48,27 +62,24 @@ def get_rounding_radius(base, param_spec): assert np.allclose(shape_spec["vertices"], octahedron) assert np.isclose( shape_spec["rounding_radius"], - get_rounding_radius(rounding_radius, alj.params[("A", "A")])) + get_rounding_radius(rounding_radius, alj.params[("A", "A")]), + ) ellipse_axes = (0.1, 0.2, 0.3) - alj.shape["A"] = { - "vertices": [], - "faces": [], - "rounding_radii": ellipse_axes - } + alj.shape["A"] = {"vertices": [], "faces": [], "rounding_radii": ellipse_axes} shape_spec = alj.type_shapes assert len(shape_spec) == 1 shape_spec = shape_spec[0] assert shape_spec["type"] == "Ellipsoid" assert np.isclose( - shape_spec["a"], - get_rounding_radius(ellipse_axes[0], alj.params[("A", "A")])) + shape_spec["a"], get_rounding_radius(ellipse_axes[0], alj.params[("A", "A")]) + ) assert np.isclose( - shape_spec["a"], - get_rounding_radius(ellipse_axes[1], alj.params[("A", "A")])) + shape_spec["a"], get_rounding_radius(ellipse_axes[1], alj.params[("A", "A")]) + ) assert np.isclose( - shape_spec["a"], - get_rounding_radius(ellipse_axes[2], alj.params[("A", "A")])) + shape_spec["a"], get_rounding_radius(ellipse_axes[2], alj.params[("A", "A")]) + ) sim.operations.integrator.forces.remove(alj) @@ -78,7 +89,7 @@ def get_rounding_radius(base, param_spec): alj.shape["A"] = { "vertices": square, "faces": [], - "rounding_radii": rounding_radius + "rounding_radii": rounding_radius, } sim.run(0) @@ -89,20 +100,17 @@ def get_rounding_radius(base, param_spec): assert np.allclose(shape_spec["vertices"], np.array(square)[:, :2]) assert np.isclose( shape_spec["rounding_radius"], - get_rounding_radius(rounding_radius, alj.params[("A", "A")])) + get_rounding_radius(rounding_radius, alj.params[("A", "A")]), + ) - alj.shape["A"] = { - "vertices": [], - "faces": [], - "rounding_radii": ellipse_axes - } + alj.shape["A"] = {"vertices": [], "faces": [], "rounding_radii": ellipse_axes} shape_spec = alj.type_shapes assert len(shape_spec) == 1 shape_spec = shape_spec[0] assert shape_spec["type"] == "Ellipsoid" assert np.isclose( - shape_spec["a"], - get_rounding_radius(ellipse_axes[0], alj.params[("A", "A")])) + shape_spec["a"], get_rounding_radius(ellipse_axes[0], alj.params[("A", "A")]) + ) assert np.isclose( - shape_spec["a"], - get_rounding_radius(ellipse_axes[1], alj.params[("A", "A")])) + shape_spec["a"], get_rounding_radius(ellipse_axes[1], alj.params[("A", "A")]) + ) diff --git a/hoomd/md/pytest/test_angle.py b/hoomd/md/pytest/test_angle.py index 918fb69c29..a9bc87300a 100644 --- a/hoomd/md/pytest/test_angle.py +++ b/hoomd/md/pytest/test_angle.py @@ -4,12 +4,16 @@ import hoomd from hoomd import md from hoomd.conftest import expected_loggable_params -from hoomd.conftest import (logging_check, pickling_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + logging_check, + pickling_check, + autotuned_kernel_parameter_check, +) import pytest import numpy import itertools + # Test parameters include the class, class keyword arguments, bond params, # force, and energy. 
angle_test_parameters = [ @@ -65,14 +69,9 @@ ] -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def triplet_snapshot_factory(device): - - def make_snapshot(d=1.0, - theta_deg=60, - particle_types=['A'], - dimensions=3, - L=20): + def make_snapshot(d=1.0, theta_deg=60, particle_types=["A"], dimensions=3, L=20): theta_rad = theta_deg * (numpy.pi / 180) snapshot = hoomd.Snapshot(device.communicator) N = 3 @@ -83,18 +82,17 @@ def make_snapshot(d=1.0, snapshot.configuration.box = box snapshot.particles.N = N - base_positions = numpy.array([ - [ - -d * numpy.sin(theta_rad / 2), d * numpy.cos(theta_rad / 2), - 0.0 - ], - [0.0, 0.0, 0.0], + base_positions = numpy.array( [ - d * numpy.sin(theta_rad / 2), - d * numpy.cos(theta_rad / 2), - 0.0, - ], - ]) + [-d * numpy.sin(theta_rad / 2), d * numpy.cos(theta_rad / 2), 0.0], + [0.0, 0.0, 0.0], + [ + d * numpy.sin(theta_rad / 2), + d * numpy.cos(theta_rad / 2), + 0.0, + ], + ] + ) # move particles slightly in direction of MPI decomposition which # varies by simulation dimension nudge_dimension = 2 if dimensions == 3 else 1 @@ -102,7 +100,7 @@ def make_snapshot(d=1.0, snapshot.particles.position[:] = base_positions snapshot.particles.types = particle_types snapshot.angles.N = 1 - snapshot.angles.types = ['A-A-A'] + snapshot.angles.types = ["A-A-A"] snapshot.angles.typeid[0] = 0 snapshot.angles.group[0] = (0, 1, 2) return snapshot @@ -110,49 +108,64 @@ def make_snapshot(d=1.0, return make_snapshot -@pytest.mark.parametrize('angle_cls, angle_args, params, force, energy', - angle_test_parameters) +@pytest.mark.parametrize( + "angle_cls, angle_args, params, force, energy", angle_test_parameters +) def test_before_attaching(angle_cls, angle_args, params, force, energy): potential = angle_cls(**angle_args) - potential.params['A-A-A'] = params + potential.params["A-A-A"] = params for key in params: - assert potential.params['A-A-A'][key] == pytest.approx(params[key]) + assert potential.params["A-A-A"][key] == pytest.approx(params[key]) -@pytest.mark.parametrize('angle_cls, angle_args, params, force, energy', - angle_test_parameters) -def test_after_attaching(triplet_snapshot_factory, simulation_factory, - angle_cls, angle_args, params, force, energy): +@pytest.mark.parametrize( + "angle_cls, angle_args, params, force, energy", angle_test_parameters +) +def test_after_attaching( + triplet_snapshot_factory, + simulation_factory, + angle_cls, + angle_args, + params, + force, + energy, +): sim = simulation_factory(triplet_snapshot_factory()) potential = angle_cls(**angle_args) - potential.params['A-A-A'] = params + potential.params["A-A-A"] = params - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) for key in params: - assert potential.params['A-A-A'][key] == pytest.approx(params[key]) + assert potential.params["A-A-A"][key] == pytest.approx(params[key]) -@pytest.mark.parametrize('angle_cls, angle_args, params, force, energy', - angle_test_parameters) -def test_forces_and_energies(triplet_snapshot_factory, simulation_factory, - angle_cls, angle_args, params, force, energy): +@pytest.mark.parametrize( + "angle_cls, angle_args, params, force, energy", angle_test_parameters +) +def test_forces_and_energies( + triplet_snapshot_factory, + simulation_factory, + angle_cls, + angle_args, + params, + force, + energy, +): theta_deg = 60 theta_rad = theta_deg * (numpy.pi / 180) snapshot = triplet_snapshot_factory(theta_deg=theta_deg) 
sim = simulation_factory(snapshot) force_array = force * numpy.asarray( - [numpy.cos(theta_rad / 2), - numpy.sin(theta_rad / 2), 0]) + [numpy.cos(theta_rad / 2), numpy.sin(theta_rad / 2), 0] + ) potential = angle_cls(**angle_args) - potential.params['A-A-A'] = params + potential.params["A-A-A"] = params - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) @@ -160,60 +173,75 @@ def test_forces_and_energies(triplet_snapshot_factory, simulation_factory, sim_forces = potential.forces if sim.device.communicator.rank == 0: assert sim_energy == pytest.approx(energy, rel=1e-2) - numpy.testing.assert_allclose(sim_forces[0], - force_array, - rtol=1e-2, - atol=1e-5) - numpy.testing.assert_allclose(sim_forces[1], [0, -1 * force, 0], - rtol=1e-2, - atol=1e-5) + numpy.testing.assert_allclose(sim_forces[0], force_array, rtol=1e-2, atol=1e-5) + numpy.testing.assert_allclose( + sim_forces[1], [0, -1 * force, 0], rtol=1e-2, atol=1e-5 + ) numpy.testing.assert_allclose( sim_forces[2], [-1 * force_array[0], force_array[1], force_array[2]], rtol=1e-2, - atol=1e-5) + atol=1e-5, + ) -@pytest.mark.parametrize('angle_cls, angle_args, params, force, energy', - angle_test_parameters) -def test_kernel_parameters(triplet_snapshot_factory, simulation_factory, - angle_cls, angle_args, params, force, energy): +@pytest.mark.parametrize( + "angle_cls, angle_args, params, force, energy", angle_test_parameters +) +def test_kernel_parameters( + triplet_snapshot_factory, + simulation_factory, + angle_cls, + angle_args, + params, + force, + energy, +): theta_deg = 60 snapshot = triplet_snapshot_factory(theta_deg=theta_deg) sim = simulation_factory(snapshot) potential = angle_cls(**angle_args) - potential.params['A-A-A'] = params + potential.params["A-A-A"] = params - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) - autotuned_kernel_parameter_check(instance=potential, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=potential, activate=lambda: sim.run(1)) # Test Logging @pytest.mark.parametrize( - 'cls, expected_namespace, expected_loggables', - zip((md.angle.Angle, md.angle.Harmonic, md.angle.CosineSquared, - md.angle.Table), itertools.repeat(('md', 'angle')), - itertools.repeat(expected_loggable_params))) + "cls, expected_namespace, expected_loggables", + zip( + (md.angle.Angle, md.angle.Harmonic, md.angle.CosineSquared, md.angle.Table), + itertools.repeat(("md", "angle")), + itertools.repeat(expected_loggable_params), + ), +) def test_logging(cls, expected_namespace, expected_loggables): logging_check(cls, expected_namespace, expected_loggables) # Test Pickling -@pytest.mark.parametrize('angle_cls, angle_args, params, force, energy', - angle_test_parameters) -def test_pickling(simulation_factory, triplet_snapshot_factory, angle_cls, - angle_args, params, force, energy): +@pytest.mark.parametrize( + "angle_cls, angle_args, params, force, energy", angle_test_parameters +) +def test_pickling( + simulation_factory, + triplet_snapshot_factory, + angle_cls, + angle_args, + params, + force, + energy, +): theta_deg = 60 snapshot = triplet_snapshot_factory(theta_deg=theta_deg) sim = simulation_factory(snapshot) potential = angle_cls(**angle_args) - potential.params['A-A-A'] = params + potential.params["A-A-A"] = params pickling_check(potential) integrator = 
hoomd.md.Integrator(0.05, forces=[potential])
diff --git a/hoomd/md/pytest/test_aniso_pair.py b/hoomd/md/pytest/test_aniso_pair.py
index 79f96b159e..34bc1bdc89 100644
--- a/hoomd/md/pytest/test_aniso_pair.py
+++ b/hoomd/md/pytest/test_aniso_pair.py
@@ -13,8 +13,11 @@
 import pytest
 
 import hoomd
-from hoomd.conftest import (pickling_check, logging_check,
-                            autotuned_kernel_parameter_check)
+from hoomd.conftest import (
+    pickling_check,
+    logging_check,
+    autotuned_kernel_parameter_check,
+)
 from hoomd.logging import LoggerCategories
 from hoomd import md
 from hoomd.error import TypeConversionError
@@ -33,13 +36,15 @@ def _equivalent_data_structures(struct_1, struct_2):
             return False
         return all(
             _equivalent_data_structures(struct_1[key], struct_2[key])
-            for key in struct_1)
+            for key in struct_1
+        )
     if isinstance(struct_1, Sequence):
         if len(struct_1) != len(struct_2):
             return False
         return all(
             _equivalent_data_structures(value_1, value_2)
-            for value_1, value_2 in zip(struct_1, struct_2))
+            for value_1, value_2 in zip(struct_1, struct_2)
+        )
     if isinstance(struct_1, Number):
         return math.isclose(struct_1, struct_2)
     return False
@@ -57,19 +62,17 @@ def make_langevin_integrator(force):
 
 
 @pytest.fixture
-def make_two_particle_simulation(two_particle_snapshot_factory,
-                                 simulation_factory):
-
+def make_two_particle_simulation(two_particle_snapshot_factory, simulation_factory):
     def make_simulation(force, d=1, types=None, dimensions=3):
         if types is None:
-            types = ['A']
-        snap = two_particle_snapshot_factory(dimensions=dimensions,
-                                             d=d,
-                                             particle_types=types)
+            types = ["A"]
+        snap = two_particle_snapshot_factory(
+            dimensions=dimensions, d=d, particle_types=types
+        )
         if snap.communicator.rank == 0:
-            snap.particles.charge[:] = 1.
-            snap.particles.moment_inertia[0] = [1., 1., 1.]
-            snap.particles.moment_inertia[1] = [1., 2., 1.]
+ snap.particles.charge[:] = 1.0 + snap.particles.moment_inertia[0] = [1.0, 1.0, 1.0] + snap.particles.moment_inertia[1] = [1.0, 2.0, 1.0] sim = simulation_factory(snap) sim.operations.integrator = make_langevin_integrator(force) return sim @@ -77,14 +80,12 @@ def make_simulation(force, d=1, types=None, dimensions=3): return make_simulation -@pytest.mark.parametrize("mode", [('none', 'shift'), ('shift', 'none')]) +@pytest.mark.parametrize("mode", [("none", "shift"), ("shift", "none")]) def test_mode(make_two_particle_simulation, mode): """Test that all modes are correctly set on construction.""" cell = md.nlist.Cell(buffer=0.4) # Test setting on construction - gay_berne = md.pair.aniso.GayBerne(nlist=cell, - default_r_cut=2.5, - mode=mode[0]) + gay_berne = md.pair.aniso.GayBerne(nlist=cell, default_r_cut=2.5, mode=mode[0]) assert gay_berne.mode == mode[0] # Test setting @@ -92,23 +93,24 @@ def test_mode(make_two_particle_simulation, mode): assert gay_berne.mode == mode[1] # Ensure that mode remains the same after attaching - gay_berne.params[('A', 'A')] = {'epsilon': 1, 'lpar': 0.5, 'lperp': 1.0} + gay_berne.params[("A", "A")] = {"epsilon": 1, "lpar": 0.5, "lperp": 1.0} sim = make_two_particle_simulation(dimensions=3, d=0.5, force=gay_berne) sim.run(0) assert gay_berne.mode == mode[1] -@pytest.mark.parametrize("mode", ['foo', 1, True, 'xplor']) +@pytest.mark.parametrize("mode", ["foo", 1, True, "xplor"]) def test_mode_invalid(mode): """Test mode validation on construction and setting.""" # Test errors on construction with pytest.raises(TypeConversionError): - gay_berne = md.pair.aniso.GayBerne(nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=2.5, - mode=mode) - gay_berne = md.pair.aniso.GayBerne(nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) - gay_berne.params[('A', 'A')] = {'epsilon': 1, 'lpar': 0.5, 'lperp': 1.0} + gay_berne = md.pair.aniso.GayBerne( + nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5, mode=mode + ) + gay_berne = md.pair.aniso.GayBerne( + nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5 + ) + gay_berne.params[("A", "A")] = {"epsilon": 1, "lpar": 0.5, "lperp": 1.0} # Test errors on setting with pytest.raises(TypeConversionError): gay_berne.mode = mode @@ -124,21 +126,21 @@ def test_rcut(make_two_particle_simulation, r_cut): # Test setting new_r_cut = r_cut * 1.1 - gay_berne.r_cut[('A', 'A')] = new_r_cut - assert gay_berne.r_cut[('A', 'A')] == new_r_cut + gay_berne.r_cut[("A", "A")] = new_r_cut + assert gay_berne.r_cut[("A", "A")] == new_r_cut - expected_r_cut = {('A', 'A'): new_r_cut} + expected_r_cut = {("A", "A"): new_r_cut} assert_equivalent_data_structures(gay_berne.r_cut.to_base(), expected_r_cut) - gay_berne.params[('A', 'A')] = {'epsilon': 1, 'lpar': 0.5, 'lperp': 1.0} - sim = make_two_particle_simulation(dimensions=3, d=.5, force=gay_berne) + gay_berne.params[("A", "A")] = {"epsilon": 1, "lpar": 0.5, "lperp": 1.0} + sim = make_two_particle_simulation(dimensions=3, d=0.5, force=gay_berne) # Check after attaching sim.run(0) assert_equivalent_data_structures(gay_berne.r_cut.to_base(), expected_r_cut) -@pytest.mark.parametrize("r_cut", [-1., 'foo', None]) +@pytest.mark.parametrize("r_cut", [-1.0, "foo", None]) def test_rcut_invalid(r_cut): """Test r_cut validation logic.""" cell = md.nlist.Cell(buffer=0.4) @@ -149,7 +151,7 @@ def test_rcut_invalid(r_cut): # Test setting error gay_berne = md.pair.aniso.GayBerne(nlist=cell, default_r_cut=2.5) with pytest.raises(ValueError): - gay_berne.r_cut[('A', 'B')] = r_cut + gay_berne.r_cut[("A", "B")] = r_cut def 
isclose(value, reference, rtol=5e-6): @@ -170,8 +172,9 @@ def expand_dict(iterable_dict): yield dict(zip(iterable_dict.keys(), values)) -AnisoPotentialSpecification = namedtuple("AnisoParametersSpecification", - ("cls", "type_parameters")) +AnisoPotentialSpecification = namedtuple( + "AnisoParametersSpecification", ("cls", "type_parameters") +) def make_aniso_spec(cls, type_parameters=None): @@ -180,7 +183,7 @@ def make_aniso_spec(cls, type_parameters=None): return AnisoPotentialSpecification(cls, type_parameters) -def _valid_params(particle_types=['A', 'B']): +def _valid_params(particle_types=["A", "B"]): """Create valid full specifications for anisotropic potentials.""" def to_type_parameter_dicts(types, argument_dict): @@ -207,7 +210,8 @@ def to_type_parameter_dicts(types, argument_dict): for name, (values, num_types) in argument_dict.items(): if num_types > 1: type_keys = itertools.combinations_with_replacement( - particle_types, num_types) + particle_types, num_types + ) else: type_keys = particle_types @@ -217,105 +221,149 @@ def to_type_parameter_dicts(types, argument_dict): for type_key, spec in zip(type_keys, expand_dict(values)) } else: - tp_spec = { - type_key: spec for type_key, spec in zip(type_keys, values) - } + tp_spec = {type_key: spec for type_key, spec in zip(type_keys, values)} type_parameters_dicts[name] = tp_spec return type_parameters_dicts valid_params_list = [] dipole_arg_dict = { - 'params': ({ - 'A': [0.5, 1.5, 3.47], - 'kappa': [4., 1.2, 0.3] - }, 2), - 'mu': ([(1.0, 0, 0), (0.5, 0, 0)], 1) + "params": ({"A": [0.5, 1.5, 3.47], "kappa": [4.0, 1.2, 0.3]}, 2), + "mu": ([(1.0, 0, 0), (0.5, 0, 0)], 1), } valid_params_list.append( make_aniso_spec( md.pair.aniso.Dipole, - to_type_parameter_dicts(particle_types, dipole_arg_dict))) + to_type_parameter_dicts(particle_types, dipole_arg_dict), + ) + ) gay_berne_arg_dict = { - 'params': ({ - 'epsilon': [0.5, 0.25, 0.1], - 'lperp': [0.5, 0.45, 0.3], - 'lpar': [.7, 0.2, 0.375] - }, 2) + "params": ( + { + "epsilon": [0.5, 0.25, 0.1], + "lperp": [0.5, 0.45, 0.3], + "lpar": [0.7, 0.2, 0.375], + }, + 2, + ) } valid_params_list.append( make_aniso_spec( md.pair.aniso.GayBerne, - to_type_parameter_dicts(particle_types, gay_berne_arg_dict))) + to_type_parameter_dicts(particle_types, gay_berne_arg_dict), + ) + ) alj_arg_dict0 = { - 'params': ({ - 'epsilon': [0.5, 1.1, 0.147], - 'sigma_i': [0.4, 0.12, 0.3], - 'sigma_j': [4., 1.2, 0.3], - 'alpha': [0, 1, 3], - 'contact_ratio_i': [0.15, 0.3, 0.145], - 'contact_ratio_j': [0.15, 0.3, 0.145], - 'average_simplices': [True, False, True] - }, 2), - 'shape': ({ - "vertices": [[], []], - "rounding_radii": [(0.1, 0.2, 0.15), (0.3, 0.3, 0.3)], - "faces": [[], []] - }, 1) + "params": ( + { + "epsilon": [0.5, 1.1, 0.147], + "sigma_i": [0.4, 0.12, 0.3], + "sigma_j": [4.0, 1.2, 0.3], + "alpha": [0, 1, 3], + "contact_ratio_i": [0.15, 0.3, 0.145], + "contact_ratio_j": [0.15, 0.3, 0.145], + "average_simplices": [True, False, True], + }, + 2, + ), + "shape": ( + { + "vertices": [[], []], + "rounding_radii": [(0.1, 0.2, 0.15), (0.3, 0.3, 0.3)], + "faces": [[], []], + }, + 1, + ), } valid_params_list.append( - make_aniso_spec(md.pair.aniso.ALJ, - to_type_parameter_dicts(particle_types, alj_arg_dict0))) + make_aniso_spec( + md.pair.aniso.ALJ, to_type_parameter_dicts(particle_types, alj_arg_dict0) + ) + ) shape_vertices = [ # octahedron - [(0.5, 0, 0), (-0.5, 0, 0), (0, 0.5, 0), (0, -0.5, 0), (0, 0, 0.5), - (0, 0, -0.5)], + [ + (0.5, 0, 0), + (-0.5, 0, 0), + (0, 0.5, 0), + (0, -0.5, 0), + (0, 0, 0.5), + (0, 0, 
-0.5), + ], # cube - [(0.5, -0.5, -0.5), (0.5, 0.5, -0.5), (0.5, 0.5, 0.5), (-0.5, 0.5, 0.5), - (-0.5, 0.5, -0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), - (-0.5, -0.5, -0.5)], + [ + (0.5, -0.5, -0.5), + (0.5, 0.5, -0.5), + (0.5, 0.5, 0.5), + (-0.5, 0.5, 0.5), + (-0.5, 0.5, -0.5), + (-0.5, -0.5, 0.5), + (0.5, -0.5, 0.5), + (-0.5, -0.5, -0.5), + ], ] shape_faces = [ # octahedron - [[5, 3, 1], [0, 3, 5], [1, 3, 4], [4, 3, 0], [5, 2, 0], [1, 2, 5], - [0, 2, 4], [4, 2, 1]], + [ + [5, 3, 1], + [0, 3, 5], + [1, 3, 4], + [4, 3, 0], + [5, 2, 0], + [1, 2, 5], + [0, 2, 4], + [4, 2, 1], + ], # cube - [[4, 3, 2, 1], [0, 1, 2, 6], [2, 3, 5, 6], [7, 4, 1, 0], [6, 5, 7, 0], - [3, 4, 7, 5]] + [ + [4, 3, 2, 1], + [0, 1, 2, 6], + [2, 3, 5, 6], + [7, 4, 1, 0], + [6, 5, 7, 0], + [3, 4, 7, 5], + ], ] alj_arg_dict1 = { - 'params': ({ - 'epsilon': [0.5, 1.1, 0.147], - 'sigma_i': [0.4, 0.12, 0.3], - 'sigma_j': [4., 1.2, 0.3], - 'alpha': [0, 1, 3], - 'contact_ratio_i': [0.15, 0.3, 0.145], - 'contact_ratio_j': [0.15, 0.3, 0.145], - 'average_simplices': [True, False, True] - }, 2), - 'shape': ({ - "vertices": shape_vertices, - "rounding_radii": [(0.1, 0.01, 0.15), (0.0, 0.0, 0.0)], - "faces": shape_faces - }, 1) + "params": ( + { + "epsilon": [0.5, 1.1, 0.147], + "sigma_i": [0.4, 0.12, 0.3], + "sigma_j": [4.0, 1.2, 0.3], + "alpha": [0, 1, 3], + "contact_ratio_i": [0.15, 0.3, 0.145], + "contact_ratio_j": [0.15, 0.3, 0.145], + "average_simplices": [True, False, True], + }, + 2, + ), + "shape": ( + { + "vertices": shape_vertices, + "rounding_radii": [(0.1, 0.01, 0.15), (0.0, 0.0, 0.0)], + "faces": shape_faces, + }, + 1, + ), } valid_params_list.append( - make_aniso_spec(md.pair.aniso.ALJ, - to_type_parameter_dicts(particle_types, alj_arg_dict1))) + make_aniso_spec( + md.pair.aniso.ALJ, to_type_parameter_dicts(particle_types, alj_arg_dict1) + ) + ) return valid_params_list class PotentialId: - def __init__(self): self.cls_dict = {} @@ -325,21 +373,18 @@ def __call__(self, obj): return f"{obj.cls.__name__}-{self.cls_dict[obj.cls]}" -@pytest.mark.parametrize('pair_potential_spec', - _valid_params(), - ids=PotentialId()) -def test_setting_params_and_shape(make_two_particle_simulation, - pair_potential_spec): - pair_potential = pair_potential_spec.cls(nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) +@pytest.mark.parametrize("pair_potential_spec", _valid_params(), ids=PotentialId()) +def test_setting_params_and_shape(make_two_particle_simulation, pair_potential_spec): + pair_potential = pair_potential_spec.cls( + nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5 + ) for key, value in pair_potential_spec.type_parameters.items(): setattr(pair_potential, key, value) assert_equivalent_data_structures(value, getattr(pair_potential, key)) - sim = make_two_particle_simulation(types=['A', 'B'], - dimensions=3, - d=0.5, - force=pair_potential) + sim = make_two_particle_simulation( + types=["A", "B"], dimensions=3, d=0.5, force=pair_potential + ) sim.run(0) for key, value in pair_potential_spec.type_parameters.items(): assert_equivalent_data_structures(value, getattr(pair_potential, key)) @@ -356,18 +401,17 @@ def _aniso_forces_and_energies(): """ # holds the forces, energies, and torques associated with an anisotropic # pair potential. 
- FETtuple = namedtuple('FETtuple', [ - 'pair_potential', 'pair_potential_params', 'forces', 'energies', - 'torques' - ]) + FETtuple = namedtuple( + "FETtuple", + ["pair_potential", "pair_potential_params", "forces", "energies", "torques"], + ) path = Path(__file__).parent / "aniso_forces_and_energies.json" with path.open() as f: computations = json.load(f) fet_list = [] for pot in computations: - for i, params in enumerate(expand_dict( - computations[pot]["params"])): + for i, params in enumerate(expand_dict(computations[pot]["params"])): fet_list.append( FETtuple( getattr(md.pair.aniso, pot), @@ -375,29 +419,26 @@ def _aniso_forces_and_energies(): computations[pot]["forces"][i], computations[pot]["energies"][i], computations[pot]["torques"][i], - )) + ) + ) return fet_list @pytest.fixture(scope="function", params=_valid_params(), ids=PotentialId()) def pair_potential(request): spec = request.param - pair_potential = spec.cls(nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) + pair_potential = spec.cls(nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5) for key, value in spec.type_parameters.items(): setattr(pair_potential, key, value) return pair_potential def test_run(simulation_factory, lattice_snapshot_factory, pair_potential): - snap = lattice_snapshot_factory(particle_types=['A', 'B'], - n=7, - a=2.0, - r=0.01) + snap = lattice_snapshot_factory(particle_types=["A", "B"], n=7, a=2.0, r=0.01) if snap.communicator.rank == 0: - snap.particles.typeid[:] = np.random.randint(0, - len(snap.particles.types), - snap.particles.N) + snap.particles.typeid[:] = np.random.randint( + 0, len(snap.particles.types), snap.particles.N + ) sim = simulation_factory(snap) integrator = md.Integrator(dt=0.005, integrate_rotational_dof=True) integrator.forces.append(pair_potential) @@ -409,20 +450,21 @@ def test_run(simulation_factory, lattice_snapshot_factory, pair_potential): forces = pair_potential.forces energies = pair_potential.energies if new_snap.communicator.rank == 0: - assert not np.allclose(new_snap.particles.position, - old_snap.particles.position) + assert not np.allclose(new_snap.particles.position, old_snap.particles.position) assert np.any(energies != 0) assert np.any(forces != 0) - autotuned_kernel_parameter_check(instance=pair_potential, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check( + instance=pair_potential, activate=lambda: sim.run(1) + ) -@pytest.mark.parametrize("aniso_forces_and_energies", - _aniso_forces_and_energies(), - ids=lambda x: x.pair_potential.__name__) -def test_aniso_force_computes(make_two_particle_simulation, - aniso_forces_and_energies): +@pytest.mark.parametrize( + "aniso_forces_and_energies", + _aniso_forces_and_energies(), + ids=lambda x: x.pair_potential.__name__, +) +def test_aniso_force_computes(make_two_particle_simulation, aniso_forces_and_energies): r"""These are pure regression tests from HOOMD-blue version 3.0 beta 1. This tests 2 conditions with three parameter values for each pair potential. 
@@ -438,21 +480,23 @@ def test_aniso_force_computes(make_two_particle_simulation, """ pot = aniso_forces_and_energies.pair_potential( - nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5) + nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5 + ) for param, value in aniso_forces_and_energies.pair_potential_params.items(): - getattr(pot, param)[('A', 'A')] = value - sim = make_two_particle_simulation(types=['A'], d=0.75, force=pot) + getattr(pot, param)[("A", "A")] = value + sim = make_two_particle_simulation(types=["A"], d=0.75, force=pot) sim.run(0) particle_distances = [0.75, 1.5] - orientations = [[0.86615809, 0.4997701, 0.0, 0.0], - [0.70738827, 0.0, 0.0, 0.70682518]] - for i, (distance, - orientation) in enumerate(zip(particle_distances, orientations)): + orientations = [ + [0.86615809, 0.4997701, 0.0, 0.0], + [0.70738827, 0.0, 0.0, 0.70682518], + ] + for i, (distance, orientation) in enumerate(zip(particle_distances, orientations)): snap = sim.state.get_snapshot() # Set up proper distances and orientations if snap.communicator.rank == 0: - snap.particles.position[0] = [0, 0, .1] - snap.particles.position[1] = [0, 0, distance + .1] + snap.particles.position[0] = [0, 0, 0.1] + snap.particles.position[1] = [0, 0, distance + 0.1] snap.particles.orientation[1] = orientation sim.state.set_snapshot(snap) @@ -462,26 +506,23 @@ def test_aniso_force_computes(make_two_particle_simulation, sim_torques = sim.operations.integrator.forces[0].torques # Compare the gathered quantities for the potential if sim_energies is not None: - assert isclose(sim_energies[0], - aniso_forces_and_energies.energies[i]) + assert isclose(sim_energies[0], aniso_forces_and_energies.energies[i]) assert isclose(sim_forces[0], aniso_forces_and_energies.forces[i]) assert isclose(-sim_forces[1], aniso_forces_and_energies.forces[i]) assert isclose(sim_torques, aniso_forces_and_energies.torques[i]) -@pytest.mark.parametrize('pair_potential_spec', - _valid_params(), - ids=PotentialId()) +@pytest.mark.parametrize("pair_potential_spec", _valid_params(), ids=PotentialId()) def test_pickling(make_two_particle_simulation, pair_potential_spec): - pair_potential = pair_potential_spec.cls(nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) + pair_potential = pair_potential_spec.cls( + nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5 + ) for key, value in pair_potential_spec.type_parameters.items(): setattr(pair_potential, key, value) - sim = make_two_particle_simulation(types=['A', 'B'], - dimensions=3, - d=0.5, - force=pair_potential) + sim = make_two_particle_simulation( + types=["A", "B"], dimensions=3, d=0.5, force=pair_potential + ) pickling_check(pair_potential) sim.run(0) pickling_check(pair_potential) @@ -491,39 +532,43 @@ def _base_expected_loggable(include_type_shapes=False): base = { "forces": { "category": hoomd.logging.LoggerCategories["particle"], - "default": True + "default": True, }, "torques": { "category": hoomd.logging.LoggerCategories["particle"], - "default": True + "default": True, }, "virials": { "category": hoomd.logging.LoggerCategories["particle"], - "default": True + "default": True, }, "energies": { "category": hoomd.logging.LoggerCategories["particle"], - "default": True + "default": True, }, "energy": { "category": hoomd.logging.LoggerCategories["scalar"], - "default": True - } + "default": True, + }, } if include_type_shapes: - base["type_shapes"] = { - 'category': LoggerCategories.object, - 'default': True - } + base["type_shapes"] = {"category": LoggerCategories.object, "default": True} return base 
@pytest.mark.parametrize( "cls,log_check_params", - ((cls, log_check_params) for cls, log_check_params in zip(( - md.pair.aniso.GayBerne, md.pair.aniso.Dipole, - md.pair.aniso.ALJ), (_base_expected_loggable(True), - _base_expected_loggable(), - _base_expected_loggable(True))))) + ( + (cls, log_check_params) + for cls, log_check_params in zip( + (md.pair.aniso.GayBerne, md.pair.aniso.Dipole, md.pair.aniso.ALJ), + ( + _base_expected_loggable(True), + _base_expected_loggable(), + _base_expected_loggable(True), + ), + ) + ), +) def test_logging(cls, log_check_params): - logging_check(cls, ('md', 'pair', 'aniso'), log_check_params) + logging_check(cls, ("md", "pair", "aniso"), log_check_params) diff --git a/hoomd/md/pytest/test_array_view.py b/hoomd/md/pytest/test_array_view.py index 87366c7105..c248719edf 100644 --- a/hoomd/md/pytest/test_array_view.py +++ b/hoomd/md/pytest/test_array_view.py @@ -77,10 +77,9 @@ def is_equal(self, a, b): attrs = { "SphereWall": ("radius", "origin", "inside"), "CylinderWall": ("radius", "origin", "axis", "inside"), - "PlaneWall": ("origin", "normal") + "PlaneWall": ("origin", "normal"), }[type(a).__name__] - return all( - np.allclose(getattr(a, attr), getattr(b, attr)) for attr in attrs) + return all(np.allclose(getattr(a, attr), getattr(b, attr)) for attr in attrs) def get_collection_size(self): return getattr(self._wall_collection, f"num_{self._mode}s") diff --git a/hoomd/md/pytest/test_bond.py b/hoomd/md/pytest/test_bond.py index 383920083b..f7ca87422b 100644 --- a/hoomd/md/pytest/test_bond.py +++ b/hoomd/md/pytest/test_bond.py @@ -4,8 +4,11 @@ import hoomd from hoomd import md from hoomd.conftest import expected_loggable_params -from hoomd.conftest import (logging_check, pickling_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + logging_check, + pickling_check, + autotuned_kernel_parameter_check, +) import pytest import numpy as np @@ -87,23 +90,23 @@ ] -@pytest.mark.parametrize('bond_cls, bond_args, params, force, energy', - bond_test_parameters) +@pytest.mark.parametrize( + "bond_cls, bond_args, params, force, energy", bond_test_parameters +) def test_before_attaching(bond_cls, bond_args, params, force, energy): potential = bond_cls(**bond_args) - potential.params['A-A'] = params + potential.params["A-A"] = params for key in params: - assert potential.params['A-A'][key] == pytest.approx(params[key]) + assert potential.params["A-A"][key] == pytest.approx(params[key]) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def snapshot_factory(two_particle_snapshot_factory): - def make_snapshot(): snapshot = two_particle_snapshot_factory(d=0.969, L=5) if snapshot.communicator.rank == 0: snapshot.bonds.N = 1 - snapshot.bonds.types = ['A-A'] + snapshot.bonds.types = ["A-A"] snapshot.bonds.typeid[0] = 0 snapshot.bonds.group[0] = (0, 1) @@ -112,34 +115,36 @@ def make_snapshot(): return make_snapshot -@pytest.mark.parametrize('bond_cls, bond_args, params, force, energy', - bond_test_parameters) -def test_after_attaching(snapshot_factory, simulation_factory, bond_cls, - bond_args, params, force, energy): +@pytest.mark.parametrize( + "bond_cls, bond_args, params, force, energy", bond_test_parameters +) +def test_after_attaching( + snapshot_factory, simulation_factory, bond_cls, bond_args, params, force, energy +): sim = simulation_factory(snapshot_factory()) potential = bond_cls(**bond_args) - potential.params['A-A'] = params + potential.params["A-A"] = params - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - 
forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) for key in params: - assert potential.params['A-A'][key] == pytest.approx(params[key]) + assert potential.params["A-A"][key] == pytest.approx(params[key]) -@pytest.mark.parametrize('bond_cls, bond_args, params, force, energy', - bond_test_parameters) -def test_forces_and_energies(snapshot_factory, simulation_factory, bond_cls, - bond_args, params, force, energy): +@pytest.mark.parametrize( + "bond_cls, bond_args, params, force, energy", bond_test_parameters +) +def test_forces_and_energies( + snapshot_factory, simulation_factory, bond_cls, bond_args, params, force, energy +): sim = simulation_factory(snapshot_factory()) potential = bond_cls(**bond_args) - potential.params['A-A'] = params + potential.params["A-A"] = params - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) @@ -147,50 +152,61 @@ def test_forces_and_energies(snapshot_factory, simulation_factory, bond_cls, sim_forces = potential.forces if sim.device.communicator.rank == 0: assert sum(sim_energies) == pytest.approx(energy, rel=1e-2) - np.testing.assert_allclose(sim_forces[0], [force, 0.0, 0.0], - rtol=1e-2, - atol=1e-5) - np.testing.assert_allclose(sim_forces[1], [-1 * force, 0.0, 0.0], - rtol=1e-2, - atol=1e-5) - - -@pytest.mark.parametrize('bond_cls, bond_args, params, force, energy', - bond_test_parameters) -def test_kernel_parameters(snapshot_factory, simulation_factory, bond_cls, - bond_args, params, force, energy): + np.testing.assert_allclose( + sim_forces[0], [force, 0.0, 0.0], rtol=1e-2, atol=1e-5 + ) + np.testing.assert_allclose( + sim_forces[1], [-1 * force, 0.0, 0.0], rtol=1e-2, atol=1e-5 + ) + + +@pytest.mark.parametrize( + "bond_cls, bond_args, params, force, energy", bond_test_parameters +) +def test_kernel_parameters( + snapshot_factory, simulation_factory, bond_cls, bond_args, params, force, energy +): sim = simulation_factory(snapshot_factory()) potential = bond_cls(**bond_args) - potential.params['A-A'] = params + potential.params["A-A"] = params - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) - autotuned_kernel_parameter_check(instance=potential, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=potential, activate=lambda: sim.run(1)) # Test Logging @pytest.mark.parametrize( - 'cls, expected_namespace, expected_loggables', - zip((md.bond.Bond, md.bond.Harmonic, md.bond.FENEWCA, md.bond.Table, - md.bond.Tether), itertools.repeat(('md', 'bond')), - itertools.repeat(expected_loggable_params))) + "cls, expected_namespace, expected_loggables", + zip( + ( + md.bond.Bond, + md.bond.Harmonic, + md.bond.FENEWCA, + md.bond.Table, + md.bond.Tether, + ), + itertools.repeat(("md", "bond")), + itertools.repeat(expected_loggable_params), + ), +) def test_logging(cls, expected_namespace, expected_loggables): logging_check(cls, expected_namespace, expected_loggables) # Pickle Testing -@pytest.mark.parametrize('bond_cls, bond_args, params, force, energy', - bond_test_parameters) -def test_pickling(simulation_factory, snapshot_factory, bond_cls, bond_args, - params, force, energy): +@pytest.mark.parametrize( + "bond_cls, bond_args, params, force, energy", bond_test_parameters +) +def test_pickling( + simulation_factory, snapshot_factory, bond_cls, 
bond_args, params, force, energy +): sim = simulation_factory(snapshot_factory()) potential = bond_cls(**bond_args) - potential.params['A-A'] = params + potential.params["A-A"] = params pickling_check(potential) integrator = hoomd.md.Integrator(0.05, forces=[potential]) diff --git a/hoomd/md/pytest/test_burst_writer.py b/hoomd/md/pytest/test_burst_writer.py index ae0f2de699..f57818e2b7 100644 --- a/hoomd/md/pytest/test_burst_writer.py +++ b/hoomd/md/pytest/test_burst_writer.py @@ -6,6 +6,7 @@ import hoomd import numpy as np import pytest + try: import gsd.hoomd except ImportError: @@ -16,9 +17,9 @@ N_RUN_STEPS = 3 -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def hoomd_snapshot(lattice_snapshot_factory): - snap = lattice_snapshot_factory(particle_types=['t1', 't2'], n=10, a=2.0) + snap = lattice_snapshot_factory(particle_types=["t1", "t2"], n=10, a=2.0) if snap.communicator.rank == 0: Np = snap.particles.N snap.particles.typeid[:] = np.repeat([0, 1], int(Np / 2)) @@ -29,27 +30,27 @@ def hoomd_snapshot(lattice_snapshot_factory): snap.particles.angmom[:] = np.array([0, 0, 0, 1]) # bonds - snap.bonds.types = ['b1', 'b2'] + snap.bonds.types = ["b1", "b2"] snap.bonds.N = 2 snap.bonds.typeid[:] = [0, 1] snap.bonds.group[0] = [0, 1] snap.bonds.group[1] = [2, 3] # angles - snap.angles.types = ['a1', 'a2'] + snap.angles.types = ["a1", "a2"] snap.angles.N = 2 snap.angles.typeid[:] = [1, 0] snap.angles.group[0] = [0, 1, 2] snap.angles.group[1] = [2, 3, 0] # dihedrals - snap.dihedrals.types = ['d1'] + snap.dihedrals.types = ["d1"] snap.dihedrals.N = 1 snap.dihedrals.typeid[:] = [0] snap.dihedrals.group[0] = [0, 1, 2, 3] # impropers - snap.impropers.types = ['i1'] + snap.impropers.types = ["i1"] snap.impropers.N = 1 snap.impropers.typeid[:] = [0] snap.impropers.group[0] = [3, 2, 1, 0] @@ -60,7 +61,7 @@ def hoomd_snapshot(lattice_snapshot_factory): snap.constraints.value[0] = 2.5 # special pairs - snap.pairs.types = ['p1', 'p2'] + snap.pairs.types = ["p1", "p2"] snap.pairs.N = 2 snap.pairs.typeid[:] = [0, 1] snap.pairs.group[0] = [0, 1] @@ -71,9 +72,8 @@ def hoomd_snapshot(lattice_snapshot_factory): def lj_integrator(): integrator = hoomd.md.Integrator(dt=0.005) - lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) - lj.params.default = {'sigma': 1, 'epsilon': 1} + lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(buffer=0.4), default_r_cut=2.5) + lj.params.default = {"sigma": 1, "epsilon": 1} integrator.forces.append(lj) langevin = hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1) integrator.methods.append(langevin) @@ -81,7 +81,7 @@ def lj_integrator(): return integrator -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def sim(simulation_factory, hoomd_snapshot): sim = simulation_factory(hoomd_snapshot) sim.operations.integrator = lj_integrator() @@ -98,7 +98,7 @@ def check_write(sim: hoomd.Simulation, filename: str, trigger_period: int): sim.operations.writers[0].dump() sim.operations.writers[0].flush() if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: # have to skip first frame which is from the first call. 
for snap, gsd_snap in zip(snaps, traj[1:]): assert_equivalent_snapshots(gsd_snap, snap) @@ -106,22 +106,26 @@ def check_write(sim: hoomd.Simulation, filename: str, trigger_period: int): def test_write_on_start(sim, tmp_path): filename = tmp_path / "temporary_test_file.gsd" - burst_writer = hoomd.write.Burst(trigger=1, - filename=filename, - mode='wb', - dynamic=['property', 'momentum'], - max_burst_size=3) + burst_writer = hoomd.write.Burst( + trigger=1, + filename=filename, + mode="wb", + dynamic=["property", "momentum"], + max_burst_size=3, + ) sim.operations.writers.append(burst_writer) # Errors when file does not exist with pytest.raises(RuntimeError): # still creates file before erroring. sim.run(0) sim.operations.writers.clear() - burst_writer = hoomd.write.Burst(trigger=1, - filename=filename, - mode='wb', - dynamic=['property', 'momentum'], - max_burst_size=3) + burst_writer = hoomd.write.Burst( + trigger=1, + filename=filename, + mode="wb", + dynamic=["property", "momentum"], + max_burst_size=3, + ) sim.operations.writers.append(burst_writer) # Errors when file exists without frame with pytest.raises(RuntimeError): @@ -132,12 +136,14 @@ def test_len(sim, tmp_path): filename = tmp_path / "temporary_test_file.gsd" burst_trigger = hoomd.trigger.Periodic(period=2, phase=1) - burst_writer = hoomd.write.Burst(trigger=burst_trigger, - filename=filename, - mode='wb', - dynamic=['property', 'momentum'], - max_burst_size=3, - write_at_start=True) + burst_writer = hoomd.write.Burst( + trigger=burst_trigger, + filename=filename, + mode="wb", + dynamic=["property", "momentum"], + max_burst_size=3, + write_at_start=True, + ) sim.operations.writers.append(burst_writer) sim.run(8) assert len(burst_writer) == 3 @@ -145,18 +151,22 @@ def test_len(sim, tmp_path): assert len(burst_writer) == 0 -@pytest.mark.parametrize("start, end", [(0, -1), (0, 0), (0, 1), (0, 2), (1, 1), - (2, 2), (1, 2), (1, -1), (2, -1)]) +@pytest.mark.parametrize( + "start, end", + [(0, -1), (0, 0), (0, 1), (0, 2), (1, 1), (2, 2), (1, 2), (1, -1), (2, -1)], +) def test_burst_dump(sim, tmp_path, start, end): filename = tmp_path / "temporary_test_file.gsd" burst_trigger = hoomd.trigger.Periodic(period=2, phase=1) - burst_writer = hoomd.write.Burst(trigger=burst_trigger, - filename=filename, - mode='wb', - dynamic=['property', 'momentum'], - max_burst_size=3, - write_at_start=True) + burst_writer = hoomd.write.Burst( + trigger=burst_trigger, + filename=filename, + mode="wb", + dynamic=["property", "momentum"], + max_burst_size=3, + write_at_start=True, + ) sim.operations.writers.append(burst_writer) sim.run(8) burst_writer.flush() @@ -172,9 +182,10 @@ def test_burst_dump(sim, tmp_path, start, end): if sim.device.communicator.rank == 0: if end == -1: end = len(dumped_frames) - with gsd.hoomd.open(name=filename, mode='r') as traj: - assert [frame.configuration.step for frame in traj - ] == [0] + dumped_frames[start:end] + with gsd.hoomd.open(name=filename, mode="r") as traj: + assert [frame.configuration.step for frame in traj] == [0] + dumped_frames[ + start:end + ] @pytest.mark.parametrize("clear_entire_buffer", [True, False]) @@ -186,11 +197,12 @@ def test_burst_dump_with_clear_buffer(sim, tmp_path, clear_entire_buffer): burst_writer = hoomd.write.Burst( trigger=burst_trigger, filename=filename, - mode='wb', - dynamic=['property', 'momentum'], + mode="wb", + dynamic=["property", "momentum"], max_burst_size=4, write_at_start=True, - clear_whole_buffer_after_dump=clear_entire_buffer) + 
clear_whole_buffer_after_dump=clear_entire_buffer, + ) sim.operations.writers.append(burst_writer) sim.run(12) burst_writer.flush() @@ -204,7 +216,7 @@ def test_burst_dump_with_clear_buffer(sim, tmp_path, clear_entire_buffer): burst_writer.flush() dumped_frames = [0, 7, 9] if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: print([frame.configuration.step for frame in traj]) assert [frame.configuration.step for frame in traj] == dumped_frames @@ -216,19 +228,21 @@ def test_burst_dump_with_clear_buffer(sim, tmp_path, clear_entire_buffer): else: dumped_frames += [11, 13, 15] if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: print([frame.configuration.step for frame in traj]) assert [frame.configuration.step for frame in traj] == dumped_frames def test_burst_max_size(sim, tmp_path): filename = Path(tmp_path / "temporary_test_file.gsd") - burst_writer = hoomd.write.Burst(filename=str(filename), - trigger=hoomd.trigger.Periodic(1), - mode='wb', - dynamic=['property', 'momentum'], - max_burst_size=N_RUN_STEPS, - write_at_start=True) + burst_writer = hoomd.write.Burst( + filename=str(filename), + trigger=hoomd.trigger.Periodic(1), + mode="wb", + dynamic=["property", "momentum"], + max_burst_size=N_RUN_STEPS, + write_at_start=True, + ) sim.operations.writers.append(burst_writer) # Run 1 extra step to fill the burst which does not include the first frame sim.run(N_RUN_STEPS + 1) @@ -240,11 +254,13 @@ def test_burst_mode_xb(sim, tmp_path): filename = tmp_path / "temporary_test_file.gsd" if sim.device.communicator.rank == 0: Path(filename).touch() - burst_writer = hoomd.write.Burst(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='xb', - dynamic=['property', 'momentum'], - write_at_start=True) + burst_writer = hoomd.write.Burst( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + mode="xb", + dynamic=["property", "momentum"], + write_at_start=True, + ) sim.operations.writers.append(burst_writer) if sim.device.communicator.rank == 0: with pytest.raises(RuntimeError): @@ -254,17 +270,18 @@ def test_burst_mode_xb(sim, tmp_path): # test mode=xb creates a new file filename_xb = tmp_path / "new_temporary_test_file.gsd" - burst_writer = hoomd.write.Burst(filename=filename_xb, - trigger=hoomd.trigger.Periodic(1), - mode='xb', - dynamic=['property', 'momentum'], - write_at_start=True) + burst_writer = hoomd.write.Burst( + filename=filename_xb, + trigger=hoomd.trigger.Periodic(1), + mode="xb", + dynamic=["property", "momentum"], + write_at_start=True, + ) sim.operations.writers.append(burst_writer) check_write(sim, filename_xb, 1) def test_write_burst_log(sim, tmp_path): - filename = tmp_path / "temporary_test_file.gsd" thermo = hoomd.md.compute.ThermodynamicQuantities(filter=hoomd.filter.All()) @@ -273,12 +290,14 @@ def test_write_burst_log(sim, tmp_path): logger = hoomd.logging.Logger() logger.add(thermo) - burst_writer = hoomd.write.Burst(filename=filename, - trigger=hoomd.trigger.Periodic(1), - filter=hoomd.filter.Null(), - mode='wb', - logger=logger, - write_at_start=True) + burst_writer = hoomd.write.Burst( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + filter=hoomd.filter.Null(), + mode="wb", + logger=logger, + write_at_start=True, + ) sim.operations.writers.append(burst_writer) kinetic_energies = [] @@ -289,7 +308,7 @@ def test_write_burst_log(sim, tmp_path): 
burst_writer.flush() if sim.device.communicator.rank == 0: key = "md/compute/ThermodynamicQuantities/kinetic_energy" - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: for frame, sim_ke in zip(traj[1:], kinetic_energies): assert frame.log[key] == sim_ke @@ -301,11 +320,12 @@ def test_burst_dump_empty_buffer(sim, tmp_path, clear_entire_buffer): burst_writer = hoomd.write.Burst( trigger=burst_trigger, filename=filename, - mode='wb', - dynamic=['property', 'momentum'], + mode="wb", + dynamic=["property", "momentum"], max_burst_size=3, write_at_start=True, - clear_whole_buffer_after_dump=clear_entire_buffer) + clear_whole_buffer_after_dump=clear_entire_buffer, + ) sim.operations.writers.append(burst_writer) sim.run(8) burst_writer.flush() @@ -318,12 +338,12 @@ def test_burst_dump_empty_buffer(sim, tmp_path, clear_entire_buffer): burst_writer.dump(1, 2) burst_writer.flush() if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: assert len(traj) == 2 sim.run(4) burst_writer.dump() burst_writer.flush() if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: assert len(traj) == (4 if clear_entire_buffer else 5) diff --git a/hoomd/md/pytest/test_constant_force.py b/hoomd/md/pytest/test_constant_force.py index 0a7045f0fe..a016d3f628 100644 --- a/hoomd/md/pytest/test_constant_force.py +++ b/hoomd/md/pytest/test_constant_force.py @@ -9,45 +9,44 @@ def test_attributes(): constant = hoomd.md.force.Constant(filter=hoomd.filter.All()) - assert constant.constant_force['A'] == (0.0, 0.0, 0.0) - assert constant.constant_torque['A'] == (0.0, 0.0, 0.0) + assert constant.constant_force["A"] == (0.0, 0.0, 0.0) + assert constant.constant_torque["A"] == (0.0, 0.0, 0.0) - constant.constant_force['A'] = (0.5, 0.0, 0.0) - assert constant.constant_force['A'] == (0.5, 0.0, 0.0) - constant.constant_force['A'] = (0.0, 0.0, 1.0) - assert constant.constant_force['A'] == (0.0, 0.0, 1.0) + constant.constant_force["A"] = (0.5, 0.0, 0.0) + assert constant.constant_force["A"] == (0.5, 0.0, 0.0) + constant.constant_force["A"] = (0.0, 0.0, 1.0) + assert constant.constant_force["A"] == (0.0, 0.0, 1.0) def test_attach_and_filter(simulation_factory, two_particle_snapshot_factory): - constant = hoomd.md.force.Constant(filter=hoomd.filter.Type(['A'])) + constant = hoomd.md.force.Constant(filter=hoomd.filter.Type(["A"])) - snapshot = two_particle_snapshot_factory(particle_types=['A', 'B'], - dimensions=3, - d=8) + snapshot = two_particle_snapshot_factory( + particle_types=["A", "B"], dimensions=3, d=8 + ) if snapshot.communicator.rank == 0: snapshot.particles.typeid[:] = [1, 0] sim = simulation_factory(snapshot) integrator = hoomd.md.Integrator(0.0) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)) integrator.forces.append(constant) sim.operations.integrator = integrator sim.run(0) - assert constant.constant_force['A'] == (0.0, 0.0, 0.0) - assert constant.constant_torque['A'] == (0.0, 0.0, 0.0) + assert constant.constant_force["A"] == (0.0, 0.0, 0.0) + assert constant.constant_torque["A"] == (0.0, 0.0, 0.0) - constant.constant_force['A'] = (0.5, 0.0, 0.0) - assert constant.constant_force['A'] == (0.5, 0.0, 0.0) - constant.constant_torque['A'] = (0.0, 0.0, 1.0) - assert 
constant.constant_torque['A'] == (0.0, 0.0, 1.0)
+    constant.constant_force["A"] = (0.5, 0.0, 0.0)
+    assert constant.constant_force["A"] == (0.5, 0.0, 0.0)
+    constant.constant_torque["A"] = (0.0, 0.0, 1.0)
+    assert constant.constant_torque["A"] == (0.0, 0.0, 1.0)
 
-    constant.constant_force['B'] = (0.0, 0.125, 5.0)
-    assert constant.constant_force['B'] == (0.0, 0.125, 5.0)
-    constant.constant_torque['B'] = (4.0, -6.0, 0.5)
-    assert constant.constant_torque['B'] == (4.0, -6.0, 0.5)
+    constant.constant_force["B"] = (0.0, 0.125, 5.0)
+    assert constant.constant_force["B"] == (0.0, 0.125, 5.0)
+    constant.constant_torque["B"] = (4.0, -6.0, 0.5)
+    assert constant.constant_torque["B"] == (4.0, -6.0, 0.5)
 
     sim.run(1)
 
@@ -55,35 +54,32 @@ def test_attach_and_filter(simulation_factory, two_particle_snapshot_factory):
     torques = constant.torques
 
     if sim.device.communicator.rank == 0:
-        numpy.testing.assert_array_equal(forces,
-                                         [[0.0, 0.0, 0.0], [0.5, 0.0, 0.0]])
-        numpy.testing.assert_array_equal(torques,
-                                         [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
+        numpy.testing.assert_array_equal(forces, [[0.0, 0.0, 0.0], [0.5, 0.0, 0.0]])
+        numpy.testing.assert_array_equal(torques, [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
 
 
 def test_types(simulation_factory, two_particle_snapshot_factory):
     constant = hoomd.md.force.Constant(filter=hoomd.filter.All())
 
-    snapshot = two_particle_snapshot_factory(particle_types=['A', 'B'],
-                                             dimensions=3,
-                                             d=8)
+    snapshot = two_particle_snapshot_factory(
+        particle_types=["A", "B"], dimensions=3, d=8
+    )
     if snapshot.communicator.rank == 0:
         snapshot.particles.typeid[:] = [1, 0]
 
     sim = simulation_factory(snapshot)
 
     integrator = hoomd.md.Integrator(0.0)
-    integrator.methods.append(
-        hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0))
+    integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0))
     integrator.forces.append(constant)
     sim.operations.integrator = integrator
 
     sim.run(0)
 
-    constant.constant_force['A'] = (0.5, 0.0, 0.0)
-    constant.constant_torque['A'] = (0.0, 0.0, 1.0)
+    constant.constant_force["A"] = (0.5, 0.0, 0.0)
+    constant.constant_torque["A"] = (0.0, 0.0, 1.0)
 
-    constant.constant_force['B'] = (0.0, 0.125, 5.0)
-    constant.constant_torque['B'] = (4.0, -6.0, 0.5)
+    constant.constant_force["B"] = (0.0, 0.125, 5.0)
+    constant.constant_torque["B"] = (4.0, -6.0, 0.5)
 
     sim.run(1)
 
@@ -91,29 +87,25 @@ def test_types(simulation_factory, two_particle_snapshot_factory):
     torques = constant.torques
 
     if sim.device.communicator.rank == 0:
-        numpy.testing.assert_array_equal(forces,
-                                         [[0.0, 0.125, 5.0], [0.5, 0.0, 0.0]])
-        numpy.testing.assert_array_equal(torques,
-                                         [[4.0, -6.0, 0.5], [0.0, 0.0, 1.0]])
+        numpy.testing.assert_array_equal(forces, [[0.0, 0.125, 5.0], [0.5, 0.0, 0.0]])
+        numpy.testing.assert_array_equal(torques, [[4.0, -6.0, 0.5], [0.0, 0.0, 1.0]])
 
 
 def test_kernel_parameters(simulation_factory, two_particle_snapshot_factory):
     constant = hoomd.md.force.Constant(filter=hoomd.filter.All())
 
     sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=8))
-    integrator = hoomd.md.Integrator(.05)
-    integrator.methods.append(
-        hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0))
+    integrator = hoomd.md.Integrator(0.05)
+    integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0))
     integrator.forces.append(constant)
     sim.operations.integrator = integrator
 
     sim.run(0)
 
     def activate_kernel():
-        constant.constant_force['A'] = (1.0, 2.0, 3.0)
+        constant.constant_force["A"] = (1.0, 2.0, 3.0)
         sim.run(1)
-    autotuned_kernel_parameter_check(instance=constant,
-
activate=activate_kernel) + autotuned_kernel_parameter_check(instance=constant, activate=activate_kernel) def test_pickling(simulation_factory, two_particle_snapshot_factory): @@ -121,9 +113,10 @@ def test_pickling(simulation_factory, two_particle_snapshot_factory): constant = hoomd.md.force.Constant(filter=hoomd.filter.All()) pickling_check(constant) integrator = hoomd.md.Integrator( - .05, + 0.05, methods=[hoomd.md.methods.Langevin(hoomd.filter.All(), kT=0)], - forces=[constant]) + forces=[constant], + ) sim.operations.integrator = integrator sim.run(0) pickling_check(constant) diff --git a/hoomd/md/pytest/test_constrain_distance.py b/hoomd/md/pytest/test_constrain_distance.py index 562239372e..57a40209e7 100644 --- a/hoomd/md/pytest/test_constrain_distance.py +++ b/hoomd/md/pytest/test_constrain_distance.py @@ -7,14 +7,13 @@ import pytest -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def polymer_snapshot_factory(device): """Make a snapshot with polymers and distance constraints.""" - def make_snapshot(polymer_length=10, - N_polymers=10, - polymer_spacing=1.2, - bead_spacing=1.1): + def make_snapshot( + polymer_length=10, N_polymers=10, polymer_spacing=1.2, bead_spacing=1.1 + ): """Make the snapshot. Args: @@ -30,19 +29,33 @@ def make_snapshot(polymer_length=10, if s.communicator.rank == 0: s.configuration.box = [ - polymer_spacing * N_polymers, bead_spacing * polymer_length, 0, - 0, 0, 0 + polymer_spacing * N_polymers, + bead_spacing * polymer_length, + 0, + 0, + 0, + 0, ] s.particles.N = polymer_length * N_polymers - s.particles.types = ['A'] - x_coords = numpy.linspace(-polymer_spacing * N_polymers / 2, - polymer_spacing * N_polymers / 2, - num=N_polymers, - endpoint=False) + polymer_spacing / 2 - y_coords = numpy.linspace(-bead_spacing * polymer_length / 2, - bead_spacing * polymer_length / 2, - num=N_polymers, - endpoint=False) + bead_spacing / 2 + s.particles.types = ["A"] + x_coords = ( + numpy.linspace( + -polymer_spacing * N_polymers / 2, + polymer_spacing * N_polymers / 2, + num=N_polymers, + endpoint=False, + ) + + polymer_spacing / 2 + ) + y_coords = ( + numpy.linspace( + -bead_spacing * polymer_length / 2, + bead_spacing * polymer_length / 2, + num=N_polymers, + endpoint=False, + ) + + bead_spacing / 2 + ) position = [] constraint_values = [] @@ -121,8 +134,8 @@ def test_basic_simulation(simulation_factory, polymer_snapshot_factory): cell = hoomd.md.nlist.Cell(buffer=0.4) lj = hoomd.md.pair.LJ(nlist=cell) - lj.params[('A', 'A')] = dict(epsilon=1, sigma=1) - lj.r_cut[('A', 'A')] = 2**(1 / 6) + lj.params[("A", "A")] = dict(epsilon=1, sigma=1) + lj.r_cut[("A", "A")] = 2 ** (1 / 6) integrator.forces.append(lj) sim.operations.integrator = integrator @@ -141,8 +154,6 @@ def test_basic_simulation(simulation_factory, polymer_snapshot_factory): delta_r = r[constraints[:, 1]] - r[constraints[:, 0]] bond_lengths = numpy.sqrt(numpy.sum(delta_r * delta_r, axis=1)) - numpy.testing.assert_allclose(bond_lengths, - snap.constraints.value, - rtol=1e-5) + numpy.testing.assert_allclose(bond_lengths, snap.constraints.value, rtol=1e-5) autotuned_kernel_parameter_check(instance=d, activate=lambda: sim.run(1)) diff --git a/hoomd/md/pytest/test_custom_force.py b/hoomd/md/pytest/test_custom_force.py index 3d53e3f06e..c526090f21 100644 --- a/hoomd/md/pytest/test_custom_force.py +++ b/hoomd/md/pytest/test_custom_force.py @@ -8,6 +8,7 @@ # cupy works implicitly to set values in GPU force arrays try: import cupy + CUPY_IMPORTED = True except ImportError: # Necessary to test failure 
of using GPU buffers in CPU simulation. @@ -17,6 +18,7 @@ # mpi4py is needed for the ghost data test try: from mpi4py import MPI + MPI4PY_IMPORTED = True except ImportError: MPI4PY_IMPORTED = False @@ -25,27 +27,25 @@ from hoomd import md -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def local_force_names(device): """Get local access properties based on the chosen devices.""" - names = ['cpu_local_force_arrays'] + names = ["cpu_local_force_arrays"] if isinstance(device, hoomd.device.GPU): - names.append('gpu_local_force_arrays') + names.append("gpu_local_force_arrays") return names -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def force_simulation_factory(simulation_factory): """Create a basic simulation where there is only one force.""" def make_sim(force_obj, snapshot=None, domain_decomposition=None): sim = simulation_factory(snapshot, domain_decomposition) thermostat = hoomd.md.methods.thermostats.MTTK(kT=1.0, tau=1.0) - npt = md.methods.ConstantPressure(hoomd.filter.All(), - S=1, - tauS=1, - couple="none", - thermostat=thermostat) + npt = md.methods.ConstantPressure( + hoomd.filter.All(), S=1, tauS=1, couple="none", thermostat=thermostat + ) integrator = md.Integrator(dt=0.005, forces=[force_obj], methods=[npt]) sim.operations.integrator = integrator return sim @@ -75,13 +75,12 @@ def _skip_if_gpu_device_and_no_cupy(sim): class MyForce(md.force.Custom): - def __init__(self, local_force_name): super().__init__(aniso=True) self._local_force_name = local_force_name def set_forces(self, timestep): - if 'gpu' in self._local_force_name: + if "gpu" in self._local_force_name: array_mod = cupy else: array_mod = np @@ -93,8 +92,9 @@ def set_forces(self, timestep): arrays.virial[:] = array_mod.arange(6)[None, :] -def test_simulation(local_force_names, force_simulation_factory, - lattice_snapshot_factory): +def test_simulation( + local_force_names, force_simulation_factory, lattice_snapshot_factory +): """Make sure custom force can plug into simulation without crashing.""" for local_force_name in local_force_names: snap = lattice_snapshot_factory() @@ -116,7 +116,6 @@ def test_simulation(local_force_names, force_simulation_factory, class ForceAsFunctionOfTag(md.force.Custom): - def __init__(self): super().__init__(aniso=True) @@ -124,29 +123,31 @@ def set_forces(self, timestep): with self.cpu_local_force_arrays as force_arrays: with self._state.cpu_local_snapshot as local_snapshot: tags = local_snapshot.particles.tag - force_arrays.force[:] = np.stack((tags * 1, tags * 2, tags * 3), - axis=-1) + force_arrays.force[:] = np.stack( + (tags * 1, tags * 2, tags * 3), axis=-1 + ) energy = local_snapshot.particles.tag.astype(np.float64) * -10.0 force_arrays.potential_energy[:] = energy tags_float = tags.astype(np.float64) force_arrays.torque[:] = np.stack( - (tags_float * -3.0, tags_float * -2.0, tags_float * -1.0), - axis=-1) + (tags_float * -3.0, tags_float * -2.0, tags_float * -1.0), axis=-1 + ) if force_arrays.virial.shape[0] != 0: - force_arrays.virial[:] = np.stack(( - tags_float * 1.0, - tags_float * -2.0, - tags_float * -3.0, - tags_float * 4.0, - tags_float * -5.0, - tags_float * 6.0, - ), - axis=-1) + force_arrays.virial[:] = np.stack( + ( + tags_float * 1.0, + tags_float * -2.0, + tags_float * -3.0, + tags_float * 4.0, + tags_float * -5.0, + tags_float * 6.0, + ), + axis=-1, + ) @pytest.mark.cpu -def test_force_array_ordering(force_simulation_factory, - lattice_snapshot_factory): +def test_force_array_ordering(force_simulation_factory, 
lattice_snapshot_factory): """Make sure values in force arrays are returned in correct order.""" snap = lattice_snapshot_factory() custom_force = ForceAsFunctionOfTag() @@ -161,19 +162,28 @@ def test_force_array_ordering(force_simulation_factory, if sim.device.communicator.rank == 0: npt.assert_array_equal(energies, np.arange(sim.state.N_particles) * -10) npt.assert_array_equal( - forces, np.stack((indices * 1, indices * 2, indices * 3), axis=-1)) + forces, np.stack((indices * 1, indices * 2, indices * 3), axis=-1) + ) npt.assert_array_equal( - torques, - np.stack((indices * -3, indices * -2, indices * -1), axis=-1)) + torques, np.stack((indices * -3, indices * -2, indices * -1), axis=-1) + ) npt.assert_array_equal( virials, - np.stack((indices * 1, indices * -2, indices * -3, indices * 4, - indices * -5, indices * 6), - axis=-1)) + np.stack( + ( + indices * 1, + indices * -2, + indices * -3, + indices * 4, + indices * -5, + indices * 6, + ), + axis=-1, + ), + ) class MyPeriodicField(md.force.Custom): - def __init__(self, local_force_name, A, i, p, w): super().__init__() self._local_force_name = local_force_name @@ -185,14 +195,14 @@ def __init__(self, local_force_name, A, i, p, w): def _numpy_array(self, arr): """If arr is hoomd array change it to numpy.""" - if arr.__class__.__name__ == 'HOOMDGPUArray': + if arr.__class__.__name__ == "HOOMDGPUArray": return arr.get() else: return arr def _evaluate_periodic(self, snapshot): """Evaluate force and energy in python.""" - if 'gpu' in self._local_force_name: + if "gpu" in self._local_force_name: array_mod = cupy else: array_mod = np @@ -211,31 +221,36 @@ def _evaluate_periodic(self, snapshot): b = {0: b1, 1: b2, 2: b3}.get(self._i) dot = array_mod.dot(array_mod.array(positions), array_mod.array(b)) - cos_term = 1 / (2 * array_mod.pi * self._p * self._w) * array_mod.cos( - self._p * dot) - sin_term = 1 / (2 * array_mod.pi * self._p * self._w) * array_mod.sin( - self._p * dot) + cos_term = ( + 1 / (2 * array_mod.pi * self._p * self._w) * array_mod.cos(self._p * dot) + ) + sin_term = ( + 1 / (2 * array_mod.pi * self._p * self._w) * array_mod.sin(self._p * dot) + ) energies = self._A * array_mod.tanh(cos_term) forces = self._A * sin_term - forces *= 1 - array_mod.tanh(cos_term)**2 + forces *= 1 - array_mod.tanh(cos_term) ** 2 forces = array_mod.outer(forces, array_mod.array(b)) return forces, energies def set_forces(self, timestep): - with getattr(self._state, self._local_snap_name) as snap, \ - getattr(self, self._local_force_name) as arrays: + with ( + getattr(self._state, self._local_snap_name) as snap, + getattr(self, self._local_force_name) as arrays, + ): forces, potential = self._evaluate_periodic(snap) arrays.force[:] = forces arrays.potential_energy[:] = potential -def test_compare_to_periodic(local_force_names, force_simulation_factory, - two_particle_snapshot_factory): +def test_compare_to_periodic( + local_force_names, force_simulation_factory, two_particle_snapshot_factory +): """Test hoomd external periodic compared to a python version.""" # sim with built-in force snap = two_particle_snapshot_factory() periodic = md.external.field.Periodic() - periodic.params['A'] = dict(A=1, i=0, p=1, w=1) + periodic.params["A"] = dict(A=1, i=0, p=1, w=1) sim = force_simulation_factory(periodic, snap) integrator = sim.operations.integrator @@ -285,9 +300,9 @@ def test_compare_to_periodic(local_force_names, force_simulation_factory, npt.assert_allclose(virials1, virials2) -def test_nested_context_managers(local_force_names, - 
two_particle_snapshot_factory, - force_simulation_factory): +def test_nested_context_managers( + local_force_names, two_particle_snapshot_factory, force_simulation_factory +): """Ensure we cannot nest local force context managers.""" for local_force_name in local_force_names: snap = two_particle_snapshot_factory() @@ -301,8 +316,9 @@ def test_nested_context_managers(local_force_names, return -def test_ghost_data_access(local_force_names, two_particle_snapshot_factory, - force_simulation_factory): +def test_ghost_data_access( + local_force_names, two_particle_snapshot_factory, force_simulation_factory +): """Ensure size of ghost data arrays are correct.""" # skip this test if mpi4py not imported if not MPI4PY_IMPORTED: @@ -318,7 +334,7 @@ def test_ghost_data_access(local_force_names, two_particle_snapshot_factory, # make LJ force so there is a ghost width on each rank nlist = md.nlist.Cell(buffer=0.2) lj_force = md.pair.LJ(nlist, default_r_cut=2.0) - lj_force.params[('A', 'A')] = dict(sigma=1, epsilon=1) + lj_force.params[("A", "A")] = dict(sigma=1, epsilon=1) sim.operations.integrator.forces.append(lj_force) sim.run(0) @@ -331,12 +347,12 @@ def test_ghost_data_access(local_force_names, two_particle_snapshot_factory, N_global = mpi_comm.bcast(N_global, root=0) # test buffer lengths - array_buffers = ['force', 'torque', 'potential_energy', 'virial'] + array_buffers = ["force", "torque", "potential_energy", "virial"] with getattr(custom_force, local_force_name) as arrays: for buffer_name in array_buffers: buffer = getattr(arrays, buffer_name) - ghost_buffer = getattr(arrays, 'ghost_' + buffer_name) - buffer_with_ghost = getattr(arrays, buffer_name + '_with_ghost') + ghost_buffer = getattr(arrays, "ghost_" + buffer_name) + buffer_with_ghost = getattr(arrays, buffer_name + "_with_ghost") # make sure particle numbers add up within the rank assert len(buffer) + len(ghost_buffer) == len(buffer_with_ghost) @@ -359,12 +375,13 @@ def _assert_buffers_readonly(force_arrays): force_arrays.torque[:] = 2345 -def test_data_buffers_readonly(local_force_names, two_particle_snapshot_factory, - simulation_factory): +def test_data_buffers_readonly( + local_force_names, two_particle_snapshot_factory, simulation_factory +): """Ensure local data buffers for non-custom force classes are read-only.""" nlist = md.nlist.Cell(buffer=0.2) lj = md.pair.LJ(nlist, default_r_cut=2.0) - lj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) + lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) snap = two_particle_snapshot_factory() sim = simulation_factory(snap) @@ -380,7 +397,7 @@ def test_data_buffers_readonly(local_force_names, two_particle_snapshot_factory, _assert_buffers_readonly(arrays) -def _make_two_particle_snapshot(device, particle_types=['A'], d=1, L=20): +def _make_two_particle_snapshot(device, particle_types=["A"], d=1, L=20): """Make the snapshot. 
Args: @@ -400,7 +417,7 @@ def _make_two_particle_snapshot(device, particle_types=['A'], d=1, L=20): s.configuration.box = box s.particles.N = 2 # shift particle positions slightly in z so MPI tests pass - s.particles.position[:] = [[-d / 2, 0, .1], [d / 2, 0, .1]] + s.particles.position[:] = [[-d / 2, 0, 0.1], [d / 2, 0, 0.1]] s.particles.types = particle_types return s @@ -412,21 +429,20 @@ def test_failure_with_cpu_device_and_gpu_buffer(): snap = _make_two_particle_snapshot(device) sim = hoomd.Simulation(device) sim.create_state_from_snapshot(snap) - custom_force = MyForce('gpu_local_force_arrays') + custom_force = MyForce("gpu_local_force_arrays") thermostat = hoomd.md.methods.thermostats.MTTK(kT=1.0, tau=1.0) - npt = md.methods.ConstantPressure(hoomd.filter.All(), - thermostat=thermostat, - S=1, - tauS=1, - couple="none") + npt = md.methods.ConstantPressure( + hoomd.filter.All(), thermostat=thermostat, S=1, tauS=1, couple="none" + ) integrator = md.Integrator(dt=0.005, forces=[custom_force], methods=[npt]) sim.operations.integrator = integrator with pytest.raises(RuntimeError): sim.run(1) -def test_torques_update(local_force_names, two_particle_snapshot_factory, - force_simulation_factory): +def test_torques_update( + local_force_names, two_particle_snapshot_factory, force_simulation_factory +): """Confirm torque'd particles' orientation changes over time.""" initial_orientations = np.array([[1, 0, 0, 0], [1, 0, 0, 0]]) for local_force_name in local_force_names: @@ -440,20 +456,16 @@ def test_torques_update(local_force_names, two_particle_snapshot_factory, sim.operations.integrator.integrate_rotational_dof = True if sim.device.communicator.rank == 0: - npt.assert_allclose(snap.particles.orientation, - initial_orientations) + npt.assert_allclose(snap.particles.orientation, initial_orientations) sim.run(2) snap = sim.state.get_snapshot() if sim.device.communicator.rank == 0: - assert np.count_nonzero(snap.particles.orientation - - initial_orientations) + assert np.count_nonzero(snap.particles.orientation - initial_orientations) def test_force_zeroing(force_simulation_factory, two_particle_snapshot_factory): - class TestForceZeroing(hoomd.md.force.Custom): - def __init__(self): super().__init__(aniso=True) diff --git a/hoomd/md/pytest/test_dihedral.py b/hoomd/md/pytest/test_dihedral.py index 40015b3fdf..1da9dec06c 100644 --- a/hoomd/md/pytest/test_dihedral.py +++ b/hoomd/md/pytest/test_dihedral.py @@ -4,12 +4,16 @@ import hoomd from hoomd import md from hoomd.conftest import expected_loggable_params -from hoomd.conftest import (logging_check, pickling_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + logging_check, + pickling_check, + autotuned_kernel_parameter_check, +) import pytest import numpy import itertools + # Test parameters include the class, class keyword arguments, bond params, # force, and energy. dihedral_test_parameters = [ @@ -65,10 +69,9 @@ ] -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def dihedral_snapshot_factory(device): - - def make_snapshot(d=1.0, phi_deg=45, particle_types=['A'], L=20): + def make_snapshot(d=1.0, phi_deg=45, particle_types=["A"], L=20): phi_rad = phi_deg * (numpy.pi / 180) # the central particles are along the x-axis, so phi is determined from # the angle in the yz plane. 
@@ -97,7 +100,7 @@ def make_snapshot(d=1.0, phi_deg=45, particle_types=['A'], L=20): ] snapshot.dihedrals.N = 1 - snapshot.dihedrals.types = ['A-A-A-A'] + snapshot.dihedrals.types = ["A-A-A-A"] snapshot.dihedrals.typeid[0] = 0 snapshot.dihedrals.group[0] = (0, 1, 2, 3) @@ -106,38 +109,53 @@ def make_snapshot(d=1.0, phi_deg=45, particle_types=['A'], L=20): return make_snapshot -@pytest.mark.parametrize('dihedral_cls, dihedral_args, params, force, energy', - dihedral_test_parameters) +@pytest.mark.parametrize( + "dihedral_cls, dihedral_args, params, force, energy", dihedral_test_parameters +) def test_before_attaching(dihedral_cls, dihedral_args, params, force, energy): potential = dihedral_cls(**dihedral_args) - potential.params['A-A-A-A'] = params + potential.params["A-A-A-A"] = params for key in params: - potential.params['A-A-A-A'][key] == pytest.approx(params[key]) + potential.params["A-A-A-A"][key] == pytest.approx(params[key]) -@pytest.mark.parametrize('dihedral_cls, dihedral_args, params, force, energy', - dihedral_test_parameters) -def test_after_attaching(dihedral_snapshot_factory, simulation_factory, - dihedral_cls, dihedral_args, params, force, energy): +@pytest.mark.parametrize( + "dihedral_cls, dihedral_args, params, force, energy", dihedral_test_parameters +) +def test_after_attaching( + dihedral_snapshot_factory, + simulation_factory, + dihedral_cls, + dihedral_args, + params, + force, + energy, +): snapshot = dihedral_snapshot_factory(d=0.969, L=5) sim = simulation_factory(snapshot) potential = dihedral_cls(**dihedral_args) - potential.params['A-A-A-A'] = params + potential.params["A-A-A-A"] = params - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) for key in params: - assert potential.params['A-A-A-A'][key] == pytest.approx(params[key]) + assert potential.params["A-A-A-A"][key] == pytest.approx(params[key]) -@pytest.mark.parametrize('dihedral_cls, dihedral_args, params, force, energy', - dihedral_test_parameters) -def test_forces_and_energies(dihedral_snapshot_factory, simulation_factory, - dihedral_cls, dihedral_args, params, force, - energy): +@pytest.mark.parametrize( + "dihedral_cls, dihedral_args, params, force, energy", dihedral_test_parameters +) +def test_forces_and_energies( + dihedral_snapshot_factory, + simulation_factory, + dihedral_cls, + dihedral_args, + params, + force, + energy, +): phi_deg = 45 phi_rad = phi_deg * (numpy.pi / 180) snapshot = dihedral_snapshot_factory(phi_deg=phi_deg) @@ -145,13 +163,12 @@ def test_forces_and_energies(dihedral_snapshot_factory, simulation_factory, # the dihedral angle is in yz plane, thus no force along x axis force_array = force * numpy.asarray( - [0, numpy.sin(-phi_rad / 2), - numpy.cos(-phi_rad / 2)]) + [0, numpy.sin(-phi_rad / 2), numpy.cos(-phi_rad / 2)] + ) potential = dihedral_cls(**dihedral_args) - potential.params['A-A-A-A'] = params + potential.params["A-A-A-A"] = params - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) @@ -159,64 +176,86 @@ def test_forces_and_energies(dihedral_snapshot_factory, simulation_factory, sim_forces = potential.forces if sim.device.communicator.rank == 0: assert sum(sim_energies) == pytest.approx(energy, rel=1e-2, abs=1e-5) - numpy.testing.assert_allclose(sim_forces[0], - force_array, - rtol=1e-2, - atol=1e-5) - 
numpy.testing.assert_allclose(sim_forces[1], - -1 * force_array, - rtol=1e-2, - atol=1e-5) - numpy.testing.assert_allclose(sim_forces[2], - [0, -1 * force_array[1], force_array[2]], - rtol=1e-2, - atol=1e-5) - numpy.testing.assert_allclose(sim_forces[3], - [0, force_array[1], -1 * force_array[2]], - rtol=1e-2, - atol=1e-5) - - -@pytest.mark.parametrize('dihedral_cls, dihedral_args, params, force, energy', - dihedral_test_parameters) -def test_kernel_parameters(dihedral_snapshot_factory, simulation_factory, - dihedral_cls, dihedral_args, params, force, energy): + numpy.testing.assert_allclose(sim_forces[0], force_array, rtol=1e-2, atol=1e-5) + numpy.testing.assert_allclose( + sim_forces[1], -1 * force_array, rtol=1e-2, atol=1e-5 + ) + numpy.testing.assert_allclose( + sim_forces[2], + [0, -1 * force_array[1], force_array[2]], + rtol=1e-2, + atol=1e-5, + ) + numpy.testing.assert_allclose( + sim_forces[3], + [0, force_array[1], -1 * force_array[2]], + rtol=1e-2, + atol=1e-5, + ) + + +@pytest.mark.parametrize( + "dihedral_cls, dihedral_args, params, force, energy", dihedral_test_parameters +) +def test_kernel_parameters( + dihedral_snapshot_factory, + simulation_factory, + dihedral_cls, + dihedral_args, + params, + force, + energy, +): phi_deg = 45 snapshot = dihedral_snapshot_factory(phi_deg=phi_deg) sim = simulation_factory(snapshot) potential = dihedral_cls(**dihedral_args) - potential.params['A-A-A-A'] = params + potential.params["A-A-A-A"] = params - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) - autotuned_kernel_parameter_check(instance=potential, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=potential, activate=lambda: sim.run(1)) # Test Logging @pytest.mark.parametrize( - 'cls, expected_namespace, expected_loggables', - zip((md.dihedral.Dihedral, md.dihedral.Periodic, md.dihedral.Table, - md.dihedral.OPLS), itertools.repeat(('md', 'dihedral')), - itertools.repeat(expected_loggable_params))) + "cls, expected_namespace, expected_loggables", + zip( + ( + md.dihedral.Dihedral, + md.dihedral.Periodic, + md.dihedral.Table, + md.dihedral.OPLS, + ), + itertools.repeat(("md", "dihedral")), + itertools.repeat(expected_loggable_params), + ), +) def test_logging(cls, expected_namespace, expected_loggables): logging_check(cls, expected_namespace, expected_loggables) # Test Pickling -@pytest.mark.parametrize('dihedral_cls, dihedral_args, params, force, energy', - dihedral_test_parameters) -def test_pickling(simulation_factory, dihedral_snapshot_factory, dihedral_cls, - dihedral_args, params, force, energy): +@pytest.mark.parametrize( + "dihedral_cls, dihedral_args, params, force, energy", dihedral_test_parameters +) +def test_pickling( + simulation_factory, + dihedral_snapshot_factory, + dihedral_cls, + dihedral_args, + params, + force, + energy, +): phi_deg = 45 snapshot = dihedral_snapshot_factory(phi_deg=phi_deg) sim = simulation_factory(snapshot) potential = dihedral_cls(**dihedral_args) - potential.params['A-A-A-A'] = params + potential.params["A-A-A-A"] = params pickling_check(potential) integrator = hoomd.md.Integrator(0.05, forces=[potential]) diff --git a/hoomd/md/pytest/test_external.py b/hoomd/md/pytest/test_external.py index 27f1917ff6..258e2b3a35 100644 --- a/hoomd/md/pytest/test_external.py +++ b/hoomd/md/pytest/test_external.py @@ -9,8 +9,11 @@ import hoomd import hoomd.md as md from hoomd.conftest import expected_loggable_params 
-from hoomd.conftest import (logging_check, pickling_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + logging_check, + pickling_check, + autotuned_kernel_parameter_check, +) import itertools @@ -19,21 +22,19 @@ def _evaluate_periodic(snapshot, params): """Evaluate force and energy in python for Periodic.""" box = hoomd.Box(*snapshot.configuration.box) positions = snapshot.particles.position - A = params['A'] - i = params['i'] - w = params['w'] - p = params['p'] + A = params["A"] + i = params["i"] + w = params["w"] + p = params["p"] a1, a2, a3 = box.to_matrix().T V = np.dot(a1, np.cross(a2, a3)) b1 = 2 * np.pi / V * np.cross(a2, a3) b2 = 2 * np.pi / V * np.cross(a3, a1) b3 = 2 * np.pi / V * np.cross(a1, a2) b = {0: b1, 1: b2, 2: b3}.get(i) - energies = A * np.tanh( - 1 / (2 * np.pi * p * w) * np.cos(p * np.dot(positions, b))) + energies = A * np.tanh(1 / (2 * np.pi * p * w) * np.cos(p * np.dot(positions, b))) forces = A / (2 * np.pi * w) * np.sin(p * np.dot(positions, b)) - forces *= 1 - (np.tanh( - np.cos(p * np.dot(positions, b)) / (2 * np.pi * p * w)))**2 + forces *= 1 - (np.tanh(np.cos(p * np.dot(positions, b)) / (2 * np.pi * p * w))) ** 2 forces = np.outer(forces, b) torques = [ [0, 0, 0], @@ -58,8 +59,8 @@ def _evaluate_magnetic(snapshot, params): """Evaluate force and energy in python for MagneticField.""" positions = snapshot.particles.position N = len(positions) - B_field = params['B'] - b_moment = params['mu'] + B_field = params["B"] + b_moment = params["mu"] energies = np.repeat(-np.dot(b_moment, B_field), N) torques = np.tile(np.cross(b_moment, B_field), (N, 1)) forces = [ @@ -72,25 +73,42 @@ def _external_params(): """Each is tuple (cls_obj, param attr, lis(param values), eval func).""" list_ext_params = [] list_ext_params.append( - (hoomd.md.external.field.Periodic, "params", - list([dict(A=1.5, i=1, w=3.5, p=5), - dict(A=10, i=0, w=3.4, p=2)]), _evaluate_periodic)) + ( + hoomd.md.external.field.Periodic, + "params", + list([dict(A=1.5, i=1, w=3.5, p=5), dict(A=10, i=0, w=3.4, p=2)]), + _evaluate_periodic, + ) + ) list_ext_params.append( - (hoomd.md.external.field.Electric, "E", list([ - (1, 0, 0), - (0, 2, 0), - ]), _evaluate_electric)) - list_ext_params.append((hoomd.md.external.field.Magnetic, "params", - list([ - dict(B=(0, 2, -11.5), mu=(1, 2, 3)), - dict(B=(1, 0, 1), mu=(1, 1, 1)) - ]), _evaluate_magnetic)) + ( + hoomd.md.external.field.Electric, + "E", + list( + [ + (1, 0, 0), + (0, 2, 0), + ] + ), + _evaluate_electric, + ) + ) + list_ext_params.append( + ( + hoomd.md.external.field.Magnetic, + "params", + list( + [dict(B=(0, 2, -11.5), mu=(1, 2, 3)), dict(B=(1, 0, 1), mu=(1, 1, 1))] + ), + _evaluate_magnetic, + ) + ) return list_ext_params -@pytest.fixture(scope="function", - params=_external_params(), - ids=(lambda x: x[0].__name__)) +@pytest.fixture( + scope="function", params=_external_params(), ids=(lambda x: x[0].__name__) +) def external_params(request): return cp.deepcopy(request.param) @@ -100,22 +118,22 @@ def _assert_correct_params(external_obj, param_attr, params): if type(params) is dict: for param in params.keys(): npt.assert_allclose( - getattr(external_obj, param_attr)['A'][param], params[param]) + getattr(external_obj, param_attr)["A"][param], params[param] + ) if type(params) is tuple: - npt.assert_allclose(getattr(external_obj, param_attr)['A'], params) + npt.assert_allclose(getattr(external_obj, param_attr)["A"], params) -def test_get_set(simulation_factory, two_particle_snapshot_factory, - external_params): +def 
test_get_set(simulation_factory, two_particle_snapshot_factory, external_params): """Test we can get/set parameter while attached and while not attached.""" # unpack parameters cls_obj, param_attr, list_params, evaluator = external_params # create class instance, get/set params when not attached obj_instance = cls_obj() - getattr(obj_instance, param_attr)['A'] = list_params[0] + getattr(obj_instance, param_attr)["A"] = list_params[0] _assert_correct_params(obj_instance, param_attr, list_params[0]) - getattr(obj_instance, param_attr)['A'] = list_params[1] + getattr(obj_instance, param_attr)["A"] = list_params[1] _assert_correct_params(obj_instance, param_attr, list_params[1]) # set up simulation @@ -126,14 +144,15 @@ def test_get_set(simulation_factory, two_particle_snapshot_factory, sim.run(0) # get/set params while attached - getattr(obj_instance, param_attr)['A'] = list_params[0] + getattr(obj_instance, param_attr)["A"] = list_params[0] _assert_correct_params(obj_instance, param_attr, list_params[0]) - getattr(obj_instance, param_attr)['A'] = list_params[1] + getattr(obj_instance, param_attr)["A"] = list_params[1] _assert_correct_params(obj_instance, param_attr, list_params[1]) -def test_forces_and_energies(simulation_factory, lattice_snapshot_factory, - external_params): +def test_forces_and_energies( + simulation_factory, lattice_snapshot_factory, external_params +): """Run a small simulation and make sure forces/energies are correct.""" # unpack parameters cls_obj, param_attr, list_params, evaluator = external_params @@ -141,13 +160,12 @@ def test_forces_and_energies(simulation_factory, lattice_snapshot_factory, for param in list_params: # create class instance obj_instance = cls_obj() - getattr(obj_instance, param_attr)['A'] = param + getattr(obj_instance, param_attr)["A"] = param # set up simulation and run a bit snap = lattice_snapshot_factory(n=2) if snap.communicator.rank == 0: - snap.particles.charge[:] = np.random.random( - snap.particles.N) * 2 - 1 + snap.particles.charge[:] = np.random.random(snap.particles.N) * 2 - 1 sim = simulation_factory(snap) sim.operations.integrator = hoomd.md.Integrator(dt=0.001) sim.operations.integrator.forces.append(obj_instance) @@ -160,7 +178,8 @@ def test_forces_and_energies(simulation_factory, lattice_snapshot_factory, energies = sim.operations.integrator.forces[0].energies if new_snap.communicator.rank == 0: expected_forces, expected_torques, expected_energies = evaluator( - new_snap, param) + new_snap, param + ) # Set atol as the energies and forces very close to 0. # It would be better to run a test that applies appreciable forces # and energies. 
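For reference while reading the reformatted `_evaluate_periodic` above: for a particle at position r it evaluates the closed form below. This restates the Python evaluator in LaTeX as a reading aid; it is not wording taken from the HOOMD documentation.

    U(\mathbf{r}) = A \tanh\!\left[\frac{1}{2\pi p w}\,\cos\!\left(p\,\mathbf{b}_i\cdot\mathbf{r}\right)\right],
    \qquad
    \mathbf{F}(\mathbf{r}) = -\nabla U(\mathbf{r})
        = \frac{A}{2\pi w}\,\sin\!\left(p\,\mathbf{b}_i\cdot\mathbf{r}\right)
          \left[1-\tanh^{2}\!\left(\frac{\cos\!\left(p\,\mathbf{b}_i\cdot\mathbf{r}\right)}{2\pi p w}\right)\right]\mathbf{b}_i,

where \mathbf{b}_i is the reciprocal lattice vector selected by the index parameter i (built in the evaluator as \mathbf{b}_1 = \tfrac{2\pi}{V}\,\mathbf{a}_2\times\mathbf{a}_3 and cyclic permutations). The test_forces_and_energies hunk above compares the simulated forces and energies against exactly these expressions.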
@@ -170,26 +189,35 @@ def test_forces_and_energies(simulation_factory, lattice_snapshot_factory, # Test Logging -_potential_cls = (md.external.field.Field, md.external.field.Periodic, - md.external.field.Electric, md.external.field.Magnetic) +_potential_cls = ( + md.external.field.Field, + md.external.field.Periodic, + md.external.field.Electric, + md.external.field.Magnetic, +) @pytest.mark.parametrize( - 'cls, expected_namespace, expected_loggables', - zip(_potential_cls, itertools.repeat(('md', 'external', 'field')), - itertools.repeat(expected_loggable_params))) + "cls, expected_namespace, expected_loggables", + zip( + _potential_cls, + itertools.repeat(("md", "external", "field")), + itertools.repeat(expected_loggable_params), + ), +) def test_logging(cls, expected_namespace, expected_loggables): logging_check(cls, expected_namespace, expected_loggables) -def test_kernel_parameters(simulation_factory, two_particle_snapshot_factory, - external_params): +def test_kernel_parameters( + simulation_factory, two_particle_snapshot_factory, external_params +): # unpack parameters cls_obj, param_attr, list_params, evaluator = external_params # create class instance, get/set params when not attached obj_instance = cls_obj() - getattr(obj_instance, param_attr)['A'] = list_params[0] + getattr(obj_instance, param_attr)["A"] = list_params[0] pickling_check(obj_instance) # set up simulation @@ -199,20 +227,18 @@ def test_kernel_parameters(simulation_factory, two_particle_snapshot_factory, sim.operations.integrator.forces.append(obj_instance) sim.run(0) - autotuned_kernel_parameter_check(instance=obj_instance, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=obj_instance, activate=lambda: sim.run(1)) # Pickle Testing -def test_pickling(simulation_factory, two_particle_snapshot_factory, - external_params): +def test_pickling(simulation_factory, two_particle_snapshot_factory, external_params): """Test pickling while attached and while not attached.""" # unpack parameters cls_obj, param_attr, list_params, evaluator = external_params # create class instance, get/set params when not attached obj_instance = cls_obj() - getattr(obj_instance, param_attr)['A'] = list_params[0] + getattr(obj_instance, param_attr)["A"] = list_params[0] pickling_check(obj_instance) # set up simulation diff --git a/hoomd/md/pytest/test_filter_md.py b/hoomd/md/pytest/test_filter_md.py index ac2af33d9e..ccefef8e00 100644 --- a/hoomd/md/pytest/test_filter_md.py +++ b/hoomd/md/pytest/test_filter_md.py @@ -10,8 +10,7 @@ @pytest.fixture(scope="function") def make_filter_snapshot(device): - - def filter_snapshot(n=10, particle_types=['A']): + def filter_snapshot(n=10, particle_types=["A"]): s = Snapshot(device.communicator) if s.communicator.rank == 0: s.configuration.box = [20, 20, 20, 0, 0, 0] @@ -30,10 +29,10 @@ def test_rigid_filter(make_filter_snapshot, simulation_factory): rigid.body["A"] = { "constituent_types": ["B", "B", "B", "B"], "positions": [ - [1, 0, -1 / (2**(1. / 2.))], - [-1, 0, -1 / (2**(1. / 2.))], - [0, -1, 1 / (2**(1. / 2.))], - [0, 1, 1 / (2**(1. 
/ 2.))], + [1, 0, -1 / (2 ** (1.0 / 2.0))], + [-1, 0, -1 / (2 ** (1.0 / 2.0))], + [0, -1, 1 / (2 ** (1.0 / 2.0))], + [0, 1, 1 / (2 ** (1.0 / 2.0))], ], "orientations": [(1.0, 0.0, 0.0, 0.0)] * 4, } @@ -58,23 +57,26 @@ def check_tags(filter_, state, expected_tags): only_centers = Rigid() check_tags(only_centers, sim.state, np.arange(50)) - only_free = Rigid(('free',)) + only_free = Rigid(("free",)) check_tags(only_free, sim.state, np.arange(50, 100)) - only_constituent = Rigid(('constituent',)) + only_constituent = Rigid(("constituent",)) check_tags(only_constituent, sim.state, np.arange(100, 300)) - free_and_centers = Rigid(('free', 'center')) + free_and_centers = Rigid(("free", "center")) check_tags(free_and_centers, sim.state, np.arange(0, 100)) - constituent_and_centers = Rigid(('constituent', 'center')) - check_tags(constituent_and_centers, sim.state, - np.concatenate((np.arange(0, 50), np.arange(100, 300)))) + constituent_and_centers = Rigid(("constituent", "center")) + check_tags( + constituent_and_centers, + sim.state, + np.concatenate((np.arange(0, 50), np.arange(100, 300))), + ) - constituent_and_free = Rigid(('free', 'constituent')) + constituent_and_free = Rigid(("free", "constituent")) check_tags(constituent_and_free, sim.state, np.arange(50, 300)) - all_ = Rigid(('free', 'constituent', 'center')) + all_ = Rigid(("free", "constituent", "center")) check_tags(all_, sim.state, np.arange(0, 300)) @@ -108,9 +110,9 @@ def __eq__(self, other): # depending on how many particles are local to the MPI ranks. local_Np = snap.particles.charge.shape[0] N_negative_charge = max(0, max(1, int(local_Np * 0.5))) - negative_charge_ind = np.random.choice(local_Np, - N_negative_charge, - replace=False) + negative_charge_ind = np.random.choice( + local_Np, N_negative_charge, replace=False + ) # Get the expected tags returned by the custom filter and the positions # that should vary and remain static for testing after running. 
snap.particles.charge[negative_charge_ind] = -1.0 @@ -130,7 +132,9 @@ def __eq__(self, other): sim.run(100) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - assert not np.allclose(snap.particles.position[negative_charge_ind], - original_positions) - assert np.allclose(snap.particles.position[positive_charge_tags], - static_positions) + assert not np.allclose( + snap.particles.position[negative_charge_ind], original_positions + ) + assert np.allclose( + snap.particles.position[positive_charge_tags], static_positions + ) diff --git a/hoomd/md/pytest/test_flags.py b/hoomd/md/pytest/test_flags.py index 4aafed33e9..8f4bf2e4be 100644 --- a/hoomd/md/pytest/test_flags.py +++ b/hoomd/md/pytest/test_flags.py @@ -8,10 +8,10 @@ def test_per_particle_virial(simulation_factory, lattice_snapshot_factory): cell = hoomd.md.nlist.Cell(buffer=0.4) lj = hoomd.md.pair.LJ(nlist=cell) - lj.params[('A', 'A')] = dict(sigma=1.0, epsilon=1.0) - lj.r_cut[('A', 'A')] = 2.5 + lj.params[("A", "A")] = dict(sigma=1.0, epsilon=1.0) + lj.r_cut[("A", "A")] = 2.5 - a = 2**(1.0 / 6.0) + a = 2 ** (1.0 / 6.0) sim = simulation_factory(lattice_snapshot_factory(n=20, a=a, r=a * 0.01)) assert not sim.always_compute_pressure diff --git a/hoomd/md/pytest/test_gsd.py b/hoomd/md/pytest/test_gsd.py index 01247eec9c..86bf829527 100644 --- a/hoomd/md/pytest/test_gsd.py +++ b/hoomd/md/pytest/test_gsd.py @@ -4,6 +4,7 @@ import hoomd import numpy as np import pytest + try: import gsd.hoomd except ImportError: @@ -12,43 +13,45 @@ from hoomd.pytest.test_snapshot import assert_equivalent_snapshots -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def hoomd_snapshot(lattice_snapshot_factory): - snap = lattice_snapshot_factory(particle_types=['t1', 't2'], n=10, a=2.0) + snap = lattice_snapshot_factory(particle_types=["t1", "t2"], n=10, a=2.0) if snap.communicator.rank == 0: typeid_list = [0] * int(snap.particles.N / 2) typeid_list.extend([1] * int(snap.particles.N / 2)) snap.particles.typeid[:] = typeid_list[:] - snap.particles.velocity[:] = np.tile(np.linspace(1, 2, 3), - (snap.particles.N, 1)) - snap.particles.acceleration[:] = np.tile(np.linspace(1, 2, 3), - (snap.particles.N, 1)) + snap.particles.velocity[:] = np.tile( + np.linspace(1, 2, 3), (snap.particles.N, 1) + ) + snap.particles.acceleration[:] = np.tile( + np.linspace(1, 2, 3), (snap.particles.N, 1) + ) snap.particles.mass[:] = snap.particles.N * [1] snap.particles.charge[:] = snap.particles.N * [2] snap.particles.angmom[:] = snap.particles.N * [[0, 0, 0, 1]] # bonds - snap.bonds.types = ['b1', 'b2'] + snap.bonds.types = ["b1", "b2"] snap.bonds.N = 2 snap.bonds.typeid[:] = [0, 1] snap.bonds.group[0] = [0, 1] snap.bonds.group[1] = [2, 3] # angles - snap.angles.types = ['a1', 'a2'] + snap.angles.types = ["a1", "a2"] snap.angles.N = 2 snap.angles.typeid[:] = [1, 0] snap.angles.group[0] = [0, 1, 2] snap.angles.group[1] = [2, 3, 0] # dihedrals - snap.dihedrals.types = ['d1'] + snap.dihedrals.types = ["d1"] snap.dihedrals.N = 1 snap.dihedrals.typeid[:] = [0] snap.dihedrals.group[0] = [0, 1, 2, 3] # impropers - snap.impropers.types = ['i1'] + snap.impropers.types = ["i1"] snap.impropers.N = 1 snap.impropers.typeid[:] = [0] snap.impropers.group[0] = [3, 2, 1, 0] @@ -59,7 +62,7 @@ def hoomd_snapshot(lattice_snapshot_factory): snap.constraints.value[0] = 2.5 # special pairs - snap.pairs.types = ['p1', 'p2'] + snap.pairs.types = ["p1", "p2"] snap.pairs.N = 2 snap.pairs.typeid[:] = [0, 1] snap.pairs.group[0] = [0, 1] @@ -70,9 +73,8 @@ def 
hoomd_snapshot(lattice_snapshot_factory): def lj_integrator(): integrator = hoomd.md.Integrator(dt=0.005) - lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) - lj.params.default = {'sigma': 1, 'epsilon': 1} + lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(buffer=0.4), default_r_cut=2.5) + lj.params.default = {"sigma": 1, "epsilon": 1} integrator.forces.append(lj) langevin = hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1) integrator.methods.append(langevin) @@ -80,7 +82,7 @@ def lj_integrator(): return integrator -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def create_md_sim(simulation_factory, device, hoomd_snapshot): sim = simulation_factory(hoomd_snapshot) sim.operations.integrator = lj_integrator() @@ -91,43 +93,44 @@ def create_md_sim(simulation_factory, device, hoomd_snapshot): def test_write(simulation_factory, hoomd_snapshot, tmp_path): filename = tmp_path / "temporary_test_file.gsd" sim = simulation_factory(hoomd_snapshot) - hoomd.write.GSD.write(state=sim.state, mode='wb', filename=str(filename)) + hoomd.write.GSD.write(state=sim.state, mode="wb", filename=str(filename)) if hoomd_snapshot.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: assert len(traj) == 1 assert_equivalent_snapshots(traj[0], hoomd_snapshot) def test_write_gsd_trigger(create_md_sim, tmp_path): - filename = tmp_path / "temporary_test_file.gsd" sim = create_md_sim gsd_trigger = hoomd.trigger.Periodic(period=10, phase=5) - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=gsd_trigger, - mode='wb', - dynamic=['property', 'momentum']) + gsd_writer = hoomd.write.GSD( + filename=filename, + trigger=gsd_trigger, + mode="wb", + dynamic=["property", "momentum"], + ) sim.operations.writers.append(gsd_writer) sim.run(30) gsd_writer.flush() if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: assert [frame.configuration.step for frame in traj] == [5, 15, 25] -def test_write_gsd_mode(create_md_sim, hoomd_snapshot, tmp_path, - simulation_factory): - +def test_write_gsd_mode(create_md_sim, hoomd_snapshot, tmp_path, simulation_factory): filename = tmp_path / "temporary_test_file.gsd" sim = create_md_sim - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='wb', - dynamic=['property', 'momentum']) + gsd_writer = hoomd.write.GSD( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + mode="wb", + dynamic=["property", "momentum"], + ) sim.operations.writers.append(gsd_writer) # run 5 steps and create a gsd file for testing mode=ab @@ -136,10 +139,12 @@ def test_write_gsd_mode(create_md_sim, hoomd_snapshot, tmp_path, # test mode=ab sim.operations.writers.clear() - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='ab', - dynamic=['property', 'momentum']) + gsd_writer = hoomd.write.GSD( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + mode="ab", + dynamic=["property", "momentum"], + ) sim.operations.writers.append(gsd_writer) snap_list = [] @@ -149,7 +154,7 @@ def test_write_gsd_mode(create_md_sim, hoomd_snapshot, tmp_path, if snap.communicator.rank == 0: snap_list.append(snap) if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: for gsd_snap, hoomd_snap in zip(traj[5:], snap_list): 
assert_equivalent_snapshots(gsd_snap, hoomd_snap) @@ -157,11 +162,13 @@ def test_write_gsd_mode(create_md_sim, hoomd_snapshot, tmp_path, sim.operations.writers.clear() if sim.device.communicator.num_ranks == 1: - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='xb', - dynamic=['property', 'momentum']) - with pytest.raises(Exception, match='.*File exists.*'): + gsd_writer = hoomd.write.GSD( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + mode="xb", + dynamic=["property", "momentum"], + ) + with pytest.raises(Exception, match=".*File exists.*"): sim.operations.writers.append(gsd_writer) sim.run(1) @@ -170,10 +177,12 @@ def test_write_gsd_mode(create_md_sim, hoomd_snapshot, tmp_path, sim = simulation_factory(hoomd_snapshot) sim.operations.integrator = lj_integrator() - gsd_writer = hoomd.write.GSD(filename=filename_xb, - trigger=hoomd.trigger.Periodic(1), - mode='xb', - dynamic=['property', 'momentum']) + gsd_writer = hoomd.write.GSD( + filename=filename_xb, + trigger=hoomd.trigger.Periodic(1), + mode="xb", + dynamic=["property", "momentum"], + ) sim.operations.writers.append(gsd_writer) snapshot_list = [] @@ -186,64 +195,62 @@ def test_write_gsd_mode(create_md_sim, hoomd_snapshot, tmp_path, gsd_writer.flush() if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename_xb, mode='r') as traj: + with gsd.hoomd.open(name=filename_xb, mode="r") as traj: assert len(traj) == len(snapshot_list) for gsd_snap, hoomd_snap in zip(traj, snapshot_list): assert_equivalent_snapshots(gsd_snap, hoomd_snap) def test_write_gsd_filter(create_md_sim, tmp_path): - # test Null filter filename = tmp_path / "temporary_test_file.gsd" sim = create_md_sim - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - filter=hoomd.filter.Null(), - mode='wb', - dynamic=['property', 'momentum']) + gsd_writer = hoomd.write.GSD( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + filter=hoomd.filter.Null(), + mode="wb", + dynamic=["property", "momentum"], + ) sim.operations.writers.append(gsd_writer) sim.run(3) if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: for frame in traj: assert frame.particles.N == 0 def test_write_gsd_truncate(create_md_sim, tmp_path): - filename = tmp_path / "temporary_test_file.gsd" sim = create_md_sim - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - truncate=True, - mode='wb') + gsd_writer = hoomd.write.GSD( + filename=filename, trigger=hoomd.trigger.Periodic(1), truncate=True, mode="wb" + ) sim.operations.writers.append(gsd_writer) sim.run(2) snapshot = sim.state.get_snapshot() if snapshot.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: for gsd_snap in traj: assert_equivalent_snapshots(gsd_snap, snapshot) def test_write_gsd_dynamic(simulation_factory, create_md_sim, tmp_path): - filename = tmp_path / "temporary_test_file.gsd" sim = create_md_sim # test default dynamic=['property'] - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='wb') + gsd_writer = hoomd.write.GSD( + filename=filename, trigger=hoomd.trigger.Periodic(1), mode="wb" + ) sim.operations.writers.append(gsd_writer) velocity_list = [] position_list = [] @@ -258,27 +265,35 @@ def test_write_gsd_dynamic(simulation_factory, create_md_sim, tmp_path): gsd_writer.flush() 
if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: for step in range(5): - np.testing.assert_allclose(traj[step].particles.position, - position_list[step], - rtol=1e-07, - atol=1.5e-07) - np.testing.assert_allclose(traj[step].particles.orientation, - N_particles * [[1, 0, 0, 0]], - rtol=1e-07, - atol=1.5e-07) - np.testing.assert_allclose(traj[step].particles.velocity, - velocity_list[0], - rtol=1e-07, - atol=1.5e-07) + np.testing.assert_allclose( + traj[step].particles.position, + position_list[step], + rtol=1e-07, + atol=1.5e-07, + ) + np.testing.assert_allclose( + traj[step].particles.orientation, + N_particles * [[1, 0, 0, 0]], + rtol=1e-07, + atol=1.5e-07, + ) + np.testing.assert_allclose( + traj[step].particles.velocity, + velocity_list[0], + rtol=1e-07, + atol=1.5e-07, + ) # test dynamic=['property', 'momentum'] sim.operations.writers.clear() - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='wb', - dynamic=['property', 'momentum']) + gsd_writer = hoomd.write.GSD( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + mode="wb", + dynamic=["property", "momentum"], + ) sim.operations.writers.append(gsd_writer) velocity_list = [] @@ -293,20 +308,26 @@ def test_write_gsd_dynamic(simulation_factory, create_md_sim, tmp_path): gsd_writer.flush() if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: for step in range(5): - np.testing.assert_allclose(traj[step].particles.velocity, - velocity_list[step], - rtol=1e-07, - atol=1.5e-07) - np.testing.assert_allclose(traj[step].particles.angmom, - angmom_list[step], - rtol=1e-07, - atol=1.5e-07) - np.testing.assert_allclose(traj[step].particles.image, - N_particles * [[0, 0, 0]], - rtol=1e-07, - atol=1.5e-07) + np.testing.assert_allclose( + traj[step].particles.velocity, + velocity_list[step], + rtol=1e-07, + atol=1.5e-07, + ) + np.testing.assert_allclose( + traj[step].particles.angmom, + angmom_list[step], + rtol=1e-07, + atol=1.5e-07, + ) + np.testing.assert_allclose( + traj[step].particles.image, + N_particles * [[0, 0, 0]], + rtol=1e-07, + atol=1.5e-07, + ) # test dynamic=['property', 'attribute'] if snap.communicator.rank == 0: @@ -317,34 +338,44 @@ def test_write_gsd_dynamic(simulation_factory, create_md_sim, tmp_path): sim.state.set_snapshot(snap) sim.operations.writers.clear() - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='ab', - dynamic=['property', 'attribute']) + gsd_writer = hoomd.write.GSD( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + mode="ab", + dynamic=["property", "attribute"], + ) sim.operations.writers.append(gsd_writer) sim.run(5) gsd_writer.flush() if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: for step in range(5, 10): - np.testing.assert_allclose(traj[step].particles.mass, - N_particles * [0.8], - rtol=1e-07, - atol=1.5e-07) - np.testing.assert_allclose(traj[step].particles.charge, - N_particles * [0], - rtol=1e-07, - atol=1.5e-07) - np.testing.assert_allclose(traj[step].particles.body, - N_particles * [-1], - rtol=1e-07, - atol=1.5e-07) - np.testing.assert_allclose(traj[step].particles.moment_inertia, - N_particles * [[0, 0, 0]], - rtol=1e-07, - atol=1.5e-07) + np.testing.assert_allclose( + traj[step].particles.mass, + 
N_particles * [0.8], + rtol=1e-07, + atol=1.5e-07, + ) + np.testing.assert_allclose( + traj[step].particles.charge, + N_particles * [0], + rtol=1e-07, + atol=1.5e-07, + ) + np.testing.assert_allclose( + traj[step].particles.body, + N_particles * [-1], + rtol=1e-07, + atol=1.5e-07, + ) + np.testing.assert_allclose( + traj[step].particles.moment_inertia, + N_particles * [[0, 0, 0]], + rtol=1e-07, + atol=1.5e-07, + ) # test dynamic=['property', 'topology'] snap = sim.state.get_snapshot() @@ -355,22 +386,23 @@ def test_write_gsd_dynamic(simulation_factory, create_md_sim, tmp_path): sim.state.set_snapshot(snap) - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='ab', - dynamic=['property', 'topology']) + gsd_writer = hoomd.write.GSD( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + mode="ab", + dynamic=["property", "topology"], + ) sim.operations.writers.append(gsd_writer) sim.run(1) gsd_writer.flush() if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: assert traj[-1].bonds.N == 3 def test_write_gsd_log(create_md_sim, tmp_path): - filename = tmp_path / "temporary_test_file.gsd" sim = create_md_sim @@ -380,11 +412,13 @@ def test_write_gsd_log(create_md_sim, tmp_path): logger = hoomd.logging.Logger() logger.add(thermo) - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - filter=hoomd.filter.Null(), - mode='wb', - logger=logger) + gsd_writer = hoomd.write.GSD( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + filter=hoomd.filter.Null(), + mode="wb", + logger=logger, + ) sim.operations.writers.append(gsd_writer) kinetic_energy_list = [] @@ -395,58 +429,58 @@ def test_write_gsd_log(create_md_sim, tmp_path): gsd_writer.flush() if sim.device.communicator.rank == 0: - with gsd.hoomd.open(name=filename, mode='r') as traj: + with gsd.hoomd.open(name=filename, mode="r") as traj: for s in range(5): - e = traj[s].log[ - 'md/compute/ThermodynamicQuantities/kinetic_energy'] + e = traj[s].log["md/compute/ThermodynamicQuantities/kinetic_energy"] assert e == kinetic_energy_list[s] dynamic_fields = [ - 'particles/position', - 'particles/orientation', - 'particles/velocity', - 'particles/angmom', - 'particles/image', - 'particles/typeid', - 'particles/mass', - 'particles/charge', - 'particles/diameter', - 'particles/body', - 'particles/moment_inertia', + "particles/position", + "particles/orientation", + "particles/velocity", + "particles/angmom", + "particles/image", + "particles/typeid", + "particles/mass", + "particles/charge", + "particles/diameter", + "particles/body", + "particles/moment_inertia", ] -@pytest.mark.parametrize('dynamic_field', dynamic_fields) -def test_write_gsd_finegrained_dynamic(simulation_factory, hoomd_snapshot, - tmp_path, dynamic_field): - +@pytest.mark.parametrize("dynamic_field", dynamic_fields) +def test_write_gsd_finegrained_dynamic( + simulation_factory, hoomd_snapshot, tmp_path, dynamic_field +): filename = tmp_path / "test_finegrained_dynamic.gsd" # make all fields in snapshot non-default if hoomd_snapshot.communicator.rank == 0: hoomd_snapshot.particles.orientation[:] = np.tile( - [0.707, 0, 0, 0.707], (hoomd_snapshot.particles.N, 1)) + [0.707, 0, 0, 0.707], (hoomd_snapshot.particles.N, 1) + ) hoomd_snapshot.particles.image[:] = np.tile( - [0, 1, 2], (hoomd_snapshot.particles.N, 1)) - hoomd_snapshot.particles.types = ['A', 'B'] - hoomd_snapshot.particles.typeid[:] = np.tile(1, - 
hoomd_snapshot.particles.N) - hoomd_snapshot.particles.mass[:] = np.tile(2, - hoomd_snapshot.particles.N) - hoomd_snapshot.particles.diameter[:] = np.tile( - 4, hoomd_snapshot.particles.N) - hoomd_snapshot.particles.body[:] = np.tile(4, - hoomd_snapshot.particles.N) + [0, 1, 2], (hoomd_snapshot.particles.N, 1) + ) + hoomd_snapshot.particles.types = ["A", "B"] + hoomd_snapshot.particles.typeid[:] = np.tile(1, hoomd_snapshot.particles.N) + hoomd_snapshot.particles.mass[:] = np.tile(2, hoomd_snapshot.particles.N) + hoomd_snapshot.particles.diameter[:] = np.tile(4, hoomd_snapshot.particles.N) + hoomd_snapshot.particles.body[:] = np.tile(4, hoomd_snapshot.particles.N) hoomd_snapshot.particles.moment_inertia[:] = np.tile( - [1, 2, 3], [hoomd_snapshot.particles.N, 1]) + [1, 2, 3], [hoomd_snapshot.particles.N, 1] + ) sim = simulation_factory(hoomd_snapshot) - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='wb', - dynamic=[dynamic_field]) + gsd_writer = hoomd.write.GSD( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + mode="wb", + dynamic=[dynamic_field], + ) gsd_writer.write_diameter = True sim.operations.writers.append(gsd_writer) @@ -455,7 +489,7 @@ def test_write_gsd_finegrained_dynamic(simulation_factory, hoomd_snapshot, gsd_writer.flush() if sim.device.communicator.rank == 0: - with gsd.fl.open(name=filename, mode='r') as f: + with gsd.fl.open(name=filename, mode="r") as f: for field in dynamic_fields: if field == dynamic_field: assert f.chunk_exists(frame=1, name=field) @@ -463,39 +497,42 @@ def test_write_gsd_finegrained_dynamic(simulation_factory, hoomd_snapshot, assert not f.chunk_exists(frame=1, name=field) data = f.read_chunk(frame=1, name=dynamic_field) - np.testing.assert_allclose(data, - getattr(hoomd_snapshot.particles, - dynamic_field[10:]), - rtol=1e-07, - atol=1.5e-07) - - -@pytest.mark.parametrize('dynamic_field', dynamic_fields) -def test_write_gsd_finegrained_dynamic_alldefault(simulation_factory, - hoomd_snapshot, tmp_path, - dynamic_field): - + np.testing.assert_allclose( + data, + getattr(hoomd_snapshot.particles, dynamic_field[10:]), + rtol=1e-07, + atol=1.5e-07, + ) + + +@pytest.mark.parametrize("dynamic_field", dynamic_fields) +def test_write_gsd_finegrained_dynamic_alldefault( + simulation_factory, hoomd_snapshot, tmp_path, dynamic_field +): filename = tmp_path / "test_finegrained_dynamic.gsd" # make all fields in snapshot default if hoomd_snapshot.communicator.rank == 0: hoomd_snapshot.particles.position[:] = np.tile( - [0, 0, 0], [hoomd_snapshot.particles.N, 1]) + [0, 0, 0], [hoomd_snapshot.particles.N, 1] + ) hoomd_snapshot.particles.velocity[:] = np.tile( - [0, 0, 0], [hoomd_snapshot.particles.N, 1]) + [0, 0, 0], [hoomd_snapshot.particles.N, 1] + ) hoomd_snapshot.particles.angmom[:] = np.tile( - [0, 0, 0, 0], [hoomd_snapshot.particles.N, 1]) - hoomd_snapshot.particles.typeid[:] = np.tile(0, - hoomd_snapshot.particles.N) - hoomd_snapshot.particles.charge[:] = np.tile(0, - hoomd_snapshot.particles.N) + [0, 0, 0, 0], [hoomd_snapshot.particles.N, 1] + ) + hoomd_snapshot.particles.typeid[:] = np.tile(0, hoomd_snapshot.particles.N) + hoomd_snapshot.particles.charge[:] = np.tile(0, hoomd_snapshot.particles.N) sim = simulation_factory(hoomd_snapshot) - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='wb', - dynamic=[dynamic_field]) + gsd_writer = hoomd.write.GSD( + filename=filename, + trigger=hoomd.trigger.Periodic(1), + mode="wb", + dynamic=[dynamic_field], + ) 
gsd_writer.write_diameter = True sim.operations.writers.append(gsd_writer) @@ -504,7 +541,7 @@ def test_write_gsd_finegrained_dynamic_alldefault(simulation_factory, gsd_writer.flush() if sim.device.communicator.rank == 0: - with gsd.fl.open(name=filename, mode='r') as f: + with gsd.fl.open(name=filename, mode="r") as f: assert f.nframes == 2 for field in dynamic_fields: @@ -517,10 +554,9 @@ def test_write_gsd_no_dynamic(simulation_factory, hoomd_snapshot, tmp_path): sim = simulation_factory(hoomd_snapshot) - gsd_writer = hoomd.write.GSD(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='wb', - dynamic=[]) + gsd_writer = hoomd.write.GSD( + filename=filename, trigger=hoomd.trigger.Periodic(1), mode="wb", dynamic=[] + ) sim.operations.writers.append(gsd_writer) sim.run(2) @@ -528,16 +564,16 @@ def test_write_gsd_no_dynamic(simulation_factory, hoomd_snapshot, tmp_path): gsd_writer.flush() if sim.device.communicator.rank == 0: - with gsd.fl.open(name=filename, mode='r') as f: + with gsd.fl.open(name=filename, mode="r") as f: assert f.nframes == 2 - assert f.chunk_exists(frame=0, name='configuration/step') - assert f.chunk_exists(frame=0, name='configuration/box') - assert f.chunk_exists(frame=0, name='particles/N') + assert f.chunk_exists(frame=0, name="configuration/step") + assert f.chunk_exists(frame=0, name="configuration/box") + assert f.chunk_exists(frame=0, name="particles/N") # particles/positions is not default, so it is written to frame 0. - assert f.chunk_exists(frame=0, name='particles/position') + assert f.chunk_exists(frame=0, name="particles/position") - assert f.chunk_exists(frame=1, name='configuration/step') - assert not f.chunk_exists(frame=1, name='configuration/box') - assert not f.chunk_exists(frame=1, name='particles/N') - assert not f.chunk_exists(frame=1, name='particles/position') + assert f.chunk_exists(frame=1, name="configuration/step") + assert not f.chunk_exists(frame=1, name="configuration/box") + assert not f.chunk_exists(frame=1, name="particles/N") + assert not f.chunk_exists(frame=1, name="particles/position") diff --git a/hoomd/md/pytest/test_half_step_hook.py b/hoomd/md/pytest/test_half_step_hook.py index 3c08046d55..63cf84f0ed 100644 --- a/hoomd/md/pytest/test_half_step_hook.py +++ b/hoomd/md/pytest/test_half_step_hook.py @@ -11,7 +11,6 @@ class DistanceCV(md.HalfStepHook): - def __init__(self, sim): md.HalfStepHook.__init__(self) self.state = sim.state @@ -26,8 +25,7 @@ def update(self, _): @pytest.fixture def make_simulation(simulation_factory, two_particle_snapshot_factory): - - def sim_factory(particle_types=['A'], dimensions=3, d=1, L=20): + def sim_factory(particle_types=["A"], dimensions=3, d=1, L=20): snap = two_particle_snapshot_factory() if snap.communicator.rank == 0: snap.constraints.N = 1 @@ -48,7 +46,7 @@ def integrator_elements(): return { "methods": [md.methods.ConstantVolume(hoomd.filter.All())], "forces": [lj, gauss], - "constraints": [md.constrain.Distance()] + "constraints": [md.constrain.Distance()], } diff --git a/hoomd/md/pytest/test_hdf5.py b/hoomd/md/pytest/test_hdf5.py index 9c9b0782de..202509430a 100644 --- a/hoomd/md/pytest/test_hdf5.py +++ b/hoomd/md/pytest/test_hdf5.py @@ -13,9 +13,8 @@ def lj_integrator(): integrator = hoomd.md.Integrator(dt=0.005) - lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) - lj.params.default = {'sigma': 1, 'epsilon': 1} + lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(buffer=0.4), default_r_cut=2.5) + lj.params.default = {"sigma": 1, "epsilon": 1} 
integrator.forces.append(lj) langevin = hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1) integrator.methods.append(langevin) @@ -23,7 +22,7 @@ def lj_integrator(): return integrator -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def create_md_sim(simulation_factory, device, two_particle_snapshot_factory): sim = simulation_factory(two_particle_snapshot_factory()) sim.operations.integrator = lj_integrator() @@ -32,7 +31,7 @@ def create_md_sim(simulation_factory, device, two_particle_snapshot_factory): def test_invalid_attrs(tmp_path): - logger = hoomd.logging.Logger(categories=['scalar']) + logger = hoomd.logging.Logger(categories=["scalar"]) hdf5_writer = hoomd.write.HDF5Log(1, tmp_path / "eg.h5", logger) with pytest.raises(AttributeError): hdf5_writer.action @@ -46,20 +45,19 @@ def test_only_error_on_strings(tmp_path): logger = hoomd.logging.Logger(categories=["strings"]) with pytest.raises(ValueError): hoomd.write.HDF5Log(1, tmp_path / "eg.h5", logger) - logger = hoomd.logging.Logger(categories=['string']) + logger = hoomd.logging.Logger(categories=["string"]) with pytest.raises(ValueError): hoomd.write.HDF5Log(1, tmp_path / "eg.h5", logger) def test_pickling(simulation_factory, two_particle_snapshot_factory, tmp_path): - logger = hoomd.logging.Logger(categories=['scalar']) + logger = hoomd.logging.Logger(categories=["scalar"]) sim = simulation_factory(two_particle_snapshot_factory()) hdf5_writer = hoomd.write.HDF5Log(1, tmp_path / "eg.h5", logger) operation_pickling_check(hdf5_writer, sim) def test_write(create_md_sim, tmp_path): - filename = tmp_path / "temporary_test_file.h5" sim = create_md_sim @@ -69,10 +67,9 @@ def test_write(create_md_sim, tmp_path): logger = hoomd.logging.Logger(["scalar", "particle", "sequence"]) logger.add(thermo) - hdf5_writer = hoomd.write.HDF5Log(filename=filename, - trigger=hoomd.trigger.Periodic(1), - mode='w', - logger=logger) + hdf5_writer = hoomd.write.HDF5Log( + filename=filename, trigger=hoomd.trigger.Periodic(1), mode="w", logger=logger + ) sim.operations.writers.append(hdf5_writer) kinetic_energy_list = [] @@ -83,13 +80,13 @@ def test_write(create_md_sim, tmp_path): hdf5_writer.flush() if sim.device.communicator.rank == 0: - key = 'hoomd-data/md/compute/ThermodynamicQuantities/kinetic_energy' - with h5py.File(filename, mode='r') as fh: + key = "hoomd-data/md/compute/ThermodynamicQuantities/kinetic_energy" + with h5py.File(filename, mode="r") as fh: assert np.allclose(fh[key], kinetic_energy_list) def test_mode(tmp_path, create_md_sim): - logger = hoomd.logging.Logger(categories=['scalar']) + logger = hoomd.logging.Logger(categories=["scalar"]) sim = create_md_sim fn = tmp_path / "eg.py" logger[("foo", "bar")] = (lambda: 42, "scalar") @@ -125,7 +122,7 @@ def test_mode(tmp_path, create_md_sim): def test_type_handling(tmp_path, create_md_sim): - logger = hoomd.logging.Logger(categories=['scalar']) + logger = hoomd.logging.Logger(categories=["scalar"]) sim = create_md_sim fn = tmp_path / "types.h5" loggables = { @@ -134,7 +131,7 @@ def test_type_handling(tmp_path, create_md_sim): bool: lambda: True, np.uint32: lambda: np.uint32(42), np.float32: lambda: np.float32(3.1415), - np.bool_: lambda: np.bool_(True) + np.bool_: lambda: np.bool_(True), } for key, value in loggables.items(): logger[str(key)] = (value, "scalar") diff --git a/hoomd/md/pytest/test_improper.py b/hoomd/md/pytest/test_improper.py index 5dee47990d..24167caf63 100644 --- a/hoomd/md/pytest/test_improper.py +++ b/hoomd/md/pytest/test_improper.py @@ -3,8 +3,11 @@ 
import hoomd from hoomd.conftest import expected_loggable_params -from hoomd.conftest import (logging_check, pickling_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + logging_check, + pickling_check, + autotuned_kernel_parameter_check, +) import pytest import numpy @@ -20,18 +23,15 @@ class PeriodicImproperMath: def dchi_dr1(n1, n2, r1, r2, r3, r4): n1hat = n1 / numpy.sqrt(numpy.dot(n1, n1)) n2hat = n2 / numpy.sqrt(numpy.dot(n2, n2)) - numerator = numpy.dot(n1hat, n2hat) * \ - numpy.cross(numpy.dot(n1hat, n2hat) * n1hat - n2hat, r2 - r3) / \ - numpy.linalg.norm(n1) - - denominator = \ - numpy.sqrt( - 1 - numpy.dot( - numpy.cross(n1hat, n2hat), - numpy.cross(n1hat, n2hat) - ) - ) \ - * numpy.linalg.norm(numpy.cross(n1hat, n2hat)) + numerator = ( + numpy.dot(n1hat, n2hat) + * numpy.cross(numpy.dot(n1hat, n2hat) * n1hat - n2hat, r2 - r3) + / numpy.linalg.norm(n1) + ) + + denominator = numpy.sqrt( + 1 - numpy.dot(numpy.cross(n1hat, n2hat), numpy.cross(n1hat, n2hat)) + ) * numpy.linalg.norm(numpy.cross(n1hat, n2hat)) return numerator / denominator @@ -40,24 +40,16 @@ def dchi_dr2(n1, n2, r1, r2, r3, r4): n1hat = n1 / numpy.sqrt(numpy.dot(n1, n1)) n2hat = n2 / numpy.sqrt(numpy.dot(n2, n2)) - numerator = numpy.dot(n1hat, n2hat) \ - * ( - numpy.cross( - numpy.dot(n1hat, n2hat) * n2hat - n1hat, - r3 - r4 - ) - / numpy.linalg.norm(n2) - numpy.cross( - numpy.dot(n1hat, n2hat) * n1hat - n2hat, - r1 - r3 - ) / numpy.linalg.norm(n1) - ) + numerator = numpy.dot(n1hat, n2hat) * ( + numpy.cross(numpy.dot(n1hat, n2hat) * n2hat - n1hat, r3 - r4) + / numpy.linalg.norm(n2) + - numpy.cross(numpy.dot(n1hat, n2hat) * n1hat - n2hat, r1 - r3) + / numpy.linalg.norm(n1) + ) denominator = numpy.sqrt( - 1 - numpy.dot( - numpy.cross(n1hat, n2hat), - numpy.cross(n1hat, n2hat)) - ) \ - * numpy.linalg.norm(numpy.cross(n1hat, n2hat)) + 1 - numpy.dot(numpy.cross(n1hat, n2hat), numpy.cross(n1hat, n2hat)) + ) * numpy.linalg.norm(numpy.cross(n1hat, n2hat)) return numerator / denominator @@ -66,24 +58,16 @@ def dchi_dr3(n1, n2, r1, r2, r3, r4): n1hat = n1 / numpy.sqrt(numpy.dot(n1, n1)) n2hat = n2 / numpy.sqrt(numpy.dot(n2, n2)) - numerator = numpy.dot(n1hat, n2hat) \ - * ( - numpy.cross( - numpy.dot(n1hat, n2hat) * n1hat - n2hat, - r1 - r2 - ) - / numpy.linalg.norm(n1) - numpy.cross( - numpy.dot(n1hat, n2hat) * n2hat - n1hat, - r2 - r4 - ) / numpy.linalg.norm(n2) - ) + numerator = numpy.dot(n1hat, n2hat) * ( + numpy.cross(numpy.dot(n1hat, n2hat) * n1hat - n2hat, r1 - r2) + / numpy.linalg.norm(n1) + - numpy.cross(numpy.dot(n1hat, n2hat) * n2hat - n1hat, r2 - r4) + / numpy.linalg.norm(n2) + ) denominator = numpy.sqrt( - 1 - numpy.dot( - numpy.cross(n1hat, n2hat), - numpy.cross(n1hat, n2hat)) - ) \ - * numpy.linalg.norm(numpy.cross(n1hat, n2hat)) + 1 - numpy.dot(numpy.cross(n1hat, n2hat), numpy.cross(n1hat, n2hat)) + ) * numpy.linalg.norm(numpy.cross(n1hat, n2hat)) return numerator / denominator @@ -92,22 +76,15 @@ def dchi_dr4(n1, n2, r1, r2, r3, r4): n1hat = n1 / numpy.sqrt(numpy.dot(n1, n1)) n2hat = n2 / numpy.sqrt(numpy.dot(n2, n2)) - numerator = numpy.dot(n1hat, n2hat) * \ - numpy.cross( - numpy.dot(n1hat, n2hat) * n2hat - n1hat, - r2 - r3 - ) / numpy.linalg.norm(n2) - - denominator = \ - numpy.sqrt( - 1 - numpy.dot( - numpy.cross(n1hat, n2hat), - numpy.cross(n1hat, n2hat) - ) - ) \ - * numpy.linalg.norm( - numpy.cross(n1hat, n2hat) - ) + numerator = ( + numpy.dot(n1hat, n2hat) + * numpy.cross(numpy.dot(n1hat, n2hat) * n2hat - n1hat, r2 - r3) + / numpy.linalg.norm(n2) + ) + + denominator = 
numpy.sqrt( + 1 - numpy.dot(numpy.cross(n1hat, n2hat), numpy.cross(n1hat, n2hat)) + ) * numpy.linalg.norm(numpy.cross(n1hat, n2hat)) return numerator / denominator @@ -128,23 +105,23 @@ def du_dchi_harmonic(chi, k, chi0): @staticmethod def periodic_improper_energy(chi, k, n, d, chi0): - return (k * (1 + d * numpy.cos(n * chi - chi0))) + return k * (1 + d * numpy.cos(n * chi - chi0)) @staticmethod def get_force_vectors(chi, n1, n2, r1, r2, r3, r4, chi0, k, d, n): f_matrix = numpy.zeros((4, 3)) f_matrix[0, :] = PeriodicImproperMath.dchi_dr1( - n1, n2, r1, r2, r3, r4) * PeriodicImproperMath.du_dchi_periodic( - chi, chi0=chi0, k=k, d=d, n=n) + n1, n2, r1, r2, r3, r4 + ) * PeriodicImproperMath.du_dchi_periodic(chi, chi0=chi0, k=k, d=d, n=n) f_matrix[1, :] = PeriodicImproperMath.dchi_dr2( - n1, n2, r1, r2, r3, r4) * PeriodicImproperMath.du_dchi_periodic( - chi, chi0=chi0, k=k, d=d, n=n) + n1, n2, r1, r2, r3, r4 + ) * PeriodicImproperMath.du_dchi_periodic(chi, chi0=chi0, k=k, d=d, n=n) f_matrix[2, :] = PeriodicImproperMath.dchi_dr3( - n1, n2, r1, r2, r3, r4) * PeriodicImproperMath.du_dchi_periodic( - chi, chi0=chi0, k=k, d=d, n=n) + n1, n2, r1, r2, r3, r4 + ) * PeriodicImproperMath.du_dchi_periodic(chi, chi0=chi0, k=k, d=d, n=n) f_matrix[3, :] = PeriodicImproperMath.dchi_dr4( - n1, n2, r1, r2, r3, r4) * PeriodicImproperMath.du_dchi_periodic( - chi, chi0=chi0, k=k, d=d, n=n) + n1, n2, r1, r2, r3, r4 + ) * PeriodicImproperMath.du_dchi_periodic(chi, chi0=chi0, k=k, d=d, n=n) return f_matrix @@ -152,22 +129,23 @@ def get_force_vectors(chi, n1, n2, r1, r2, r3, r4, chi0, k, d, n): # This is parameterized to plan for any future expansion with additional # improper potentials. -pos = numpy.array([ - [0, 0, 0], - [1, 0, 0], - [1, 1, 0], - [0, 1, 0.1], -]) +pos = numpy.array( + [ + [0, 0, 0], + [1, 0, 0], + [1, 1, 0], + [0, 1, 0.1], + ] +) # plane 1 normal vector n1 = numpy.cross(pos[0, :] - pos[1, :], pos[1, :] - pos[2, :]) # plane 2 normal vector n2 = numpy.cross(pos[1, :] - pos[2, :], pos[2, :] - pos[3, :]) # improper angle -chi = PeriodicImproperMath.chi_from_pos(posa=pos[0, :], - posb=pos[1, :], - posc=pos[2, :], - posd=pos[3, :]) +chi = PeriodicImproperMath.chi_from_pos( + posa=pos[0, :], posb=pos[1, :], posc=pos[2, :], posd=pos[3, :] +) improper_test_parameters = [ ( @@ -181,63 +159,87 @@ def get_force_vectors(chi, n1, n2, r1, r2, r3, r4, chi0, k, d, n): ], 0.007549784469704433, ), - (hoomd.md.improper.Periodic, dict(k=3.0, d=-1, n=2, chi0=numpy.pi / 2), - PeriodicImproperMath.get_force_vectors(chi, - n1=n1, - n2=n2, - r1=pos[0, :], - r2=pos[1, :], - r3=pos[2, :], - r4=pos[3, :], - chi0=numpy.pi / 2, - k=3.0, - d=-1, - n=2) / 2, - PeriodicImproperMath.periodic_improper_energy( - chi, k=3.0, d=-1, n=2, chi0=numpy.pi / 2) / 2), - (hoomd.md.improper.Periodic, dict(k=10.0, d=1, n=1, chi0=numpy.pi / 4), - PeriodicImproperMath.get_force_vectors(chi, - n1=n1, - n2=n2, - r1=pos[0, :], - r2=pos[1, :], - r3=pos[2, :], - r4=pos[3, :], - chi0=numpy.pi / 4, - k=10.0, - d=1, - n=1) / 2, - PeriodicImproperMath.periodic_improper_energy( - chi, k=10.0, d=1, n=1, chi0=numpy.pi / 4) / 2), - (hoomd.md.improper.Periodic, dict(k=5.0, d=1, n=3, chi0=numpy.pi / 6), - PeriodicImproperMath.get_force_vectors(chi, - n1=n1, - n2=n2, - r1=pos[0, :], - r2=pos[1, :], - r3=pos[2, :], - r4=pos[3, :], - chi0=numpy.pi / 6, - k=5.0, - d=1, - n=3) / 2, - PeriodicImproperMath.periodic_improper_energy( - chi, k=5.0, d=1, n=3, chi0=numpy.pi / 6) / 2) + ( + hoomd.md.improper.Periodic, + dict(k=3.0, d=-1, n=2, chi0=numpy.pi / 2), + 
PeriodicImproperMath.get_force_vectors( + chi, + n1=n1, + n2=n2, + r1=pos[0, :], + r2=pos[1, :], + r3=pos[2, :], + r4=pos[3, :], + chi0=numpy.pi / 2, + k=3.0, + d=-1, + n=2, + ) + / 2, + PeriodicImproperMath.periodic_improper_energy( + chi, k=3.0, d=-1, n=2, chi0=numpy.pi / 2 + ) + / 2, + ), + ( + hoomd.md.improper.Periodic, + dict(k=10.0, d=1, n=1, chi0=numpy.pi / 4), + PeriodicImproperMath.get_force_vectors( + chi, + n1=n1, + n2=n2, + r1=pos[0, :], + r2=pos[1, :], + r3=pos[2, :], + r4=pos[3, :], + chi0=numpy.pi / 4, + k=10.0, + d=1, + n=1, + ) + / 2, + PeriodicImproperMath.periodic_improper_energy( + chi, k=10.0, d=1, n=1, chi0=numpy.pi / 4 + ) + / 2, + ), + ( + hoomd.md.improper.Periodic, + dict(k=5.0, d=1, n=3, chi0=numpy.pi / 6), + PeriodicImproperMath.get_force_vectors( + chi, + n1=n1, + n2=n2, + r1=pos[0, :], + r2=pos[1, :], + r3=pos[2, :], + r4=pos[3, :], + chi0=numpy.pi / 6, + k=5.0, + d=1, + n=3, + ) + / 2, + PeriodicImproperMath.periodic_improper_energy( + chi, k=5.0, d=1, n=3, chi0=numpy.pi / 6 + ) + / 2, + ), ] -@pytest.mark.parametrize("improper_cls, params, force, energy", - improper_test_parameters) +@pytest.mark.parametrize( + "improper_cls, params, force, energy", improper_test_parameters +) def test_before_attaching(improper_cls, params, force, energy): potential = improper_cls() - potential.params['A-A-A-A'] = params + potential.params["A-A-A-A"] = params for key in params: - assert potential.params['A-A-A-A'][key] == pytest.approx(params[key]) + assert potential.params["A-A-A-A"][key] == pytest.approx(params[key]) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def snapshot_factory(device): - def make_snapshot(): snapshot = hoomd.Snapshot(device.communicator) N = 4 @@ -246,7 +248,7 @@ def make_snapshot(): box = [L, L, L, 0, 0, 0] snapshot.configuration.box = box snapshot.particles.N = N - snapshot.particles.types = ['A'] + snapshot.particles.types = ["A"] # shift particle positions slightly in z so MPI tests pass snapshot.particles.position[:] = [ [0, 0, 0], @@ -256,7 +258,7 @@ def make_snapshot(): ] snapshot.impropers.N = 1 - snapshot.impropers.types = ['A-A-A-A'] + snapshot.impropers.types = ["A-A-A-A"] snapshot.impropers.typeid[0] = 0 snapshot.impropers.group[0] = (0, 1, 2, 3) @@ -265,15 +267,17 @@ def make_snapshot(): return make_snapshot -@pytest.mark.parametrize("improper_cls, params, force, energy", - improper_test_parameters) -def test_after_attaching(snapshot_factory, simulation_factory, improper_cls, - params, force, energy): +@pytest.mark.parametrize( + "improper_cls, params, force, energy", improper_test_parameters +) +def test_after_attaching( + snapshot_factory, simulation_factory, improper_cls, params, force, energy +): snapshot = snapshot_factory() sim = simulation_factory(snapshot) potential = improper_cls() - potential.params['A-A-A-A'] = params + potential.params["A-A-A-A"] = params integrator = hoomd.md.Integrator(dt=0.005) integrator.forces.append(potential) @@ -284,18 +288,20 @@ def test_after_attaching(snapshot_factory, simulation_factory, improper_cls, sim.run(0) for key in params: - assert potential.params['A-A-A-A'][key] == pytest.approx(params[key]) + assert potential.params["A-A-A-A"][key] == pytest.approx(params[key]) -@pytest.mark.parametrize("improper_cls, params, force, energy", - improper_test_parameters) -def test_forces_and_energies(snapshot_factory, simulation_factory, improper_cls, - params, force, energy): +@pytest.mark.parametrize( + "improper_cls, params, force, energy", improper_test_parameters +) +def 
test_forces_and_energies( + snapshot_factory, simulation_factory, improper_cls, params, force, energy +): snapshot = snapshot_factory() sim = simulation_factory(snapshot) potential = improper_cls() - potential.params['A-A-A-A'] = params + potential.params["A-A-A-A"] = params integrator = hoomd.md.Integrator(dt=0.005) integrator.forces.append(potential) @@ -314,15 +320,17 @@ def test_forces_and_energies(snapshot_factory, simulation_factory, improper_cls, numpy.testing.assert_allclose(sim_forces, force, rtol=1e-4, atol=1e-4) -@pytest.mark.parametrize("improper_cls, params, force, energy", - improper_test_parameters) -def test_kernel_parameters(snapshot_factory, simulation_factory, improper_cls, - params, force, energy): +@pytest.mark.parametrize( + "improper_cls, params, force, energy", improper_test_parameters +) +def test_kernel_parameters( + snapshot_factory, simulation_factory, improper_cls, params, force, energy +): snapshot = snapshot_factory() sim = simulation_factory(snapshot) potential = improper_cls() - potential.params['A-A-A-A'] = params + potential.params["A-A-A-A"] = params integrator = hoomd.md.Integrator(dt=0.005) integrator.forces.append(potential) @@ -333,30 +341,34 @@ def test_kernel_parameters(snapshot_factory, simulation_factory, improper_cls, sim.run(0) - autotuned_kernel_parameter_check(instance=potential, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=potential, activate=lambda: sim.run(1)) # Test Logging @pytest.mark.parametrize( - 'cls, expected_namespace, expected_loggables', - zip((hoomd.md.improper.Improper, hoomd.md.improper.Harmonic), - itertools.repeat(('md', 'improper')), - itertools.repeat(expected_loggable_params))) + "cls, expected_namespace, expected_loggables", + zip( + (hoomd.md.improper.Improper, hoomd.md.improper.Harmonic), + itertools.repeat(("md", "improper")), + itertools.repeat(expected_loggable_params), + ), +) def test_logging(cls, expected_namespace, expected_loggables): logging_check(cls, expected_namespace, expected_loggables) # Test pickling -@pytest.mark.parametrize("improper_cls, params, force, energy", - improper_test_parameters) -def test_pickling(simulation_factory, snapshot_factory, improper_cls, params, - force, energy): +@pytest.mark.parametrize( + "improper_cls, params, force, energy", improper_test_parameters +) +def test_pickling( + simulation_factory, snapshot_factory, improper_cls, params, force, energy +): snapshot = snapshot_factory() sim = simulation_factory(snapshot) potential = improper_cls() - potential.params['A-A-A-A'] = params + potential.params["A-A-A-A"] = params pickling_check(potential) diff --git a/hoomd/md/pytest/test_integrate.py b/hoomd/md/pytest/test_integrate.py index 18b133ab89..f5c6ecfe0c 100644 --- a/hoomd/md/pytest/test_integrate.py +++ b/hoomd/md/pytest/test_integrate.py @@ -11,8 +11,7 @@ @pytest.fixture def make_simulation(simulation_factory, two_particle_snapshot_factory): - - def sim_factory(particle_types=['A'], dimensions=3, d=1, L=20): + def sim_factory(particle_types=["A"], dimensions=3, d=1, L=20): snap = two_particle_snapshot_factory() if snap.communicator.rank == 0: snap.constraints.N = 1 @@ -33,7 +32,7 @@ def integrator_elements(): return { "methods": [md.methods.ConstantVolume(hoomd.filter.All())], "forces": [lj, gauss], - "constraints": [md.constrain.Distance()] + "constraints": [md.constrain.Distance()], } @@ -61,7 +60,7 @@ def test_detaching(make_simulation, integrator_elements): def test_validate_groups(simulation_factory, two_particle_snapshot_factory): - 
snapshot = two_particle_snapshot_factory(particle_types=['R', 'A']) + snapshot = two_particle_snapshot_factory(particle_types=["R", "A"]) if snapshot.communicator.rank == 0: snapshot.particles.body[:] = [0, 1] CUBE_VERTS = [ @@ -76,16 +75,16 @@ def test_validate_groups(simulation_factory, two_particle_snapshot_factory): ] rigid = hoomd.md.constrain.Rigid() - rigid.body['R'] = { - "constituent_types": ['A'] * 8, + rigid.body["R"] = { + "constituent_types": ["A"] * 8, "positions": CUBE_VERTS, "orientations": [(1.0, 0.0, 0.0, 0.0)] * 8, } nve1 = hoomd.md.methods.ConstantVolume(filter=hoomd.filter.All()) - integrator = hoomd.md.Integrator(dt=0, - methods=[nve1], - integrate_rotational_dof=True) + integrator = hoomd.md.Integrator( + dt=0, methods=[nve1], integrate_rotational_dof=True + ) integrator.rigid = rigid sim = simulation_factory(snapshot) sim.operations.integrator = integrator @@ -128,15 +127,9 @@ def test_linear_momentum(simulation_factory, lattice_snapshot_factory): snapshot = lattice_snapshot_factory() if snapshot.communicator.rank == 0: snapshot.particles.mass[:] = numpy.linspace(1, 5, snapshot.particles.N) - snapshot.particles.velocity[:, - 0] = numpy.linspace(-5, 5, - snapshot.particles.N) - snapshot.particles.velocity[:, - 1] = numpy.linspace(1, 10, - snapshot.particles.N) - snapshot.particles.velocity[:, - 2] = numpy.linspace(5, 20, - snapshot.particles.N) + snapshot.particles.velocity[:, 0] = numpy.linspace(-5, 5, snapshot.particles.N) + snapshot.particles.velocity[:, 1] = numpy.linspace(1, 10, snapshot.particles.N) + snapshot.particles.velocity[:, 2] = numpy.linspace(5, 20, snapshot.particles.N) sim = simulation_factory(snapshot) integrator = hoomd.md.Integrator(dt=0.005) @@ -146,9 +139,10 @@ def test_linear_momentum(simulation_factory, lattice_snapshot_factory): linear_momentum = integrator.linear_momentum if snapshot.communicator.rank == 0: - reference = numpy.sum(snapshot.particles.mass[numpy.newaxis, :].T - * snapshot.particles.velocity, - axis=0) + reference = numpy.sum( + snapshot.particles.mass[numpy.newaxis, :].T * snapshot.particles.velocity, + axis=0, + ) numpy.testing.assert_allclose(linear_momentum, reference) @@ -159,8 +153,8 @@ def test_pickling(make_simulation, integrator_elements): def test_logging(): - hoomd.conftest.logging_check(hoomd.md.Integrator, ("md",), { - "linear_momentum": { - "category": hoomd.logging.LoggerCategories.sequence - } - }) + hoomd.conftest.logging_check( + hoomd.md.Integrator, + ("md",), + {"linear_momentum": {"category": hoomd.logging.LoggerCategories.sequence}}, + ) diff --git a/hoomd/md/pytest/test_kernel_parameters.py b/hoomd/md/pytest/test_kernel_parameters.py index 47cd262815..14c0d55ffc 100644 --- a/hoomd/md/pytest/test_kernel_parameters.py +++ b/hoomd/md/pytest/test_kernel_parameters.py @@ -7,19 +7,17 @@ @pytest.mark.gpu @pytest.mark.validate -def test_combined_kernel_parameters(simulation_factory, - lattice_snapshot_factory): - - snap = lattice_snapshot_factory(particle_types=['A'], n=7, a=1.7, r=0.01) +def test_combined_kernel_parameters(simulation_factory, lattice_snapshot_factory): + snap = lattice_snapshot_factory(particle_types=["A"], n=7, a=1.7, r=0.01) sim = simulation_factory(snap) nlist = hoomd.md.nlist.Cell(buffer=0.4) lj = hoomd.md.pair.LJ(nlist=nlist, default_r_cut=2.5) - lj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) + lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) langevin = hoomd.md.methods.Langevin(kT=1.5, filter=hoomd.filter.All()) - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - 
methods=[langevin], - forces=[lj]) + sim.operations.integrator = hoomd.md.Integrator( + dt=0.005, methods=[langevin], forces=[lj] + ) sim.run(0) while not sim.operations.is_tuning_complete: diff --git a/hoomd/md/pytest/test_manifolds.py b/hoomd/md/pytest/test_manifolds.py index 8340622bd9..4d5b98ce78 100644 --- a/hoomd/md/pytest/test_manifolds.py +++ b/hoomd/md/pytest/test_manifolds.py @@ -8,82 +8,125 @@ from collections import namedtuple paramtuple = namedtuple( - 'paramtuple', ['setup_params', 'extra_params', 'changed_params', 'surface']) + "paramtuple", ["setup_params", "extra_params", "changed_params", "surface"] +) def _manifold_base_params(): manifold_base_params_list = [] # Start with valid parameters to get the keys and placeholder values - cylinder_setup_params = {'r': 5} - cylinder_extra_params = {'P': (0, 0, 0)} - cylinder_changed_params = {'r': 4, 'P': (1.0, 0, 0)} - - manifold_base_params_list.extend([ - paramtuple(cylinder_setup_params, cylinder_extra_params, - cylinder_changed_params, hoomd.md.manifold.Cylinder) - ]) - - diamond_setup_params = {'N': (1, 1, 1)} - diamond_extra_params = {'epsilon': 0} - diamond_changed_params = {'N': (1, 2, 2), 'epsilon': 0.1} - - manifold_base_params_list.extend([ - paramtuple(diamond_setup_params, diamond_extra_params, - diamond_changed_params, hoomd.md.manifold.Diamond) - ]) - - ellipsoid_setup_params = {'a': 3.3, 'b': 5, 'c': 4.1} - ellipsoid_extra_params = {'P': (0, 0, 0)} - ellipsoid_changed_params = {'a': 4, 'b': 2, 'c': 5.2, 'P': (1.0, 0, 0)} - - manifold_base_params_list.extend([ - paramtuple(ellipsoid_setup_params, ellipsoid_extra_params, - ellipsoid_changed_params, hoomd.md.manifold.Ellipsoid) - ]) - - gyroid_setup_params = {'N': (1, 2, 1)} - gyroid_extra_params = {'epsilon': 0} - gyroid_changed_params = {'N': (2, 1, 1), 'epsilon': 0.1} - - manifold_base_params_list.extend([ - paramtuple(gyroid_setup_params, gyroid_extra_params, - gyroid_changed_params, hoomd.md.manifold.Gyroid) - ]) - - primitive_setup_params = {'N': (1, 1, 1)} - primitive_extra_params = {'epsilon': 0} - primitive_changed_params = {'N': (2, 2, 2), 'epsilon': -0.1} - - manifold_base_params_list.extend([ - paramtuple(primitive_setup_params, primitive_extra_params, - primitive_changed_params, hoomd.md.manifold.Primitive) - ]) - - sphere_setup_params = {'r': 5} - sphere_extra_params = {'P': (0, 0, 0)} - sphere_changed_params = {'r': 4, 'P': (1.0, 0, 0)} - - manifold_base_params_list.extend([ - paramtuple(sphere_setup_params, sphere_extra_params, - sphere_changed_params, hoomd.md.manifold.Sphere) - ]) + cylinder_setup_params = {"r": 5} + cylinder_extra_params = {"P": (0, 0, 0)} + cylinder_changed_params = {"r": 4, "P": (1.0, 0, 0)} + + manifold_base_params_list.extend( + [ + paramtuple( + cylinder_setup_params, + cylinder_extra_params, + cylinder_changed_params, + hoomd.md.manifold.Cylinder, + ) + ] + ) + + diamond_setup_params = {"N": (1, 1, 1)} + diamond_extra_params = {"epsilon": 0} + diamond_changed_params = {"N": (1, 2, 2), "epsilon": 0.1} + + manifold_base_params_list.extend( + [ + paramtuple( + diamond_setup_params, + diamond_extra_params, + diamond_changed_params, + hoomd.md.manifold.Diamond, + ) + ] + ) + + ellipsoid_setup_params = {"a": 3.3, "b": 5, "c": 4.1} + ellipsoid_extra_params = {"P": (0, 0, 0)} + ellipsoid_changed_params = {"a": 4, "b": 2, "c": 5.2, "P": (1.0, 0, 0)} + + manifold_base_params_list.extend( + [ + paramtuple( + ellipsoid_setup_params, + ellipsoid_extra_params, + ellipsoid_changed_params, + hoomd.md.manifold.Ellipsoid, + ) + ] + ) + + 
gyroid_setup_params = {"N": (1, 2, 1)} + gyroid_extra_params = {"epsilon": 0} + gyroid_changed_params = {"N": (2, 1, 1), "epsilon": 0.1} + + manifold_base_params_list.extend( + [ + paramtuple( + gyroid_setup_params, + gyroid_extra_params, + gyroid_changed_params, + hoomd.md.manifold.Gyroid, + ) + ] + ) + + primitive_setup_params = {"N": (1, 1, 1)} + primitive_extra_params = {"epsilon": 0} + primitive_changed_params = {"N": (2, 2, 2), "epsilon": -0.1} + + manifold_base_params_list.extend( + [ + paramtuple( + primitive_setup_params, + primitive_extra_params, + primitive_changed_params, + hoomd.md.manifold.Primitive, + ) + ] + ) + + sphere_setup_params = {"r": 5} + sphere_extra_params = {"P": (0, 0, 0)} + sphere_changed_params = {"r": 4, "P": (1.0, 0, 0)} + + manifold_base_params_list.extend( + [ + paramtuple( + sphere_setup_params, + sphere_extra_params, + sphere_changed_params, + hoomd.md.manifold.Sphere, + ) + ] + ) xyplane_setup_params = {} - xyplane_extra_params = {'shift': 0} - xyplane_changed_params = {'shift': 0.5} - - manifold_base_params_list.extend([ - paramtuple(xyplane_setup_params, xyplane_extra_params, - xyplane_changed_params, hoomd.md.manifold.Plane) - ]) + xyplane_extra_params = {"shift": 0} + xyplane_changed_params = {"shift": 0.5} + + manifold_base_params_list.extend( + [ + paramtuple( + xyplane_setup_params, + xyplane_extra_params, + xyplane_changed_params, + hoomd.md.manifold.Plane, + ) + ] + ) return manifold_base_params_list -@pytest.fixture(scope="function", - params=_manifold_base_params(), - ids=(lambda x: x[3].__name__)) +@pytest.fixture( + scope="function", params=_manifold_base_params(), ids=(lambda x: x[3].__name__) +) def manifold_base_params(request): return deepcopy(request.param) @@ -109,13 +152,12 @@ def test_attributes(manifold_base_params): check_instance_attrs(surface, manifold_base_params.extra_params) -def test_attributes_attached(simulation_factory, two_particle_snapshot_factory, - manifold_base_params): - +def test_attributes_attached( + simulation_factory, two_particle_snapshot_factory, manifold_base_params +): all_ = hoomd.filter.All() surface = manifold_base_params.surface(**manifold_base_params.setup_params) - method = hoomd.md.methods.rattle.NVE(filter=all_, - manifold_constraint=surface) + method = hoomd.md.methods.rattle.NVE(filter=all_, manifold_constraint=surface) sim = simulation_factory(two_particle_snapshot_factory()) sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[method]) @@ -130,12 +172,14 @@ def test_attributes_attached(simulation_factory, two_particle_snapshot_factory, check_instance_attrs(surface, manifold_base_params.extra_params) -def test_pickling(manifold_base_params, simulation_factory, - two_particle_snapshot_factory): +def test_pickling( + manifold_base_params, simulation_factory, two_particle_snapshot_factory +): sim = simulation_factory(two_particle_snapshot_factory()) manifold = manifold_base_params.surface(**manifold_base_params.setup_params) - nve = hoomd.md.methods.rattle.NVE(filter=hoomd.filter.All(), - manifold_constraint=manifold) + nve = hoomd.md.methods.rattle.NVE( + filter=hoomd.filter.All(), manifold_constraint=manifold + ) integrator = hoomd.md.Integrator(0.005, methods=[nve]) sim.operations += integrator pickling_check(manifold) diff --git a/hoomd/md/pytest/test_meshpotential.py b/hoomd/md/pytest/test_meshpotential.py index 878e6e0f6e..f5b6614133 100644 --- a/hoomd/md/pytest/test_meshpotential.py +++ b/hoomd/md/pytest/test_meshpotential.py @@ -5,146 +5,223 @@ import pytest import numpy as np 
-_harmonic_args = {'k': [30.0, 25.0, 20.0], 'r0': [1.6, 1.7, 1.8]} -_harmonic_arg_list = [(hoomd.md.mesh.bond.Harmonic, - dict(zip(_harmonic_args, val))) - for val in zip(*_harmonic_args.values())] +_harmonic_args = {"k": [30.0, 25.0, 20.0], "r0": [1.6, 1.7, 1.8]} +_harmonic_arg_list = [ + (hoomd.md.mesh.bond.Harmonic, dict(zip(_harmonic_args, val))) + for val in zip(*_harmonic_args.values()) +] _FENE_args = { - 'k': [30.0, 25.0, 20.0], - 'r0': [1.6, 1.7, 1.8], - 'epsilon': [0.9, 1.0, 1.1], - 'sigma': [1.1, 1.0, 0.9], - 'delta': [0, 0, 0] + "k": [30.0, 25.0, 20.0], + "r0": [1.6, 1.7, 1.8], + "epsilon": [0.9, 1.0, 1.1], + "sigma": [1.1, 1.0, 0.9], + "delta": [0, 0, 0], } -_FENE_arg_list = [(hoomd.md.mesh.bond.FENEWCA, dict(zip(_FENE_args, val))) - for val in zip(*_FENE_args.values())] +_FENE_arg_list = [ + (hoomd.md.mesh.bond.FENEWCA, dict(zip(_FENE_args, val))) + for val in zip(*_FENE_args.values()) +] _Tether_args = { - 'k_b': [5.0, 6.0, 7.0], - 'l_min': [0.7, 0.8, 0.9], - 'l_c1': [0.9, 1.05, 1.1], - 'l_c0': [1.1, 1.1, 1.3], - 'l_max': [1.3, 1.3, 1.5] + "k_b": [5.0, 6.0, 7.0], + "l_min": [0.7, 0.8, 0.9], + "l_c1": [0.9, 1.05, 1.1], + "l_c0": [1.1, 1.1, 1.3], + "l_max": [1.3, 1.3, 1.5], } -_Tether_arg_list = [(hoomd.md.mesh.bond.Tether, dict(zip(_Tether_args, val))) - for val in zip(*_Tether_args.values())] +_Tether_arg_list = [ + (hoomd.md.mesh.bond.Tether, dict(zip(_Tether_args, val))) + for val in zip(*_Tether_args.values()) +] _BendingRigidity_args = { - 'k': [2.0, 10.0, 300.0], + "k": [2.0, 10.0, 300.0], } -_BendingRigidity_arg_list = [(hoomd.md.mesh.bending.BendingRigidity, - dict(zip(_BendingRigidity_args, val))) - for val in zip(*_BendingRigidity_args.values())] +_BendingRigidity_arg_list = [ + (hoomd.md.mesh.bending.BendingRigidity, dict(zip(_BendingRigidity_args, val))) + for val in zip(*_BendingRigidity_args.values()) +] _Helfrich_args = { - 'k': [1.0, 20.0, 100.0], + "k": [1.0, 20.0, 100.0], } -_Helfrich_arg_list = [(hoomd.md.mesh.bending.Helfrich, - dict(zip(_Helfrich_args, val))) - for val in zip(*_Helfrich_args.values())] +_Helfrich_arg_list = [ + (hoomd.md.mesh.bending.Helfrich, dict(zip(_Helfrich_args, val))) + for val in zip(*_Helfrich_args.values()) +] _AreaConservation_args = { - 'k': [1.0, 20.0, 100.0], - 'A0': [6 * np.sqrt(3), 5 * np.sqrt(3), 7 * np.sqrt(3)] + "k": [1.0, 20.0, 100.0], + "A0": [6 * np.sqrt(3), 5 * np.sqrt(3), 7 * np.sqrt(3)], } -_AreaConservation_arg_list = [(hoomd.md.mesh.conservation.Area, - dict(zip(_AreaConservation_args, val))) - for val in zip(*_AreaConservation_args.values())] +_AreaConservation_arg_list = [ + (hoomd.md.mesh.conservation.Area, dict(zip(_AreaConservation_args, val))) + for val in zip(*_AreaConservation_args.values()) +] _TriangleAreaConservation_args = { - 'k': [1.0, 20.0, 100.0], - 'A0': [6 * np.sqrt(3) / 4, 5 * np.sqrt(3) / 4, 7 * np.sqrt(3) / 4] + "k": [1.0, 20.0, 100.0], + "A0": [6 * np.sqrt(3) / 4, 5 * np.sqrt(3) / 4, 7 * np.sqrt(3) / 4], } _TriangleAreaConservation_arg_list = [ - (hoomd.md.mesh.conservation.TriangleArea, - dict(zip(_TriangleAreaConservation_args, val))) + ( + hoomd.md.mesh.conservation.TriangleArea, + dict(zip(_TriangleAreaConservation_args, val)), + ) for val in zip(*_TriangleAreaConservation_args.values()) ] _Helfrich_args = { - 'k': [1.0, 20.0, 100.0], + "k": [1.0, 20.0, 100.0], } -_Helfrich_arg_list = [(hoomd.md.mesh.bending.Helfrich, - dict(zip(_Helfrich_args, val))) - for val in zip(*_Helfrich_args.values())] +_Helfrich_arg_list = [ + (hoomd.md.mesh.bending.Helfrich, dict(zip(_Helfrich_args, val))) + 
for val in zip(*_Helfrich_args.values()) +] -_Volume_args = {'k': [20.0, 50.0, 100.0], 'V0': [0.107227, 1, 0.01]} -_Volume_arg_list = [(hoomd.md.mesh.conservation.Volume, - dict(zip(_Volume_args, val))) - for val in zip(*_Volume_args.values())] +_Volume_args = {"k": [20.0, 50.0, 100.0], "V0": [0.107227, 1, 0.01]} +_Volume_arg_list = [ + (hoomd.md.mesh.conservation.Volume, dict(zip(_Volume_args, val))) + for val in zip(*_Volume_args.values()) +] def get_mesh_potential_and_args(): - return (_harmonic_arg_list + _FENE_arg_list + _Tether_arg_list - + _AreaConservation_arg_list + _TriangleAreaConservation_arg_list - + _BendingRigidity_arg_list + _Helfrich_arg_list + _Volume_arg_list) + return ( + _harmonic_arg_list + + _FENE_arg_list + + _Tether_arg_list + + _AreaConservation_arg_list + + _TriangleAreaConservation_arg_list + + _BendingRigidity_arg_list + + _Helfrich_arg_list + + _Volume_arg_list + ) def get_mesh_potential_args_forces_and_energies(): - harmonic_forces = [[[37.86, 0., -26.771063], [-37.86, 0., -26.771063], - [0., 37.86, 26.771063], [0., -37.86, 26.771063]], - [[36.55, 0., -25.844753], [-36.55, 0., -25.844753], - [0., 36.55, 25.844753], [0., -36.55, 25.844753]], - [[33.24, 0., -23.504229], [-33.24, 0., -23.504229], - [0., 33.24, 23.504229], [0., -33.24, 23.504229]]] + harmonic_forces = [ + [ + [37.86, 0.0, -26.771063], + [-37.86, 0.0, -26.771063], + [0.0, 37.86, 26.771063], + [0.0, -37.86, 26.771063], + ], + [ + [36.55, 0.0, -25.844753], + [-36.55, 0.0, -25.844753], + [0.0, 36.55, 25.844753], + [0.0, -36.55, 25.844753], + ], + [ + [33.24, 0.0, -23.504229], + [-33.24, 0.0, -23.504229], + [0.0, 33.24, 23.504229], + [0.0, -33.24, 23.504229], + ], + ] harmonic_energies = [35.83449, 40.077075, 41.43366] - FENE_forces = [[[221.113071, 0., - -156.350552], [-221.113071, 0., -156.350552], - [0., 221.113071, 156.350552], [0., -221.113071, - 156.350552]], - [[12.959825, 0., -9.16398], [-12.959825, 0., -9.16398], - [0., 12.959825, 9.16398], [0., -12.959825, 9.16398]], - [[-44.644347, 0., 31.568321], [44.644347, 0., 31.568321], - [0., -44.644347, -31.568321], [0., 44.644347, -31.568321]]] + FENE_forces = [ + [ + [221.113071, 0.0, -156.350552], + [-221.113071, 0.0, -156.350552], + [0.0, 221.113071, 156.350552], + [0.0, -221.113071, 156.350552], + ], + [ + [12.959825, 0.0, -9.16398], + [-12.959825, 0.0, -9.16398], + [0.0, 12.959825, 9.16398], + [0.0, -12.959825, 9.16398], + ], + [ + [-44.644347, 0.0, 31.568321], + [44.644347, 0.0, 31.568321], + [0.0, -44.644347, -31.568321], + [0.0, 44.644347, -31.568321], + ], + ] FENE_energies = [163.374213, 97.189301, 67.058202] - Tether_forces = [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], - [[0.048888, 0., -0.034569], [-0.048888, 0., -0.034569], - [0., 0.048888, 0.034569], [0., -0.048888, 0.034569]], - [[7.144518, 0., -5.051937], [-7.144518, 0., -5.051937], - [0., 7.144518, 5.051937], [0., -7.144518, 5.051937]]] + Tether_forces = [ + [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], + [ + [0.048888, 0.0, -0.034569], + [-0.048888, 0.0, -0.034569], + [0.0, 0.048888, 0.034569], + [0.0, -0.048888, 0.034569], + ], + [ + [7.144518, 0.0, -5.051937], + [-7.144518, 0.0, -5.051937], + [0.0, 7.144518, 5.051937], + [0.0, -7.144518, 5.051937], + ], + ] Tether_energies = [0, 0.000926, 0.294561] - BendingRigidity_forces = [[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], - [0., 0., 0.]], - [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], - [0., 0., 0.]], - [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.], - [0., 0., 0.]]] + BendingRigidity_forces = [ + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 
0.0, 0.0], [0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], + ] BendingRigidity_energies = [8, 40, 1200] - Helfrich_forces = [[[-12.710842, 0., 8.987922], [12.710842, 0., 8.987922], - [0., -12.710842, -8.987922], [0., 12.710842, - -8.987922]], - [[-254.216837, 0., 179.758449], - [254.216837, 0., 179.758449], - [0., -254.216837, -179.758449], - [0., 254.216837, -179.758449]], - [[-1271.084184, 0., 898.792246], - [1271.084184, 0., 898.792246], - [0., -1271.084184, -898.792246], - [0., 1271.084184, -898.792246]]] + Helfrich_forces = [ + [ + [-12.710842, 0.0, 8.987922], + [12.710842, 0.0, 8.987922], + [0.0, -12.710842, -8.987922], + [0.0, 12.710842, -8.987922], + ], + [ + [-254.216837, 0.0, 179.758449], + [254.216837, 0.0, 179.758449], + [0.0, -254.216837, -179.758449], + [0.0, 254.216837, -179.758449], + ], + [ + [-1271.084184, 0.0, 898.792246], + [1271.084184, 0.0, 898.792246], + [0.0, -1271.084184, -898.792246], + [0.0, 1271.084184, -898.792246], + ], + ] Helfrich_energies = [27.712812, 554.256258, 2771.281293] - AreaConservation_forces = [[[0.94380349, 0., -0.66736985], - [-0.94380349, 0., -0.66736985], - [0., 0.94380349, 0.66736985], - [0, -0.94380349, 0.66736985]], - [[18.17566447, 0., -12.8521356], - [-18.17566447, 0., -12.8521356], - [0., 18.17566447, 12.8521356], - [0., -18.17566447, 12.8521356]], - [[96.88179659, 0., -68.50577534], - [-96.88179659, 0., -68.50577534], - [0., 96.88179659, 68.50577534], - [0., -96.88179659, 68.50577534]]] + AreaConservation_forces = [ + [ + [0.94380349, 0.0, -0.66736985], + [-0.94380349, 0.0, -0.66736985], + [0.0, 0.94380349, 0.66736985], + [0, -0.94380349, 0.66736985], + ], + [ + [18.17566447, 0.0, -12.8521356], + [-18.17566447, 0.0, -12.8521356], + [0.0, 18.17566447, 12.8521356], + [0.0, -18.17566447, 12.8521356], + ], + [ + [96.88179659, 0.0, -68.50577534], + [-96.88179659, 0.0, -68.50577534], + [0.0, 96.88179659, 68.50577534], + [0.0, -96.88179659, 68.50577534], + ], + ] AreaConservation_energies = [3.69707, 57.13009, 454.492529] - Volume_forces = [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], - [[4.93960528, 0, - -3.49282839], [-4.93960528, 0, -3.49282839], - [0, 4.93960528, 3.49282839], [0, -4.93960528, - 3.49282839]], - [[-107.5893328, 0, 76.0771468], - [107.5893328, 0, 76.0771468], - [0, -107.5893328, -76.0771468], - [0, 107.5893328, -76.0771468]]] + Volume_forces = [ + [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], + [ + [4.93960528, 0, -3.49282839], + [-4.93960528, 0, -3.49282839], + [0, 4.93960528, 3.49282839], + [0, -4.93960528, 3.49282839], + ], + [ + [-107.5893328, 0, 76.0771468], + [107.5893328, 0, 76.0771468], + [0, -107.5893328, -76.0771468], + [0, 107.5893328, -76.0771468], + ], + ] Volume_energies = [0, 19.92608051621174, 47.2656702899458] harmonic_args_and_vals = [] @@ -158,42 +235,64 @@ def get_mesh_potential_args_forces_and_energies(): for i in range(3): harmonic_args_and_vals.append( - (*_harmonic_arg_list[i], harmonic_forces[i], harmonic_energies[i])) + (*_harmonic_arg_list[i], harmonic_forces[i], harmonic_energies[i]) + ) FENE_args_and_vals.append( - (*_FENE_arg_list[i], FENE_forces[i], FENE_energies[i])) + (*_FENE_arg_list[i], FENE_forces[i], FENE_energies[i]) + ) Tether_args_and_vals.append( - (*_Tether_arg_list[i], Tether_forces[i], Tether_energies[i])) + (*_Tether_arg_list[i], Tether_forces[i], Tether_energies[i]) + ) BendingRigidity_args_and_vals.append( - (*_BendingRigidity_arg_list[i], BendingRigidity_forces[i], 
- BendingRigidity_energies[i])) + ( + *_BendingRigidity_arg_list[i], + BendingRigidity_forces[i], + BendingRigidity_energies[i], + ) + ) Helfrich_args_and_vals.append( - (*_Helfrich_arg_list[i], Helfrich_forces[i], Helfrich_energies[i])) + (*_Helfrich_arg_list[i], Helfrich_forces[i], Helfrich_energies[i]) + ) AreaConservation_args_and_vals.append( - (*_AreaConservation_arg_list[i], AreaConservation_forces[i], - AreaConservation_energies[i])) + ( + *_AreaConservation_arg_list[i], + AreaConservation_forces[i], + AreaConservation_energies[i], + ) + ) TriangleAreaConservation_args_and_vals.append( - (*_TriangleAreaConservation_arg_list[i], AreaConservation_forces[i], - AreaConservation_energies[i])) + ( + *_TriangleAreaConservation_arg_list[i], + AreaConservation_forces[i], + AreaConservation_energies[i], + ) + ) Volume_args_and_vals.append( - (*_Volume_arg_list[i], Volume_forces[i], Volume_energies[i])) - return (harmonic_args_and_vals + FENE_args_and_vals + Tether_args_and_vals - + AreaConservation_args_and_vals - + TriangleAreaConservation_args_and_vals - + BendingRigidity_args_and_vals + Helfrich_args_and_vals - + Volume_args_and_vals) + (*_Volume_arg_list[i], Volume_forces[i], Volume_energies[i]) + ) + return ( + harmonic_args_and_vals + + FENE_args_and_vals + + Tether_args_and_vals + + AreaConservation_args_and_vals + + TriangleAreaConservation_args_and_vals + + BendingRigidity_args_and_vals + + Helfrich_args_and_vals + + Volume_args_and_vals + ) def _skip_if_helfrich_mpi(sim, pair_potential): """Determines if the simulation is able to run this pair potential.""" - if (sim.device.communicator.num_ranks > 1 - and issubclass(pair_potential, hoomd.md.mesh.bending.Helfrich)): + if sim.device.communicator.num_ranks > 1 and issubclass( + pair_potential, hoomd.md.mesh.bending.Helfrich + ): pytest.skip("Cannot run Helfrich with MPI") -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def tetrahedron_snapshot_factory(device): - - def make_snapshot(d=1.0, particle_types=['A'], L=20): + def make_snapshot(d=1.0, particle_types=["A"], L=20): s = hoomd.Snapshot(device.communicator) N = 4 if s.communicator.rank == 0: @@ -201,10 +300,14 @@ def make_snapshot(d=1.0, particle_types=['A'], L=20): s.configuration.box = box s.particles.N = N - base_positions = np.array([[1.0, 0.0, -1.0 / np.sqrt(2.0)], - [-1.0, 0.0, -1.0 / np.sqrt(2.0)], - [0.0, 1.0, 1.0 / np.sqrt(2.0)], - [0.0, -1.0, 1.0 / np.sqrt(2.0)]]) + base_positions = np.array( + [ + [1.0, 0.0, -1.0 / np.sqrt(2.0)], + [-1.0, 0.0, -1.0 / np.sqrt(2.0)], + [0.0, 1.0, 1.0 / np.sqrt(2.0)], + [0.0, -1.0, 1.0 / np.sqrt(2.0)], + ] + ) # move particles slightly in direction of MPI decomposition which # varies by simulation dimension s.particles.position[:] = 0.5 * d * base_positions @@ -214,8 +317,9 @@ def make_snapshot(d=1.0, particle_types=['A'], L=20): return make_snapshot -@pytest.mark.parametrize("mesh_potential_cls, potential_kwargs", - get_mesh_potential_and_args()) +@pytest.mark.parametrize( + "mesh_potential_cls, potential_kwargs", get_mesh_potential_and_args() +) def test_before_attaching(mesh_potential_cls, potential_kwargs): mesh = hoomd.mesh.Mesh() mesh_potential = mesh_potential_cls(mesh) @@ -224,17 +328,23 @@ def test_before_attaching(mesh_potential_cls, potential_kwargs): assert mesh is mesh_potential.mesh for key in potential_kwargs: assert mesh_potential.params["mesh"][key] == pytest.approx( - potential_kwargs[key], rel=1e-6) + potential_kwargs[key], rel=1e-6 + ) mesh1 = hoomd.mesh.Mesh() mesh_potential.mesh = mesh1 assert 
mesh1 is mesh_potential.mesh -@pytest.mark.parametrize("mesh_potential_cls, potential_kwargs", - get_mesh_potential_and_args()) -def test_after_attaching(tetrahedron_snapshot_factory, simulation_factory, - mesh_potential_cls, potential_kwargs): +@pytest.mark.parametrize( + "mesh_potential_cls, potential_kwargs", get_mesh_potential_and_args() +) +def test_after_attaching( + tetrahedron_snapshot_factory, + simulation_factory, + mesh_potential_cls, + potential_kwargs, +): snap = tetrahedron_snapshot_factory(d=0.969, L=5) sim = simulation_factory(snap) @@ -252,9 +362,9 @@ def test_after_attaching(tetrahedron_snapshot_factory, simulation_factory, integrator.forces.append(mesh_potential) - langevin = hoomd.md.methods.Langevin(kT=1, - filter=hoomd.filter.All(), - default_gamma=0.1) + langevin = hoomd.md.methods.Langevin( + kT=1, filter=hoomd.filter.All(), default_gamma=0.1 + ) integrator.methods.append(langevin) sim.operations.integrator = integrator @@ -262,18 +372,23 @@ def test_after_attaching(tetrahedron_snapshot_factory, simulation_factory, sim.run(0) for key in potential_kwargs: assert mesh_potential.params["mesh"][key] == pytest.approx( - potential_kwargs[key], rel=1e-6) + potential_kwargs[key], rel=1e-6 + ) mesh1 = hoomd.mesh.Mesh() with pytest.raises(RuntimeError): mesh_potential.mesh = mesh1 -@pytest.mark.parametrize("mesh_potential_cls, potential_kwargs", - get_mesh_potential_and_args()) -def test_multiple_types(tetrahedron_snapshot_factory, simulation_factory, - mesh_potential_cls, potential_kwargs): - +@pytest.mark.parametrize( + "mesh_potential_cls, potential_kwargs", get_mesh_potential_and_args() +) +def test_multiple_types( + tetrahedron_snapshot_factory, + simulation_factory, + mesh_potential_cls, + potential_kwargs, +): snap = tetrahedron_snapshot_factory(d=0.969, L=5) sim = simulation_factory(snap) @@ -292,22 +407,23 @@ def test_multiple_types(tetrahedron_snapshot_factory, simulation_factory, integrator.forces.append(mesh_potential) - langevin = hoomd.md.methods.Langevin(kT=1, - filter=hoomd.filter.All(), - default_gamma=0.1) + langevin = hoomd.md.methods.Langevin( + kT=1, filter=hoomd.filter.All(), default_gamma=0.1 + ) integrator.methods.append(langevin) sim.operations.integrator = integrator sim.run(0) for key in potential_kwargs: assert mesh_potential.params["mesh"][key] == pytest.approx( - potential_kwargs[key], rel=1e-6) + potential_kwargs[key], rel=1e-6 + ) assert mesh_potential.params["patch"][key] == pytest.approx( - potential_kwargs[key], rel=1e-6) + potential_kwargs[key], rel=1e-6 + ) def test_area(simulation_factory, tetrahedron_snapshot_factory): - snap = tetrahedron_snapshot_factory(d=0.969, L=5) sim = simulation_factory(snap) @@ -324,18 +440,20 @@ def test_area(simulation_factory, tetrahedron_snapshot_factory): integrator.forces.append(mesh_potential) - langevin = hoomd.md.methods.Langevin(kT=1, - filter=hoomd.filter.All(), - default_gamma=0.1) + langevin = hoomd.md.methods.Langevin( + kT=1, filter=hoomd.filter.All(), default_gamma=0.1 + ) integrator.methods.append(langevin) sim.operations.integrator = integrator sim.run(0) - np.testing.assert_allclose(mesh_potential.area, - np.array([1.62633 * 3 / 4, 1.62633 / 4]), - rtol=1e-2, - atol=1e-5) + np.testing.assert_allclose( + mesh_potential.area, + np.array([1.62633 * 3 / 4, 1.62633 / 4]), + rtol=1e-2, + atol=1e-5, + ) def test_area_ignore_type(simulation_factory, tetrahedron_snapshot_factory): @@ -355,9 +473,9 @@ def test_area_ignore_type(simulation_factory, tetrahedron_snapshot_factory): 
integrator.forces.append(mesh_potential) - langevin = hoomd.md.methods.Langevin(kT=1, - filter=hoomd.filter.All(), - default_gamma=0.1) + langevin = hoomd.md.methods.Langevin( + kT=1, filter=hoomd.filter.All(), default_gamma=0.1 + ) integrator.methods.append(langevin) sim.operations.integrator = integrator @@ -365,10 +483,9 @@ def test_area_ignore_type(simulation_factory, tetrahedron_snapshot_factory): print(mesh_potential.area) - np.testing.assert_allclose(mesh_potential.area, - np.array([1.62633]), - rtol=1e-2, - atol=1e-5) + np.testing.assert_allclose( + mesh_potential.area, np.array([1.62633]), rtol=1e-2, atol=1e-5 + ) def test_triangle_area(simulation_factory, tetrahedron_snapshot_factory): @@ -387,25 +504,29 @@ def test_triangle_area(simulation_factory, tetrahedron_snapshot_factory): integrator.forces.append(mesh_potential) - langevin = hoomd.md.methods.Langevin(kT=1, - filter=hoomd.filter.All(), - default_gamma=0.1) + langevin = hoomd.md.methods.Langevin( + kT=1, filter=hoomd.filter.All(), default_gamma=0.1 + ) integrator.methods.append(langevin) sim.operations.integrator = integrator sim.run(0) - np.testing.assert_allclose(mesh_potential.area, [1.62633], - rtol=1e-2, - atol=1e-5) - - -@pytest.mark.parametrize("mesh_potential_cls, potential_kwargs, force, energy", - get_mesh_potential_args_forces_and_energies()) -def test_forces_and_energies(tetrahedron_snapshot_factory, simulation_factory, - mesh_potential_cls, potential_kwargs, force, - energy): - + np.testing.assert_allclose(mesh_potential.area, [1.62633], rtol=1e-2, atol=1e-5) + + +@pytest.mark.parametrize( + "mesh_potential_cls, potential_kwargs, force, energy", + get_mesh_potential_args_forces_and_energies(), +) +def test_forces_and_energies( + tetrahedron_snapshot_factory, + simulation_factory, + mesh_potential_cls, + potential_kwargs, + force, + energy, +): snap = tetrahedron_snapshot_factory(d=0.969, L=5) sim = simulation_factory(snap) @@ -425,9 +546,9 @@ def test_forces_and_energies(tetrahedron_snapshot_factory, simulation_factory, integrator.forces.append(mesh_potential) - langevin = hoomd.md.methods.Langevin(kT=1, - filter=hoomd.filter.All(), - default_gamma=0.1) + langevin = hoomd.md.methods.Langevin( + kT=1, filter=hoomd.filter.All(), default_gamma=0.1 + ) integrator.methods.append(langevin) sim.operations.integrator = integrator sim.run(0) @@ -455,17 +576,17 @@ def test_volume(simulation_factory, tetrahedron_snapshot_factory): integrator.forces.append(mesh_potential) - langevin = hoomd.md.methods.Langevin(kT=1, - filter=hoomd.filter.All(), - default_gamma=0.1) + langevin = hoomd.md.methods.Langevin( + kT=1, filter=hoomd.filter.All(), default_gamma=0.1 + ) integrator.methods.append(langevin) sim.operations.integrator = integrator sim.run(0) - np.testing.assert_allclose(mesh_potential.volume, [0.08042, 0.026807], - rtol=1e-2, - atol=1e-5) + np.testing.assert_allclose( + mesh_potential.volume, [0.08042, 0.026807], rtol=1e-2, atol=1e-5 + ) def test_volume_ignore_type(simulation_factory, tetrahedron_snapshot_factory): @@ -485,21 +606,18 @@ def test_volume_ignore_type(simulation_factory, tetrahedron_snapshot_factory): integrator.forces.append(mesh_potential) - langevin = hoomd.md.methods.Langevin(kT=1, - filter=hoomd.filter.All(), - default_gamma=0.1) + langevin = hoomd.md.methods.Langevin( + kT=1, filter=hoomd.filter.All(), default_gamma=0.1 + ) integrator.methods.append(langevin) sim.operations.integrator = integrator sim.run(0) - np.testing.assert_allclose(mesh_potential.volume, [0.107227], - rtol=1e-2, - atol=1e-5) + 
np.testing.assert_allclose(mesh_potential.volume, [0.107227], rtol=1e-2, atol=1e-5) -def test_auto_detach_simulation(simulation_factory, - tetrahedron_snapshot_factory): +def test_auto_detach_simulation(simulation_factory, tetrahedron_snapshot_factory): sim = simulation_factory(tetrahedron_snapshot_factory(d=0.969, L=5)) mesh = hoomd.mesh.Mesh() mesh.triangulation = dict(type_ids=[0, 0], triangles=[[0, 1, 2], [0, 2, 3]]) @@ -513,7 +631,8 @@ def test_auto_detach_simulation(simulation_factory, integrator = hoomd.md.Integrator(dt=0.005, forces=[harmonic, harmonic_2]) integrator.methods.append( - hoomd.md.methods.Langevin(kT=1, filter=hoomd.filter.All())) + hoomd.md.methods.Langevin(kT=1, filter=hoomd.filter.All()) + ) sim.operations.integrator = integrator sim.run(0) @@ -536,7 +655,8 @@ def test_helfrich_mpi_error(simulation_factory, tetrahedron_snapshot_factory): integrator = hoomd.md.Integrator(dt=0.005, forces=[helfrich]) integrator.methods.append( - hoomd.md.methods.Langevin(kT=1, filter=hoomd.filter.All())) + hoomd.md.methods.Langevin(kT=1, filter=hoomd.filter.All()) + ) sim.operations.integrator = integrator if sim.device.communicator.num_ranks > 1: diff --git a/hoomd/md/pytest/test_meta_wall_list.py b/hoomd/md/pytest/test_meta_wall_list.py index 50628b5887..834253510e 100644 --- a/hoomd/md/pytest/test_meta_wall_list.py +++ b/hoomd/md/pytest/test_meta_wall_list.py @@ -11,10 +11,8 @@ class TestWallMetaList(conftest.BaseListTest): - @pytest.fixture def generate_plain_collection(self): - def generate(n): return [self.generate_wall() for _ in range(n)] @@ -31,11 +29,7 @@ def generate_wall(self): kwargs.update({"radius": float, "inside": bool}) return hoomd.wall.Sphere(**self.generator(kwargs)) elif random_type == 1: - kwargs.update({ - "radius": float, - "axis": (float,) * 3, - "inside": bool - }) + kwargs.update({"radius": float, "axis": (float,) * 3, "inside": bool}) return hoomd.wall.Cylinder(**self.generator(kwargs)) normal = self.generator.ndarray((3,)) vector_norm = np.linalg.norm(normal) @@ -54,8 +48,7 @@ def final_check(self, test_list): item_type = type(item) assert item_type is backend_index.type assert type_counter[item_type] == backend_index.index - assert test_list._backend_lists[item_type][ - backend_index.index] is item + assert test_list._backend_lists[item_type][backend_index.index] is item type_counter[item_type] += 1 def test_construction(self): diff --git a/hoomd/md/pytest/test_methods.py b/hoomd/md/pytest/test_methods.py index 28cb4f1772..32d26e32b6 100644 --- a/hoomd/md/pytest/test_methods.py +++ b/hoomd/md/pytest/test_methods.py @@ -5,20 +5,27 @@ import pytest import hoomd -from hoomd.conftest import (Options, Either, Generator, ClassDefinition, - pickling_check, logging_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + Options, + Either, + Generator, + ClassDefinition, + pickling_check, + logging_check, + autotuned_kernel_parameter_check, +) from hoomd.logging import LoggerCategories class MethodDefinition(ClassDefinition): - - def __init__(self, - cls, - constructor_spec, - attribute_spec=None, - generator=None, - requires_thermostat=False): + def __init__( + self, + cls, + constructor_spec, + attribute_spec=None, + generator=None, + requires_thermostat=False, + ): super().__init__(cls, constructor_spec, attribute_spec, generator) self.requires_thermostat = requires_thermostat @@ -39,47 +46,53 @@ def generate_all_attr_change(self): change_attrs = super().generate_all_attr_change() change_attrs["filter"] = hoomd.filter.Type(["A"]) if 
self.requires_thermostat: - change_attrs["thermostat"] = hoomd.md.methods.thermostats.MTTK( - 1.5, 0.05) + change_attrs["thermostat"] = hoomd.md.methods.thermostats.MTTK(1.5, 0.05) return change_attrs generator = Generator(np.random.default_rng(546162595)) _method_definitions = ( - MethodDefinition(hoomd.md.methods.ConstantVolume, {}, {}, - generator, - requires_thermostat=True), - MethodDefinition(hoomd.md.methods.ConstantPressure, { - "S": Either(hoomd.variant.Variant, (hoomd.variant.Variant,) * 6), - "tauS": float, - "couple": Options("xy", "xz", "yz", "xyz"), - "box_dof": (bool,) * 6, - "rescale_all": bool, - "gamma": float - }, - generator=generator, - requires_thermostat=True), - MethodDefinition(hoomd.md.methods.DisplacementCapped, - {"maximum_displacement": hoomd.variant.Variant}, - generator=generator), - MethodDefinition(hoomd.md.methods.Langevin, { - "kT": hoomd.variant.Variant, - "tally_reservoir_energy": bool - }, - generator=generator), - MethodDefinition(hoomd.md.methods.Brownian, { - "kT": hoomd.variant.Variant, - }, - generator=generator), - MethodDefinition(hoomd.md.methods.OverdampedViscous, {}, - generator=generator), + MethodDefinition( + hoomd.md.methods.ConstantVolume, {}, {}, generator, requires_thermostat=True + ), + MethodDefinition( + hoomd.md.methods.ConstantPressure, + { + "S": Either(hoomd.variant.Variant, (hoomd.variant.Variant,) * 6), + "tauS": float, + "couple": Options("xy", "xz", "yz", "xyz"), + "box_dof": (bool,) * 6, + "rescale_all": bool, + "gamma": float, + }, + generator=generator, + requires_thermostat=True, + ), + MethodDefinition( + hoomd.md.methods.DisplacementCapped, + {"maximum_displacement": hoomd.variant.Variant}, + generator=generator, + ), + MethodDefinition( + hoomd.md.methods.Langevin, + {"kT": hoomd.variant.Variant, "tally_reservoir_energy": bool}, + generator=generator, + ), + MethodDefinition( + hoomd.md.methods.Brownian, + { + "kT": hoomd.variant.Variant, + }, + generator=generator, + ), + MethodDefinition(hoomd.md.methods.OverdampedViscous, {}, generator=generator), ) -@pytest.fixture(scope="module", - params=_method_definitions, - ids=lambda x: x.cls.__name__) +@pytest.fixture( + scope="module", params=_method_definitions, ids=lambda x: x.cls.__name__ +) def method_definition(request): return request.param @@ -87,33 +100,32 @@ def method_definition(request): _thermostat_definition = ( # Somewhat hacky way of representing None or no thermostat ClassDefinition(lambda: None, {}, generator=generator), - ClassDefinition(hoomd.md.methods.thermostats.MTTK, { - "kT": hoomd.variant.Variant, - "tau": float - }, - generator=generator), - ClassDefinition(hoomd.md.methods.thermostats.Bussi, { - "kT": hoomd.variant.Variant, - "tau": float - }, - generator=generator), - ClassDefinition(hoomd.md.methods.thermostats.Berendsen, { - "kT": hoomd.variant.Variant, - "tau": float - }, - generator=generator), + ClassDefinition( + hoomd.md.methods.thermostats.MTTK, + {"kT": hoomd.variant.Variant, "tau": float}, + generator=generator, + ), + ClassDefinition( + hoomd.md.methods.thermostats.Bussi, + {"kT": hoomd.variant.Variant, "tau": float}, + generator=generator, + ), + ClassDefinition( + hoomd.md.methods.thermostats.Berendsen, + {"kT": hoomd.variant.Variant, "tau": float}, + generator=generator, + ), ) -@pytest.fixture(scope="module", - params=_thermostat_definition, - ids=lambda x: x.cls.__name__) +@pytest.fixture( + scope="module", params=_thermostat_definition, ids=lambda x: x.cls.__name__ +) def thermostat_definition(request): return request.param def 
check_instance_attrs(instance, attr_dict, set_attrs=False): - def equality(a, b): if isinstance(a, type(b)): return a == b @@ -139,21 +151,24 @@ def equality(a, b): class TestThermostats: - @pytest.mark.parametrize("n", range(10)) def test_attributes(self, thermostat_definition, n): """Test the construction and setting of attributes.""" constructor_args = thermostat_definition.generate_init_args() thermostat = thermostat_definition.cls(**constructor_args) check_instance_attrs(thermostat, constructor_args) - check_instance_attrs(thermostat, - thermostat_definition.generate_all_attr_change(), - True) + check_instance_attrs( + thermostat, thermostat_definition.generate_all_attr_change(), True + ) @pytest.mark.parametrize("n", range(10)) - def test_attributes_attached(self, simulation_factory, - two_particle_snapshot_factory, - thermostat_definition, n): + def test_attributes_attached( + self, + simulation_factory, + two_particle_snapshot_factory, + thermostat_definition, + n, + ): """Test the setting of attributes with attaching.""" constructor_args = thermostat_definition.generate_init_args() thermostat = thermostat_definition.cls(**constructor_args) @@ -171,11 +186,13 @@ def test_attributes_attached(self, simulation_factory, check_instance_attrs(thermostat, change_attrs, True) def test_thermostat_thermalize_thermostat_dof( - self, simulation_factory, two_particle_snapshot_factory): + self, simulation_factory, two_particle_snapshot_factory + ): """Tests that NVT.thermalize_dof can be called.""" thermostat = hoomd.md.methods.thermostats.MTTK(1.5, 0.05) - nvt = hoomd.md.methods.ConstantVolume(thermostat=thermostat, - filter=hoomd.filter.All()) + nvt = hoomd.md.methods.ConstantVolume( + thermostat=thermostat, filter=hoomd.filter.All() + ) sim = simulation_factory(two_particle_snapshot_factory()) sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[nvt]) @@ -200,23 +217,25 @@ def test_thermostat_thermalize_thermostat_dof( assert eta_rot == 0.0 def test_logging(self): - logging_check(hoomd.md.methods.thermostats.MTTK, - ('md', 'methods', 'thermostats'), { - 'energy': { - 'category': LoggerCategories.scalar, - 'default': True - }, - }) - - def test_pickling(self, thermostat_definition, simulation_factory, - two_particle_snapshot_factory): + logging_check( + hoomd.md.methods.thermostats.MTTK, + ("md", "methods", "thermostats"), + { + "energy": {"category": LoggerCategories.scalar, "default": True}, + }, + ) + + def test_pickling( + self, thermostat_definition, simulation_factory, two_particle_snapshot_factory + ): constructor_args = thermostat_definition.generate_init_args() thermostat = thermostat_definition.cls(**constructor_args) pickling_check(thermostat) sim = simulation_factory(two_particle_snapshot_factory()) - method = hoomd.md.methods.ConstantVolume(filter=hoomd.filter.All(), - thermostat=thermostat) + method = hoomd.md.methods.ConstantVolume( + filter=hoomd.filter.All(), thermostat=thermostat + ) integrator = hoomd.md.Integrator(0.05, methods=[method]) sim.operations.integrator = integrator sim.run(0) @@ -224,20 +243,18 @@ def test_pickling(self, thermostat_definition, simulation_factory, class TestMethods: - @pytest.mark.parametrize("n", range(10)) def test_attributes(self, method_definition, n): """Test the construction and setting of attributes.""" constructor_args = method_definition.generate_init_args() method = method_definition.cls(**constructor_args) check_instance_attrs(method, constructor_args) - check_instance_attrs(method, - 
method_definition.generate_all_attr_change(), True) + check_instance_attrs(method, method_definition.generate_all_attr_change(), True) @pytest.mark.parametrize("n", range(10)) - def test_attributes_attached(self, simulation_factory, - two_particle_snapshot_factory, - method_definition, n): + def test_attributes_attached( + self, simulation_factory, two_particle_snapshot_factory, method_definition, n + ): """Test the setting of attributes with attaching.""" constructor_args = method_definition.generate_init_args() method = method_definition.cls(**constructor_args) @@ -259,8 +276,7 @@ def test_attributes_attached(self, simulation_factory, check_instance_attrs(method, change_attrs, True) - def test_switch_methods(self, simulation_factory, - two_particle_snapshot_factory): + def test_switch_methods(self, simulation_factory, two_particle_snapshot_factory): all_ = hoomd.filter.All() method = hoomd.md.methods.Langevin(all_, 1.5, 0.1) @@ -274,11 +290,13 @@ def test_switch_methods(self, simulation_factory, assert len(sim.operations.integrator.methods) == 0 sim.operations.integrator.methods.append( - hoomd.md.methods.ConstantVolume(all_, None)) + hoomd.md.methods.ConstantVolume(all_, None) + ) assert len(sim.operations.integrator.methods) == 1 def test_constant_pressure_thermalize_barostat_dof( - self, simulation_factory, two_particle_snapshot_factory): + self, simulation_factory, two_particle_snapshot_factory + ): """Tests that ConstantPressure.thermalize_barostat_dof can be called.""" all_ = hoomd.filter.All() npt = hoomd.md.methods.ConstantPressure( @@ -287,7 +305,8 @@ def test_constant_pressure_thermalize_barostat_dof( S=[1, 2, 3, 0.125, 0.25, 0.5], tauS=2.0, box_dof=[True, True, True, True, True, True], - couple='xyz') + couple="xyz", + ) sim = simulation_factory(two_particle_snapshot_factory()) sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[npt]) @@ -298,7 +317,8 @@ def test_constant_pressure_thermalize_barostat_dof( assert v != 0.0 def test_constant_pressure_attributes_attached_2d( - self, simulation_factory, two_particle_snapshot_factory): + self, simulation_factory, two_particle_snapshot_factory + ): """Test attributes of ConstantPressure specific to 2D simulations.""" all_ = hoomd.filter.All() npt = hoomd.md.methods.ConstantPressure( @@ -306,7 +326,8 @@ def test_constant_pressure_attributes_attached_2d( thermostat=hoomd.md.methods.thermostats.Bussi(1.0), S=2.0, tauS=2.0, - couple='xy') + couple="xy", + ) sim = simulation_factory(two_particle_snapshot_factory(dimensions=2)) sim.operations.integrator = hoomd.md.Integrator(0.005, methods=[npt]) @@ -316,55 +337,52 @@ def test_constant_pressure_attributes_attached_2d( with pytest.raises(ValueError): npt.couple = invalid_couple - npt.couple = 'none' - assert npt.couple == 'none' + npt.couple = "none" + assert npt.couple == "none" npt.box_dof = [True, True, True, True, True, True] assert npt.box_dof == [True, True, False, True, False, False] - @pytest.mark.parametrize("cls, init_args", [ - (hoomd.md.methods.Brownian, { - 'kT': 1.5 - }), - (hoomd.md.methods.Langevin, { - 'kT': 1.5 - }), - (hoomd.md.methods.OverdampedViscous, {}), - (hoomd.md.methods.rattle.Brownian, { - 'kT': 1.5, - 'manifold_constraint': hoomd.md.manifold.Sphere(r=10) - }), - (hoomd.md.methods.rattle.Langevin, { - 'kT': 1.5, - 'manifold_constraint': hoomd.md.manifold.Sphere(r=10) - }), - (hoomd.md.methods.rattle.OverdampedViscous, { - 'manifold_constraint': hoomd.md.manifold.Sphere(r=10) - }), - ]) + @pytest.mark.parametrize( + "cls, init_args", + [ + 
(hoomd.md.methods.Brownian, {"kT": 1.5}), + (hoomd.md.methods.Langevin, {"kT": 1.5}), + (hoomd.md.methods.OverdampedViscous, {}), + ( + hoomd.md.methods.rattle.Brownian, + {"kT": 1.5, "manifold_constraint": hoomd.md.manifold.Sphere(r=10)}, + ), + ( + hoomd.md.methods.rattle.Langevin, + {"kT": 1.5, "manifold_constraint": hoomd.md.manifold.Sphere(r=10)}, + ), + ( + hoomd.md.methods.rattle.OverdampedViscous, + {"manifold_constraint": hoomd.md.manifold.Sphere(r=10)}, + ), + ], + ) def test_default_gamma(self, cls, init_args): c = cls(filter=hoomd.filter.All(), **init_args) - assert c.gamma['A'] == 1.0 - assert c.gamma_r['A'] == (1.0, 1.0, 1.0) + assert c.gamma["A"] == 1.0 + assert c.gamma_r["A"] == (1.0, 1.0, 1.0) c = cls(filter=hoomd.filter.All(), **init_args, default_gamma=2.0) - assert c.gamma['A'] == 2.0 - assert c.gamma_r['A'] == (1.0, 1.0, 1.0) - - c = cls(filter=hoomd.filter.All(), - **init_args, - default_gamma_r=(3.0, 4.0, 5.0)) - assert c.gamma['A'] == 1.0 - assert c.gamma_r['A'] == (3.0, 4.0, 5.0) + assert c.gamma["A"] == 2.0 + assert c.gamma_r["A"] == (1.0, 1.0, 1.0) - def test_langevin_reservoir(self, simulation_factory, - two_particle_snapshot_factory): + c = cls(filter=hoomd.filter.All(), **init_args, default_gamma_r=(3.0, 4.0, 5.0)) + assert c.gamma["A"] == 1.0 + assert c.gamma_r["A"] == (3.0, 4.0, 5.0) + def test_langevin_reservoir( + self, simulation_factory, two_particle_snapshot_factory + ): langevin = hoomd.md.methods.Langevin(filter=hoomd.filter.All(), kT=1.5) sim = simulation_factory(two_particle_snapshot_factory()) - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - methods=[langevin]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, methods=[langevin]) sim.run(10) assert langevin.reservoir_energy == 0.0 @@ -373,8 +391,9 @@ def test_langevin_reservoir(self, simulation_factory, sim.run(10) assert langevin.reservoir_energy != 0.0 - def test_kernel_parameters(self, method_definition, simulation_factory, - two_particle_snapshot_factory): + def test_kernel_parameters( + self, method_definition, simulation_factory, two_particle_snapshot_factory + ): sim = simulation_factory(two_particle_snapshot_factory()) constructor_args = method_definition.generate_init_args() method = method_definition.cls(**constructor_args) @@ -382,11 +401,11 @@ def test_kernel_parameters(self, method_definition, simulation_factory, sim.operations.integrator = integrator sim.state.thermalize_particle_momenta(hoomd.filter.All(), 1.0) sim.run(0) - autotuned_kernel_parameter_check(instance=method, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=method, activate=lambda: sim.run(1)) - def test_pickling(self, method_definition, simulation_factory, - two_particle_snapshot_factory): + def test_pickling( + self, method_definition, simulation_factory, two_particle_snapshot_factory + ): constructor_args = method_definition.generate_init_args() method = method_definition.cls(**constructor_args) pickling_check(method) @@ -399,19 +418,22 @@ def test_pickling(self, method_definition, simulation_factory, def test_logging(self): logging_check( - hoomd.md.methods.ConstantPressure, ('md', 'methods'), { - 'barostat_energy': { - 'category': LoggerCategories.scalar, - 'default': True + hoomd.md.methods.ConstantPressure, + ("md", "methods"), + { + "barostat_energy": { + "category": LoggerCategories.scalar, + "default": True, }, - }) - logging_check(hoomd.md.methods.thermostats.MTTK, - ('md', 'methods', 'thermostats'), { - 'energy': { - 'category': LoggerCategories.scalar, - 
'default': True - }, - }) + }, + ) + logging_check( + hoomd.md.methods.thermostats.MTTK, + ("md", "methods", "thermostats"), + { + "energy": {"category": LoggerCategories.scalar, "default": True}, + }, + ) @pytest.fixture(scope="function", params=range(7)) @@ -428,12 +450,7 @@ def manifold(request): class RattleDefinition(ClassDefinition): - - def __init__(self, - cls, - constructor_spec, - attribute_spec=None, - generator=None): + def __init__(self, cls, constructor_spec, attribute_spec=None, generator=None): super().__init__(cls, constructor_spec, attribute_spec, generator) self.default_manifold = lambda: hoomd.md.manifold.Sphere(5) self.default_filter = hoomd.filter.All() @@ -452,39 +469,44 @@ def generate_all_attr_change(self): _rattle_definitions = ( - RattleDefinition(hoomd.md.methods.rattle.NVE, {"tolerance": float}, - generator=generator), - RattleDefinition(hoomd.md.methods.rattle.DisplacementCapped, { - "maximum_displacement": hoomd.variant.Variant, - "tolerance": float - }, - generator=generator), - RattleDefinition(hoomd.md.methods.rattle.Langevin, { - "kT": hoomd.variant.Variant, - "tally_reservoir_energy": bool, - "tolerance": float - }, - generator=generator), - RattleDefinition(hoomd.md.methods.rattle.Brownian, { - "kT": hoomd.variant.Variant, - "tolerance": float - }, - generator=generator), - RattleDefinition(hoomd.md.methods.rattle.OverdampedViscous, - {"tolerance": float}, - generator=generator), + RattleDefinition( + hoomd.md.methods.rattle.NVE, {"tolerance": float}, generator=generator + ), + RattleDefinition( + hoomd.md.methods.rattle.DisplacementCapped, + {"maximum_displacement": hoomd.variant.Variant, "tolerance": float}, + generator=generator, + ), + RattleDefinition( + hoomd.md.methods.rattle.Langevin, + { + "kT": hoomd.variant.Variant, + "tally_reservoir_energy": bool, + "tolerance": float, + }, + generator=generator, + ), + RattleDefinition( + hoomd.md.methods.rattle.Brownian, + {"kT": hoomd.variant.Variant, "tolerance": float}, + generator=generator, + ), + RattleDefinition( + hoomd.md.methods.rattle.OverdampedViscous, + {"tolerance": float}, + generator=generator, + ), ) -@pytest.fixture(scope="module", - params=_rattle_definitions, - ids=lambda x: x.cls.__name__) +@pytest.fixture( + scope="module", params=_rattle_definitions, ids=lambda x: x.cls.__name__ +) def rattle_definition(request): return request.param class TestRattle: - def test_rattle_attributes(self, rattle_definition, manifold): constructor_args = rattle_definition.generate_init_args() constructor_args["manifold_constraint"] = manifold @@ -501,10 +523,13 @@ def test_rattle_attributes(self, rattle_definition, manifold): assert method.tolerance == 1e-5 check_instance_attrs(method, change_attrs, True) - def test_rattle_attributes_attached(self, simulation_factory, - two_particle_snapshot_factory, - rattle_definition, manifold): - + def test_rattle_attributes_attached( + self, + simulation_factory, + two_particle_snapshot_factory, + rattle_definition, + manifold, + ): constructor_args = rattle_definition.generate_init_args() constructor_args["manifold_constraint"] = manifold method = rattle_definition.cls(**constructor_args) @@ -528,8 +553,9 @@ def test_rattle_attributes_attached(self, simulation_factory, assert method.manifold_constraint == manifold check_instance_attrs(method, change_attrs, True) - def test_rattle_switch_methods(self, simulation_factory, - two_particle_snapshot_factory): + def test_rattle_switch_methods( + self, simulation_factory, two_particle_snapshot_factory + ): sim = 
simulation_factory(two_particle_snapshot_factory()) all_ = hoomd.filter.All() @@ -543,6 +569,6 @@ def test_rattle_switch_methods(self, simulation_factory, assert len(sim.operations.integrator.methods) == 0 sim.operations.integrator.methods.append( - hoomd.md.methods.rattle.NVE(filter=all_, - manifold_constraint=manifold)) + hoomd.md.methods.rattle.NVE(filter=all_, manifold_constraint=manifold) + ) assert len(sim.operations.integrator.methods) == 1 diff --git a/hoomd/md/pytest/test_minimize_fire.py b/hoomd/md/pytest/test_minimize_fire.py index e45cb6c798..b30faf04e0 100644 --- a/hoomd/md/pytest/test_minimize_fire.py +++ b/hoomd/md/pytest/test_minimize_fire.py @@ -19,17 +19,17 @@ def _assert_correct_params(fire, param_dict): def _make_random_params(): """Get random values for the fire parameters.""" params = { - 'dt': np.random.rand(), - 'integrate_rotational_dof': False, - 'min_steps_adapt': np.random.randint(1, 25), - 'finc_dt': 1 + np.random.rand(), - 'fdec_dt': np.random.rand(), - 'alpha_start': np.random.rand(), - 'fdec_alpha': np.random.rand(), - 'force_tol': np.random.rand(), - 'angmom_tol': np.random.rand(), - 'energy_tol': np.random.rand(), - 'min_steps_conv': np.random.randint(1, 15) + "dt": np.random.rand(), + "integrate_rotational_dof": False, + "min_steps_adapt": np.random.randint(1, 25), + "finc_dt": 1 + np.random.rand(), + "fdec_dt": np.random.rand(), + "alpha_start": np.random.rand(), + "fdec_alpha": np.random.rand(), + "force_tol": np.random.rand(), + "angmom_tol": np.random.rand(), + "energy_tol": np.random.rand(), + "min_steps_conv": np.random.randint(1, 15), } return params @@ -59,37 +59,30 @@ def _assert_error_if_nonpositive(fire): def test_constructor_validation(): """Make sure constructor validates arguments.""" with pytest.raises(ValueError): - md.minimize.FIRE(dt=0.01, - force_tol=1e-1, - angmom_tol=1e-1, - energy_tol=1e-5, - min_steps_conv=-5) + md.minimize.FIRE( + dt=0.01, force_tol=1e-1, angmom_tol=1e-1, energy_tol=1e-5, min_steps_conv=-5 + ) with pytest.raises(ValueError): - md.minimize.FIRE(dt=0.01, - force_tol=1e-1, - angmom_tol=1e-1, - energy_tol=1e-5, - min_steps_adapt=0) + md.minimize.FIRE( + dt=0.01, force_tol=1e-1, angmom_tol=1e-1, energy_tol=1e-5, min_steps_adapt=0 + ) def test_get_set_params(simulation_factory, two_particle_snapshot_factory): """Assert we can get/set params when not attached and when attached.""" - fire = md.minimize.FIRE(dt=0.01, - force_tol=1e-1, - angmom_tol=1e-1, - energy_tol=1e-5) + fire = md.minimize.FIRE(dt=0.01, force_tol=1e-1, angmom_tol=1e-1, energy_tol=1e-5) default_params = { - 'dt': 0.01, - 'integrate_rotational_dof': False, - 'min_steps_adapt': 5, - 'finc_dt': 1.1, - 'fdec_dt': 0.5, - 'alpha_start': 0.1, - 'fdec_alpha': 0.99, - 'force_tol': 0.1, - 'angmom_tol': 0.1, - 'energy_tol': 1e-5, - 'min_steps_conv': 10 + "dt": 0.01, + "integrate_rotational_dof": False, + "min_steps_adapt": 5, + "finc_dt": 1.1, + "fdec_dt": 0.5, + "alpha_start": 0.1, + "fdec_alpha": 0.99, + "force_tol": 0.1, + "angmom_tol": 0.1, + "energy_tol": 1e-5, + "min_steps_conv": 10, } _assert_correct_params(fire, default_params) @@ -117,16 +110,18 @@ def test_run_minimization(lattice_snapshot_factory, simulation_factory): sim = simulation_factory(snap) lj = md.pair.LJ(default_r_cut=2.5, nlist=md.nlist.Cell(buffer=0.4)) - lj.params[('A', 'A')] = dict(sigma=1.0, epsilon=1.0) + lj.params[("A", "A")] = dict(sigma=1.0, epsilon=1.0) nve = md.methods.ConstantVolume(hoomd.filter.All()) - fire = md.minimize.FIRE(dt=0.0025, - force_tol=1e-1, - angmom_tol=1e-1, - 
energy_tol=1e-5, - methods=[nve], - forces=[lj], - min_steps_conv=3) + fire = md.minimize.FIRE( + dt=0.0025, + force_tol=1e-1, + angmom_tol=1e-1, + energy_tol=1e-5, + methods=[nve], + forces=[lj], + min_steps_conv=3, + ) sim.operations.integrator = fire assert not fire.converged @@ -152,21 +147,16 @@ def test_pickling(lattice_snapshot_factory, simulation_factory): nve = md.methods.ConstantVolume(hoomd.filter.All()) - fire = md.minimize.FIRE(dt=0.0025, - force_tol=1e-1, - angmom_tol=1e-1, - energy_tol=1e-5, - methods=[nve]) + fire = md.minimize.FIRE( + dt=0.0025, force_tol=1e-1, angmom_tol=1e-1, energy_tol=1e-5, methods=[nve] + ) operation_pickling_check(fire, sim) def _try_add_to_fire(sim, method, should_error=False): """Try adding method to FIRE's method list.""" - fire = md.minimize.FIRE(dt=0.0025, - force_tol=1e-1, - angmom_tol=1e-1, - energy_tol=1e-5) + fire = md.minimize.FIRE(dt=0.0025, force_tol=1e-1, angmom_tol=1e-1, energy_tol=1e-5) sim.operations.integrator = fire if should_error: with pytest.raises(ValueError): @@ -182,15 +172,11 @@ def test_validate_methods(lattice_snapshot_factory, simulation_factory): surface = md.manifold.Diamond(5) nve = md.methods.rattle.NVE(hoomd.filter.All(), surface) - nph = md.methods.ConstantPressure(hoomd.filter.All(), - S=1, - tauS=1, - couple='none') + nph = md.methods.ConstantPressure(hoomd.filter.All(), S=1, tauS=1, couple="none") brownian = md.methods.Brownian(hoomd.filter.All(), kT=1) rattle_brownian = md.methods.rattle.Brownian(hoomd.filter.All(), 1, surface) - methods = [(nve, False), (nph, False), (brownian, True), - (rattle_brownian, True)] + methods = [(nve, False), (nph, False), (brownian, True), (rattle_brownian, True)] for method, should_error in methods: sim = simulation_factory(snap) _try_add_to_fire(sim, method, should_error) @@ -198,13 +184,10 @@ def test_validate_methods(lattice_snapshot_factory, simulation_factory): def test_logging(): logging_check( - hoomd.md.minimize.FIRE, ('md', 'minimize', 'fire'), { - 'converged': { - 'category': LoggerCategories.scalar, - 'default': False - }, - 'energy': { - 'category': LoggerCategories.scalar, - 'default': True - } - }) + hoomd.md.minimize.FIRE, + ("md", "minimize", "fire"), + { + "converged": {"category": LoggerCategories.scalar, "default": False}, + "energy": {"category": LoggerCategories.scalar, "default": True}, + }, + ) diff --git a/hoomd/md/pytest/test_nlist.py b/hoomd/md/pytest/test_nlist.py index b6a8fbcbf0..c7780efdff 100644 --- a/hoomd/md/pytest/test_nlist.py +++ b/hoomd/md/pytest/test_nlist.py @@ -11,17 +11,22 @@ import collections from pathlib import Path from hoomd.md.nlist import Cell, Stencil, Tree -from hoomd.conftest import (logging_check, pickling_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + logging_check, + pickling_check, + autotuned_kernel_parameter_check, +) try: from mpi4py import MPI + MPI4PY_IMPORTED = True except ImportError: MPI4PY_IMPORTED = False try: import cupy + CUPY_IMPORTED = True except ImportError: CUPY_IMPORTED = False @@ -36,9 +41,7 @@ def _nlist_params(): return nlists -@pytest.fixture(scope="function", - params=_nlist_params(), - ids=(lambda x: x[0].__name__)) +@pytest.fixture(scope="function", params=_nlist_params(), ids=(lambda x: x[0].__name__)) def nlist_params(request): return cp.deepcopy(request.param) @@ -47,9 +50,7 @@ def _assert_nlist_params(nlist, param_dict): """Assert the params of the nlist are the same as in the dictionary.""" for param, item in param_dict.items(): if isinstance(item, (tuple, list)): - 
assert all( - a == b - for a, b in zip(getattr(nlist, param), param_dict[param])) + assert all(a == b for a, b in zip(getattr(nlist, param), param_dict[param])) else: assert getattr(nlist, param) == param_dict[param] @@ -59,23 +60,29 @@ def test_common_params(nlist_params): nlist = nlist_cls(**required_args, buffer=0.4) default_params_dict = { "buffer": 0.4, - "exclusions": ('bond',), + "exclusions": ("bond",), "rebuild_check_delay": 1, "check_dist": True, } _assert_nlist_params(nlist, default_params_dict) new_params_dict = { - "buffer": - np.random.uniform(5.0), - "exclusions": - random.sample([ - 'bond', '1-4', 'angle', 'dihedral', 'special_pair', 'body', - '1-3', 'constraint', 'meshbond' - ], np.random.randint(9)), - "rebuild_check_delay": - np.random.randint(8), - "check_dist": - False, + "buffer": np.random.uniform(5.0), + "exclusions": random.sample( + [ + "bond", + "1-4", + "angle", + "dihedral", + "special_pair", + "body", + "1-3", + "constraint", + "meshbond", + ], + np.random.randint(9), + ), + "rebuild_check_delay": np.random.randint(8), + "check_dist": False, } for param in new_params_dict.keys(): setattr(nlist, param, new_params_dict[param]) @@ -92,26 +99,23 @@ def test_cell_specific_params(): def test_stencil_specific_params(): cell_width = np.random.uniform(12.1) nlist = Stencil(cell_width=cell_width, buffer=0.4) - _assert_nlist_params(nlist, dict(deterministic=False, - cell_width=cell_width)) + _assert_nlist_params(nlist, dict(deterministic=False, cell_width=cell_width)) nlist.deterministic = True x = np.random.uniform(25.5) nlist.cell_width = x _assert_nlist_params(nlist, dict(deterministic=True, cell_width=x)) -def test_simple_simulation(nlist_params, simulation_factory, - lattice_snapshot_factory): +def test_simple_simulation(nlist_params, simulation_factory, lattice_snapshot_factory): nlist_cls, required_args = nlist_params nlist = nlist_cls(**required_args, buffer=0.4) lj = hoomd.md.pair.LJ(nlist, default_r_cut=1.1) - lj.params[('A', 'A')] = dict(epsilon=1, sigma=1) - lj.params[('A', 'B')] = dict(epsilon=1, sigma=1) - lj.params[('B', 'B')] = dict(epsilon=1, sigma=1) + lj.params[("A", "A")] = dict(epsilon=1, sigma=1) + lj.params[("A", "B")] = dict(epsilon=1, sigma=1) + lj.params[("B", "B")] = dict(epsilon=1, sigma=1) integrator = hoomd.md.Integrator(0.005) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim = simulation_factory(lattice_snapshot_factory(n=10)) sim.operations.integrator = integrator @@ -120,25 +124,23 @@ def test_simple_simulation(nlist_params, simulation_factory, # Force nlist to update every step to ensure autotuning occurs. 
nlist.check_dist = False nlist.rebuild_check_delay = 1 - autotuned_kernel_parameter_check(instance=nlist, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=nlist, activate=lambda: sim.run(1)) -def test_auto_detach_simulation(simulation_factory, - two_particle_snapshot_factory): +def test_auto_detach_simulation(simulation_factory, two_particle_snapshot_factory): nlist = Cell(buffer=0.4) lj = hoomd.md.pair.LJ(nlist, default_r_cut=1.1) - lj.params[('A', 'A')] = dict(epsilon=1, sigma=1) - lj.params[('A', 'B')] = dict(epsilon=1, sigma=1) - lj.params[('B', 'B')] = dict(epsilon=1, sigma=1) + lj.params[("A", "A")] = dict(epsilon=1, sigma=1) + lj.params[("A", "B")] = dict(epsilon=1, sigma=1) + lj.params[("B", "B")] = dict(epsilon=1, sigma=1) lj_2 = cp.deepcopy(lj) lj_2.nlist = nlist integrator = hoomd.md.Integrator(0.005, forces=[lj, lj_2]) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim = simulation_factory( - two_particle_snapshot_factory(particle_types=["A", "B"], d=2.0)) + two_particle_snapshot_factory(particle_types=["A", "B"], d=2.0) + ) sim.operations.integrator = integrator sim.run(0) del integrator.forces[1] @@ -153,15 +155,15 @@ def test_pickling(simulation_factory, two_particle_snapshot_factory): nlist = Cell(0.4) pickling_check(nlist) lj = hoomd.md.pair.LJ(nlist, default_r_cut=1.1) - lj.params[('A', 'A')] = dict(epsilon=1, sigma=1) - lj.params[('A', 'B')] = dict(epsilon=1, sigma=1) - lj.params[('B', 'B')] = dict(epsilon=1, sigma=1) + lj.params[("A", "A")] = dict(epsilon=1, sigma=1) + lj.params[("A", "B")] = dict(epsilon=1, sigma=1) + lj.params[("B", "B")] = dict(epsilon=1, sigma=1) integrator = hoomd.md.Integrator(0.005, forces=[lj]) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim = simulation_factory( - two_particle_snapshot_factory(particle_types=["A", "B"], d=2.0)) + two_particle_snapshot_factory(particle_types=["A", "B"], d=2.0) + ) sim.operations.integrator = integrator sim.run(0) pickling_check(nlist) @@ -170,13 +172,12 @@ def test_pickling(simulation_factory, two_particle_snapshot_factory): def test_cell_properties(simulation_factory, lattice_snapshot_factory): nlist = hoomd.md.nlist.Cell(buffer=0) lj = hoomd.md.pair.LJ(nlist, default_r_cut=1.1) - lj.params[('A', 'A')] = dict(epsilon=1, sigma=1) - lj.params[('A', 'B')] = dict(epsilon=1, sigma=1) - lj.params[('B', 'B')] = dict(epsilon=1, sigma=1) + lj.params[("A", "A")] = dict(epsilon=1, sigma=1) + lj.params[("A", "B")] = dict(epsilon=1, sigma=1) + lj.params[("B", "B")] = dict(epsilon=1, sigma=1) integrator = hoomd.md.Integrator(0.005) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim = simulation_factory(lattice_snapshot_factory(n=10)) sim.operations.integrator = integrator @@ -193,29 +194,23 @@ def test_cell_properties(simulation_factory, lattice_snapshot_factory): def test_logging(): base_loggables = { - 'shortest_rebuild': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'num_builds': { - 'category': LoggerCategories.scalar, - 'default': False - } + "shortest_rebuild": {"category": LoggerCategories.scalar, "default": True}, + "num_builds": {"category": LoggerCategories.scalar, "default": False}, } - 
logging_check(hoomd.md.nlist.NeighborList, ('md', 'nlist'), base_loggables) + logging_check(hoomd.md.nlist.NeighborList, ("md", "nlist"), base_loggables) logging_check( - hoomd.md.nlist.Cell, ('md', 'nlist'), { + hoomd.md.nlist.Cell, + ("md", "nlist"), + { **base_loggables, - 'dimensions': { - 'category': LoggerCategories.sequence, - 'default': False + "dimensions": {"category": LoggerCategories.sequence, "default": False}, + "allocated_particles_per_cell": { + "category": LoggerCategories.scalar, + "default": False, }, - 'allocated_particles_per_cell': { - 'category': LoggerCategories.scalar, - 'default': False - }, - }) + }, + ) _path = Path(__file__).parent / "true_pair_list.json" @@ -246,7 +241,7 @@ def _setup_set_rcut_later(sim_factory, snap_factory): sim.operations.computes.append(nlist) sim.run(0) - nlist.r_cut[('A', 'A')] = 1.1 + nlist.r_cut[("A", "A")] = 1.1 return sim, nlist, True @@ -258,10 +253,9 @@ def _setup_with_force_no_rcut(sim_factory, snap_factory): integrator = hoomd.md.Integrator(0.005) lj = hoomd.md.pair.LJ(nlist, default_r_cut=0.0) - lj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) + lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.ConstantVolume(hoomd.filter.All())) + integrator.methods.append(hoomd.md.methods.ConstantVolume(hoomd.filter.All())) sim.operations.integrator = integrator sim.run(0) @@ -275,14 +269,13 @@ def _setup_with_force_rcut_later(sim_factory, snap_factory): integrator = hoomd.md.Integrator(0.005) lj = hoomd.md.pair.LJ(nlist, default_r_cut=0.0) - lj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) + lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.ConstantVolume(hoomd.filter.All())) + integrator.methods.append(hoomd.md.methods.ConstantVolume(hoomd.filter.All())) sim.operations.integrator = integrator sim.run(0) - lj.r_cut[('A', 'A')] = 1.1 + lj.r_cut[("A", "A")] = 1.1 return sim, nlist, True @@ -294,10 +287,9 @@ def _setup_with_force_rcut_on_nlist(sim_factory, snap_factory): integrator = hoomd.md.Integrator(0.005) lj = hoomd.md.pair.LJ(nlist, default_r_cut=0.0) - lj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) + lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.ConstantVolume(hoomd.filter.All())) + integrator.methods.append(hoomd.md.methods.ConstantVolume(hoomd.filter.All())) sim.operations.integrator = integrator sim.run(0) @@ -311,10 +303,9 @@ def _setup_with_force_drop_nlist(sim_factory, snap_factory): integrator = hoomd.md.Integrator(0.005) lj = hoomd.md.pair.LJ(nlist, default_r_cut=1.1) - lj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) + lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.ConstantVolume(hoomd.filter.All())) + integrator.methods.append(hoomd.md.methods.ConstantVolume(hoomd.filter.All())) sim.operations.integrator = integrator sim.run(0) @@ -330,10 +321,9 @@ def _setup_with_force_drop_force(sim_factory, snap_factory): integrator = hoomd.md.Integrator(0.005) lj = hoomd.md.pair.LJ(nlist, default_r_cut=1.1) - lj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) + lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.ConstantVolume(hoomd.filter.All())) + 
integrator.methods.append(hoomd.md.methods.ConstantVolume(hoomd.filter.All())) sim.operations.integrator = integrator sim.run(0) @@ -343,10 +333,14 @@ def _setup_with_force_drop_force(sim_factory, snap_factory): pair_setup_funcs = [ - _setup_standard_rcut, _setup_no_rcut, _setup_set_rcut_later, - _setup_with_force_no_rcut, _setup_with_force_rcut_later, - _setup_with_force_rcut_on_nlist, _setup_with_force_drop_nlist, - _setup_with_force_drop_force + _setup_standard_rcut, + _setup_no_rcut, + _setup_set_rcut_later, + _setup_with_force_no_rcut, + _setup_with_force_rcut_later, + _setup_with_force_rcut_on_nlist, + _setup_with_force_drop_nlist, + _setup_with_force_drop_force, ] @@ -361,7 +355,6 @@ def _check_pair_set(sim, nlist, truth_set): @pytest.mark.parametrize("setup", pair_setup_funcs) def test_global_pair_list(simulation_factory, lattice_snapshot_factory, setup): - sim, nlist, full = setup(simulation_factory, lattice_snapshot_factory) if full: @@ -373,7 +366,6 @@ def test_global_pair_list(simulation_factory, lattice_snapshot_factory, setup): def _check_local_pairs_with_mpi(tag_pair_list, broadcast=False): - tag_pair_list = np.array(tag_pair_list, dtype=np.int32) comm = MPI.COMM_WORLD @@ -410,7 +402,6 @@ def _check_local_pairs_with_mpi(tag_pair_list, broadcast=False): def _check_local_pair_counts(sim, global_pairs, half_nlist=True): - if half_nlist: local_count = 1 else: @@ -434,9 +425,7 @@ def _check_local_pair_counts(sim, global_pairs, half_nlist=True): @pytest.mark.parametrize("setup", pair_setup_funcs) -def test_rank_local_pair_list(simulation_factory, lattice_snapshot_factory, - setup): - +def test_rank_local_pair_list(simulation_factory, lattice_snapshot_factory, setup): sim, nlist, full = setup(simulation_factory, lattice_snapshot_factory) if full: @@ -455,16 +444,13 @@ def test_rank_local_pair_list(simulation_factory, lattice_snapshot_factory, assert set_tag_pair_list.issubset(truth_set) if full and MPI4PY_IMPORTED: - global_pairs = _check_local_pairs_with_mpi(tag_pair_list, - broadcast=True) + global_pairs = _check_local_pairs_with_mpi(tag_pair_list, broadcast=True) _check_local_pair_counts(sim, global_pairs) @pytest.mark.parametrize("setup", pair_setup_funcs) -def test_cpu_local_nlist_arrays(simulation_factory, lattice_snapshot_factory, - setup): - +def test_cpu_local_nlist_arrays(simulation_factory, lattice_snapshot_factory, setup): sim, nlist, full = setup(simulation_factory, lattice_snapshot_factory) if full: @@ -475,7 +461,6 @@ def test_cpu_local_nlist_arrays(simulation_factory, lattice_snapshot_factory, tag_pair_list = [] with nlist.cpu_local_nlist_arrays as data: with sim.state.cpu_local_snapshot as snap_data: - half_nlist = data.half_nlist tags = snap_data.particles.tag_with_ghost @@ -488,16 +473,13 @@ def test_cpu_local_nlist_arrays(simulation_factory, lattice_snapshot_factory, assert set_tag_pair_list.issubset(truth_set) if full and MPI4PY_IMPORTED: - global_pairs = _check_local_pairs_with_mpi(tag_pair_list, - broadcast=True) + global_pairs = _check_local_pairs_with_mpi(tag_pair_list, broadcast=True) _check_local_pair_counts(sim, global_pairs, half_nlist) @pytest.mark.parametrize("setup", pair_setup_funcs) -def test_gpu_local_nlist_arrays(simulation_factory, lattice_snapshot_factory, - setup): - +def test_gpu_local_nlist_arrays(simulation_factory, lattice_snapshot_factory, setup): sim, nlist, full = setup(simulation_factory, lattice_snapshot_factory) if full: @@ -515,7 +497,7 @@ def test_gpu_local_nlist_arrays(simulation_factory, lattice_snapshot_factory, 
pytest.skip("Cupy is not installed") get_local_pairs = cupy.RawKernel( - r''' + r""" extern "C" __global__ void get_local_pairs( const unsigned int N, @@ -538,7 +520,9 @@ def test_gpu_local_nlist_arrays(simulation_factory, lattice_snapshot_factory, pair[offset + idx] = make_uint2(tag_i, tags[j]); } } -''', 'get_local_pairs') +""", + "get_local_pairs", + ) with nlist.gpu_local_nlist_arrays as data: with sim.state.gpu_local_snapshot as snap_data: @@ -552,15 +536,16 @@ def test_gpu_local_nlist_arrays(simulation_factory, lattice_snapshot_factory, N = int(head_list.size) n_pairs = int(cupy.sum(n_neigh)) - offsets = cupy.cumsum(n_neigh.astype(cupy.uint64)) \ - - n_neigh[0] + offsets = cupy.cumsum(n_neigh.astype(cupy.uint64)) - n_neigh[0] device_local_pairs = cupy.zeros((n_pairs, 2), dtype=cupy.uint32) block = 256 n_grid = (N + 255) // 256 - get_local_pairs((n_grid,), (block,), - (N, head_list, n_neigh, raw_nlist, tags, offsets, - device_local_pairs)) + get_local_pairs( + (n_grid,), + (block,), + (N, head_list, n_neigh, raw_nlist, tags, offsets, device_local_pairs), + ) local_pairs = cupy.asnumpy(device_local_pairs) diff --git a/hoomd/md/pytest/test_nlist_tuner.py b/hoomd/md/pytest/test_nlist_tuner.py index 0d6940df85..cef2d8c20d 100644 --- a/hoomd/md/pytest/test_nlist_tuner.py +++ b/hoomd/md/pytest/test_nlist_tuner.py @@ -24,7 +24,8 @@ def simulation(simulation_factory, lattice_snapshot_factory, nlist): integrator = md.Integrator( dt=0.005, methods=[md.methods.ConstantVolume(hoomd.filter.All(), thermostat)], - forces=[lj]) + forces=[lj], + ) sim.operations.integrator = integrator return sim @@ -33,21 +34,20 @@ def simulation(simulation_factory, lattice_snapshot_factory, nlist): def nlist_tuner(nlist, request): if request.param == "GradientDescent": return md.tune.NeighborListBuffer.with_gradient_descent( - trigger=5, nlist=nlist, maximum_buffer=1.5) - return md.tune.NeighborListBuffer.with_grid(trigger=5, - nlist=nlist, - maximum_buffer=1.5) + trigger=5, nlist=nlist, maximum_buffer=1.5 + ) + return md.tune.NeighborListBuffer.with_grid( + trigger=5, nlist=nlist, maximum_buffer=1.5 + ) class TestMoveSize: - def test_invalid_construction(self, nlist): solver = hoomd.tune.solve.ScaleSolver() with pytest.raises(ValueError): - md.tune.NeighborListBuffer(trigger=5, - solver=solver, - nlist=nlist, - maximum_buffer=1.0) + md.tune.NeighborListBuffer( + trigger=5, solver=solver, nlist=nlist, maximum_buffer=1.0 + ) solver = hoomd.tune.solve.GradientDescent() with pytest.raises(TypeError): @@ -55,16 +55,11 @@ def test_invalid_construction(self, nlist): def test_valid_construction(self, nlist): solver = hoomd.tune.solve.GradientDescent() - attrs = { - "solver": solver, - "nlist": nlist, - "trigger": 5, - "maximum_buffer": 1.0 - } + attrs = {"solver": solver, "nlist": nlist, "trigger": 5, "maximum_buffer": 1.0} tuner = md.tune.NeighborListBuffer(**attrs) for attr, value in attrs.items(): tuner_attr = getattr(tuner, attr) - if attr == 'trigger': + if attr == "trigger": assert tuner_attr.period == value else: assert tuner_attr is value or tuner_attr == value @@ -75,13 +70,14 @@ def test_valid_construction(self, nlist): "alpha": 0.1, "kappa": np.array([0.2, 0.15]), "tol": 1e-3, - "max_delta": 0.4 + "max_delta": 0.4, } tuner = md.tune.NeighborListBuffer.with_gradient_descent( - **attrs, **solver_attrs) + **attrs, **solver_attrs + ) for attr, value in attrs.items(): tuner_attr = getattr(tuner, attr) - if attr == 'trigger': + if attr == "trigger": assert tuner_attr.period == value else: assert tuner_attr is value or 
tuner_attr == value @@ -109,7 +105,7 @@ def test_detach(self, nlist_tuner, simulation): assert not nlist_tuner._attached def test_set_params(self, nlist_tuner): - max_buffer = 4. + max_buffer = 4.0 nlist_tuner.maximum_buffer_size = max_buffer assert nlist_tuner.maximum_buffer_size == max_buffer trigger = hoomd.trigger.Before(400) diff --git a/hoomd/md/pytest/test_patch.py b/hoomd/md/pytest/test_patch.py index 3a2b7d564a..3b71fe7226 100644 --- a/hoomd/md/pytest/test_patch.py +++ b/hoomd/md/pytest/test_patch.py @@ -9,56 +9,716 @@ TOLERANCES = {"rtol": 1e-2, "atol": 1e-5} -# yapf: disable patch_test_parameters = [ - (hoomd.md.pair.aniso.PatchyYukawa, {}, {"pair_params": {"epsilon": 0.778, "kappa": 1.42}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.9526279441628825, 0.55, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-0.012090314520418179, -0.03048768233212375, -0.020302519521372915], 0.011040513953371068, [[0.0015859062333126468, -0.0027468701721376867, -0.0015600230173152775], [-0.012752291970067747, 0.022087617605109952, -0.020833722138796803]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyYukawa, {}, {"pair_params": {"epsilon": 0.778, "kappa": 1.42}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-0.0007999692723457226, -0.01114090834099443, -0.007274060796964416], 0.004651059201336865, [[0.002811092008585531, 0.0004956713663675812, -0.005749113522483697], [-0.010690998624191522, -0.0018851115081620442, 0.008743769332283843]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyYukawa, {}, {"pair_params": {"epsilon": 0.778, "kappa": 1.42}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.1905255888325765, 0.11, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-0.10716329368566274, -2.1122670493484197, -1.7708604149002043], 0.19259884567130864, [[0.02766571476373035, -0.04791842359848936, -0.027214188906835586], [-0.22246036040275277, 0.38531264688765143, -0.3634387721364981]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyYukawa, {}, {"pair_params": {"epsilon": 0.778, "kappa": 1.42}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-0.14980294587981527, -0.517897117596774, -0.6344703329732665], 0.08113649754981586, [[0.049038756548475816, 0.008646855879890203, -0.10029176474393942], [-0.18650164320150397, -0.0328852716428886, 0.1525327432514915]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyYukawa, {}, {"pair_params": {"epsilon": 0.778, "kappa": 1.42}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1.2, 0.]], [[-0.8, -1.3, -1.02]], [[0, 0, 0], [0.9526279441628825, 0.55, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-0.22122614255103878, -0.024932958611667055, 0.057889843743764734], 0.08761098072205219, [[0., 0., 0.026357812134987807], [0.03183941405907059, -0.05514748283353311, 0.0715647331639529]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyYukawa, {}, {"pair_params": {"epsilon": 0.778, "kappa": 1.42}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 
2}}, [[1., 1.2, 0.]], [[-0.8, -1.3, -1.02]], [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [0.12732524637729745, -0.17726950238751302, 0.04481708806670525], 0.08444761711985735, [[0., 0., -0.07724006742162585], [0.04854983737507447, 0.008560646238233868, -0.02682913269351473]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyYukawa, {}, {"pair_params": {"epsilon": 0.778, "kappa": 1.42}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1.2, 0.]], [[-0.8, -1.3, -1.02]], [[0, 0, 0], [0.1905255888325765, 0.11, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-11.77816503494904, 2.165770753686239, 5.049365060463138], 1.5283503853592224, [[0., 0., 0.4598050609835814], [0.5554301566509452, -0.9620332513753777, 1.2484278409832568]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyYukawa, {}, {"pair_params": {"epsilon": 0.778, "kappa": 1.42}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1.2, 0.]], [[-0.8, -1.3, -1.02]], [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [9.652760368011261, -7.221635485842196, 3.9091112354245268], 1.473166344037004, [[0., 0., -1.3474325459673837], [0.8469390714473051, 0.14933820931233416, -0.46802712346154485]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedGaussian, {}, {"pair_params": {"epsilon": 0.778, "sigma": 1.19, "delta": 0.2}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.9526279441628825, 0.55, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [0.0161650695094226, -0.083298288358161, -0.08000254293546676], 0.04350539797064849, [[0.006249299816638692, -0.010824104794149086, -0.006147306411487301], [-0.05025069843114538, 0.08703676279856548, -0.08209575900961696]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedGaussian, {}, {"pair_params": {"epsilon": 0.778, "sigma": 1.19, "delta": 0.2}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-0.008542090040088067, -0.013333970380345037, -0.028663603086883704], 0.01832760525404918, [[0.011077172410826152, 0.001953204365988083, -0.022654513442983346], [-0.04212812481497665, -0.007428325051534193, 0.034455023214604044]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedGaussian, {}, {"pair_params": {"epsilon": 0.778, "sigma": 1.19, "delta": 0.2}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.1905255888325765, 0.11, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [0.2662070489442997, -0.46271964969619306, -0.5323769824248823], 0.05790134073485696, [[0.008317194070528874, -0.014405802706566608, -0.008181451032196864], [-0.06687866213726593, 0.1158372407639775, -0.10926125807444684]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedGaussian, {}, {"pair_params": {"epsilon": 0.778, "sigma": 1.19, "delta": 0.2}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-0.07024336682877552, 
-0.01273561367945491, -0.19074196840379043], 0.024392212602781037, [[0.014742610435852588, 0.002599519985914994, -0.030150895365433757], [-0.05606832768380231, -0.0098863589338577, 0.04585619560409825]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyGaussian, {}, {"pair_params": {"epsilon": 0.78, "sigma": 0.97}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.9526279441628825, 0.55, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-0.0027622079239978375, -0.06658185805585613, -0.056127236080865534], 0.030522001590127953, [[0.004384309714149004, -0.007593847181023855, -0.004312754389535873], [-0.03525428955862503, 0.06106222070028351, -0.05759576981056035]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyGaussian, {}, {"pair_params": {"epsilon": 0.78, "sigma": 0.97}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-0.004801569836106703, -0.016110821605886394, -0.02010947100373393], 0.012858064120795543, [[0.007771391360802155, 0.0013703059745540929, -0.015893685096199244], [-0.029555750609598655, -0.005211476267562902, 0.024172546911386518]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyGaussian, {}, {"pair_params": {"epsilon": 0.78, "sigma": 0.97}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.1905255888325765, 0.11, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [0.2493853854591554, -0.4584090266528183, -0.5202661071389925], 0.05658416148091617, [[0.008127988857978328, -0.014078089665372197, -0.00799533379501422], [-0.06535726064326747, 0.11320209607766105, -0.10677570833468933]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyGaussian, {}, {"pair_params": {"epsilon": 0.78, "sigma": 0.97}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-0.06773603596583777, -0.017603283883824504, -0.18640283980247196], 0.023837321887120035, [[0.014407235462344481, 0.0025403843294311288, -0.02946500220028324], [-0.05479284706295722, -0.009661457287031931, 0.04481302754015036]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyLJ, {}, {"pair_params": {"epsilon": 0.78, "sigma": 1.14}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.9526279441628825, 0.55, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-1.9455238094148624, -1.269674877695913, -0.12646351897802516], 0.06877088552479703, [[0.009878541568339286, -0.017110135901044755, -0.009717316131562882], [-0.07943347700625308, 0.13758281799668448, -0.12977235718497987]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyLJ, {}, {"pair_params": {"epsilon": 0.78, "sigma": 1.14}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [0.1529938661525866, -0.9653274199711448, -0.0453098111628864], 0.028971247285687153, [[0.017510170952057886, 0.0030875155756654327, -0.035810980321533736], [-0.06659376960486044, 
-0.011742278328420192, 0.05446456227901608]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyLJ, {}, {"pair_params": {"epsilon": 0.78, "sigma": 1.14}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.1905255888325765, 0.11, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-3.7101814265232987e9, -3.068632424997914e9, -8.002380847724919e8], 8.703392435257888e7, [[1.250192189633267e7, -2.1653963916706044e7, -1.229788086412793e7], [-1.0052811122130676e8, 1.7411979622423828e8, -1.6423516190177378e8]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyLJ, {}, {"pair_params": {"epsilon": 0.78, "sigma": 1.14}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [2.416134421816039e8, -1.9882078394083674e9, -2.867122218281132e8], 3.6664953859771974e7, [[2.2160233686283108e7, 3.9074470976963183e6, -4.532107051569407e7], [-8.427864585301311e7, -1.4860599161459792e7, 6.892836346536472e7]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedLJ, {}, {"pair_params": {"epsilon": 0.77, "sigma": 1.13, "delta": 0.2}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.9526279441628825, 0.55, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-33.02646286207422, -24.647019239630318, -4.818557570362859], 2.6203325175827956, [[0.37639567238783445, -0.6519364283247793, -0.3702526039741746], [-3.0266023360874064, 5.242229020410042, -4.944632089877043]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedLJ, {}, {"pair_params": {"epsilon": 0.77, "sigma": 1.13, "delta": 0.2}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [2.357494589449272, -17.09094290068084, -1.7264103937244915], 1.1038726745818228, [[0.6671787048251221, 0.11764160661479799, -1.3644825932799285], [-2.537379279507933, -0.4474084272678541, 2.075227947212622]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedLJ, {}, {"pair_params": {"epsilon": 0.77, "sigma": 1.13, "delta": 0.2}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.1905255888325765, 0.11, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-1.2494366602747592e23, -7.47190342399429e22, -2.2306585653721332e21], 2.426065098991449e20, [[3.484902767355334e19, -6.036028652496825e19, -3.4280264595619766e19], [-2.8022146986448788e20, 4.853578231769241e20, -4.5780446794859164e20]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedLJ, {}, {"pair_params": {"epsilon": 0.77, "sigma": 1.13, "delta": 0.2}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [1.0353882496245466e22, -6.044231702301891e22, -7.9920849255704e20], 1.0220332540099799e20, [[6.177151028372888e19, 1.089198390213181e19, -1.2633219545706699e20], [-2.349261886272707e20, -4.142382552999396e19, 1.92137374222588e20]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyMie, {}, {"pair_params": 
{"epsilon": 0.78, "sigma": 1.14, "n": 5.5, "m": 12.4}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.9526279441628825, 0.55, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-1.9185608907383984, -1.2521408460716963, -0.12476469967243964], 0.06784706726530326, [[0.009745840396784486, -0.016880290729687938, -0.009586780745736316], [-0.07836642521662623, 0.13573463008274345, -0.12802908934379711]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyMie, {}, {"pair_params": {"epsilon": 0.78, "sigma": 1.14, "n": 5.5, "m": 12.4}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [0.15086870266201252, -0.9519631777531574, -0.04470115198150361], 0.02858206853600857, [[0.01727495199960542, 0.003046040127974089, -0.035329921552856386], [-0.06569919714356336, -0.011584541067294738, 0.0537329248027101]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyMie, {}, {"pair_params": {"epsilon": 0.78, "sigma": 1.14, "n": 5.5, "m": 12.4}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.1905255888325765, 0.11, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-6.380949338454674e9, -5.220849829366294e9, -1.32729010034573e9], 1.4435612149134248e8, [[2.073592532529298e7, -3.5915676205361664e7, -2.0397498990352083e7], [-1.6673783636332324e8, 2.887984041253813e8, -2.724035617261037e8]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyMie, {}, {"pair_params": {"epsilon": 0.78, "sigma": 1.14, "n": 5.5, "m": 12.4}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [4.199338440026387e8, -3.406506655464279e9, -4.755463416725215e8], 6.081318949165459e7, [[3.675538486963374e7, 6.480966038840114e6, -7.517038913444535e7], [-1.3978616419268584e8, -2.464807227691403e7, 1.1432589400768268e8]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedMie, {}, {"pair_params": {"epsilon": 0.78, "sigma": 2.14, "n": 5.5, "m": 12.4, "delta": 0.1}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.9526279441628825, 0.55, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-24438.06649914493, -19392.807472482873, -4563.172146474723], 2481.453875798641, [[356.44655547084204, -617.3835442584165, -350.6290720705163], [-2866.191236031939, 4964.3888450159575, -4682.564667457716]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedMie, {}, {"pair_params": {"epsilon": 0.78, "sigma": 2.14, "n": 5.5, "m": 12.4, "delta": 0.1}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [1654.9426299749784, -12909.362184624497, -1634.9099719140254], 1045.366993825708, [[631.817974180563, 111.40655574459755, -1292.164485550744], [-2402.8971915802563, -423.69560674419665, 1965.2400595046643]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedMie, {}, {"pair_params": {"epsilon": 0.78, "sigma": 2.14, "n": 5.5, "m": 
12.4, "delta": 0.1}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [0.1905255888325765, 0.11, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-5.545584879562406e16, -3.897295731456281e16, -6.00724551378345e15], 6.533482491809166e14, [[9.384971254744408e13, -1.6255247040790753e14, -9.231801267129023e13], [-7.546467190636238e14, 1.307086459183353e15, -1.2328842607140105e15]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedMie, {}, {"pair_params": {"epsilon": 0.78, "sigma": 2.14, "n": 5.5, "m": 12.4, "delta": 0.1}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 1., 1.], [1., 0., 0.]], [[1., 1., 1.]], [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [4.145472181514702e15, -2.814898155635645e16, -2.152297848725213e15], 2.7523731221791462e14, [[1.6635294786571403e14, 2.9332513029114016e13, -3.4021724623699825e14], [-6.326648616732528e14, -1.1155588485918328e14, 5.1743301159862644e14]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedMie, {}, {"pair_params": {"epsilon": 0.78, "sigma": 2.14, "n": 5.5, "m": 12.4, "delta": 0.1}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 0., 0.]], [[0., 1., 1.], [0., 0., 1.], [2., 1., 0.]], [[0, 0, 0], [0.9526279441628825, 0.55, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-138985.57323908593, -97123.42586287575, -21501.617361604447], 13583.235236081233, [[0., 0., -6448.0496895867645], [-11825.889548882442, 20483.04154336218, -9632.374538723448]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedMie, {}, {"pair_params": {"epsilon": 0.78, "sigma": 2.14, "n": 5.5, "m": 12.4, "delta": 0.1}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 0., 0.]], [[0., 1., 1.], [0., 0., 1.], [2., 1., 0.]], [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [7754.7557135632205, -24687.796151765757, -3306.6477337182796], 2063.2418829657026, [[0., 0., -4488.243718545056], [-3582.053557110608, -631.6126884613307, 803.2957077724617]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedMie, {}, {"pair_params": {"epsilon": 0.78, "sigma": 2.14, "n": 5.5, "m": 12.4, "delta": 0.1}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 0., 0.]], [[0., 1., 1.], [0., 0., 1.], [2., 1., 0.]], [[0, 0, 0], [0.1905255888325765, 0.11, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [-3.104239064372109e17, -2.0144530397799e17, -2.830607531087193e16], 3.5763642622009175e15, [[0., 0., -1.6977232647401915e15], [-3.113668284195913e15, 5.393031666143131e15, -2.5361321851305225e15]]), # noqa: E501 - (hoomd.md.pair.aniso.PatchyExpandedMie, {}, {"pair_params": {"epsilon": 0.78, "sigma": 2.14, "n": 5.5, "m": 12.4, "delta": 0.1}, "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}}, [[1., 0., 0.]], [[0., 1., 1.], [0., 0., 1.], [2., 1., 0.]], [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], [[1., 0., 0., 0.], [0.9843766433940419, 0., 0., 0.17607561994858706]], [1.409072519691006e16, -5.451579177557966e16, -4.353078105849488e15], 5.4323615885809244e14, [[0., 0., -1.1817210079977378e15], [-9.431279149838202e14, -1.662988976709668e14, 2.115017528096385e14]])] # noqa: E501 -# yapf: enable - - -@pytest.fixture(scope='session') -def patchy_snapshot_factory(device): + ( + 
hoomd.md.pair.aniso.PatchyYukawa, + {}, + { + "pair_params": {"epsilon": 0.778, "kappa": 1.42}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.9526279441628825, 0.55, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-0.012090314520418179, -0.03048768233212375, -0.020302519521372915], + 0.011040513953371068, + [ + [0.0015859062333126468, -0.0027468701721376867, -0.0015600230173152775], + [-0.012752291970067747, 0.022087617605109952, -0.020833722138796803], + ], + ), + ( + hoomd.md.pair.aniso.PatchyYukawa, + {}, + { + "pair_params": {"epsilon": 0.778, "kappa": 1.42}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-0.0007999692723457226, -0.01114090834099443, -0.007274060796964416], + 0.004651059201336865, + [ + [0.002811092008585531, 0.0004956713663675812, -0.005749113522483697], + [-0.010690998624191522, -0.0018851115081620442, 0.008743769332283843], + ], + ), + ( + hoomd.md.pair.aniso.PatchyYukawa, + {}, + { + "pair_params": {"epsilon": 0.778, "kappa": 1.42}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.1905255888325765, 0.11, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-0.10716329368566274, -2.1122670493484197, -1.7708604149002043], + 0.19259884567130864, + [ + [0.02766571476373035, -0.04791842359848936, -0.027214188906835586], + [-0.22246036040275277, 0.38531264688765143, -0.3634387721364981], + ], + ), + ( + hoomd.md.pair.aniso.PatchyYukawa, + {}, + { + "pair_params": {"epsilon": 0.778, "kappa": 1.42}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-0.14980294587981527, -0.517897117596774, -0.6344703329732665], + 0.08113649754981586, + [ + [0.049038756548475816, 0.008646855879890203, -0.10029176474393942], + [-0.18650164320150397, -0.0328852716428886, 0.1525327432514915], + ], + ), + ( + hoomd.md.pair.aniso.PatchyYukawa, + {}, + { + "pair_params": {"epsilon": 0.778, "kappa": 1.42}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.2, 0.0]], + [[-0.8, -1.3, -1.02]], + [[0, 0, 0], [0.9526279441628825, 0.55, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-0.22122614255103878, -0.024932958611667055, 0.057889843743764734], + 0.08761098072205219, + [ + [0.0, 0.0, 0.026357812134987807], + [0.03183941405907059, -0.05514748283353311, 0.0715647331639529], + ], + ), + ( + hoomd.md.pair.aniso.PatchyYukawa, + {}, + { + "pair_params": {"epsilon": 0.778, "kappa": 1.42}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.2, 0.0]], + [[-0.8, -1.3, -1.02]], + [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [0.12732524637729745, -0.17726950238751302, 0.04481708806670525], + 0.08444761711985735, + [ + [0.0, 0.0, -0.07724006742162585], + [0.04854983737507447, 0.008560646238233868, -0.02682913269351473], 
+ ], + ), + ( + hoomd.md.pair.aniso.PatchyYukawa, + {}, + { + "pair_params": {"epsilon": 0.778, "kappa": 1.42}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.2, 0.0]], + [[-0.8, -1.3, -1.02]], + [[0, 0, 0], [0.1905255888325765, 0.11, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-11.77816503494904, 2.165770753686239, 5.049365060463138], + 1.5283503853592224, + [ + [0.0, 0.0, 0.4598050609835814], + [0.5554301566509452, -0.9620332513753777, 1.2484278409832568], + ], + ), + ( + hoomd.md.pair.aniso.PatchyYukawa, + {}, + { + "pair_params": {"epsilon": 0.778, "kappa": 1.42}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.2, 0.0]], + [[-0.8, -1.3, -1.02]], + [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [9.652760368011261, -7.221635485842196, 3.9091112354245268], + 1.473166344037004, + [ + [0.0, 0.0, -1.3474325459673837], + [0.8469390714473051, 0.14933820931233416, -0.46802712346154485], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedGaussian, + {}, + { + "pair_params": {"epsilon": 0.778, "sigma": 1.19, "delta": 0.2}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.9526279441628825, 0.55, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [0.0161650695094226, -0.083298288358161, -0.08000254293546676], + 0.04350539797064849, + [ + [0.006249299816638692, -0.010824104794149086, -0.006147306411487301], + [-0.05025069843114538, 0.08703676279856548, -0.08209575900961696], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedGaussian, + {}, + { + "pair_params": {"epsilon": 0.778, "sigma": 1.19, "delta": 0.2}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-0.008542090040088067, -0.013333970380345037, -0.028663603086883704], + 0.01832760525404918, + [ + [0.011077172410826152, 0.001953204365988083, -0.022654513442983346], + [-0.04212812481497665, -0.007428325051534193, 0.034455023214604044], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedGaussian, + {}, + { + "pair_params": {"epsilon": 0.778, "sigma": 1.19, "delta": 0.2}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.1905255888325765, 0.11, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [0.2662070489442997, -0.46271964969619306, -0.5323769824248823], + 0.05790134073485696, + [ + [0.008317194070528874, -0.014405802706566608, -0.008181451032196864], + [-0.06687866213726593, 0.1158372407639775, -0.10926125807444684], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedGaussian, + {}, + { + "pair_params": {"epsilon": 0.778, "sigma": 1.19, "delta": 0.2}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-0.07024336682877552, -0.01273561367945491, -0.19074196840379043], + 0.024392212602781037, + [ + [0.014742610435852588, 0.002599519985914994, 
-0.030150895365433757], + [-0.05606832768380231, -0.0098863589338577, 0.04585619560409825], + ], + ), + ( + hoomd.md.pair.aniso.PatchyGaussian, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 0.97}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.9526279441628825, 0.55, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-0.0027622079239978375, -0.06658185805585613, -0.056127236080865534], + 0.030522001590127953, + [ + [0.004384309714149004, -0.007593847181023855, -0.004312754389535873], + [-0.03525428955862503, 0.06106222070028351, -0.05759576981056035], + ], + ), + ( + hoomd.md.pair.aniso.PatchyGaussian, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 0.97}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-0.004801569836106703, -0.016110821605886394, -0.02010947100373393], + 0.012858064120795543, + [ + [0.007771391360802155, 0.0013703059745540929, -0.015893685096199244], + [-0.029555750609598655, -0.005211476267562902, 0.024172546911386518], + ], + ), + ( + hoomd.md.pair.aniso.PatchyGaussian, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 0.97}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.1905255888325765, 0.11, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [0.2493853854591554, -0.4584090266528183, -0.5202661071389925], + 0.05658416148091617, + [ + [0.008127988857978328, -0.014078089665372197, -0.00799533379501422], + [-0.06535726064326747, 0.11320209607766105, -0.10677570833468933], + ], + ), + ( + hoomd.md.pair.aniso.PatchyGaussian, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 0.97}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-0.06773603596583777, -0.017603283883824504, -0.18640283980247196], + 0.023837321887120035, + [ + [0.014407235462344481, 0.0025403843294311288, -0.02946500220028324], + [-0.05479284706295722, -0.009661457287031931, 0.04481302754015036], + ], + ), + ( + hoomd.md.pair.aniso.PatchyLJ, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 1.14}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.9526279441628825, 0.55, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-1.9455238094148624, -1.269674877695913, -0.12646351897802516], + 0.06877088552479703, + [ + [0.009878541568339286, -0.017110135901044755, -0.009717316131562882], + [-0.07943347700625308, 0.13758281799668448, -0.12977235718497987], + ], + ), + ( + hoomd.md.pair.aniso.PatchyLJ, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 1.14}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [0.1529938661525866, 
-0.9653274199711448, -0.0453098111628864], + 0.028971247285687153, + [ + [0.017510170952057886, 0.0030875155756654327, -0.035810980321533736], + [-0.06659376960486044, -0.011742278328420192, 0.05446456227901608], + ], + ), + ( + hoomd.md.pair.aniso.PatchyLJ, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 1.14}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.1905255888325765, 0.11, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-3.7101814265232987e9, -3.068632424997914e9, -8.002380847724919e8], + 8.703392435257888e7, + [ + [1.250192189633267e7, -2.1653963916706044e7, -1.229788086412793e7], + [-1.0052811122130676e8, 1.7411979622423828e8, -1.6423516190177378e8], + ], + ), + ( + hoomd.md.pair.aniso.PatchyLJ, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 1.14}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [2.416134421816039e8, -1.9882078394083674e9, -2.867122218281132e8], + 3.6664953859771974e7, + [ + [2.2160233686283108e7, 3.9074470976963183e6, -4.532107051569407e7], + [-8.427864585301311e7, -1.4860599161459792e7, 6.892836346536472e7], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedLJ, + {}, + { + "pair_params": {"epsilon": 0.77, "sigma": 1.13, "delta": 0.2}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.9526279441628825, 0.55, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-33.02646286207422, -24.647019239630318, -4.818557570362859], + 2.6203325175827956, + [ + [0.37639567238783445, -0.6519364283247793, -0.3702526039741746], + [-3.0266023360874064, 5.242229020410042, -4.944632089877043], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedLJ, + {}, + { + "pair_params": {"epsilon": 0.77, "sigma": 1.13, "delta": 0.2}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [2.357494589449272, -17.09094290068084, -1.7264103937244915], + 1.1038726745818228, + [ + [0.6671787048251221, 0.11764160661479799, -1.3644825932799285], + [-2.537379279507933, -0.4474084272678541, 2.075227947212622], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedLJ, + {}, + { + "pair_params": {"epsilon": 0.77, "sigma": 1.13, "delta": 0.2}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.1905255888325765, 0.11, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-1.2494366602747592e23, -7.47190342399429e22, -2.2306585653721332e21], + 2.426065098991449e20, + [ + [3.484902767355334e19, -6.036028652496825e19, -3.4280264595619766e19], + [-2.8022146986448788e20, 4.853578231769241e20, -4.5780446794859164e20], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedLJ, + {}, + { + "pair_params": {"epsilon": 0.77, "sigma": 1.13, "delta": 0.2}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], 
[-0.03820259908672467, 0.21665770566268577, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [1.0353882496245466e22, -6.044231702301891e22, -7.9920849255704e20], + 1.0220332540099799e20, + [ + [6.177151028372888e19, 1.089198390213181e19, -1.2633219545706699e20], + [-2.349261886272707e20, -4.142382552999396e19, 1.92137374222588e20], + ], + ), + ( + hoomd.md.pair.aniso.PatchyMie, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 1.14, "n": 5.5, "m": 12.4}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.9526279441628825, 0.55, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-1.9185608907383984, -1.2521408460716963, -0.12476469967243964], + 0.06784706726530326, + [ + [0.009745840396784486, -0.016880290729687938, -0.009586780745736316], + [-0.07836642521662623, 0.13573463008274345, -0.12802908934379711], + ], + ), + ( + hoomd.md.pair.aniso.PatchyMie, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 1.14, "n": 5.5, "m": 12.4}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [0.15086870266201252, -0.9519631777531574, -0.04470115198150361], + 0.02858206853600857, + [ + [0.01727495199960542, 0.003046040127974089, -0.035329921552856386], + [-0.06569919714356336, -0.011584541067294738, 0.0537329248027101], + ], + ), + ( + hoomd.md.pair.aniso.PatchyMie, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 1.14, "n": 5.5, "m": 12.4}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.1905255888325765, 0.11, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-6.380949338454674e9, -5.220849829366294e9, -1.32729010034573e9], + 1.4435612149134248e8, + [ + [2.073592532529298e7, -3.5915676205361664e7, -2.0397498990352083e7], + [-1.6673783636332324e8, 2.887984041253813e8, -2.724035617261037e8], + ], + ), + ( + hoomd.md.pair.aniso.PatchyMie, + {}, + { + "pair_params": {"epsilon": 0.78, "sigma": 1.14, "n": 5.5, "m": 12.4}, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [4.199338440026387e8, -3.406506655464279e9, -4.755463416725215e8], + 6.081318949165459e7, + [ + [3.675538486963374e7, 6.480966038840114e6, -7.517038913444535e7], + [-1.3978616419268584e8, -2.464807227691403e7, 1.1432589400768268e8], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedMie, + {}, + { + "pair_params": { + "epsilon": 0.78, + "sigma": 2.14, + "n": 5.5, + "m": 12.4, + "delta": 0.1, + }, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.9526279441628825, 0.55, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-24438.06649914493, -19392.807472482873, -4563.172146474723], + 2481.453875798641, + [ + [356.44655547084204, -617.3835442584165, -350.6290720705163], + [-2866.191236031939, 4964.3888450159575, -4682.564667457716], + ], + ), + ( + 
hoomd.md.pair.aniso.PatchyExpandedMie, + {}, + { + "pair_params": { + "epsilon": 0.78, + "sigma": 2.14, + "n": 5.5, + "m": 12.4, + "delta": 0.1, + }, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [1654.9426299749784, -12909.362184624497, -1634.9099719140254], + 1045.366993825708, + [ + [631.817974180563, 111.40655574459755, -1292.164485550744], + [-2402.8971915802563, -423.69560674419665, 1965.2400595046643], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedMie, + {}, + { + "pair_params": { + "epsilon": 0.78, + "sigma": 2.14, + "n": 5.5, + "m": 12.4, + "delta": 0.1, + }, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [0.1905255888325765, 0.11, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-5.545584879562406e16, -3.897295731456281e16, -6.00724551378345e15], + 6.533482491809166e14, + [ + [9.384971254744408e13, -1.6255247040790753e14, -9.231801267129023e13], + [-7.546467190636238e14, 1.307086459183353e15, -1.2328842607140105e15], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedMie, + {}, + { + "pair_params": { + "epsilon": 0.78, + "sigma": 2.14, + "n": 5.5, + "m": 12.4, + "delta": 0.1, + }, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 1.0, 1.0], [1.0, 0.0, 0.0]], + [[1.0, 1.0, 1.0]], + [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [4.145472181514702e15, -2.814898155635645e16, -2.152297848725213e15], + 2.7523731221791462e14, + [ + [1.6635294786571403e14, 2.9332513029114016e13, -3.4021724623699825e14], + [-6.326648616732528e14, -1.1155588485918328e14, 5.1743301159862644e14], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedMie, + {}, + { + "pair_params": { + "epsilon": 0.78, + "sigma": 2.14, + "n": 5.5, + "m": 12.4, + "delta": 0.1, + }, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 0.0, 0.0]], + [[0.0, 1.0, 1.0], [0.0, 0.0, 1.0], [2.0, 1.0, 0.0]], + [[0, 0, 0], [0.9526279441628825, 0.55, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-138985.57323908593, -97123.42586287575, -21501.617361604447], + 13583.235236081233, + [ + [0.0, 0.0, -6448.0496895867645], + [-11825.889548882442, 20483.04154336218, -9632.374538723448], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedMie, + {}, + { + "pair_params": { + "epsilon": 0.78, + "sigma": 2.14, + "n": 5.5, + "m": 12.4, + "delta": 0.1, + }, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 0.0, 0.0]], + [[0.0, 1.0, 1.0], [0.0, 0.0, 1.0], [2.0, 1.0, 0.0]], + [[0, 0, 0], [-0.19101299543362338, 1.0832885283134288, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [7754.7557135632205, -24687.796151765757, -3306.6477337182796], + 2063.2418829657026, + [ + [0.0, 0.0, -4488.243718545056], + [-3582.053557110608, -631.6126884613307, 803.2957077724617], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedMie, + {}, + { + "pair_params": { + "epsilon": 0.78, + "sigma": 2.14, + "n": 5.5, + "m": 12.4, + "delta": 0.1, + }, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 0.0, 0.0]], + [[0.0, 1.0, 1.0], [0.0, 0.0, 1.0], [2.0, 
1.0, 0.0]], + [[0, 0, 0], [0.1905255888325765, 0.11, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [-3.104239064372109e17, -2.0144530397799e17, -2.830607531087193e16], + 3.5763642622009175e15, + [ + [0.0, 0.0, -1.6977232647401915e15], + [-3.113668284195913e15, 5.393031666143131e15, -2.5361321851305225e15], + ], + ), + ( + hoomd.md.pair.aniso.PatchyExpandedMie, + {}, + { + "pair_params": { + "epsilon": 0.78, + "sigma": 2.14, + "n": 5.5, + "m": 12.4, + "delta": 0.1, + }, + "envelope_params": {"alpha": 0.6981317007977318, "omega": 2}, + }, + [[1.0, 0.0, 0.0]], + [[0.0, 1.0, 1.0], [0.0, 0.0, 1.0], [2.0, 1.0, 0.0]], + [[0, 0, 0], [-0.03820259908672467, 0.21665770566268577, 0]], + [[1.0, 0.0, 0.0, 0.0], [0.9843766433940419, 0.0, 0.0, 0.17607561994858706]], + [1.409072519691006e16, -5.451579177557966e16, -4.353078105849488e15], + 5.4323615885809244e14, + [ + [0.0, 0.0, -1.1817210079977378e15], + [-9.431279149838202e14, -1.662988976709668e14, 2.115017528096385e14], + ], + ), +] + - def make_snapshot(position_i=numpy.array([0, 0, 0]), - position_j=numpy.array([2, 0, 0]), - orientation_i=(1, 0, 0, 0), - orientation_j=(1, 0, 0, 0), - dimensions=3, - L=20): +@pytest.fixture(scope="session") +def patchy_snapshot_factory(device): + def make_snapshot( + position_i=numpy.array([0, 0, 0]), + position_j=numpy.array([2, 0, 0]), + orientation_i=(1, 0, 0, 0), + orientation_j=(1, 0, 0, 0), + dimensions=3, + L=20, + ): snapshot = hoomd.Snapshot(device.communicator) if snapshot.communicator.rank == 0: N = 2 @@ -69,7 +729,7 @@ def make_snapshot(position_i=numpy.array([0, 0, 0]), snapshot.particles.N = N snapshot.particles.position[:] = [position_i, position_j] snapshot.particles.orientation[:] = [orientation_i, orientation_j] - snapshot.particles.types = ['A', 'B'] + snapshot.particles.types = ["A", "B"] snapshot.particles.typeid[:] = [0, 1] snapshot.particles.moment_inertia[:] = [(1, 1, 1)] * N snapshot.particles.angmom[:] = [(0, 0, 0, 0)] * N @@ -79,98 +739,136 @@ def make_snapshot(position_i=numpy.array([0, 0, 0]), @pytest.mark.parametrize( - 'patch_cls, patch_args, params, patches_A, patches_B, positions,' - 'orientations, force, energy, torques', patch_test_parameters) -def test_before_attaching(patch_cls, patch_args, params, patches_A, patches_B, - positions, orientations, force, energy, torques): - potential = patch_cls(nlist=hoomd.md.nlist.Cell(buffer=0.4), - default_r_cut=4, - **patch_args) + "patch_cls, patch_args, params, patches_A, patches_B, positions," + "orientations, force, energy, torques", + patch_test_parameters, +) +def test_before_attaching( + patch_cls, + patch_args, + params, + patches_A, + patches_B, + positions, + orientations, + force, + energy, + torques, +): + potential = patch_cls( + nlist=hoomd.md.nlist.Cell(buffer=0.4), default_r_cut=4, **patch_args + ) potential.params.default = params - potential.directors['A'] = patches_A - potential.directors['B'] = patches_B + potential.directors["A"] = patches_A + potential.directors["B"] = patches_B for key in params: - assert potential.params[('A', 'A')][key] == pytest.approx(params[key]) + assert potential.params[("A", "A")][key] == pytest.approx(params[key]) for i, patch in enumerate(patches_A): # only normalized after attaching - assert potential.directors['A'][i] == pytest.approx(patch) + assert potential.directors["A"][i] == pytest.approx(patch) for i, patch in enumerate(patches_B): - assert potential.directors['B'][i] == pytest.approx(patch) + assert potential.directors["B"][i] == 
pytest.approx(patch) @pytest.mark.parametrize( - 'patch_cls, patch_args, params, patches_A, patches_B, positions,' - 'orientations, force, energy, torques', patch_test_parameters) -def test_after_attaching(patchy_snapshot_factory, simulation_factory, patch_cls, - patch_args, params, patches_A, patches_B, positions, - orientations, force, energy, torques): + "patch_cls, patch_args, params, patches_A, patches_B, positions," + "orientations, force, energy, torques", + patch_test_parameters, +) +def test_after_attaching( + patchy_snapshot_factory, + simulation_factory, + patch_cls, + patch_args, + params, + patches_A, + patches_B, + positions, + orientations, + force, + energy, + torques, +): sim = simulation_factory(patchy_snapshot_factory()) - potential = patch_cls(nlist=hoomd.md.nlist.Cell(buffer=0.4), - default_r_cut=4, - **patch_args) + potential = patch_cls( + nlist=hoomd.md.nlist.Cell(buffer=0.4), default_r_cut=4, **patch_args + ) potential.params.default = params - potential.directors['A'] = patches_A - potential.directors['B'] = patches_B + potential.directors["A"] = patches_A + potential.directors["B"] = patches_B sim.operations.integrator = hoomd.md.Integrator( - dt=0.05, forces=[potential], integrate_rotational_dof=True) + dt=0.05, forces=[potential], integrate_rotational_dof=True + ) sim.run(0) for key in params: - assert potential.params[('A', 'A')][key] == pytest.approx(params[key]) + assert potential.params[("A", "A")][key] == pytest.approx(params[key]) for i, patch in enumerate(patches_A): # patch is returned normalized, so normalize it before checking nn = numpy.array(patch) patch = nn / numpy.linalg.norm(nn) - assert potential.directors['A'][i] == pytest.approx(patch) + assert potential.directors["A"][i] == pytest.approx(patch) for i, patch in enumerate(patches_B): # patch is returned normalized, so normalize it before checking nn = numpy.array(patch) patch = tuple(nn / numpy.linalg.norm(nn)) - assert potential.directors['B'][i] == pytest.approx(patch) + assert potential.directors["B"][i] == pytest.approx(patch) @pytest.mark.parametrize( - 'patch_cls, patch_args, params, patches_A, patches_B, positions,' - 'orientations, force, energy, torques', patch_test_parameters) -def test_forces_energies_torques(patchy_snapshot_factory, simulation_factory, - patch_cls, patch_args, params, patches_A, - patches_B, positions, orientations, force, - energy, torques): - - snapshot = patchy_snapshot_factory(position_i=positions[0], - position_j=positions[1], - orientation_i=orientations[0], - orientation_j=orientations[1]) + "patch_cls, patch_args, params, patches_A, patches_B, positions," + "orientations, force, energy, torques", + patch_test_parameters, +) +def test_forces_energies_torques( + patchy_snapshot_factory, + simulation_factory, + patch_cls, + patch_args, + params, + patches_A, + patches_B, + positions, + orientations, + force, + energy, + torques, +): + snapshot = patchy_snapshot_factory( + position_i=positions[0], + position_j=positions[1], + orientation_i=orientations[0], + orientation_j=orientations[1], + ) sim = simulation_factory(snapshot) - potential = patch_cls(nlist=hoomd.md.nlist.Cell(buffer=0.4), - default_r_cut=4, - **patch_args) + potential = patch_cls( + nlist=hoomd.md.nlist.Cell(buffer=0.4), default_r_cut=4, **patch_args + ) potential.params.default = params - potential.directors['A'] = patches_A - potential.directors['B'] = patches_B + potential.directors["A"] = patches_A + potential.directors["B"] = patches_B sim.operations.integrator = hoomd.md.Integrator( - 
dt=0.005, forces=[potential], integrate_rotational_dof=True) + dt=0.005, forces=[potential], integrate_rotational_dof=True + ) sim.run(0) sim_forces = potential.forces sim_energy = potential.energy sim_torques = potential.torques if sim.device.communicator.rank == 0: - sim_orientations = snapshot.particles.orientation - numpy.testing.assert_allclose(sim_orientations, orientations, - **TOLERANCES) + numpy.testing.assert_allclose(sim_orientations, orientations, **TOLERANCES) numpy.testing.assert_allclose(sim_energy, energy, **TOLERANCES) numpy.testing.assert_allclose(sim_forces[0], force, **TOLERANCES) - numpy.testing.assert_allclose(sim_forces[1], - [-force[0], -force[1], -force[2]], - **TOLERANCES) + numpy.testing.assert_allclose( + sim_forces[1], [-force[0], -force[1], -force[2]], **TOLERANCES + ) numpy.testing.assert_allclose(sim_torques[0], torques[0], **TOLERANCES) diff --git a/hoomd/md/pytest/test_potential.py b/hoomd/md/pytest/test_potential.py index a710d09b5c..5aa74a8899 100644 --- a/hoomd/md/pytest/test_potential.py +++ b/hoomd/md/pytest/test_potential.py @@ -10,8 +10,11 @@ import hoomd from hoomd import md from hoomd.logging import LoggerCategories -from hoomd.conftest import (logging_check, pickling_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + logging_check, + pickling_check, + autotuned_kernel_parameter_check, +) from hoomd.error import TypeConversionError import pytest import itertools @@ -37,13 +40,15 @@ def _equivalent_data_structures(reference, struct_2): return False return all( _equivalent_data_structures(reference[key], struct_2[key]) - for key in reference) + for key in reference + ) if isinstance(reference, Sequence): if len(reference) != len(struct_2): return False return all( _equivalent_data_structures(value_1, value_2) - for value_1, value_2 in zip(reference, struct_2)) + for value_1, value_2 in zip(reference, struct_2) + ) if isinstance(reference, Number): return math.isclose(reference, struct_2) @@ -60,72 +65,67 @@ def test_rcut(simulation_factory, two_particle_snapshot_factory): lj.r_cut[("A", "A")] = 0.0 lj.r_cut[("A", "A")] = 2.5 - lj.params[('A', 'A')] = {'sigma': 1, 'epsilon': 0.5} + lj.params[("A", "A")] = {"sigma": 1, "epsilon": 0.5} with pytest.raises(TypeConversionError): - lj.r_cut[('A', 'A')] = 'str' + lj.r_cut[("A", "A")] = "str" with pytest.raises(TypeConversionError): - lj.r_cut[('A', 'A')] = [1, 2, 3] + lj.r_cut[("A", "A")] = [1, 2, 3] - sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=.5)) + sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=0.5)) integrator = md.Integrator(dt=0.005) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim.operations.integrator = integrator - lj.r_cut[('A', 'A')] = 2.5 - assert _equivalent_data_structures({('A', 'A'): 2.5}, lj.r_cut.to_base()) + lj.r_cut[("A", "A")] = 2.5 + assert _equivalent_data_structures({("A", "A"): 2.5}, lj.r_cut.to_base()) sim.run(0) - assert _equivalent_data_structures({('A', 'A'): 2.5}, lj.r_cut.to_base()) + assert _equivalent_data_structures({("A", "A"): 2.5}, lj.r_cut.to_base()) def test_invalid_mode(): cell = md.nlist.Cell(buffer=0.4) - for invalid_mode in [1, 'str', [1, 2, 3]]: + for invalid_mode in [1, "str", [1, 2, 3]]: with pytest.raises(TypeConversionError): md.pair.LJ(nlist=cell, default_r_cut=2.5, mode=invalid_mode) -@pytest.mark.parametrize("mode", ['none', 'shift', 
'xplor']) +@pytest.mark.parametrize("mode", ["none", "shift", "xplor"]) def test_mode(simulation_factory, two_particle_snapshot_factory, mode): cell = md.nlist.Cell(buffer=0.4) lj = md.pair.LJ(nlist=cell, default_r_cut=2.5, mode=mode) - lj.params[('A', 'A')] = {'sigma': 1, 'epsilon': 0.5} - snap = two_particle_snapshot_factory(dimensions=3, d=.5) + lj.params[("A", "A")] = {"sigma": 1, "epsilon": 0.5} + snap = two_particle_snapshot_factory(dimensions=3, d=0.5) sim = simulation_factory(snap) integrator = md.Integrator(dt=0.005) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim.operations.integrator = integrator sim.run(1) def test_ron(simulation_factory, two_particle_snapshot_factory): - lj = md.pair.LJ(nlist=md.nlist.Cell(buffer=0.4), - mode='xplor', - default_r_cut=2.5) - lj.params[('A', 'A')] = {'sigma': 1, 'epsilon': 0.5} + lj = md.pair.LJ(nlist=md.nlist.Cell(buffer=0.4), mode="xplor", default_r_cut=2.5) + lj.params[("A", "A")] = {"sigma": 1, "epsilon": 0.5} with pytest.raises(TypeConversionError): - lj.r_on[('A', 'A')] = 'str' + lj.r_on[("A", "A")] = "str" with pytest.raises(TypeConversionError): - lj.r_on[('A', 'A')] = [1, 2, 3] + lj.r_on[("A", "A")] = [1, 2, 3] - sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=.5)) + sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=0.5)) integrator = md.Integrator(dt=0.005) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim.operations.integrator = integrator assert lj.r_on.to_base() == {} - lj.r_on[('A', 'A')] = 1.5 - assert _equivalent_data_structures({('A', 'A'): 1.5}, lj.r_on.to_base()) + lj.r_on[("A", "A")] = 1.5 + assert _equivalent_data_structures({("A", "A"): 1.5}, lj.r_on.to_base()) sim.run(0) - assert _equivalent_data_structures({('A', 'A'): 1.5}, lj.r_on.to_base()) + assert _equivalent_data_structures({("A", "A"): 1.5}, lj.r_on.to_base()) - lj.r_on[('A', 'A')] = 1.0 - assert _equivalent_data_structures({('A', 'A'): 1.0}, lj.r_on.to_base()) + lj.r_on[("A", "A")] = 1.0 + assert _equivalent_data_structures({("A", "A"): 1.0}, lj.r_on.to_base()) def _make_invalid_param_dict(valid_dict): @@ -141,7 +141,7 @@ def _make_invalid_param_dict(valid_dict): invalid_dicts[count][key] = [1, 2] invalid_count += 1 if not isinstance(valid_value, (str, np.ndarray)): - invalid_dicts[count + 1][key] = 'str' + invalid_dicts[count + 1][key] = "str" invalid_count += 1 if invalid_count == 2: break @@ -160,14 +160,15 @@ def _make_invalid_param_dict(valid_dict): paramtuple = namedtuple( - 'paramtuple', ['pair_potential', 'pair_potential_params', 'extra_args']) + "paramtuple", ["pair_potential", "pair_potential_params", "extra_args"] +) def _make_invalid_params(invalid_param_dicts, pot, extra_args): N = len(invalid_param_dicts) params = [] for i in range(len(invalid_param_dicts)): - params.append({('A', 'A'): invalid_param_dicts[i]}) + params.append({("A", "A"): invalid_param_dicts[i]}) return [paramtuple(pot, params[i], extra_args) for i in range(N)] @@ -175,213 +176,217 @@ def _invalid_params(): invalid_params_list = [] # Start with valid parameters to get the keys and placeholder values - lj_valid_dict = {'sigma': 1.0, 'epsilon': 1.0} + lj_valid_dict = {"sigma": 1.0, "epsilon": 1.0} lj_invalid_dicts = 
_make_invalid_param_dict(lj_valid_dict) - invalid_params_list.extend( - _make_invalid_params(lj_invalid_dicts, md.pair.LJ, {})) + invalid_params_list.extend(_make_invalid_params(lj_invalid_dicts, md.pair.LJ, {})) - gauss_valid_dict = {'sigma': 0.05, 'epsilon': 0.05} + gauss_valid_dict = {"sigma": 0.05, "epsilon": 0.05} gauss_invalid_dicts = _make_invalid_param_dict(gauss_valid_dict) - gauss_invalid_dicts.append({'sigma': 0, 'epsilon': 0.05}) + gauss_invalid_dicts.append({"sigma": 0, "epsilon": 0.05}) invalid_params_list.extend( - _make_invalid_params(gauss_invalid_dicts, md.pair.Gaussian, {})) + _make_invalid_params(gauss_invalid_dicts, md.pair.Gaussian, {}) + ) - expanded_gaussian_valid_dict = { - 'sigma': 0.05, - 'epsilon': 0.05, - 'delta': 0.1 - } + expanded_gaussian_valid_dict = {"sigma": 0.05, "epsilon": 0.05, "delta": 0.1} expanded_gaussian_invalid_dicts = _make_invalid_param_dict( - expanded_gaussian_valid_dict) - expanded_gaussian_invalid_dicts.append({ - 'sigma': 0, - 'epsilon': 0.05, - 'delta': 0.1 - }) + expanded_gaussian_valid_dict + ) + expanded_gaussian_invalid_dicts.append({"sigma": 0, "epsilon": 0.05, "delta": 0.1}) invalid_params_list.extend( - _make_invalid_params(expanded_gaussian_invalid_dicts, - md.pair.ExpandedGaussian, {})) + _make_invalid_params( + expanded_gaussian_invalid_dicts, md.pair.ExpandedGaussian, {} + ) + ) yukawa_valid_dict = {"epsilon": 0.0005, "kappa": 1} yukawa_invalid_dicts = _make_invalid_param_dict(yukawa_valid_dict) invalid_params_list.extend( - _make_invalid_params(yukawa_invalid_dicts, md.pair.Yukawa, {})) + _make_invalid_params(yukawa_invalid_dicts, md.pair.Yukawa, {}) + ) ewald_valid_dict = {"alpha": 0.05, "kappa": 1} ewald_invalid_dicts = _make_invalid_param_dict(ewald_valid_dict) invalid_params_list.extend( - _make_invalid_params(ewald_invalid_dicts, md.pair.Ewald, {})) + _make_invalid_params(ewald_invalid_dicts, md.pair.Ewald, {}) + ) morse_valid_dict = {"D0": 0.05, "alpha": 1, "r0": 0} morse_invalid_dicts = _make_invalid_param_dict(morse_valid_dict) invalid_params_list.extend( - _make_invalid_params(morse_invalid_dicts, md.pair.Morse, {})) + _make_invalid_params(morse_invalid_dicts, md.pair.Morse, {}) + ) dpd_conservative_valid_dict = {"A": 0.05} dpd_conservative_invalid_dicts = _make_invalid_param_dict( - dpd_conservative_valid_dict) + dpd_conservative_valid_dict + ) invalid_params_list.extend( - _make_invalid_params(dpd_conservative_invalid_dicts, - md.pair.DPDConservative, {})) + _make_invalid_params( + dpd_conservative_invalid_dicts, md.pair.DPDConservative, {} + ) + ) force_shifted_LJ_valid_dict = {"epsilon": 0.0005, "sigma": 1} force_shifted_LJ_invalid_dicts = _make_invalid_param_dict( - force_shifted_LJ_valid_dict) + force_shifted_LJ_valid_dict + ) invalid_params_list.extend( - _make_invalid_params(force_shifted_LJ_invalid_dicts, - md.pair.ForceShiftedLJ, {})) + _make_invalid_params(force_shifted_LJ_invalid_dicts, md.pair.ForceShiftedLJ, {}) + ) moliere_valid_dict = {"qi": 15, "qj": 12, "aF": 1} moliere_invalid_dicts = _make_invalid_param_dict(moliere_valid_dict) invalid_params_list.extend( - _make_invalid_params(moliere_invalid_dicts, md.pair.Moliere, {})) + _make_invalid_params(moliere_invalid_dicts, md.pair.Moliere, {}) + ) zbl_valid_dict = {"qi": 10, "qj": 8, "aF": 0.5} zbl_invalid_dicts = _make_invalid_param_dict(zbl_valid_dict) - invalid_params_list.extend( - _make_invalid_params(zbl_invalid_dicts, md.pair.ZBL, {})) + invalid_params_list.extend(_make_invalid_params(zbl_invalid_dicts, md.pair.ZBL, {})) mie_valid_dict = 
{"epsilon": 0.05, "sigma": 0.5, "n": 12, "m": 6} mie_invalid_dicts = _make_invalid_param_dict(mie_valid_dict) - invalid_params_list.extend( - _make_invalid_params(mie_invalid_dicts, md.pair.Mie, {})) + invalid_params_list.extend(_make_invalid_params(mie_invalid_dicts, md.pair.Mie, {})) rf_valid_dict = {"epsilon": 0.05, "eps_rf": 0.5, "use_charge": False} rf_invalid_dicts = _make_invalid_param_dict(rf_valid_dict) invalid_params_list.extend( - _make_invalid_params(rf_invalid_dicts, md.pair.ReactionField, {})) + _make_invalid_params(rf_invalid_dicts, md.pair.ReactionField, {}) + ) buckingham_valid_dict = {"A": 0.05, "rho": 0.5, "C": 0.05} buckingham_invalid_dicts = _make_invalid_param_dict(buckingham_valid_dict) invalid_params_list.extend( - _make_invalid_params(buckingham_invalid_dicts, md.pair.Buckingham, {})) + _make_invalid_params(buckingham_invalid_dicts, md.pair.Buckingham, {}) + ) lj1208_valid_dict = {"sigma": 0.5, "epsilon": 0.0005} lj1208_invalid_dicts = _make_invalid_param_dict(lj1208_valid_dict) invalid_params_list.extend( - _make_invalid_params(lj1208_invalid_dicts, md.pair.LJ1208, {})) + _make_invalid_params(lj1208_invalid_dicts, md.pair.LJ1208, {}) + ) - lj0804_valid_dict = {'sigma': 1.0, 'epsilon': 1.0} + lj0804_valid_dict = {"sigma": 1.0, "epsilon": 1.0} lj0804_invalid_dicts = _make_invalid_param_dict(lj0804_valid_dict) invalid_params_list.extend( - _make_invalid_params(lj0804_invalid_dicts, md.pair.LJ0804, {})) + _make_invalid_params(lj0804_invalid_dicts, md.pair.LJ0804, {}) + ) fourier_valid_dict = {"a": [0.5, 1.0, 1.5], "b": [0.25, 0.034, 0.76]} fourier_invalid_dicts = _make_invalid_param_dict(fourier_valid_dict) invalid_params_list.extend( - _make_invalid_params(fourier_invalid_dicts, md.pair.Fourier, {})) + _make_invalid_params(fourier_invalid_dicts, md.pair.Fourier, {}) + ) expanded_lj_valid_dict = {"sigma": 0.5, "epsilon": 0.0005, "delta": 0.25} expanded_lj_invalid_dicts = _make_invalid_param_dict(expanded_lj_valid_dict) invalid_params_list.extend( - _make_invalid_params(expanded_lj_invalid_dicts, md.pair.ExpandedLJ, {})) + _make_invalid_params(expanded_lj_invalid_dicts, md.pair.ExpandedLJ, {}) + ) expanded_mie_valid_dict = { "epsilon": 0.05, "sigma": 0.5, "n": 12, "m": 6, - "delta": 0.25 + "delta": 0.25, } - expanded_mie_invalid_dicts = _make_invalid_param_dict( - expanded_mie_valid_dict) + expanded_mie_invalid_dicts = _make_invalid_param_dict(expanded_mie_valid_dict) invalid_params_list.extend( - _make_invalid_params(expanded_mie_invalid_dicts, md.pair.ExpandedMie, - {})) + _make_invalid_params(expanded_mie_invalid_dicts, md.pair.ExpandedMie, {}) + ) dpd_valid_dict = {"A": 0.5, "gamma": 0.0005} dpd_invalid_dicts = _make_invalid_param_dict(dpd_valid_dict) invalid_params_list.extend( - _make_invalid_params(dpd_invalid_dicts, md.pair.DPD, {'kT': 2})) + _make_invalid_params(dpd_invalid_dicts, md.pair.DPD, {"kT": 2}) + ) - dpdlj_valid_dict = {'sigma': 0.5, 'epsilon': 0.0005, 'gamma': 0.034} + dpdlj_valid_dict = {"sigma": 0.5, "epsilon": 0.0005, "gamma": 0.034} dpdlj_invalid_dicts = _make_invalid_param_dict(dpdlj_valid_dict) invalid_params_list.extend( - _make_invalid_params(dpdlj_invalid_dicts, md.pair.DPDLJ, {'kT': 1})) + _make_invalid_params(dpdlj_invalid_dicts, md.pair.DPDLJ, {"kT": 1}) + ) - dlvo_valid_dict = {'kappa': 1.0, 'Z': 0.1, 'A': 0.1, 'a1': 0.1, 'a2': 0.25} + dlvo_valid_dict = {"kappa": 1.0, "Z": 0.1, "A": 0.1, "a1": 0.1, "a2": 0.25} dlvo_invalid_dicts = _make_invalid_param_dict(dlvo_valid_dict) invalid_params_list.extend( - 
_make_invalid_params(dlvo_invalid_dicts, md.pair.DLVO, {})) - - opp_valid_dict = { - 'C1': 1.0, - 'C2': 0.1, - 'eta1': 15, - 'eta2': 3, - 'k': 0.8, - 'phi': 0.1 - } + _make_invalid_params(dlvo_invalid_dicts, md.pair.DLVO, {}) + ) + + opp_valid_dict = {"C1": 1.0, "C2": 0.1, "eta1": 15, "eta2": 3, "k": 0.8, "phi": 0.1} opp_invalid_dicts = _make_invalid_param_dict(opp_valid_dict) invalid_params_list.extend( - _make_invalid_params(opp_invalid_dicts, hoomd.md.pair.OPP, {})) + _make_invalid_params(opp_invalid_dicts, hoomd.md.pair.OPP, {}) + ) - twf_valid_dict = {'sigma': 1.0, 'epsilon': 1.0, 'alpha': 15} + twf_valid_dict = {"sigma": 1.0, "epsilon": 1.0, "alpha": 15} twf_invalid_dicts = _make_invalid_param_dict(twf_valid_dict) invalid_params_list.extend( - _make_invalid_params(twf_invalid_dicts, hoomd.md.pair.TWF, {})) + _make_invalid_params(twf_invalid_dicts, hoomd.md.pair.TWF, {}) + ) - ljgauss_valid_dict = {'r0': 1.8, 'epsilon': 2.0, 'sigma': 0.02} + ljgauss_valid_dict = {"r0": 1.8, "epsilon": 2.0, "sigma": 0.02} ljgauss_invalid_dicts = _make_invalid_param_dict(ljgauss_valid_dict) - ljgauss_invalid_dicts.append({'r0': 1.8, 'epsilon': 0.2, 'sigma': 0}) + ljgauss_invalid_dicts.append({"r0": 1.8, "epsilon": 0.2, "sigma": 0}) invalid_params_list.extend( - _make_invalid_params(ljgauss_invalid_dicts, hoomd.md.pair.LJGauss, {})) + _make_invalid_params(ljgauss_invalid_dicts, hoomd.md.pair.LJGauss, {}) + ) table_valid_dict = { - 'V': np.arange(0, 20, 1) / 10, - 'F': np.asarray(20 * [-1.9 / 2.5]), - 'r_min': 0.0 + "V": np.arange(0, 20, 1) / 10, + "F": np.asarray(20 * [-1.9 / 2.5]), + "r_min": 0.0, } table_invalid_dicts = _make_invalid_param_dict(table_valid_dict) invalid_params_list.extend( - _make_invalid_params(table_invalid_dicts, hoomd.md.pair.Table, {})) + _make_invalid_params(table_invalid_dicts, hoomd.md.pair.Table, {}) + ) tersoff_valid_dict = { - 'cutoff_thickness': 1.0, - 'magnitudes': (5.0, 2.0), - 'exp_factors': (2.0, 2.0), - 'lambda3': 2.0, - 'dimer_r': 2.5, - 'n': 2.0, - 'gamma': 2.0, - 'c': 2.0, - 'd': 2.0, - 'm': 2.0, - 'alpha': 2.0, + "cutoff_thickness": 1.0, + "magnitudes": (5.0, 2.0), + "exp_factors": (2.0, 2.0), + "lambda3": 2.0, + "dimer_r": 2.5, + "n": 2.0, + "gamma": 2.0, + "c": 2.0, + "d": 2.0, + "m": 2.0, + "alpha": 2.0, } tersoff_invalid_dicts = _make_invalid_param_dict(tersoff_valid_dict) invalid_params_list.extend( - _make_invalid_params(tersoff_invalid_dicts, hoomd.md.many_body.Tersoff, - {})) + _make_invalid_params(tersoff_invalid_dicts, hoomd.md.many_body.Tersoff, {}) + ) - square_density_valid_dict = {'A': 5.0, 'B': 2.0} + square_density_valid_dict = {"A": 5.0, "B": 2.0} sq_dens_invalid_dicts = _make_invalid_param_dict(square_density_valid_dict) invalid_params_list.extend( - _make_invalid_params(sq_dens_invalid_dicts, - hoomd.md.many_body.SquareDensity, {})) - - revcross_valid_dict = { - 'sigma': 5.0, - 'n': 2.0, - 'epsilon': 2.0, - 'lambda3': 2.0 - } + _make_invalid_params( + sq_dens_invalid_dicts, hoomd.md.many_body.SquareDensity, {} + ) + ) + + revcross_valid_dict = {"sigma": 5.0, "n": 2.0, "epsilon": 2.0, "lambda3": 2.0} revcross_invalid_dicts = _make_invalid_param_dict(revcross_valid_dict) invalid_params_list.extend( - _make_invalid_params(revcross_invalid_dicts, - hoomd.md.many_body.RevCross, {})) + _make_invalid_params(revcross_invalid_dicts, hoomd.md.many_body.RevCross, {}) + ) return invalid_params_list -@pytest.fixture(scope="function", - params=_invalid_params(), - ids=(lambda x: x[0].__name__)) +@pytest.fixture( + scope="function", params=_invalid_params(), 
ids=(lambda x: x[0].__name__) +) def invalid_params(request): return deepcopy(request.param) def test_invalid_params(invalid_params): - pot = invalid_params.pair_potential(**invalid_params.extra_args, - nlist=md.nlist.Cell(buffer=0.4)) + pot = invalid_params.pair_potential( + **invalid_params.extra_args, nlist=md.nlist.Cell(buffer=0.4) + ) for pair in invalid_params.pair_potential_params: if isinstance(pair, tuple): with pytest.raises(TypeConversionError): @@ -390,7 +395,7 @@ def test_invalid_params(invalid_params): def test_invalid_pair_key(): pot = md.pair.LJ(nlist=md.nlist.Cell(buffer=0.4)) - for invalid_key in [3, [1, 2], 'str']: + for invalid_key in [3, [1, 2], "str"]: with pytest.raises(KeyError): pot.r_cut[invalid_key] = 2.5 @@ -403,368 +408,387 @@ def _make_valid_param_dicts(arg_dict): return [dict(zip(arg_dict, val)) for val in zip(*arg_dict.values())] -def _valid_params(particle_types=['A', 'B']): +def _valid_params(particle_types=["A", "B"]): valid_params_list = [] combos = list(itertools.combinations_with_replacement(particle_types, 2)) - lj_arg_dict = {'sigma': [0.5, 1.0, 1.5], 'epsilon': [0.0005, 0.001, 0.0015]} + lj_arg_dict = {"sigma": [0.5, 1.0, 1.5], "epsilon": [0.0005, 0.001, 0.0015]} lj_valid_param_dicts = _make_valid_param_dicts(lj_arg_dict) valid_params_list.append( - paramtuple(md.pair.LJ, dict(zip(combos, lj_valid_param_dicts)), {})) + paramtuple(md.pair.LJ, dict(zip(combos, lj_valid_param_dicts)), {}) + ) - gauss_arg_dict = {'epsilon': [0.025, 0.05, 0.075], 'sigma': [0.5, 1.0, 1.5]} + gauss_arg_dict = {"epsilon": [0.025, 0.05, 0.075], "sigma": [0.5, 1.0, 1.5]} gauss_valid_param_dicts = _make_valid_param_dicts(gauss_arg_dict) valid_params_list.append( - paramtuple(md.pair.Gaussian, dict(zip(combos, gauss_valid_param_dicts)), - {})) + paramtuple(md.pair.Gaussian, dict(zip(combos, gauss_valid_param_dicts)), {}) + ) expanded_gaussian_arg_dict = { - 'epsilon': [0.025, 0.05, 0.075], - 'sigma': [0.5, 1.0, 1.5], - 'delta': [0.1, 0.2, 0.3] + "epsilon": [0.025, 0.05, 0.075], + "sigma": [0.5, 1.0, 1.5], + "delta": [0.1, 0.2, 0.3], } expanded_gaussian_valid_param_dicts = _make_valid_param_dicts( - expanded_gaussian_arg_dict) + expanded_gaussian_arg_dict + ) valid_params_list.append( - paramtuple(md.pair.ExpandedGaussian, - dict(zip(combos, expanded_gaussian_valid_param_dicts)), {})) - - yukawa_arg_dict = { - 'epsilon': [0.00025, 0.0005, 0.00075], - 'kappa': [0.5, 1.0, 1.5] - } + paramtuple( + md.pair.ExpandedGaussian, + dict(zip(combos, expanded_gaussian_valid_param_dicts)), + {}, + ) + ) + + yukawa_arg_dict = {"epsilon": [0.00025, 0.0005, 0.00075], "kappa": [0.5, 1.0, 1.5]} yukawa_valid_param_dicts = _make_valid_param_dicts(yukawa_arg_dict) valid_params_list.append( - paramtuple(md.pair.Yukawa, dict(zip(combos, yukawa_valid_param_dicts)), - {})) + paramtuple(md.pair.Yukawa, dict(zip(combos, yukawa_valid_param_dicts)), {}) + ) ewald_arg_dict = {"alpha": [0.025, 0.05, 0.075], "kappa": [0.5, 1.0, 1.5]} ewald_valid_param_dicts = _make_valid_param_dicts(ewald_arg_dict) valid_params_list.append( - paramtuple(md.pair.Ewald, dict(zip(combos, ewald_valid_param_dicts)), - {})) + paramtuple(md.pair.Ewald, dict(zip(combos, ewald_valid_param_dicts)), {}) + ) morse_arg_dict = { "D0": [0.025, 0.05, 0.075], "alpha": [0.5, 1.0, 1.5], - "r0": [0, 0.05, 0.1] + "r0": [0, 0.05, 0.1], } morse_valid_param_dicts = _make_valid_param_dicts(morse_arg_dict) valid_params_list.append( - paramtuple(md.pair.Morse, dict(zip(combos, morse_valid_param_dicts)), - {})) + paramtuple(md.pair.Morse, 
dict(zip(combos, morse_valid_param_dicts)), {}) + ) dpd_conservative_arg_dict = {"A": [0.025, 0.05, 0.075]} dpd_conservative_valid_param_dicts = _make_valid_param_dicts( - dpd_conservative_arg_dict) + dpd_conservative_arg_dict + ) valid_params_list.append( - paramtuple(md.pair.DPDConservative, - dict(zip(combos, dpd_conservative_valid_param_dicts)), {})) + paramtuple( + md.pair.DPDConservative, + dict(zip(combos, dpd_conservative_valid_param_dicts)), + {}, + ) + ) force_shifted_LJ_arg_dict = { - 'sigma': [0.5, 1.0, 1.5], - 'epsilon': [0.0005, 0.001, 0.0015] + "sigma": [0.5, 1.0, 1.5], + "epsilon": [0.0005, 0.001, 0.0015], } force_shifted_LJ_valid_param_dicts = _make_valid_param_dicts( - force_shifted_LJ_arg_dict) + force_shifted_LJ_arg_dict + ) valid_params_list.append( - paramtuple(md.pair.ForceShiftedLJ, - dict(zip(combos, force_shifted_LJ_valid_param_dicts)), {})) + paramtuple( + md.pair.ForceShiftedLJ, + dict(zip(combos, force_shifted_LJ_valid_param_dicts)), + {}, + ) + ) moliere_arg_dict = { - 'qi': [2.5, 7.5, 15], - 'qj': [2, 6, 12], - 'aF': [.134197, .234463, .319536] + "qi": [2.5, 7.5, 15], + "qj": [2, 6, 12], + "aF": [0.134197, 0.234463, 0.319536], } moliere_valid_param_dicts = _make_valid_param_dicts(moliere_arg_dict) valid_params_list.append( - paramtuple(md.pair.Moliere, dict(zip(combos, - moliere_valid_param_dicts)), {})) + paramtuple(md.pair.Moliere, dict(zip(combos, moliere_valid_param_dicts)), {}) + ) zbl_arg_dict = { - 'qi': [2.5, 7.5, 15], - 'qj': [2, 6, 12], - 'aF': [.133669, .243535, .341914] + "qi": [2.5, 7.5, 15], + "qj": [2, 6, 12], + "aF": [0.133669, 0.243535, 0.341914], } zbl_valid_param_dicts = _make_valid_param_dicts(zbl_arg_dict) valid_params_list.append( - paramtuple(md.pair.ZBL, dict(zip(combos, zbl_valid_param_dicts)), {})) + paramtuple(md.pair.ZBL, dict(zip(combos, zbl_valid_param_dicts)), {}) + ) mie_arg_dict = { - 'epsilon': [.05, .025, .010], - 'sigma': [.5, 1, 1.5], - 'n': [12, 14, 16], - 'm': [6, 8, 10] + "epsilon": [0.05, 0.025, 0.010], + "sigma": [0.5, 1, 1.5], + "n": [12, 14, 16], + "m": [6, 8, 10], } mie_valid_param_dicts = _make_valid_param_dicts(mie_arg_dict) valid_params_list.append( - paramtuple(md.pair.Mie, dict(zip(combos, mie_valid_param_dicts)), {})) + paramtuple(md.pair.Mie, dict(zip(combos, mie_valid_param_dicts)), {}) + ) reactfield_arg_dict = { - 'epsilon': [.05, .025, .010], - 'eps_rf': [.5, 1, 1.5], - 'use_charge': [False, True, False] + "epsilon": [0.05, 0.025, 0.010], + "eps_rf": [0.5, 1, 1.5], + "use_charge": [False, True, False], } reactfield_valid_param_dicts = _make_valid_param_dicts(reactfield_arg_dict) valid_params_list.append( - paramtuple(md.pair.ReactionField, - dict(zip(combos, reactfield_valid_param_dicts)), {})) + paramtuple( + md.pair.ReactionField, dict(zip(combos, reactfield_valid_param_dicts)), {} + ) + ) buckingham_arg_dict = { - 'A': [.05, .025, .010], - 'rho': [.5, 1, 1.5], - 'C': [.05, .025, .01] + "A": [0.05, 0.025, 0.010], + "rho": [0.5, 1, 1.5], + "C": [0.05, 0.025, 0.01], } buckingham_valid_param_dicts = _make_valid_param_dicts(buckingham_arg_dict) valid_params_list.append( - paramtuple(md.pair.Buckingham, - dict(zip(combos, buckingham_valid_param_dicts)), {})) + paramtuple( + md.pair.Buckingham, dict(zip(combos, buckingham_valid_param_dicts)), {} + ) + ) - lj1208_arg_dict = { - 'sigma': [0.5, 1.0, 1.5], - 'epsilon': [0.0005, 0.001, 0.0015] - } + lj1208_arg_dict = {"sigma": [0.5, 1.0, 1.5], "epsilon": [0.0005, 0.001, 0.0015]} lj1208_valid_param_dicts = _make_valid_param_dicts(lj1208_arg_dict) 
valid_params_list.append( - paramtuple(md.pair.LJ1208, dict(zip(combos, lj1208_valid_param_dicts)), - {})) + paramtuple(md.pair.LJ1208, dict(zip(combos, lj1208_valid_param_dicts)), {}) + ) fourier_arg_dict = { - 'a': [[0.5, 1.0, 1.5], [.05, .1, .15], [.005, .01, .015]], - 'b': [[0.25, 0.034, 0.76], [0.36, 0.12, 0.65], [0.78, 0.04, 0.98]] + "a": [[0.5, 1.0, 1.5], [0.05, 0.1, 0.15], [0.005, 0.01, 0.015]], + "b": [[0.25, 0.034, 0.76], [0.36, 0.12, 0.65], [0.78, 0.04, 0.98]], } fourier_valid_param_dicts = _make_valid_param_dicts(fourier_arg_dict) valid_params_list.append( - paramtuple(md.pair.Fourier, dict(zip(combos, - fourier_valid_param_dicts)), {})) + paramtuple(md.pair.Fourier, dict(zip(combos, fourier_valid_param_dicts)), {}) + ) expanded_lj_arg_dict = { - 'sigma': [0.5, 1.0, 1.5], - 'epsilon': [0.0005, 0.001, 0.0015], - 'delta': [1.0, 0.5, 0.0] + "sigma": [0.5, 1.0, 1.5], + "epsilon": [0.0005, 0.001, 0.0015], + "delta": [1.0, 0.5, 0.0], } - expanded_lj_valid_param_dicts = _make_valid_param_dicts( - expanded_lj_arg_dict) + expanded_lj_valid_param_dicts = _make_valid_param_dicts(expanded_lj_arg_dict) valid_params_list.append( - paramtuple(md.pair.ExpandedLJ, - dict(zip(combos, expanded_lj_valid_param_dicts)), {})) + paramtuple( + md.pair.ExpandedLJ, dict(zip(combos, expanded_lj_valid_param_dicts)), {} + ) + ) - dpd_arg_dict = {'A': [0.5, 1.0, 1.5], 'gamma': [0.0005, 0.001, 0.0015]} + dpd_arg_dict = {"A": [0.5, 1.0, 1.5], "gamma": [0.0005, 0.001, 0.0015]} dpd_valid_param_dicts = _make_valid_param_dicts(dpd_arg_dict) valid_params_list.append( - paramtuple(md.pair.DPD, dict(zip(combos, dpd_valid_param_dicts)), - {"kT": 2})) + paramtuple(md.pair.DPD, dict(zip(combos, dpd_valid_param_dicts)), {"kT": 2}) + ) dpdlj_arg_dict = { - 'sigma': [0.5, 1.0, 1.5], - 'epsilon': [0.0005, 0.001, 0.0015], - 'gamma': [0.034, 33.2, 1.2] + "sigma": [0.5, 1.0, 1.5], + "epsilon": [0.0005, 0.001, 0.0015], + "gamma": [0.034, 33.2, 1.2], } dpdlj_valid_param_dicts = _make_valid_param_dicts(dpdlj_arg_dict) valid_params_list.append( - paramtuple(md.pair.DPDLJ, dict(zip(combos, dpdlj_valid_param_dicts)), - {"kT": 1})) + paramtuple(md.pair.DPDLJ, dict(zip(combos, dpdlj_valid_param_dicts)), {"kT": 1}) + ) dlvo_arg_dict = { - 'kappa': [1.0, 2.0, 5.0], - 'Z': [0.1, 0.5, 2.0], - 'A': [0.1, 0.5, 2.0], - 'a1': [0.1] * 3, - 'a2': [0.25] * 3, + "kappa": [1.0, 2.0, 5.0], + "Z": [0.1, 0.5, 2.0], + "A": [0.1, 0.5, 2.0], + "a1": [0.1] * 3, + "a2": [0.25] * 3, } dlvo_valid_param_dicts = _make_valid_param_dicts(dlvo_arg_dict) valid_params_list.append( - paramtuple(md.pair.DLVO, dict(zip(combos, dlvo_valid_param_dicts)), {})) + paramtuple(md.pair.DLVO, dict(zip(combos, dlvo_valid_param_dicts)), {}) + ) tersoff_arg_dict = { - 'cutoff_thickness': [0.1, 0.5, 1.0], - 'magnitudes': [(0.02, 0.01), (0.0, 0.005), (0.002, 0.003)], - 'exp_factors': [(0.1, 0.1), (0.05, 0.05), (-0.02, 0.04)], - 'lambda3': [0.0, 0.5, 0.3], - 'dimer_r': [1.0, 2.0, 1.2], - 'n': [0.3, 0.5, 0.7], - 'gamma': [0.1, 0.5, 0.4], - 'c': [0.1, 0.5, 2.0], - 'd': [0.1, 0.5, 2.0], - 'm': [0.1, 0.5, 2.0], - 'alpha': [0.1, 0.5, 2.0], + "cutoff_thickness": [0.1, 0.5, 1.0], + "magnitudes": [(0.02, 0.01), (0.0, 0.005), (0.002, 0.003)], + "exp_factors": [(0.1, 0.1), (0.05, 0.05), (-0.02, 0.04)], + "lambda3": [0.0, 0.5, 0.3], + "dimer_r": [1.0, 2.0, 1.2], + "n": [0.3, 0.5, 0.7], + "gamma": [0.1, 0.5, 0.4], + "c": [0.1, 0.5, 2.0], + "d": [0.1, 0.5, 2.0], + "m": [0.1, 0.5, 2.0], + "alpha": [0.1, 0.5, 2.0], } tersoff_valid_param_dicts = _make_valid_param_dicts(tersoff_arg_dict) 
valid_params_list.append( - paramtuple(hoomd.md.many_body.Tersoff, - dict(zip(combos, tersoff_valid_param_dicts)), {})) + paramtuple( + hoomd.md.many_body.Tersoff, dict(zip(combos, tersoff_valid_param_dicts)), {} + ) + ) - square_density_arg_dict = {'A': [1.0, 2.0, 5.0], 'B': [0.1, 0.5, 2.0]} - square_density_valid_param_dicts = _make_valid_param_dicts( - square_density_arg_dict) + square_density_arg_dict = {"A": [1.0, 2.0, 5.0], "B": [0.1, 0.5, 2.0]} + square_density_valid_param_dicts = _make_valid_param_dicts(square_density_arg_dict) valid_params_list.append( - paramtuple(hoomd.md.many_body.SquareDensity, - dict(zip(combos, square_density_valid_param_dicts)), {})) + paramtuple( + hoomd.md.many_body.SquareDensity, + dict(zip(combos, square_density_valid_param_dicts)), + {}, + ) + ) revcross_arg_dict = { - 'sigma': [1.0, 2.0, 5.0], - 'n': [0.1, 0.5, 2.0], - 'epsilon': [0.1, 0.5, 2.0], - 'lambda3': [0.1, 0.5, 2.0], + "sigma": [1.0, 2.0, 5.0], + "n": [0.1, 0.5, 2.0], + "epsilon": [0.1, 0.5, 2.0], + "lambda3": [0.1, 0.5, 2.0], } revcross_valid_param_dicts = _make_valid_param_dicts(revcross_arg_dict) valid_params_list.append( - paramtuple(hoomd.md.many_body.RevCross, - dict(zip(combos, revcross_valid_param_dicts)), {})) + paramtuple( + hoomd.md.many_body.RevCross, + dict(zip(combos, revcross_valid_param_dicts)), + {}, + ) + ) opp_arg_dict = { - 'C1': [1.0, 2.0, 5.0], - 'C2': [0.1, 0.5, 2.0], - 'eta1': [15.0, 12.0, 8.0], - 'eta2': [3.0, 2.0, 1.5], - 'k': [1.0, 2.0, 3.0], - 'phi': [0.0, 0.5, np.pi / 2] + "C1": [1.0, 2.0, 5.0], + "C2": [0.1, 0.5, 2.0], + "eta1": [15.0, 12.0, 8.0], + "eta2": [3.0, 2.0, 1.5], + "k": [1.0, 2.0, 3.0], + "phi": [0.0, 0.5, np.pi / 2], } opp_valid_param_dicts = _make_valid_param_dicts(opp_arg_dict) valid_params_list.append( - paramtuple(hoomd.md.pair.OPP, dict(zip(combos, opp_valid_param_dicts)), - {})) + paramtuple(hoomd.md.pair.OPP, dict(zip(combos, opp_valid_param_dicts)), {}) + ) expanded_mie_arg_dict = { - 'epsilon': [.05, .025, .010], - 'sigma': [.5, 1, 1.5], - 'n': [12, 14, 16], - 'm': [6, 8, 10], - 'delta': [.1, .2, .3] + "epsilon": [0.05, 0.025, 0.010], + "sigma": [0.5, 1, 1.5], + "n": [12, 14, 16], + "m": [6, 8, 10], + "delta": [0.1, 0.2, 0.3], } - expanded_mie_valid_param_dicts = _make_valid_param_dicts( - expanded_mie_arg_dict) + expanded_mie_valid_param_dicts = _make_valid_param_dicts(expanded_mie_arg_dict) valid_params_list.append( - paramtuple(hoomd.md.pair.ExpandedMie, - dict(zip(combos, expanded_mie_valid_param_dicts)), {})) + paramtuple( + hoomd.md.pair.ExpandedMie, + dict(zip(combos, expanded_mie_valid_param_dicts)), + {}, + ) + ) twf_arg_dict = { - 'sigma': [0.1, 0.2, 0.5], - 'epsilon': [0.1, 0.5, 2.0], - 'alpha': [15.0, 12.0, 8.0] + "sigma": [0.1, 0.2, 0.5], + "epsilon": [0.1, 0.5, 2.0], + "alpha": [15.0, 12.0, 8.0], } twf_valid_param_dicts = _make_valid_param_dicts(twf_arg_dict) valid_params_list.append( - paramtuple(hoomd.md.pair.TWF, dict(zip(combos, twf_valid_param_dicts)), - {})) + paramtuple(hoomd.md.pair.TWF, dict(zip(combos, twf_valid_param_dicts)), {}) + ) - ljgauss_arg_dict = {'r0': [1.8], 'epsilon': [2.0], 'sigma': [0.02]} + ljgauss_arg_dict = {"r0": [1.8], "epsilon": [2.0], "sigma": [0.02]} ljgauss_valid_param_dicts = _make_valid_param_dicts(ljgauss_arg_dict) valid_params_list.append( - paramtuple(hoomd.md.pair.LJGauss, - dict(zip(combos, ljgauss_valid_param_dicts)), {})) - - rs = [ - np.arange(0, 2.6, 0.1), - np.linspace(0.5, 2.5, 25), - np.arange(0.8, 2.6, 0.1) - ] + paramtuple( + hoomd.md.pair.LJGauss, dict(zip(combos, 
ljgauss_valid_param_dicts)), {} + ) + ) + + rs = [np.arange(0, 2.6, 0.1), np.linspace(0.5, 2.5, 25), np.arange(0.8, 2.6, 0.1)] Vs = [r[::-1] * 5 for r in rs] Fs = [-1 * np.diff(V) / np.diff(r) for V, r in zip(Vs, rs)] - table_arg_dict = { - 'U': [V[:-1] for V in Vs], - 'F': Fs, - 'r_min': [r[0] for r in rs] - } + table_arg_dict = {"U": [V[:-1] for V in Vs], "F": Fs, "r_min": [r[0] for r in rs]} table_valid_param_dicts = _make_valid_param_dicts(table_arg_dict) valid_params_list.append( - paramtuple(hoomd.md.pair.Table, - dict(zip(combos, table_valid_param_dicts)), {})) + paramtuple(hoomd.md.pair.Table, dict(zip(combos, table_valid_param_dicts)), {}) + ) return valid_params_list -@pytest.fixture(scope="function", - params=_valid_params(), - ids=(lambda x: x[0].__name__)) +@pytest.fixture(scope="function", params=_valid_params(), ids=(lambda x: x[0].__name__)) def valid_params(request): return deepcopy(request.param) def test_valid_params(valid_params): - pot = valid_params.pair_potential(**valid_params.extra_args, - nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) + pot = valid_params.pair_potential( + **valid_params.extra_args, nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5 + ) for pair in valid_params.pair_potential_params: pot.params[pair] = valid_params.pair_potential_params[pair] - assert _equivalent_data_structures(valid_params.pair_potential_params, - pot.params.to_base()) + assert _equivalent_data_structures( + valid_params.pair_potential_params, pot.params.to_base() + ) def _update_snap(pair_potential, snap): - if (any(name in str(pair_potential) for name in ['Ewald']) - and snap.communicator.rank == 0): - snap.particles.charge[:] = 1. + if ( + any(name in str(pair_potential) for name in ["Ewald"]) + and snap.communicator.rank == 0 + ): + snap.particles.charge[:] = 1.0 def _skip_if_triplet_gpu_mpi(sim, pair_potential): """Determines if the simulation is able to run this pair potential.""" - if (isinstance(sim.device, hoomd.device.GPU) - and sim.device.communicator.num_ranks > 1 - and issubclass(pair_potential, hoomd.md.many_body.Triplet)): + if ( + isinstance(sim.device, hoomd.device.GPU) + and sim.device.communicator.num_ranks > 1 + and issubclass(pair_potential, hoomd.md.many_body.Triplet) + ): pytest.skip("Cannot run triplet potentials with GPU+MPI enabled") -def test_attached_params(simulation_factory, lattice_snapshot_factory, - valid_params): +def test_attached_params(simulation_factory, lattice_snapshot_factory, valid_params): pair_potential, pair_potential_dict, extra_args = valid_params pair_keys = valid_params.pair_potential_params.keys() particle_types = list(set(itertools.chain.from_iterable(pair_keys))) - pot = valid_params.pair_potential(**valid_params.extra_args, - nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) + pot = valid_params.pair_potential( + **valid_params.extra_args, nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5 + ) pot.params = valid_params.pair_potential_params - snap = lattice_snapshot_factory(particle_types=particle_types, - n=10, - a=1.5, - r=0.01) + snap = lattice_snapshot_factory(particle_types=particle_types, n=10, a=1.5, r=0.01) _update_snap(valid_params.pair_potential, snap) if snap.communicator.rank == 0: - snap.particles.typeid[:] = np.random.randint(0, - len(snap.particles.types), - snap.particles.N) + snap.particles.typeid[:] = np.random.randint( + 0, len(snap.particles.types), snap.particles.N + ) sim = simulation_factory(snap) _skip_if_triplet_gpu_mpi(sim, valid_params.pair_potential) sim.operations.integrator = 
hoomd.md.Integrator(dt=0.005) sim.operations.integrator.forces.append(pot) sim.run(1) - assert _equivalent_data_structures(valid_params.pair_potential_params, - pot.params.to_base()) + assert _equivalent_data_structures( + valid_params.pair_potential_params, pot.params.to_base() + ) def test_run(simulation_factory, lattice_snapshot_factory, valid_params): pair_keys = valid_params.pair_potential_params.keys() particle_types = list(set(itertools.chain.from_iterable(pair_keys))) - pot = valid_params.pair_potential(**valid_params.extra_args, - nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) + pot = valid_params.pair_potential( + **valid_params.extra_args, nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5 + ) pot.params = valid_params.pair_potential_params - snap = lattice_snapshot_factory(particle_types=particle_types, - n=7, - a=1.7, - r=0.01) + snap = lattice_snapshot_factory(particle_types=particle_types, n=7, a=1.7, r=0.01) if snap.communicator.rank == 0: - snap.particles.typeid[:] = np.random.randint(0, - len(snap.particles.types), - snap.particles.N) + snap.particles.typeid[:] = np.random.randint( + 0, len(snap.particles.types), snap.particles.N + ) sim = simulation_factory(snap) _skip_if_triplet_gpu_mpi(sim, valid_params.pair_potential) integrator = hoomd.md.Integrator(dt=0.005) integrator.forces.append(pot) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim.operations.integrator = integrator sim.operations._schedule() old_snap = sim.state.get_snapshot() sim.run(2) new_snap = sim.state.get_snapshot() if new_snap.communicator.rank == 0: - assert not np.allclose(new_snap.particles.position, - old_snap.particles.position) + assert not np.allclose(new_snap.particles.position, old_snap.particles.position) autotuned_kernel_parameter_check(instance=pot, activate=lambda: sim.run(1)) @@ -772,8 +796,8 @@ def test_run(simulation_factory, lattice_snapshot_factory, valid_params): def set_distance(simulation, distance): snap = simulation.state.get_snapshot() if snap.communicator.rank == 0: - snap.particles.position[0] = [0, 0, .1] - snap.particles.position[1] = [0, 0, distance + .1] + snap.particles.position[0] = [0, 0, 0.1] + snap.particles.position[1] = [0, 0, distance + 0.1] simulation.state.set_snapshot(snap) @@ -787,8 +811,8 @@ def S_r(r, r_cut, r_on): # noqa: N802 - allow uppercase function name return 1 elif r > r_cut: return 0 - numerator = ((r_cut**2 - r**2)**2) * (r_cut**2 + 2 * r**2 - 3 * r_on**2) - denominator = (r_cut**2 - r_on**2)**3 + numerator = ((r_cut**2 - r**2) ** 2) * (r_cut**2 + 2 * r**2 - 3 * r_on**2) + denominator = (r_cut**2 - r_on**2) ** 3 return numerator / denominator def get_energy(simulation): @@ -801,10 +825,9 @@ def get_energy(simulation): distance = 1.1 lj = md.pair.LJ(nlist=md.nlist.Cell(buffer=0.4), default_r_cut=r_cut * 1.2) - lj.params[('A', 'A')] = {'sigma': 1, 'epsilon': 0.5} + lj.params[("A", "A")] = {"sigma": 1, "epsilon": 0.5} - sim = simulation_factory( - two_particle_snapshot_factory(dimensions=3, d=distance)) + sim = simulation_factory(two_particle_snapshot_factory(dimensions=3, d=distance)) integrator = md.Integrator(dt=0.005, forces=[lj]) sim.operations.integrator = integrator @@ -816,10 +839,10 @@ def get_energy(simulation): set_distance(sim, r_cut) E_r_cut = get_energy(sim) - lj_shift = md.pair.LJ(nlist=md.nlist.Cell(buffer=0.4), - mode='shift', - default_r_cut=r_cut) - lj_shift.params[('A', 'A')] = {'sigma': 1, 'epsilon': 0.5} + 
lj_shift = md.pair.LJ( + nlist=md.nlist.Cell(buffer=0.4), mode="shift", default_r_cut=r_cut + ) + lj_shift.params[("A", "A")] = {"sigma": 1, "epsilon": 0.5} integrator.forces = [lj_shift] set_distance(sim, distance) @@ -828,11 +851,11 @@ def get_energy(simulation): assert math.isclose(sum(energies), E_r - E_r_cut) r_on = 0.5 - lj_xplor = md.pair.LJ(nlist=md.nlist.Cell(buffer=0.4), - mode='xplor', - default_r_cut=r_cut) - lj_xplor.params[('A', 'A')] = {'sigma': 1, 'epsilon': 0.5} - lj_xplor.r_on[('A', 'A')] = r_on + lj_xplor = md.pair.LJ( + nlist=md.nlist.Cell(buffer=0.4), mode="xplor", default_r_cut=r_cut + ) + lj_xplor.params[("A", "A")] = {"sigma": 1, "epsilon": 0.5} + lj_xplor.r_on[("A", "A")] = r_on integrator.forces = [lj_xplor] # When 0 < r_on < r_ij < r_cut @@ -841,14 +864,14 @@ def get_energy(simulation): assert math.isclose(sum(energies), E_r * S_r(distance, r_cut, r_on)) # When 0 < r_ij < r_on < r_cut - lj_xplor.r_on[('A', 'A')] = distance * 1.2 + lj_xplor.r_on[("A", "A")] = distance * 1.2 sim.run(1) # recompute forces energies = sim.operations.integrator.forces[0].energies if energies is not None: assert math.isclose(sum(energies), E_r) # When 0 < r_ij < r_cut < r_on - lj_xplor.r_on[('A', 'A')] = r_cut * 1.2 + lj_xplor.r_on[("A", "A")] = r_cut * 1.2 sim.run(1) # recompute forces energies = sim.operations.integrator.forces[0].energies if energies is not None: @@ -898,27 +921,32 @@ def _calculate_force(sim): return 0, 0 # return dummy values if not on rank 1 -def test_force_energy_relationship(device, simulation_factory, - two_particle_snapshot_factory, valid_params): +def test_force_energy_relationship( + device, simulation_factory, two_particle_snapshot_factory, valid_params +): # don't really test DPD and DPDLJ for this test pot_name = valid_params.pair_potential.__name__ if any(pot_name == name for name in ["DPD", "DPDLJ"]): - pytest.skip("Cannot test force energy relationship for " + pot_name - + " pair force") - - if (pot_name == 'Tersoff' and isinstance(device, hoomd.device.GPU) - and hoomd.version.gpu_platform == 'ROCm'): + pytest.skip( + "Cannot test force energy relationship for " + pot_name + " pair force" + ) + + if ( + pot_name == "Tersoff" + and isinstance(device, hoomd.device.GPU) + and hoomd.version.gpu_platform == "ROCm" + ): pytest.skip("Tersoff causes seg faults on ROCm (#1606).") pair_keys = valid_params.pair_potential_params.keys() particle_types = list(set(itertools.chain.from_iterable(pair_keys))) - pot = valid_params.pair_potential(**valid_params.extra_args, - nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) + pot = valid_params.pair_potential( + **valid_params.extra_args, nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5 + ) for pair in valid_params.pair_potential_params: pot.params[pair] = valid_params.pair_potential_params[pair] - if pot_name == 'DLVO': + if pot_name == "DLVO": pot.r_cut[pair] = 2.5 - ((0.2 + 0.5) / 2 - 1) snap = two_particle_snapshot_factory(particle_types=particle_types, d=1.5) @@ -927,8 +955,7 @@ def test_force_energy_relationship(device, simulation_factory, _skip_if_triplet_gpu_mpi(sim, valid_params.pair_potential) integrator = md.Integrator(dt=0.005) integrator.forces.append(pot) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim.operations.integrator = integrator sim.run(0) for pair in valid_params.pair_potential_params: @@ -941,12 +968,8 @@ def test_force_energy_relationship(device, simulation_factory, 
calculated_forces = _calculate_force(sim) sim_forces = sim.operations.integrator.forces[0].forces if sim_forces is not None: - np.testing.assert_allclose(calculated_forces[0], - sim_forces[0], - rtol=1e-05) - np.testing.assert_allclose(calculated_forces[1], - sim_forces[1], - rtol=1e-05) + np.testing.assert_allclose(calculated_forces[0], sim_forces[0], rtol=1e-05) + np.testing.assert_allclose(calculated_forces[1], sim_forces[1], rtol=1e-05) def _forces_and_energies(): @@ -956,10 +979,10 @@ def _forces_and_energies(): and then stored in the json file below. Values were calculated at distances of 0.75 and 1.5 for each argument dictionary """ - FEtuple = namedtuple('FEtuple', [ - 'pair_potential', 'pair_potential_params', 'extra_args', 'forces', - 'energies' - ]) + FEtuple = namedtuple( + "FEtuple", + ["pair_potential", "pair_potential_params", "extra_args", "forces", "energies"], + ) path = Path(__file__).parent / "forces_and_energies.json" @@ -977,17 +1000,17 @@ def json_with_inf(val): param_list = [] for pot in F_and_E.keys(): if pot[0].isalpha(): - kT_dict = {'DPD': {'kT': 2}, 'DPDLJ': {'kT': 1}}.get(pot, {}) + kT_dict = {"DPD": {"kT": 2}, "DPDLJ": {"kT": 1}}.get(pot, {}) for i in range(len(F_and_E[pot]["params"])): param_list.append( - FEtuple(getattr(md.pair, pot), - F_and_E[pot]["params"][i], kT_dict, [ - json_with_inf(v) - for v in F_and_E[pot]["forces"][i] - ], [ - json_with_inf(v) - for v in F_and_E[pot]["energies"][i] - ])) + FEtuple( + getattr(md.pair, pot), + F_and_E[pot]["params"][i], + kT_dict, + [json_with_inf(v) for v in F_and_E[pot]["forces"][i]], + [json_with_inf(v) for v in F_and_E[pot]["energies"][i]], + ) + ) return param_list @@ -1009,28 +1032,30 @@ def isclose(value, reference, rtol=5e-6): # file needs to be multipled by r to compare with the computed force of the # simulation. 
@pytest.mark.filterwarnings("ignore:invalid value encountered in multiply") -@pytest.mark.parametrize("forces_and_energies", - _forces_and_energies(), - ids=lambda x: x.pair_potential.__name__) -def test_force_energy_accuracy(simulation_factory, - two_particle_snapshot_factory, - forces_and_energies): +@pytest.mark.parametrize( + "forces_and_energies", + _forces_and_energies(), + ids=lambda x: x.pair_potential.__name__, +) +def test_force_energy_accuracy( + simulation_factory, two_particle_snapshot_factory, forces_and_energies +): pot_name = forces_and_energies.pair_potential.__name__ if pot_name == "DPD" or pot_name == "DPDLJ": - pytest.skip("Cannot test force energy accuracy for " + pot_name - + " pair force") - - pot = forces_and_energies.pair_potential(**forces_and_energies.extra_args, - nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) - pot.params[('A', 'A')] = forces_and_energies.pair_potential_params - snap = two_particle_snapshot_factory(particle_types=['A'], d=0.75) + pytest.skip("Cannot test force energy accuracy for " + pot_name + " pair force") + + pot = forces_and_energies.pair_potential( + **forces_and_energies.extra_args, + nlist=md.nlist.Cell(buffer=0.4), + default_r_cut=2.5, + ) + pot.params[("A", "A")] = forces_and_energies.pair_potential_params + snap = two_particle_snapshot_factory(particle_types=["A"], d=0.75) _update_snap(forces_and_energies.pair_potential, snap) sim = simulation_factory(snap) integrator = md.Integrator(dt=0.005) integrator.forces.append(pot) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim.operations.integrator = integrator sim.run(0) particle_distances = [0.75, 1.5] @@ -1039,22 +1064,23 @@ def test_force_energy_accuracy(simulation_factory, r = np.array([0, 0, d]) / d snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - snap.particles.position[0] = [0, 0, .1] - snap.particles.position[1] = [0, 0, d + .1] + snap.particles.position[0] = [0, 0, 0.1] + snap.particles.position[1] = [0, 0, d + 0.1] sim.state.set_snapshot(snap) sim_energies = sim.operations.integrator.forces[0].energies sim_forces = sim.operations.integrator.forces[0].forces if sim_energies is not None: assert isclose(sum(sim_energies), forces_and_energies.energies[i]) - assert np.allclose(sim_forces[0], - forces_and_energies.forces[i] * r, - equal_nan=True) + assert np.allclose( + sim_forces[0], forces_and_energies.forces[i] * r, equal_nan=True + ) def populate_sim(sim): """Add an integrator for the following tests.""" sim.operations.integrator = md.Integrator( - dt=0.005, methods=[md.methods.ConstantVolume(hoomd.filter.All())]) + dt=0.005, methods=[md.methods.ConstantVolume(hoomd.filter.All())] + ) return sim @@ -1113,53 +1139,53 @@ def test_setting_nlist(simulation_factory, two_particle_snapshot_factory): # Test logging @pytest.mark.parametrize( - 'cls, expected_namespace, expected_loggables', - zip((md.pair.Pair, md.pair.aniso.AnisotropicPair, md.many_body.Triplet, - md.many_body.Tersoff, md.many_body.RevCross, - md.many_body.SquareDensity), - (('md', 'pair'), ('md', 'pair', 'aniso'), ('md', 'many_body'), - ('md', 'many_body'), ('md', 'many_body'), ('md', 'many_body')), - itertools.repeat({ - 'energy': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'energies': { - 'category': LoggerCategories.particle, - 'default': True - }, - 'forces': { - 'category': LoggerCategories.particle, - 'default': True - }, - 'torques': { - 'category': 
LoggerCategories.particle, - 'default': True - }, - 'virials': { - 'category': LoggerCategories.particle, - 'default': True - }, - 'additional_energy': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'additional_virial': { - 'category': LoggerCategories.sequence, - 'default': True + "cls, expected_namespace, expected_loggables", + zip( + ( + md.pair.Pair, + md.pair.aniso.AnisotropicPair, + md.many_body.Triplet, + md.many_body.Tersoff, + md.many_body.RevCross, + md.many_body.SquareDensity, + ), + ( + ("md", "pair"), + ("md", "pair", "aniso"), + ("md", "many_body"), + ("md", "many_body"), + ("md", "many_body"), + ("md", "many_body"), + ), + itertools.repeat( + { + "energy": {"category": LoggerCategories.scalar, "default": True}, + "energies": {"category": LoggerCategories.particle, "default": True}, + "forces": {"category": LoggerCategories.particle, "default": True}, + "torques": {"category": LoggerCategories.particle, "default": True}, + "virials": {"category": LoggerCategories.particle, "default": True}, + "additional_energy": { + "category": LoggerCategories.scalar, + "default": True, + }, + "additional_virial": { + "category": LoggerCategories.sequence, + "default": True, + }, } - }))) + ), + ), +) def test_logging(cls, expected_namespace, expected_loggables): logging_check(cls, expected_namespace, expected_loggables) -def test_pickling(simulation_factory, two_particle_snapshot_factory, - valid_params): +def test_pickling(simulation_factory, two_particle_snapshot_factory, valid_params): sim = simulation_factory(two_particle_snapshot_factory()) _skip_if_triplet_gpu_mpi(sim, valid_params.pair_potential) - pot = valid_params.pair_potential(**valid_params.extra_args, - nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=2.5) + pot = valid_params.pair_potential( + **valid_params.extra_args, nlist=md.nlist.Cell(buffer=0.4), default_r_cut=2.5 + ) for pair in valid_params.pair_potential_params: pot.params[pair] = valid_params.pair_potential_params[pair] pickling_check(pot) @@ -1169,24 +1195,19 @@ def test_pickling(simulation_factory, two_particle_snapshot_factory, pickling_check(pot) -@pytest.mark.parametrize("mode", ['none', 'shift', 'xplor']) -def test_shift_mode_with_lrc(simulation_factory, two_particle_snapshot_factory, - mode): +@pytest.mark.parametrize("mode", ["none", "shift", "xplor"]) +def test_shift_mode_with_lrc(simulation_factory, two_particle_snapshot_factory, mode): cell = md.nlist.Cell(buffer=0.4) - lj = md.pair.LJ(nlist=cell, - default_r_cut=2.5, - mode=mode, - tail_correction=True) + lj = md.pair.LJ(nlist=cell, default_r_cut=2.5, mode=mode, tail_correction=True) - lj.params[('A', 'A')] = {'sigma': 1, 'epsilon': 0.5} - snap = two_particle_snapshot_factory(dimensions=3, d=.5) + lj.params[("A", "A")] = {"sigma": 1, "epsilon": 0.5} + snap = two_particle_snapshot_factory(dimensions=3, d=0.5) sim = simulation_factory(snap) integrator = md.Integrator(dt=0.005) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim.operations.integrator = integrator - shift_allowed_modes = {'none'} + shift_allowed_modes = {"none"} if mode not in shift_allowed_modes: with pytest.raises(RuntimeError): sim.run(1) @@ -1200,10 +1221,9 @@ def test_lrc_non_lj(simulation_factory, two_particle_snapshot_factory): with pytest.raises(TypeError): # flake8 complains about unused variable with # gauss = md.pair.Gaussian(...) 
- md.pair.Gaussian(nlist=cell, - default_r_cut=2.5, - mode='none', - tail_correction=True) + md.pair.Gaussian( + nlist=cell, default_r_cut=2.5, mode="none", tail_correction=True + ) def test_tail_corrections(simulation_factory, two_particle_snapshot_factory): @@ -1218,12 +1238,14 @@ def test_tail_corrections(simulation_factory, two_particle_snapshot_factory): r_cut = 2.0 for tail_correction in [True, False]: cell = md.nlist.Cell(buffer=0.4) - lj = md.pair.LJ(nlist=cell, - default_r_cut=r_cut, - mode='none', - tail_correction=tail_correction) - - lj.params[('A', 'A')] = {'sigma': sigma, 'epsilon': epsilon} + lj = md.pair.LJ( + nlist=cell, + default_r_cut=r_cut, + mode="none", + tail_correction=tail_correction, + ) + + lj.params[("A", "A")] = {"sigma": sigma, "epsilon": epsilon} snap = two_particle_snapshot_factory(dimensions=3, d=d_pair) v1 = np.array([0.46168675, -0.21020661, 0.21240303]) v2 = -v1 # zero linear momentum @@ -1233,12 +1255,12 @@ def test_tail_corrections(simulation_factory, two_particle_snapshot_factory): sim = simulation_factory(snap) integrator = md.Integrator(dt=0.005) integrator.forces.append(lj) - integrator.methods.append( - hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) + integrator.methods.append(hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1)) sim.operations.integrator = integrator sim.always_compute_pressure = True thermodynamic_properties = hoomd.md.compute.ThermodynamicQuantities( - filter=hoomd.filter.All()) + filter=hoomd.filter.All() + ) sim.operations.computes.append(thermodynamic_properties) sim.run(0) sims[tail_correction] = sim @@ -1252,7 +1274,7 @@ def test_tail_corrections(simulation_factory, two_particle_snapshot_factory): rho = N / volume def lj_energy(r, sig, eps): - return 4 * eps * ((sig / r)**12 - (sig / r)**6) + return 4 * eps * ((sig / r) ** 12 - (sig / r) ** 6) def energy_correction(sigma, epsilon, r_cut, rho, N): """Long-range tail correction to energy.""" @@ -1263,7 +1285,7 @@ def energy_correction(sigma, epsilon, r_cut, rho, N): def lj_force_mag(r, sig, eps): """Magnitude of force on particles from LJ potential a distance r.""" - return 24 * eps / r * (2 * (sig / r)**12 - (sig / r)**6) + return 24 * eps / r * (2 * (sig / r) ** 12 - (sig / r) ** 6) def pressure_correction(sigma, epsilon, r_cut, rho): """Long-range tail correction to pressure.""" @@ -1293,8 +1315,7 @@ def pressure_correction(sigma, epsilon, r_cut, rho): assert p_true < p_false -def test_adding_to_operations(simulation_factory, - two_particle_snapshot_factory): +def test_adding_to_operations(simulation_factory, two_particle_snapshot_factory): """Test that forces can work like computes since they are.""" sim = simulation_factory(two_particle_snapshot_factory(d=0.5)) lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(0.4)) @@ -1305,8 +1326,7 @@ def test_adding_to_operations(simulation_factory, assert lj.energy != 0 -def test_forces_multiple_lists(simulation_factory, - two_particle_snapshot_factory): +def test_forces_multiple_lists(simulation_factory, two_particle_snapshot_factory): """Test that forces added to an integrator and compute work correctly. 
Look at the edge cases where a force is added twice to a simulation: once @@ -1346,12 +1366,13 @@ def test_forces_multiple_lists(simulation_factory, assert lj._use_count == 0 -@pytest.mark.parametrize("forces_and_energies", - _forces_and_energies(), - ids=lambda x: x.pair_potential.__name__) -def test_shift(simulation_factory, two_particle_snapshot_factory, - forces_and_energies): - if 'shift' not in forces_and_energies.pair_potential._accepted_modes: +@pytest.mark.parametrize( + "forces_and_energies", + _forces_and_energies(), + ids=lambda x: x.pair_potential.__name__, +) +def test_shift(simulation_factory, two_particle_snapshot_factory, forces_and_energies): + if "shift" not in forces_and_energies.pair_potential._accepted_modes: pytest.skip("Potential does not support the shift mode.") r_cut = 2.0 @@ -1359,18 +1380,19 @@ def test_shift(simulation_factory, two_particle_snapshot_factory, potential = forces_and_energies.pair_potential( **forces_and_energies.extra_args, nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=r_cut) - potential.params[('A', 'A')] = forces_and_energies.pair_potential_params + default_r_cut=r_cut, + ) + potential.params[("A", "A")] = forces_and_energies.pair_potential_params potential_shifted = forces_and_energies.pair_potential( **forces_and_energies.extra_args, nlist=md.nlist.Cell(buffer=0.4), - default_r_cut=r_cut) - potential_shifted.params[('A', - 'A')] = forces_and_energies.pair_potential_params - potential_shifted.mode = 'shift' + default_r_cut=r_cut, + ) + potential_shifted.params[("A", "A")] = forces_and_energies.pair_potential_params + potential_shifted.mode = "shift" - snap = two_particle_snapshot_factory(particle_types=['A'], d=r_cut - 1e-7) + snap = two_particle_snapshot_factory(particle_types=["A"], d=r_cut - 1e-7) _update_snap(forces_and_energies.pair_potential, snap) sim = simulation_factory(snap) sim.operations.computes.extend([potential, potential_shifted]) diff --git a/hoomd/md/pytest/test_pppm_coulomb.py b/hoomd/md/pytest/test_pppm_coulomb.py index 78588f8625..43a6949f8a 100644 --- a/hoomd/md/pytest/test_pppm_coulomb.py +++ b/hoomd/md/pytest/test_pppm_coulomb.py @@ -7,11 +7,11 @@ import numpy -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def two_charged_particle_snapshot_factory(two_particle_snapshot_factory): """Make a snapshot with two charged particles.""" - def make_snapshot(particle_types=['A'], dimensions=3, d=1, L=20, q=1): + def make_snapshot(particle_types=["A"], dimensions=3, d=1, L=20, q=1): """Make the snapshot. Args: @@ -21,10 +21,9 @@ def make_snapshot(particle_types=['A'], dimensions=3, d=1, L=20, q=1): L: Box length q: Particle charge """ - s = two_particle_snapshot_factory(particle_types=particle_types, - dimensions=dimensions, - d=d, - L=L) + s = two_particle_snapshot_factory( + particle_types=particle_types, dimensions=dimensions, d=d, L=L + ) if s.communicator.rank == 0: s.particles.charge[0] = -q @@ -34,8 +33,7 @@ def make_snapshot(particle_types=['A'], dimensions=3, d=1, L=20, q=1): return make_snapshot -def test_attach_detach(simulation_factory, - two_charged_particle_snapshot_factory): +def test_attach_detach(simulation_factory, two_charged_particle_snapshot_factory): """Ensure that md.long_range.pppm.Coulomb can be attached. Also test that parameters can be set. 
@@ -43,7 +41,8 @@ def test_attach_detach(simulation_factory, # detached nlist = hoomd.md.nlist.Cell(buffer=0.4) ewald, coulomb = hoomd.md.long_range.pppm.make_pppm_coulomb_forces( - nlist=nlist, resolution=(64, 64, 64), order=6, r_cut=3.0, alpha=0) + nlist=nlist, resolution=(64, 64, 64), order=6, r_cut=3.0, alpha=0 + ) assert ewald.nlist is nlist assert coulomb.nlist is nlist @@ -87,7 +86,7 @@ def test_attach_detach(simulation_factory, assert coulomb.r_cut == 2.5 assert coulomb.alpha == 1.5 - assert ewald.params[('A', 'A')]['alpha'] == 1.5 + assert ewald.params[("A", "A")]["alpha"] == 1.5 with pytest.raises(AttributeError): coulomb.resolution = (32, 32, 32) @@ -99,12 +98,12 @@ def test_attach_detach(simulation_factory, coulomb.alpha = 3.0 -def test_kernel_parameters(simulation_factory, - two_charged_particle_snapshot_factory): +def test_kernel_parameters(simulation_factory, two_charged_particle_snapshot_factory): """Test that md.long_range.pppm.Coulomb can be pickled and unpickled.""" nlist = hoomd.md.nlist.Cell(buffer=0.4) ewald, coulomb = hoomd.md.long_range.pppm.make_pppm_coulomb_forces( - nlist=nlist, resolution=(64, 64, 64), order=6, r_cut=3.0, alpha=0) + nlist=nlist, resolution=(64, 64, 64), order=6, r_cut=3.0, alpha=0 + ) sim = simulation_factory(two_charged_particle_snapshot_factory()) integrator = hoomd.md.Integrator(dt=0.005) @@ -115,8 +114,7 @@ def test_kernel_parameters(simulation_factory, sim.run(0) - autotuned_kernel_parameter_check(instance=coulomb, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=coulomb, activate=lambda: sim.run(1)) def test_pickling(simulation_factory, two_charged_particle_snapshot_factory): @@ -124,7 +122,8 @@ def test_pickling(simulation_factory, two_charged_particle_snapshot_factory): # detached nlist = hoomd.md.nlist.Cell(buffer=0.4) ewald, coulomb = hoomd.md.long_range.pppm.make_pppm_coulomb_forces( - nlist=nlist, resolution=(64, 64, 64), order=6, r_cut=3.0, alpha=0) + nlist=nlist, resolution=(64, 64, 64), order=6, r_cut=3.0, alpha=0 + ) pickling_check(coulomb) # attached @@ -147,7 +146,8 @@ def test_pppm_energy(simulation_factory, two_charged_particle_snapshot_factory): """Test that md.long_range.pppm.Coulomb computes the correct energy.""" nlist = hoomd.md.nlist.Cell(buffer=0.4) ewald, coulomb = hoomd.md.long_range.pppm.make_pppm_coulomb_forces( - nlist=nlist, resolution=(64, 64, 64), order=6, r_cut=3.0, alpha=0) + nlist=nlist, resolution=(64, 64, 64), order=6, r_cut=3.0, alpha=0 + ) sim = simulation_factory(two_charged_particle_snapshot_factory()) integrator = hoomd.md.Integrator(dt=0.005) diff --git a/hoomd/md/pytest/test_reverse_perturbation_flow.py b/hoomd/md/pytest/test_reverse_perturbation_flow.py index f51a0f7523..3e69d76760 100644 --- a/hoomd/md/pytest/test_reverse_perturbation_flow.py +++ b/hoomd/md/pytest/test_reverse_perturbation_flow.py @@ -7,7 +7,7 @@ from hoomd.logging import LoggerCategories from hoomd.conftest import logging_check, autotuned_kernel_parameter_check -_directions = list(permutations(['x', 'y', 'z'], 2)) +_directions = list(permutations(["x", "y", "z"], 2)) @pytest.mark.parametrize("slab_direction, flow_direction", _directions) @@ -15,8 +15,9 @@ def test_before_attaching(slab_direction, flow_direction): filt = hoomd.filter.All() ramp = hoomd.variant.Ramp(0.0, 0.1e8, 0, int(1e8)) n_slabs = 20 - mpf = hoomd.md.update.ReversePerturbationFlow(filt, ramp, slab_direction, - flow_direction, n_slabs) + mpf = hoomd.md.update.ReversePerturbationFlow( + filt, ramp, slab_direction, flow_direction, n_slabs 
+ ) assert mpf.filter == filt assert mpf.flow_target == ramp @@ -32,13 +33,15 @@ def test_before_attaching(slab_direction, flow_direction): @pytest.mark.parametrize("slab_direction, flow_direction", _directions) -def test_after_attaching(simulation_factory, two_particle_snapshot_factory, - slab_direction, flow_direction): +def test_after_attaching( + simulation_factory, two_particle_snapshot_factory, slab_direction, flow_direction +): filt = hoomd.filter.All() ramp = hoomd.variant.Ramp(0.0, 0.1e8, 0, int(1e8)) n_slabs = 20 - mpf = hoomd.md.update.ReversePerturbationFlow(filt, ramp, slab_direction, - flow_direction, n_slabs) + mpf = hoomd.md.update.ReversePerturbationFlow( + filt, ramp, slab_direction, flow_direction, n_slabs + ) nve = hoomd.md.methods.ConstantVolume(filter=hoomd.filter.All()) sim = simulation_factory(two_particle_snapshot_factory()) @@ -57,7 +60,7 @@ def test_after_attaching(simulation_factory, two_particle_snapshot_factory, with pytest.raises(AttributeError): # filter cannot be set after scheduling - mpf.filter = hoomd.filter.Type(['A']) + mpf.filter = hoomd.filter.Type(["A"]) with pytest.raises(AttributeError): # flow_target cannot be set after scheduling mpf.flow_target = hoomd.variant.Ramp(0.0, 0.1e7, 0, int(1e7)) @@ -85,16 +88,19 @@ def test_after_attaching(simulation_factory, two_particle_snapshot_factory, if sim.device.communicator.num_ranks == 1: # ReversePerturbationFlow doesn't execute its kernel on all ranks, # test only on serial simulations. - autotuned_kernel_parameter_check(instance=mpf, - activate=lambda: sim.run(1), - all_optional=True) + autotuned_kernel_parameter_check( + instance=mpf, activate=lambda: sim.run(1), all_optional=True + ) def test_logging(): logging_check( - hoomd.md.update.ReversePerturbationFlow, ('md', 'update'), { - 'summed_exchanged_momentum': { - 'category': LoggerCategories.scalar, - 'default': True + hoomd.md.update.ReversePerturbationFlow, + ("md", "update"), + { + "summed_exchanged_momentum": { + "category": LoggerCategories.scalar, + "default": True, } - }) + }, + ) diff --git a/hoomd/md/pytest/test_rigid.py b/hoomd/md/pytest/test_rigid.py index bde8e821ea..3c931a332a 100644 --- a/hoomd/md/pytest/test_rigid.py +++ b/hoomd/md/pytest/test_rigid.py @@ -9,6 +9,7 @@ try: import rowan + skip_rowan = False except ImportError: skip_rowan = True @@ -24,10 +25,10 @@ def valid_body_definition(): return { "constituent_types": ["B", "B", "B", "B"], "positions": [ - [1, 0, -1 / (2**(1. / 2.))], - [-1, 0, -1 / (2**(1. / 2.))], - [0, -1, 1 / (2**(1. / 2.))], - [0, 1, 1 / (2**(1. 
/ 2.))], + [1, 0, -1 / (2 ** (1.0 / 2.0))], + [-1, 0, -1 / (2 ** (1.0 / 2.0))], + [0, -1, 1 / (2 ** (1.0 / 2.0))], + [0, 1, 1 / (2 ** (1.0 / 2.0))], ], "orientations": [(1.0, 0.0, 0.0, 0.0)] * 4, } @@ -37,8 +38,7 @@ def test_body_setting(valid_body_definition): invalid_body_definitions = { "constituent_types": [[4], "hello", ("A", 4)], "positions": [[(1, 2)], [(1.0, 4.0, "foo")], 1.0, "hello"], - "orientations": [[(1, 2, 3)], [(1.0, 4.0, 5.0, "foo")], [1.0], 1.0, - "foo"], + "orientations": [[(1, 2, 3)], [(1.0, 4.0, 5.0, "foo")], [1.0], 1.0, "foo"], } rigid = md.constrain.Rigid() @@ -46,8 +46,11 @@ def test_body_setting(valid_body_definition): rigid.body["A"] = current_body_definition for key, value in rigid.body["A"].items(): - if (isinstance(value, Sequence) and len(value) > 0 - and not isinstance(value[0], str)): + if ( + isinstance(value, Sequence) + and len(value) > 0 + and not isinstance(value[0], str) + ): assert np.allclose(value, current_body_definition[key]) else: assert value == current_body_definition[key] @@ -83,42 +86,57 @@ def check_bodies(snapshot, definition, charges=None): assert snapshot.particles.charge[i + 2] == charges[i] assert snapshot.particles.charge[i + 6] == charges[i] - particle_one = (snapshot.particles.position[0], - snapshot.particles.orientation[0]) - particle_two = (snapshot.particles.position[1], - snapshot.particles.orientation[1]) + particle_one = (snapshot.particles.position[0], snapshot.particles.orientation[0]) + particle_two = (snapshot.particles.position[1], snapshot.particles.orientation[1]) # Check positions - def check_position(central_position, central_orientation, - constituent_position, local_position): + def check_position( + central_position, central_orientation, constituent_position, local_position + ): d_pos = rowan.rotate(central_orientation, local_position) assert np.allclose(central_position + d_pos, constituent_position) for i in range(4): - check_position(*particle_one, snapshot.particles.position[i + 2], - definition["positions"][i]) - check_position(*particle_two, snapshot.particles.position[i + 6], - definition["positions"][i]) + check_position( + *particle_one, + snapshot.particles.position[i + 2], + definition["positions"][i], + ) + check_position( + *particle_two, + snapshot.particles.position[i + 6], + definition["positions"][i], + ) # check orientation - def check_orientation(central_orientation, constituent_orientation, - local_orientation): + def check_orientation( + central_orientation, constituent_orientation, local_orientation + ): expected_orientation = rowan.normalize( - rowan.multiply(central_orientation, local_orientation)) + rowan.multiply(central_orientation, local_orientation) + ) assert np.allclose(expected_orientation, local_orientation) for i in range(4): - check_orientation(particle_one[1], - snapshot.particles.orientation[i + 2], - definition["orientations"][i]) - check_orientation(particle_two[1], - snapshot.particles.orientation[i + 6], - definition["orientations"][i]) + check_orientation( + particle_one[1], + snapshot.particles.orientation[i + 2], + definition["orientations"][i], + ) + check_orientation( + particle_two[1], + snapshot.particles.orientation[i + 6], + definition["orientations"][i], + ) @skip_rowan -def test_create_bodies(simulation_factory, two_particle_snapshot_factory, - lattice_snapshot_factory, valid_body_definition): +def test_create_bodies( + simulation_factory, + two_particle_snapshot_factory, + lattice_snapshot_factory, + valid_body_definition, +): rigid = md.constrain.Rigid() 
rigid.body["A"] = valid_body_definition @@ -165,16 +183,16 @@ def test_create_bodies(simulation_factory, two_particle_snapshot_factory, assert np.all(snapshot.particles.body[56:100] == -1) assert np.all(snapshot.particles.body[800:1000] == -1) # Check constituent_particles - assert np.all( - snapshot.particles.body[1000:] == np.repeat(central_tags, 4)) + assert np.all(snapshot.particles.body[1000:] == np.repeat(central_tags, 4)) sim.operations.integrator = hoomd.md.Integrator(dt=0.005, rigid=rigid) # Ensure validate bodies passes sim.run(0) -def test_attaching(simulation_factory, two_particle_snapshot_factory, - valid_body_definition): +def test_attaching( + simulation_factory, two_particle_snapshot_factory, valid_body_definition +): rigid = md.constrain.Rigid() rigid.body["A"] = valid_body_definition langevin = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid()) @@ -191,17 +209,20 @@ def test_attaching(simulation_factory, two_particle_snapshot_factory, sim.run(0) for key, value in rigid.body["A"].items(): - if (isinstance(value, Sequence) and len(value) > 0 - and not isinstance(value[0], str)): + if ( + isinstance(value, Sequence) + and len(value) > 0 + and not isinstance(value[0], str) + ): assert np.allclose(value, valid_body_definition[key]) else: assert value == valid_body_definition[key] @pytest.mark.serial -def test_error_on_invalid_body(simulation_factory, - two_particle_snapshot_factory, - valid_body_definition): +def test_error_on_invalid_body( + simulation_factory, two_particle_snapshot_factory, valid_body_definition +): """Tests that Simulation fails when bodies are not present in state.""" rigid = md.constrain.Rigid() rigid.body["A"] = valid_body_definition @@ -220,8 +241,9 @@ def test_error_on_invalid_body(simulation_factory, @skip_rowan -def test_running_simulation(simulation_factory, two_particle_snapshot_factory, - valid_body_definition): +def test_running_simulation( + simulation_factory, two_particle_snapshot_factory, valid_body_definition +): rigid = md.constrain.Rigid() rigid.body["A"] = valid_body_definition langevin = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid()) @@ -229,7 +251,7 @@ def test_running_simulation(simulation_factory, two_particle_snapshot_factory, lj.params.default = {"epsilon": 0.0, "sigma": 1} lj.params[("A", "A")] = {"epsilon": 1.0} lj.params[("B", "B")] = {"epsilon": 1.0} - lj.r_cut.default = 2**(1.0 / 6.0) + lj.r_cut.default = 2 ** (1.0 / 6.0) integrator = md.Integrator(dt=0.005, methods=[langevin], forces=[lj]) integrator.rigid = rigid @@ -247,19 +269,19 @@ def test_running_simulation(simulation_factory, two_particle_snapshot_factory, if sim.device.communicator.rank == 0: check_bodies(snapshot, valid_body_definition, charges) - autotuned_kernel_parameter_check(instance=rigid, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=rigid, activate=lambda: sim.run(1)) -def test_running_without_body_definition(simulation_factory, - two_particle_snapshot_factory): +def test_running_without_body_definition( + simulation_factory, two_particle_snapshot_factory +): rigid = md.constrain.Rigid() langevin = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid()) lj = hoomd.md.pair.LJ(nlist=md.nlist.Cell(buffer=0.4), mode="shift") lj.params.default = {"epsilon": 0.0, "sigma": 1} lj.params[("A", "A")] = {"epsilon": 1.0} lj.params[("B", "B")] = {"epsilon": 1.0} - lj.r_cut.default = 2**(1.0 / 6.0) + lj.r_cut.default = 2 ** (1.0 / 6.0) integrator = md.Integrator(dt=0.005, methods=[langevin], forces=[lj]) integrator.rigid = rigid 
@@ -274,9 +296,9 @@ def test_running_without_body_definition(simulation_factory, @pytest.mark.serial -def test_setting_body_after_attaching(simulation_factory, - two_particle_snapshot_factory, - valid_body_definition): +def test_setting_body_after_attaching( + simulation_factory, two_particle_snapshot_factory, valid_body_definition +): """Test updating body definition without updating sim particles fails.""" rigid = md.constrain.Rigid() langevin = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid()) @@ -284,7 +306,7 @@ def test_setting_body_after_attaching(simulation_factory, lj.params.default = {"epsilon": 0.0, "sigma": 1} lj.params[("A", "A")] = {"epsilon": 1.0} lj.params[("B", "B")] = {"epsilon": 1.0} - lj.r_cut.default = 2**(1.0 / 6.0) + lj.r_cut.default = 2 ** (1.0 / 6.0) integrator = md.Integrator(dt=0.005, methods=[langevin], forces=[lj]) integrator.rigid = rigid @@ -311,7 +333,7 @@ def test_rigid_body_restart(simulation_factory, valid_body_definition): s.particles.N = N s.particles.position[:] = [[-0.5, 0, 0]] * N s.particles.body[:] = [x for x in range(N)] - s.particles.types = ['A', 'B'] + s.particles.types = ["A", "B"] s.particles.typeid[:] = [0] * N s.configuration.box = [2, 2, 2, 0, 0, 0] @@ -327,7 +349,7 @@ def test_rigid_body_restart(simulation_factory, valid_body_definition): sim.run(0) snapshot = sim.state.get_snapshot() - N_const = len(valid_body_definition['constituent_types']) + N_const = len(valid_body_definition["constituent_types"]) if snapshot.communicator.rank == 0: assert np.all(snapshot.particles.typeid[:N] == 0) assert np.all(snapshot.particles.typeid[N:] == 1) @@ -336,16 +358,21 @@ def test_rigid_body_restart(simulation_factory, valid_body_definition): assert np.all(snapshot.particles.body[N:] == should_be) -@pytest.mark.parametrize("reload_snapshot, n_free", - itertools.product([False, True], [0, 10])) -def test_rigid_dof(lattice_snapshot_factory, simulation_factory, - valid_body_definition, reload_snapshot, n_free): +@pytest.mark.parametrize( + "reload_snapshot, n_free", itertools.product([False, True], [0, 10]) +) +def test_rigid_dof( + lattice_snapshot_factory, + simulation_factory, + valid_body_definition, + reload_snapshot, + n_free, +): n = 7 n_bodies = n**3 - n_free - initial_snapshot = lattice_snapshot_factory(particle_types=['A', 'B'], - n=n, - dimensions=3, - a=5) + initial_snapshot = lattice_snapshot_factory( + particle_types=["A", "B"], n=n, dimensions=3, a=5 + ) if initial_snapshot.communicator.rank == 0: initial_snapshot.particles.body[:n_bodies] = range(n_bodies) @@ -366,36 +393,49 @@ def test_rigid_dof(lattice_snapshot_factory, simulation_factory, integrator = hoomd.md.Integrator(dt=0.0, integrate_rotational_dof=True) integrator.rigid = rigid - thermo_all = hoomd.md.compute.ThermodynamicQuantities( - filter=hoomd.filter.All()) + thermo_all = hoomd.md.compute.ThermodynamicQuantities(filter=hoomd.filter.All()) thermo_two = hoomd.md.compute.ThermodynamicQuantities( - filter=hoomd.filter.Tags([0, 1])) + filter=hoomd.filter.Tags([0, 1]) + ) thermo_central = hoomd.md.compute.ThermodynamicQuantities( - filter=hoomd.filter.Rigid(flags=("center",))) + filter=hoomd.filter.Rigid(flags=("center",)) + ) thermo_central_free = hoomd.md.compute.ThermodynamicQuantities( - filter=hoomd.filter.Rigid(flags=("center", "free"))) + filter=hoomd.filter.Rigid(flags=("center", "free")) + ) thermo_constituent = hoomd.md.compute.ThermodynamicQuantities( - filter=hoomd.filter.Rigid(flags=("constituent",))) - - sim.operations.computes.extend([ - thermo_all, thermo_two, 
thermo_central, thermo_central_free, - thermo_constituent - ]) + filter=hoomd.filter.Rigid(flags=("constituent",)) + ) + + sim.operations.computes.extend( + [ + thermo_all, + thermo_two, + thermo_central, + thermo_central_free, + thermo_constituent, + ] + ) sim.operations.integrator = integrator integrator.methods.append( - hoomd.md.methods.ConstantVolume(filter=hoomd.filter.Rigid( - flags=("center", "free")))) + hoomd.md.methods.ConstantVolume( + filter=hoomd.filter.Rigid(flags=("center", "free")) + ) + ) sim.run(0) - assert thermo_all.translational_degrees_of_freedom == (n_bodies - + n_free) * 3 - 3 + assert thermo_all.translational_degrees_of_freedom == (n_bodies + n_free) * 3 - 3 assert thermo_two.translational_degrees_of_freedom == 2 * 3 - 3 * ( - 2 / (n_bodies + n_free)) + 2 / (n_bodies + n_free) + ) assert thermo_central.translational_degrees_of_freedom == ( - n_bodies * 3 - 3 * (n_bodies / (n_bodies + n_free))) - assert thermo_central_free.translational_degrees_of_freedom == ( - n_bodies + n_free) * 3 - 3 + n_bodies * 3 - 3 * (n_bodies / (n_bodies + n_free)) + ) + assert ( + thermo_central_free.translational_degrees_of_freedom + == (n_bodies + n_free) * 3 - 3 + ) assert thermo_constituent.translational_degrees_of_freedom == 0 # Test again with the rigid body constraints removed. Now the integration @@ -406,10 +446,10 @@ def test_rigid_dof(lattice_snapshot_factory, simulation_factory, sim.run(0) - assert thermo_all.translational_degrees_of_freedom == (n_bodies - + n_free) * 3 + assert thermo_all.translational_degrees_of_freedom == (n_bodies + n_free) * 3 assert thermo_two.translational_degrees_of_freedom == 2 * 3 assert thermo_central.translational_degrees_of_freedom == n_bodies * 3 - assert thermo_central_free.translational_degrees_of_freedom == ( - n_bodies + n_free) * 3 + assert ( + thermo_central_free.translational_degrees_of_freedom == (n_bodies + n_free) * 3 + ) assert thermo_constituent.translational_degrees_of_freedom == 0 diff --git a/hoomd/md/pytest/test_special_pair.py b/hoomd/md/pytest/test_special_pair.py index 7bacd6abfb..dee801d5e8 100644 --- a/hoomd/md/pytest/test_special_pair.py +++ b/hoomd/md/pytest/test_special_pair.py @@ -4,8 +4,11 @@ import hoomd from hoomd import md from hoomd.conftest import expected_loggable_params -from hoomd.conftest import (logging_check, pickling_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + logging_check, + pickling_check, + autotuned_kernel_parameter_check, +) import pytest import numpy @@ -22,7 +25,7 @@ dict(epsilon=1.5, sigma=0.5), 2.5, 24 * 0.5**6 * 1.5 * (R**6 - 2 * 0.5**6) / R**13, - 4 * 1.5 * ((0.5 / R)**12 - (0.5 / R)**6), + 4 * 1.5 * ((0.5 / R) ** 12 - (0.5 / R) ** 6), ), ( hoomd.md.special_pair.Coulomb, @@ -34,26 +37,26 @@ ] -@pytest.mark.parametrize("special_pair_cls, params, r_cut, force, energy", - special_pair_test_parameters) +@pytest.mark.parametrize( + "special_pair_cls, params, r_cut, force, energy", special_pair_test_parameters +) def test_before_attaching(special_pair_cls, params, r_cut, force, energy): potential = special_pair_cls() - potential.params['A-A'] = params - potential.r_cut['A-A'] = r_cut - assert potential.r_cut['A-A'] == r_cut + potential.params["A-A"] = params + potential.r_cut["A-A"] = r_cut + assert potential.r_cut["A-A"] == r_cut for key in params: - assert potential.params['A-A'][key] == pytest.approx(params[key]) + assert potential.params["A-A"][key] == pytest.approx(params[key]) -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def 
snapshot_factory(two_particle_snapshot_factory): - def make_snapshot(): snapshot = two_particle_snapshot_factory(d=R, L=R * 10) if snapshot.communicator.rank == 0: snapshot.particles.charge[:] = CHARGE snapshot.pairs.N = 1 - snapshot.pairs.types = ['A-A'] + snapshot.pairs.types = ["A-A"] snapshot.pairs.typeid[0] = 0 snapshot.pairs.group[0] = (0, 1) @@ -62,39 +65,41 @@ def make_snapshot(): return make_snapshot -@pytest.mark.parametrize("special_pair_cls, params, r_cut, force, energy", - special_pair_test_parameters) -def test_after_attaching(snapshot_factory, simulation_factory, special_pair_cls, - params, r_cut, force, energy): +@pytest.mark.parametrize( + "special_pair_cls, params, r_cut, force, energy", special_pair_test_parameters +) +def test_after_attaching( + snapshot_factory, simulation_factory, special_pair_cls, params, r_cut, force, energy +): snapshot = snapshot_factory() sim = simulation_factory(snapshot) potential = special_pair_cls() - potential.params['A-A'] = params - potential.r_cut['A-A'] = r_cut + potential.params["A-A"] = params + potential.r_cut["A-A"] = r_cut - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) - potential.r_cut['A-A'] = r_cut + potential.r_cut["A-A"] = r_cut for key in params: - assert potential.params['A-A'][key] == pytest.approx(params[key]) + assert potential.params["A-A"][key] == pytest.approx(params[key]) -@pytest.mark.parametrize("special_pair_cls, params, r_cut, force, energy", - special_pair_test_parameters) -def test_forces_and_energies(snapshot_factory, simulation_factory, - special_pair_cls, params, r_cut, force, energy): +@pytest.mark.parametrize( + "special_pair_cls, params, r_cut, force, energy", special_pair_test_parameters +) +def test_forces_and_energies( + snapshot_factory, simulation_factory, special_pair_cls, params, r_cut, force, energy +): snapshot = snapshot_factory() sim = simulation_factory(snapshot) potential = special_pair_cls() - potential.params['A-A'] = params - potential.r_cut['A-A'] = r_cut + potential.params["A-A"] = params + potential.r_cut["A-A"] = r_cut - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) @@ -102,60 +107,64 @@ def test_forces_and_energies(snapshot_factory, simulation_factory, sim_forces = potential.forces if sim.device.communicator.rank == 0: assert sum(sim_energies) == pytest.approx(energy) - numpy.testing.assert_allclose(sim_forces[0], [force, 0.0, 0.0], - rtol=1e-6, - atol=1e-5) - numpy.testing.assert_allclose(sim_forces[1], [-1 * force, 0.0, 0.0], - rtol=1e-6, - atol=1e-5) + numpy.testing.assert_allclose( + sim_forces[0], [force, 0.0, 0.0], rtol=1e-6, atol=1e-5 + ) + numpy.testing.assert_allclose( + sim_forces[1], [-1 * force, 0.0, 0.0], rtol=1e-6, atol=1e-5 + ) # Test Logging @pytest.mark.parametrize( - 'cls, expected_namespace, expected_loggables', - zip((md.special_pair.SpecialPair, md.special_pair.LJ, - md.special_pair.Coulomb), itertools.repeat(('md', 'special_pair')), - itertools.repeat(expected_loggable_params))) + "cls, expected_namespace, expected_loggables", + zip( + (md.special_pair.SpecialPair, md.special_pair.LJ, md.special_pair.Coulomb), + itertools.repeat(("md", "special_pair")), + itertools.repeat(expected_loggable_params), + ), +) def test_logging(cls, expected_namespace, expected_loggables): logging_check(cls, expected_namespace, 
expected_loggables) -@pytest.mark.parametrize("special_pair_cls, params, r_cut, force, energy", - special_pair_test_parameters) -def test_kernel_parameters(simulation_factory, snapshot_factory, - special_pair_cls, params, r_cut, force, energy): +@pytest.mark.parametrize( + "special_pair_cls, params, r_cut, force, energy", special_pair_test_parameters +) +def test_kernel_parameters( + simulation_factory, snapshot_factory, special_pair_cls, params, r_cut, force, energy +): snapshot = snapshot_factory() sim = simulation_factory(snapshot) potential = special_pair_cls() - potential.params['A-A'] = params - potential.r_cut['A-A'] = r_cut + potential.params["A-A"] = params + potential.r_cut["A-A"] = r_cut - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) - autotuned_kernel_parameter_check(instance=potential, - activate=lambda: sim.run(1)) + autotuned_kernel_parameter_check(instance=potential, activate=lambda: sim.run(1)) # Test Pickling -@pytest.mark.parametrize("special_pair_cls, params, r_cut, force, energy", - special_pair_test_parameters) -def test_pickling(simulation_factory, snapshot_factory, special_pair_cls, - params, r_cut, force, energy): +@pytest.mark.parametrize( + "special_pair_cls, params, r_cut, force, energy", special_pair_test_parameters +) +def test_pickling( + simulation_factory, snapshot_factory, special_pair_cls, params, r_cut, force, energy +): snapshot = snapshot_factory() sim = simulation_factory(snapshot) potential = special_pair_cls() - potential.params['A-A'] = params - potential.r_cut['A-A'] = r_cut + potential.params["A-A"] = params + potential.r_cut["A-A"] = r_cut pickling_check(potential) - sim.operations.integrator = hoomd.md.Integrator(dt=0.005, - forces=[potential]) + sim.operations.integrator = hoomd.md.Integrator(dt=0.005, forces=[potential]) sim.run(0) pickling_check(potential) diff --git a/hoomd/md/pytest/test_table_pressure.py b/hoomd/md/pytest/test_table_pressure.py index 51e5a16473..864f69a419 100644 --- a/hoomd/md/pytest/test_table_pressure.py +++ b/hoomd/md/pytest/test_table_pressure.py @@ -18,21 +18,23 @@ def test_table_pressure(simulation_factory, two_particle_snapshot_factory): integrator = hoomd.md.Integrator(dt=0.0) thermostat = hoomd.md.methods.thermostats.Bussi(kT=1.0) integrator.methods.append( - hoomd.md.methods.ConstantVolume(hoomd.filter.All(), thermostat)) + hoomd.md.methods.ConstantVolume(hoomd.filter.All(), thermostat) + ) sim.operations.integrator = integrator - logger = hoomd.logging.Logger(categories=['scalar']) - logger.add(thermo, quantities=['pressure']) + logger = hoomd.logging.Logger(categories=["scalar"]) + logger.add(thermo, quantities=["pressure"]) output = io.StringIO("") table_writer = hoomd.write.Table(1, logger, output) sim.operations.writers.append(table_writer) sim.run(1) - ideal_gas_pressure = (2 * thermo.translational_kinetic_energy / 3 - / sim.state.box.volume) + ideal_gas_pressure = ( + 2 * thermo.translational_kinetic_energy / 3 / sim.state.box.volume + ) if sim.device.communicator.rank == 0: - output_lines = output.getvalue().split('\n') - numpy.testing.assert_allclose(float(output_lines[1]), - ideal_gas_pressure, - rtol=0.2) + output_lines = output.getvalue().split("\n") + numpy.testing.assert_allclose( + float(output_lines[1]), ideal_gas_pressure, rtol=0.2 + ) diff --git a/hoomd/md/pytest/test_thermo.py b/hoomd/md/pytest/test_thermo.py index f0f0018325..09a726284a 100644 --- 
a/hoomd/md/pytest/test_thermo.py +++ b/hoomd/md/pytest/test_thermo.py @@ -7,19 +7,20 @@ from hoomd.logging import LoggerCategories import pytest import numpy as np + """ Each entry is a quantity and its type """ _thermo_qtys = [ - ('kinetic_temperature', float), - ('pressure', float), - ('pressure_tensor', list), - ('kinetic_energy', float), - ('translational_kinetic_energy', float), - ('rotational_kinetic_energy', float), - ('potential_energy', float), - ('degrees_of_freedom', float), - ('translational_degrees_of_freedom', float), - ('rotational_degrees_of_freedom', float), - ('num_particles', int), + ("kinetic_temperature", float), + ("pressure", float), + ("pressure_tensor", list), + ("kinetic_energy", float), + ("translational_kinetic_energy", float), + ("rotational_kinetic_energy", float), + ("potential_energy", float), + ("degrees_of_freedom", float), + ("translational_degrees_of_freedom", float), + ("rotational_degrees_of_freedom", float), + ("num_particles", int), ] @@ -50,25 +51,25 @@ def test_attach_detach(simulation_factory, two_particle_snapshot_factory): getattr(thermo, qty) -def _assert_thermo_properties(thermo, npart, rdof, tdof, pe, rke, tke, ke, p, - pt, volume): - +def _assert_thermo_properties( + thermo, npart, rdof, tdof, pe, rke, tke, ke, p, pt, volume +): assert thermo.num_particles == npart assert thermo.rotational_degrees_of_freedom == rdof assert thermo.translational_degrees_of_freedom == tdof - assert thermo.degrees_of_freedom == (thermo.translational_degrees_of_freedom - + thermo.rotational_degrees_of_freedom) + assert thermo.degrees_of_freedom == ( + thermo.translational_degrees_of_freedom + thermo.rotational_degrees_of_freedom + ) np.testing.assert_allclose(thermo.potential_energy, pe) np.testing.assert_allclose(thermo.rotational_kinetic_energy, rke, rtol=1e-4) - np.testing.assert_allclose(thermo.translational_kinetic_energy, - tke, - rtol=1e-4) + np.testing.assert_allclose(thermo.translational_kinetic_energy, tke, rtol=1e-4) np.testing.assert_allclose(thermo.kinetic_energy, ke, rtol=1e-4) - np.testing.assert_allclose(thermo.kinetic_temperature, - 2 * thermo.kinetic_energy - / thermo.degrees_of_freedom, - rtol=1e-4) + np.testing.assert_allclose( + thermo.kinetic_temperature, + 2 * thermo.kinetic_energy / thermo.degrees_of_freedom, + rtol=1e-4, + ) np.testing.assert_allclose(thermo.pressure, p, rtol=1e-4) np.testing.assert_allclose(thermo.pressure_tensor, pt, rtol=1e-4, atol=5e-5) np.testing.assert_allclose(thermo.volume, volume, rtol=1e-7, atol=1e-7) @@ -91,22 +92,33 @@ def test_basic_system_3d(simulation_factory, two_particle_snapshot_factory): sim.run(1) - volume = (snap.configuration.box[0] * snap.configuration.box[1] - * snap.configuration.box[2]) - - _assert_thermo_properties(thermo, 2, 0, 3, 0.0, 0.0, 4.0, 4.0, - 2.0 / 3 * thermo.kinetic_energy / 20**3, - [8.0 / 20.0**3, 0., 0., 0., 0., 0.], volume) + volume = ( + snap.configuration.box[0] + * snap.configuration.box[1] + * snap.configuration.box[2] + ) + + _assert_thermo_properties( + thermo, + 2, + 0, + 3, + 0.0, + 0.0, + 4.0, + 4.0, + 2.0 / 3 * thermo.kinetic_energy / 20**3, + [8.0 / 20.0**3, 0.0, 0.0, 0.0, 0.0, 0.0], + volume, + ) def test_basic_system_2d(simulation_factory, lattice_snapshot_factory): - filterA = hoomd.filter.Type(['A']) - filterB = hoomd.filter.Type(['B']) + filterA = hoomd.filter.Type(["A"]) + filterB = hoomd.filter.Type(["B"]) thermoA = hoomd.md.compute.ThermodynamicQuantities(filterA) thermoB = hoomd.md.compute.ThermodynamicQuantities(filterB) - snap = 
lattice_snapshot_factory(particle_types=['A', 'B'], - dimensions=2, - n=2) + snap = lattice_snapshot_factory(particle_types=["A", "B"], dimensions=2, n=2) if snap.communicator.rank == 0: snap.particles.velocity[:] = [[-1, 0, 0], [2, 0, 0]] * 2 snap.particles.typeid[:] = [0, 1, 0, 1] @@ -117,10 +129,10 @@ def test_basic_system_2d(simulation_factory, lattice_snapshot_factory): integrator = hoomd.md.Integrator(dt=0.0001) thermostat = hoomd.md.methods.thermostats.MTTK(kT=1.0, tau=1.0) + integrator.methods.append(hoomd.md.methods.ConstantVolume(filterA, thermostat)) integrator.methods.append( - hoomd.md.methods.ConstantVolume(filterA, thermostat)) - integrator.methods.append( - hoomd.md.methods.Langevin(filterB, kT=1, default_gamma=0.00001)) + hoomd.md.methods.Langevin(filterB, kT=1, default_gamma=0.00001) + ) sim.operations.integrator = integrator sim.run(1) @@ -128,18 +140,37 @@ def test_basic_system_2d(simulation_factory, lattice_snapshot_factory): volume = snap.configuration.box[0] * snap.configuration.box[1] # tests for group A - _assert_thermo_properties(thermoA, 2, 0, 4, 0.0, 0.0, 1.0, 1.0, - thermoA.kinetic_energy / 2.0**2, - (2.0 / 2.0**2, 0., 0., 0., 0., 0.), volume) + _assert_thermo_properties( + thermoA, + 2, + 0, + 4, + 0.0, + 0.0, + 1.0, + 1.0, + thermoA.kinetic_energy / 2.0**2, + (2.0 / 2.0**2, 0.0, 0.0, 0.0, 0.0, 0.0), + volume, + ) # tests for group B - _assert_thermo_properties(thermoB, 2, 0, 4, 0.0, 0.0, 4.0, 4.0, - thermoB.kinetic_energy / 2.0**2, - (8.0 / 2.0**2, 0., 0., 0., 0., 0.), volume) + _assert_thermo_properties( + thermoB, + 2, + 0, + 4, + 0.0, + 0.0, + 4.0, + 4.0, + thermoB.kinetic_energy / 2.0**2, + (8.0 / 2.0**2, 0.0, 0.0, 0.0, 0.0, 0.0), + volume, + ) def test_system_rotational_dof(simulation_factory, device): - snap = hoomd.Snapshot(device.communicator) if snap.communicator.rank == 0: box = [10, 10, 10, 0, 0, 0] @@ -149,7 +180,7 @@ def test_system_rotational_dof(simulation_factory, device): snap.particles.velocity[:] = [[0, 0, 0], [0, -1, 0], [0, 1, 0]] snap.particles.moment_inertia[:] = [[2.0, 0, 0], [1, 1, 1], [1, 1, 1]] snap.particles.angmom[:] = [[0, 2, 4, 6]] * 3 - snap.particles.types = ['A'] + snap.particles.types = ["A"] filt = hoomd.filter.All() thermo = hoomd.md.compute.ThermodynamicQuantities(filter=filt) @@ -164,17 +195,19 @@ def test_system_rotational_dof(simulation_factory, device): sim.run(0) - _assert_thermo_properties(thermo, - 3, - 7, - 6, - 0.0, - 57 / 4., - 1.0, - 61 / 4., - 2. / 3 * thermo.translational_kinetic_energy - / 10.0**3, (0., 0., 0., 2. 
/ 10**3, 0., 0.), - volume=1000) + _assert_thermo_properties( + thermo, + 3, + 7, + 6, + 0.0, + 57 / 4.0, + 1.0, + 61 / 4.0, + 2.0 / 3 * thermo.translational_kinetic_energy / 10.0**3, + (0.0, 0.0, 0.0, 2.0 / 10**3, 0.0, 0.0), + volume=1000, + ) integrator.integrate_rotational_dof = False sim.run(0) @@ -190,53 +223,38 @@ def test_pickling(simulation_factory, two_particle_snapshot_factory): def test_logging(): logging_check( - hoomd.md.compute.ThermodynamicQuantities, ('md', 'compute'), { - 'kinetic_temperature': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'pressure': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'pressure_tensor': { - 'category': LoggerCategories.sequence, - 'default': True - }, - 'kinetic_energy': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'translational_kinetic_energy': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'rotational_kinetic_energy': { - 'category': LoggerCategories.scalar, - 'default': True + hoomd.md.compute.ThermodynamicQuantities, + ("md", "compute"), + { + "kinetic_temperature": { + "category": LoggerCategories.scalar, + "default": True, }, - 'potential_energy': { - 'category': LoggerCategories.scalar, - 'default': True + "pressure": {"category": LoggerCategories.scalar, "default": True}, + "pressure_tensor": {"category": LoggerCategories.sequence, "default": True}, + "kinetic_energy": {"category": LoggerCategories.scalar, "default": True}, + "translational_kinetic_energy": { + "category": LoggerCategories.scalar, + "default": True, }, - 'degrees_of_freedom': { - 'category': LoggerCategories.scalar, - 'default': True + "rotational_kinetic_energy": { + "category": LoggerCategories.scalar, + "default": True, }, - 'translational_degrees_of_freedom': { - 'category': LoggerCategories.scalar, - 'default': True + "potential_energy": {"category": LoggerCategories.scalar, "default": True}, + "degrees_of_freedom": { + "category": LoggerCategories.scalar, + "default": True, }, - 'rotational_degrees_of_freedom': { - 'category': LoggerCategories.scalar, - 'default': True + "translational_degrees_of_freedom": { + "category": LoggerCategories.scalar, + "default": True, }, - 'num_particles': { - 'category': LoggerCategories.scalar, - 'default': True + "rotational_degrees_of_freedom": { + "category": LoggerCategories.scalar, + "default": True, }, - 'volume': { - 'category': LoggerCategories.scalar, - 'default': True - } - }) + "num_particles": {"category": LoggerCategories.scalar, "default": True}, + "volume": {"category": LoggerCategories.scalar, "default": True}, + }, + ) diff --git a/hoomd/md/pytest/test_thermoHMA.py b/hoomd/md/pytest/test_thermoHMA.py index b46d0c48cd..0364a18ede 100644 --- a/hoomd/md/pytest/test_thermoHMA.py +++ b/hoomd/md/pytest/test_thermoHMA.py @@ -11,8 +11,7 @@ def test_before_attaching(): filt = hoomd.filter.All() - thermoHMA = hoomd.md.compute.HarmonicAveragedThermodynamicQuantities( - filt, 1.0) + thermoHMA = hoomd.md.compute.HarmonicAveragedThermodynamicQuantities(filt, 1.0) assert thermoHMA._filter == filt assert thermoHMA.kT == 1.0 assert thermoHMA.harmonic_pressure == 0.0 @@ -21,16 +20,14 @@ def test_before_attaching(): with pytest.raises(DataAccessError): thermoHMA.pressure - thermoHMA = hoomd.md.compute.HarmonicAveragedThermodynamicQuantities( - filt, 2.5, 0.6) + thermoHMA = hoomd.md.compute.HarmonicAveragedThermodynamicQuantities(filt, 2.5, 0.6) assert thermoHMA.kT == 2.5 assert thermoHMA.harmonic_pressure == 0.6 def 
test_after_attaching(simulation_factory, two_particle_snapshot_factory): filt = hoomd.filter.All() - thermoHMA = hoomd.md.compute.HarmonicAveragedThermodynamicQuantities( - filt, 1.0) + thermoHMA = hoomd.md.compute.HarmonicAveragedThermodynamicQuantities(filt, 1.0) sim = simulation_factory(two_particle_snapshot_factory()) sim.operations.add(thermoHMA) @@ -53,13 +50,9 @@ def test_after_attaching(simulation_factory, two_particle_snapshot_factory): def test_logging(): logging_check( hoomd.md.compute.HarmonicAveragedThermodynamicQuantities, - ('md', 'compute'), { - 'potential_energy': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'pressure': { - 'category': LoggerCategories.scalar, - 'default': True - } - }) + ("md", "compute"), + { + "potential_energy": {"category": LoggerCategories.scalar, "default": True}, + "pressure": {"category": LoggerCategories.scalar, "default": True}, + }, + ) diff --git a/hoomd/md/pytest/test_update_group_dof.py b/hoomd/md/pytest/test_update_group_dof.py index e75f5b8294..3dbb811ea7 100644 --- a/hoomd/md/pytest/test_update_group_dof.py +++ b/hoomd/md/pytest/test_update_group_dof.py @@ -5,7 +5,7 @@ import pytest -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def snapshot_factory(device): """Make a test snapshot for update_group_dof testing.""" @@ -25,7 +25,7 @@ def make_snapshot(): [1, 1, 1], ] snap.particles.angmom[:] = [[0, 2, 4, 6]] * 3 - snap.particles.types = ['A', 'B'] + snap.particles.types = ["A", "B"] return snap @@ -112,8 +112,7 @@ def test_set_integrator(simulation_factory, snapshot_factory): integrate_rotational_dof=True, ) thermostat = hoomd.md.methods.thermostats.Bussi(kT=1.0) - method2 = hoomd.md.methods.ConstantVolume(filter=filter_all, - thermostat=thermostat) + method2 = hoomd.md.methods.ConstantVolume(filter=filter_all, thermostat=thermostat) integrator2 = hoomd.md.Integrator( 0.005, methods=[method2], @@ -149,8 +148,7 @@ def test_set_method(simulation_factory, snapshot_factory): integrate_rotational_dof=True, ) thermostat = hoomd.md.methods.thermostats.Bussi(kT=1.0) - method2 = hoomd.md.methods.ConstantVolume(filter=filter_all, - thermostat=thermostat) + method2 = hoomd.md.methods.ConstantVolume(filter=filter_all, thermostat=thermostat) sim.operations.integrator = integrator thermo = hoomd.md.compute.ThermodynamicQuantities(filter=filter_all) @@ -207,7 +205,7 @@ def test_filter_updater(simulation_factory, snapshot_factory): snapshot = snapshot_factory() sim = simulation_factory(snapshot) - filter_type = hoomd.filter.Type(['A']) + filter_type = hoomd.filter.Type(["A"]) method = hoomd.md.methods.Langevin(filter=filter_type, kT=1.0) integrator = hoomd.md.Integrator( 0.005, @@ -235,7 +233,8 @@ def test_filter_updater(simulation_factory, snapshot_factory): # add the filter updater and trigger it to change the DOF filter_updater = hoomd.update.FilterUpdater( - trigger=hoomd.trigger.Periodic(1), filters=[filter_type]) + trigger=hoomd.trigger.Periodic(1), filters=[filter_type] + ) sim.operations.updaters.append(filter_updater) sim.run(1) diff --git a/hoomd/md/pytest/test_wall_data.py b/hoomd/md/pytest/test_wall_data.py index ebf49e49d9..bf8a6b5e1c 100644 --- a/hoomd/md/pytest/test_wall_data.py +++ b/hoomd/md/pytest/test_wall_data.py @@ -10,7 +10,6 @@ class _TestCounter: - def __init__(self): self.previous_cls = None self.count = 0 @@ -27,77 +26,78 @@ def __call__(self, arg): _test_args = ( - (_md.SphereWall, ({ - "radius": 4.0, - "origin": (1.0, 0, 0), - "inside": True, - "open": False - }, { - "radius": 1.0, - 
"origin": (0.0, 0.0, 0.0), - "inside": False, - "open": False - }, { - "radius": 3.1415, - "origin": (-1.0, -2.0, 2.0), - "inside": False, - "open": True - })), - (_md.CylinderWall, ({ - "radius": 4.0, - "origin": (1.0, 0, 0), - "axis": (1.0, 0.0, 0), - "inside": True, - "open": True - }, { - "radius": 1.0, - "origin": (0.0, 0.0, 0.0), - "axis": (0.0, 1.0, 0.0), - "inside": False, - "open": False - }, { - "radius": 3.1415, - "origin": (-1.0, -2.0, 1.0), - "axis": (0.0, 0.0, 1.0), - "inside": False, - "open": True - })), ( - _md.PlaneWall, + _md.SphereWall, + ( + {"radius": 4.0, "origin": (1.0, 0, 0), "inside": True, "open": False}, + {"radius": 1.0, "origin": (0.0, 0.0, 0.0), "inside": False, "open": False}, + { + "radius": 3.1415, + "origin": (-1.0, -2.0, 2.0), + "inside": False, + "open": True, + }, + ), + ), + ( + _md.CylinderWall, ( - # The normals have to be unit vectors for the equality checks to - # hold. The C++ class currently normalizes any input vector. { + "radius": 4.0, "origin": (1.0, 0, 0), - "normal": (1.0, 0.0, 0), - "open": False + "axis": (1.0, 0.0, 0), + "inside": True, + "open": True, }, { + "radius": 1.0, "origin": (0.0, 0.0, 0.0), - "normal": (0.0, 1.0, 0.0), - "open": True + "axis": (0.0, 1.0, 0.0), + "inside": False, + "open": False, }, { + "radius": 3.1415, "origin": (-1.0, -2.0, 1.0), - "normal": (0.0, 0.0, 1.0), - "open": False - }))) + "axis": (0.0, 0.0, 1.0), + "inside": False, + "open": True, + }, + ), + ), + ( + _md.PlaneWall, + ( + # The normals have to be unit vectors for the equality checks to + # hold. The C++ class currently normalizes any input vector. + {"origin": (1.0, 0, 0), "normal": (1.0, 0.0, 0), "open": False}, + {"origin": (0.0, 0.0, 0.0), "normal": (0.0, 1.0, 0.0), "open": True}, + {"origin": (-1.0, -2.0, 1.0), "normal": (0.0, 0.0, 1.0), "open": False}, + ), + ), +) -@pytest.mark.parametrize("cls, constructor_kwargs", - ((cls, constructor_kwargs) - for cls, arg_list in _test_args - for constructor_kwargs in arg_list), - ids=_TestCounter()) +@pytest.mark.parametrize( + "cls, constructor_kwargs", + ( + (cls, constructor_kwargs) + for cls, arg_list in _test_args + for constructor_kwargs in arg_list + ), + ids=_TestCounter(), +) def test_valid_construction(cls, constructor_kwargs): obj = cls(**constructor_kwargs) for key, value in constructor_kwargs.items(): assert np.allclose(getattr(obj, key), value) -@pytest.mark.parametrize("cls, constructor_kwargs", - ((cls, arg_list[0]) for cls, arg_list in _test_args), - ids=_TestCounter()) +@pytest.mark.parametrize( + "cls, constructor_kwargs", + ((cls, arg_list[0]) for cls, arg_list in _test_args), + ids=_TestCounter(), +) def test_immutability(cls, constructor_kwargs): obj = cls(**constructor_kwargs) for key, value in constructor_kwargs.items(): diff --git a/hoomd/md/pytest/test_wall_potential.py b/hoomd/md/pytest/test_wall_potential.py index 2a99b6a526..fb00387c61 100644 --- a/hoomd/md/pytest/test_wall_potential.py +++ b/hoomd/md/pytest/test_wall_potential.py @@ -7,8 +7,11 @@ import hoomd import hoomd.md as md from hoomd.conftest import expected_loggable_params -from hoomd.conftest import (logging_check, pickling_check, - autotuned_kernel_parameter_check) +from hoomd.conftest import ( + logging_check, + pickling_check, + autotuned_kernel_parameter_check, +) import itertools @@ -33,9 +36,7 @@ def generate(cls, types=("Sphere", "Cylinder", "Plane")): origin = (cls.float(), cls.float(), cls.float()) inside = cls.rng.choice((True, False)) if type == "Sphere": - return hoomd.wall.Sphere(radius=cls.float(), - 
origin=origin, - inside=inside) + return hoomd.wall.Sphere(radius=cls.float(), origin=origin, inside=inside) if type == "Cylinder": return hoomd.wall.Cylinder( radius=cls.float(), @@ -102,33 +103,12 @@ def _params(r_cut=None, r_extrap=None): parameters. """ base = ( - { - "sigma": 1.0, - "epsilon": 1.0 - }, - { - "sigma": 1.0, - "epsilon": 5.5 - }, - { - "kappa": 1.0, - "epsilon": 1.5 - }, - { - "r0": 1.0, - "D0": 1.0, - "alpha": 1.0 - }, - { - "sigma": 1.0, - "epsilon": 1.0 - }, - { - "sigma": 1.0, - "epsilon": 1.0, - "m": 10, - "n": 20 - }, + {"sigma": 1.0, "epsilon": 1.0}, + {"sigma": 1.0, "epsilon": 5.5}, + {"kappa": 1.0, "epsilon": 1.5}, + {"r0": 1.0, "D0": 1.0, "alpha": 1.0}, + {"sigma": 1.0, "epsilon": 1.0}, + {"sigma": 1.0, "epsilon": 1.0, "m": 10, "n": 20}, ) for p in base: if r_cut is None: @@ -172,10 +152,12 @@ def test_attaching(simulation, cls, params): @pytest.mark.parametrize("cls, params", zip(_potential_cls, _params(2.5, 0.0))) def test_plane(simulation, cls, params): """Test that particles stay in box slice defined by two plane walls.""" - wall_pot = cls([ - hoomd.wall.Plane(normal=(0, 0, -1), origin=(0, 0, 1)), - hoomd.wall.Plane(normal=(0, 0, 1), origin=(0, 0, -1)), - ]) + wall_pot = cls( + [ + hoomd.wall.Plane(normal=(0, 0, -1), origin=(0, 0, 1)), + hoomd.wall.Plane(normal=(0, 0, 1), origin=(0, 0, -1)), + ] + ) simulation.operations.integrator.forces.append(wall_pot) wall_pot.params["A"] = params for _ in range(10): @@ -189,15 +171,13 @@ def test_plane(simulation, cls, params): def test_sphere(simulation, cls, params): """Test that particles stay within a sphere wall.""" radius = 5 - wall_pot = cls( - [hoomd.wall.Sphere(radius=radius, origin=(0, 0, 0), inside=True)]) + wall_pot = cls([hoomd.wall.Sphere(radius=radius, origin=(0, 0, 0), inside=True)]) simulation.operations.integrator.forces.append(wall_pot) wall_pot.params["A"] = params for _ in range(10): simulation.run(10) with simulation.state.cpu_local_snapshot as snap: - assert np.all( - np.linalg.norm(snap.particles.position, axis=1) < radius) + assert np.all(np.linalg.norm(snap.particles.position, axis=1) < radius) @pytest.mark.parametrize("cls, params", zip(_potential_cls, _params(2.5, 0.0))) @@ -205,12 +185,9 @@ def test_cylinder(simulation, cls, params): """Test that particles stay within the pipe defined by a cylinder wall.""" n = np.array([1, 1, 1]) radius = 5 - wall_pot = cls([ - hoomd.wall.Cylinder(radius=radius, - origin=(0, 0, 0), - axis=n, - inside=True) - ]) + wall_pot = cls( + [hoomd.wall.Cylinder(radius=radius, origin=(0, 0, 0), axis=n, inside=True)] + ) simulation.operations.integrator.forces.append(wall_pot) wall_pot.params["A"] = params for _ in range(10): @@ -225,8 +202,7 @@ def test_cylinder(simulation, cls, params): def test_outside(simulation, cls, params): """Test that particles stay outside a sphere wall when inside=False.""" radius = 5.0 - wall_pot = cls( - [hoomd.wall.Sphere(radius=radius, origin=(0, 0, 0), inside=False)]) + wall_pot = cls([hoomd.wall.Sphere(radius=radius, origin=(0, 0, 0), inside=False)]) simulation.operations.integrator.forces.append(wall_pot) wall_pot.params["A"] = params snap = simulation.state.get_snapshot() @@ -237,16 +213,14 @@ def test_outside(simulation, cls, params): for _ in range(10): simulation.run(50) with simulation.state.cpu_local_snapshot as snap: - assert np.all( - np.linalg.norm(snap.particles.position, axis=1) > radius) + assert np.all(np.linalg.norm(snap.particles.position, axis=1) > radius) @pytest.mark.parametrize("cls, params", zip(_potential_cls, 
_params(2.5, 1.1))) def test_r_extrap(simulation, cls, params): """Test a force is generated in the other half space with r_extrap set.""" radius = 5.0 - wall_pot = cls( - [hoomd.wall.Sphere(radius=radius, origin=(0, 0, 0), inside=False)]) + wall_pot = cls([hoomd.wall.Sphere(radius=radius, origin=(0, 0, 0), inside=False)]) simulation.operations.integrator.forces.append(wall_pot) wall_pot.params["A"] = params snap = simulation.state.get_snapshot() @@ -264,9 +238,13 @@ def test_r_extrap(simulation, cls, params): # Test Logging @pytest.mark.parametrize( - 'cls, expected_namespace, expected_loggables', - zip(_potential_cls, itertools.repeat(('md', 'external', 'wall')), - itertools.repeat(expected_loggable_params))) + "cls, expected_namespace, expected_loggables", + zip( + _potential_cls, + itertools.repeat(("md", "external", "wall")), + itertools.repeat(expected_loggable_params), + ), +) def test_logging(cls, expected_namespace, expected_loggables): logging_check(cls, expected_namespace, expected_loggables) @@ -279,8 +257,9 @@ def test_kernel_parameters(simulation, cls, params): simulation.run(0) - autotuned_kernel_parameter_check(instance=wall_pot, - activate=lambda: simulation.run(1)) + autotuned_kernel_parameter_check( + instance=wall_pot, activate=lambda: simulation.run(1) + ) # Pickle Testing diff --git a/hoomd/md/special_pair.py b/hoomd/md/special_pair.py index 772e6d6534..81b9ba1469 100644 --- a/hoomd/md/special_pair.py +++ b/hoomd/md/special_pair.py @@ -131,10 +131,13 @@ def __init__(self): super().__init__() # setup the coefficient options params = TypeParameter( - "params", "special_pair_types", - TypeParameterDict(epsilon=float, sigma=float, len_keys=1)) - r_cut = TypeParameter("r_cut", "special_pair_types", - TypeParameterDict(float, len_keys=1)) + "params", + "special_pair_types", + TypeParameterDict(epsilon=float, sigma=float, len_keys=1), + ) + r_cut = TypeParameter( + "r_cut", "special_pair_types", TypeParameterDict(float, len_keys=1) + ) self._extend_typeparam([params, r_cut]) @@ -188,15 +191,17 @@ class Coulomb(SpecialPair): def __init__(self): super().__init__() - params = TypeParameter("params", "special_pair_types", - TypeParameterDict(alpha=float, len_keys=1)) - r_cut = TypeParameter("r_cut", "special_pair_types", - TypeParameterDict(float, len_keys=1)) + params = TypeParameter( + "params", "special_pair_types", TypeParameterDict(alpha=float, len_keys=1) + ) + r_cut = TypeParameter( + "r_cut", "special_pair_types", TypeParameterDict(float, len_keys=1) + ) self._extend_typeparam([params, r_cut]) __all__ = [ - 'LJ', - 'Coulomb', - 'SpecialPair', + "LJ", + "Coulomb", + "SpecialPair", ] diff --git a/hoomd/md/tune/__init__.py b/hoomd/md/tune/__init__.py index 7496d40312..b95d765785 100644 --- a/hoomd/md/tune/__init__.py +++ b/hoomd/md/tune/__init__.py @@ -6,5 +6,5 @@ from .nlist_buffer import NeighborListBuffer __all__ = [ - 'NeighborListBuffer', + "NeighborListBuffer", ] diff --git a/hoomd/md/tune/nlist_buffer.py b/hoomd/md/tune/nlist_buffer.py index e754df515e..79d66089f1 100644 --- a/hoomd/md/tune/nlist_buffer.py +++ b/hoomd/md/tune/nlist_buffer.py @@ -19,7 +19,6 @@ class _IntervalTPS: - def __init__(self, simulation): self._simulation = simulation self._initial_timestep = None @@ -72,13 +71,16 @@ def __init__( nlist=SetOnce(NeighborList), solver=SetOnce(hoomd.tune.solve.Optimizer), maximum_buffer=OnlyTypes(float, postprocess=self._buffer_post), - minimum_buffer=OnlyTypes(float, postprocess=self._buffer_post)) - param_dict.update({ - "nlist": nlist, - "solver": solver, - 
"maximum_buffer": maximum_buffer, - "minimum_buffer": minimum_buffer - }) + minimum_buffer=OnlyTypes(float, postprocess=self._buffer_post), + ) + param_dict.update( + { + "nlist": nlist, + "solver": solver, + "maximum_buffer": maximum_buffer, + "minimum_buffer": minimum_buffer, + } + ) self._param_dict.update(param_dict) self._simulation = None @@ -246,8 +248,7 @@ def with_gradient_descent( nlist: NeighborList, maximum_buffer: float, minimum_buffer: float = 0.0, - alpha: hoomd.variant.variant_like = hoomd.variant.Ramp( - 1e-5, 1e-6, 0, 30), + alpha: hoomd.variant.variant_like = hoomd.variant.Ramp(1e-5, 1e-6, 0, 30), kappa: typing.Optional[np.ndarray] = (0.33, 0.165), tol: float = 1e-5, max_delta: "float | None" = None, @@ -299,8 +300,7 @@ def with_gradient_descent( return cls( trigger, nlist, - hoomd.tune.solve.GradientDescent(alpha, kappa, tol, True, - max_delta), + hoomd.tune.solve.GradientDescent(alpha, kappa, tol, True, max_delta), maximum_buffer=maximum_buffer, ) diff --git a/hoomd/md/update.py b/hoomd/md/update.py index 53103da309..75042c8b8b 100644 --- a/hoomd/md/update.py +++ b/hoomd/md/update.py @@ -42,7 +42,8 @@ class ZeroMomentum(Updater): Examples:: zero_momentum = hoomd.md.update.ZeroMomentum( - hoomd.trigger.Periodic(100)) + hoomd.trigger.Periodic(100) + ) """ __doc__ += Updater._doc_inherited @@ -54,7 +55,8 @@ def __init__(self, trigger): def _attach_hook(self): # create the c++ mirror class self._cpp_obj = _md.ZeroMomentumUpdater( - self._simulation.state._cpp_sys_def, self.trigger) + self._simulation.state._cpp_sys_def, self.trigger + ) class ReversePerturbationFlow(Updater): @@ -117,11 +119,13 @@ class ReversePerturbationFlow(Updater): # const integrated flow with 0.1 slope for max 1e8 timesteps ramp = hoomd.variant.Ramp(0.0, 0.1e8, 0, int(1e8)) # velocity gradient in z direction and shear flow in x direction. 
- mpf = hoomd.md.update.ReversePerturbationFlow(filter=hoomd.filter.All(), - flow_target=ramp, - slab_direction="Z", - flow_direction="X", - n_slabs=20) + mpf = hoomd.md.update.ReversePerturbationFlow( + filter=hoomd.filter.All(), + flow_target=ramp, + slab_direction="Z", + flow_direction="X", + n_slabs=20, + ) {inherited} @@ -152,34 +156,35 @@ class ReversePerturbationFlow(Updater): __doc__ = __doc__.replace("{inherited}", Updater._doc_inherited) - def __init__(self, - filter, - flow_target, - slab_direction, - flow_direction, - n_slabs, - max_slab=-1, - min_slab=-1): - + def __init__( + self, + filter, + flow_target, + slab_direction, + flow_direction, + n_slabs, + max_slab=-1, + min_slab=-1, + ): params = ParameterDict( filter=hoomd.filter.ParticleFilter, flow_target=hoomd.variant.Variant, - slab_direction=OnlyTypes(str, - strict=True, - postprocess=self._to_lowercase), - flow_direction=OnlyTypes(str, - strict=True, - postprocess=self._to_lowercase), + slab_direction=OnlyTypes(str, strict=True, postprocess=self._to_lowercase), + flow_direction=OnlyTypes(str, strict=True, postprocess=self._to_lowercase), n_slabs=OnlyTypes(int, preprocess=self._preprocess_n_slabs), max_slab=OnlyTypes(int, preprocess=self._preprocess_max_slab), min_slab=OnlyTypes(int, preprocess=self._preprocess_min_slab), - flow_epsilon=float(1e-2)) + flow_epsilon=float(1e-2), + ) params.update( - dict(filter=filter, - flow_target=flow_target, - slab_direction=slab_direction, - flow_direction=flow_direction, - n_slabs=n_slabs)) + dict( + filter=filter, + flow_target=flow_target, + slab_direction=slab_direction, + flow_direction=flow_direction, + n_slabs=n_slabs, + ) + ) self._param_dict.update(params) self._param_dict.update(dict(max_slab=max_slab)) self._param_dict.update(dict(min_slab=min_slab)) @@ -218,14 +223,30 @@ def _attach_hook(self): sys_def = self._simulation.state._cpp_sys_def if isinstance(self._simulation.device, hoomd.device.CPU): self._cpp_obj = _md.MuellerPlatheFlow( - sys_def, self.trigger, group, self.flow_target, - self.slab_direction, self.flow_direction, self.n_slabs, - self.min_slab, self.max_slab, self.flow_epsilon) + sys_def, + self.trigger, + group, + self.flow_target, + self.slab_direction, + self.flow_direction, + self.n_slabs, + self.min_slab, + self.max_slab, + self.flow_epsilon, + ) else: self._cpp_obj = _md.MuellerPlatheFlowGPU( - sys_def, self.trigger, group, self.flow_target, - self.slab_direction, self.flow_direction, self.n_slabs, - self.min_slab, self.max_slab, self.flow_epsilon) + sys_def, + self.trigger, + group, + self.flow_target, + self.slab_direction, + self.flow_direction, + self.n_slabs, + self.min_slab, + self.max_slab, + self.flow_epsilon, + ) @log(category="scalar", requires_run=True) def summed_exchanged_momentum(self): @@ -283,8 +304,10 @@ class ActiveRotationalDiffusion(Updater): def __init__(self, trigger, active_force, rotational_diffusion): super().__init__(trigger) - param_dict = ParameterDict(rotational_diffusion=hoomd.variant.Variant, - active_force=hoomd.md.force.Active) + param_dict = ParameterDict( + rotational_diffusion=hoomd.variant.Variant, + active_force=hoomd.md.force.Active, + ) param_dict["rotational_diffusion"] = rotational_diffusion param_dict["active_force"] = active_force self._add_dependency(active_force) @@ -297,14 +320,19 @@ def _attach_hook(self): if not self.active_force._attached: raise SimulationDefinitionError( "Active force for ActiveRotationalDiffusion object does not " - "belong to the simulation integrator.") + "belong to the simulation 
integrator." + ) if self.active_force._simulation is not self._simulation: raise SimulationDefinitionError( "Active force for ActiveRotationalDiffusion object belongs to " - "another simulation.") + "another simulation." + ) self._cpp_obj = _md.ActiveRotationalDiffusionUpdater( - self._simulation.state._cpp_sys_def, self.trigger, - self.rotational_diffusion, self.active_force._cpp_obj) + self._simulation.state._cpp_sys_def, + self.trigger, + self.rotational_diffusion, + self.active_force._cpp_obj, + ) def _handle_removed_dependency(self, active_force): sim = self._simulation @@ -319,7 +347,7 @@ def _setattr_param(self, attr, value): __all__ = [ - 'ActiveRotationalDiffusion', - 'ReversePerturbationFlow', - 'ZeroMomentum', + "ActiveRotationalDiffusion", + "ReversePerturbationFlow", + "ZeroMomentum", ] diff --git a/hoomd/mesh.py b/hoomd/mesh.py index e1b4b9eb08..c2489121d6 100644 --- a/hoomd/mesh.py +++ b/hoomd/mesh.py @@ -51,8 +51,15 @@ class Mesh(_HOOMDBaseObject): mesh_obj = hoomd.mesh.Mesh() mesh_obj.types = ["mesh"] - mesh_obj.triangulation = dict(type_ids = [0,0,0,0], - triangles = [[0,1,2],[0,2,3],[0,1,3],[1,2,3]]) + mesh_obj.triangulation = dict( + type_ids=[0, 0, 0, 0], + triangles=[ + [0, 1, 2], + [0, 2, 3], + [0, 1, 3], + [1, 2, 3], + ], + ) .. py:attribute:: types @@ -74,50 +81,52 @@ class Mesh(_HOOMDBaseObject): """ def __init__(self): - param_dict = ParameterDict( types=[str], - triangulation=OnlyIf(to_type_converter({ - "type_ids": NDArrayValidator(np.uint), - "triangles": NDArrayValidator(np.uint, shape=(None, 3)) - }), - postprocess=self._ensure_same_size)) + triangulation=OnlyIf( + to_type_converter( + { + "type_ids": NDArrayValidator(np.uint), + "triangles": NDArrayValidator(np.uint, shape=(None, 3)), + } + ), + postprocess=self._ensure_same_size, + ), + ) param_dict["types"] = ["mesh"] - param_dict["triangulation"] = dict(type_ids=np.zeros(0, dtype=int), - triangles=np.zeros((0, 3), - dtype=int)) + param_dict["triangulation"] = dict( + type_ids=np.zeros(0, dtype=int), triangles=np.zeros((0, 3), dtype=int) + ) self._param_dict.update(param_dict) def _attach_hook(self): - self._cpp_obj = _hoomd.MeshDefinition( - self._simulation.state._cpp_sys_def, len(self._param_dict["types"])) + self._simulation.state._cpp_sys_def, len(self._param_dict["types"]) + ) - self._cpp_obj.setTypes(list(self._param_dict['types'])) + self._cpp_obj.setTypes(list(self._param_dict["types"])) if hoomd.version.mpi_enabled: pdata = self._simulation.state._cpp_sys_def.getParticleData() decomposition = pdata.getDomainDecomposition() if decomposition is not None: - self._simulation._system_communicator.addMeshDefinition( - self._cpp_obj) + self._simulation._system_communicator.addMeshDefinition(self._cpp_obj) def _ensure_same_size(self, triangulation): if triangulation is None: return None if len(triangulation["triangles"]) != len(triangulation["type_ids"]): - raise ValueError( - "Number of type_ids do not match number of triangles.") + raise ValueError("Number of type_ids do not match number of triangles.") return triangulation - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def type_ids(self): """((*N*) `numpy.ndarray` of ``uint32``): Triangle type ids.""" return self.triangulation["type_ids"] - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def triangles(self): """((*N*, 3) `numpy.ndarray` of ``uint32``): Mesh triangulation. 
@@ -126,7 +135,7 @@ def triangles(self): """ return self.triangulation["triangles"] - @log(category='sequence', requires_run=True) + @log(category="sequence", requires_run=True) def bonds(self): """((*N*, 2) `numpy.ndarray` of ``uint32``): Mesh bonds. @@ -145,4 +154,4 @@ def size(self): return len(self.triangulation["triangles"]) -__all__ = ['Mesh'] +__all__ = ["Mesh"] diff --git a/hoomd/mpcd/__init__.py b/hoomd/mpcd/__init__.py index 03bb8cabb8..fe9eae71f8 100644 --- a/hoomd/mpcd/__init__.py +++ b/hoomd/mpcd/__init__.py @@ -69,12 +69,12 @@ from hoomd.mpcd import tune __all__ = [ - 'Integrator', - 'collide', - 'fill', - 'force', - 'geometry', - 'methods', - 'stream', - 'tune', + "Integrator", + "collide", + "fill", + "force", + "geometry", + "methods", + "stream", + "tune", ] diff --git a/hoomd/mpcd/collide.py b/hoomd/mpcd/collide.py index 1f8d96ba96..abb171a956 100644 --- a/hoomd/mpcd/collide.py +++ b/hoomd/mpcd/collide.py @@ -71,7 +71,9 @@ class CellList(Compute): def __init__(self, shift=True): super().__init__() - param_dict = ParameterDict(shift=bool(shift),) + param_dict = ParameterDict( + shift=bool(shift), + ) self._param_dict.update(param_dict) def _attach_hook(self): @@ -138,7 +140,9 @@ class CollisionMethod(Operation): """ __doc__ = __doc__.replace("{inherited}", Operation._doc_inherited) - _doc_inherited = Operation._doc_inherited + """ + _doc_inherited = ( + Operation._doc_inherited + + """ ---------- **Members inherited from** @@ -154,14 +158,14 @@ class CollisionMethod(Operation): Number of integration steps between collisions. `Read more... ` """ + ) def __init__(self, period, embedded_particles=None): super().__init__() param_dict = ParameterDict( period=int(period), - embedded_particles=OnlyTypes(hoomd.filter.ParticleFilter, - allow_none=True), + embedded_particles=OnlyTypes(hoomd.filter.ParticleFilter, allow_none=True), ) param_dict["embedded_particles"] = embedded_particles self._param_dict.update(param_dict) @@ -195,8 +199,8 @@ class AndersenThermostat(CollisionMethod): .. code-block:: python andersen_thermostat = hoomd.mpcd.collide.AndersenThermostat( - period=1, - kT=1.0) + period=1, kT=1.0 + ) simulation.operations.integrator.collision_method = andersen_thermostat Collision including embedded particles. @@ -206,7 +210,8 @@ class AndersenThermostat(CollisionMethod): andersen_thermostat = hoomd.mpcd.collide.AndersenThermostat( period=20, kT=1.0, - embedded_particles=hoomd.filter.All()) + embedded_particles=hoomd.filter.All(), + ) simulation.operations.integrator.collision_method = andersen_thermostat {inherited} @@ -256,12 +261,14 @@ def _attach_hook(self): else: cpp_class = _mpcd.ATCollisionMethod - self._cpp_obj = cpp_class(sim.state._cpp_sys_def, sim.timestep, - self.period, 0, self.kT) + self._cpp_obj = cpp_class( + sim.state._cpp_sys_def, sim.timestep, self.period, 0, self.kT + ) if self.embedded_particles is not None: self._cpp_obj.setEmbeddedGroup( - sim.state._get_group(self.embedded_particles)) + sim.state._get_group(self.embedded_particles) + ) super()._attach_hook() @@ -311,9 +318,8 @@ class StochasticRotationDynamics(CollisionMethod): .. code-block:: python srd = hoomd.mpcd.collide.StochasticRotationDynamics( - period=1, - angle=130, - kT=1.0) + period=1, angle=130, kT=1.0 + ) simulation.operations.integrator.collision_method = srd Collision including embedded particles. 
@@ -324,7 +330,8 @@ class StochasticRotationDynamics(CollisionMethod): period=20, angle=130, kT=1.0, - embedded_particles=hoomd.filter.All()) + embedded_particles=hoomd.filter.All(), + ) simulation.operations.integrator.collision_method = srd {inherited} @@ -368,9 +375,9 @@ def __init__(self, period, angle, kT=None, embedded_particles=None): param_dict = ParameterDict( angle=float(angle), - kT=OnlyTypes(hoomd.variant.Variant, - allow_none=True, - preprocess=variant_preprocessing), + kT=OnlyTypes( + hoomd.variant.Variant, allow_none=True, preprocess=variant_preprocessing + ), ) param_dict["kT"] = kT self._param_dict.update(param_dict) @@ -384,19 +391,21 @@ def _attach_hook(self): else: cpp_class = _mpcd.SRDCollisionMethod - self._cpp_obj = cpp_class(sim.state._cpp_sys_def, sim.timestep, - self.period, 0, self.angle) + self._cpp_obj = cpp_class( + sim.state._cpp_sys_def, sim.timestep, self.period, 0, self.angle + ) if self.embedded_particles is not None: self._cpp_obj.setEmbeddedGroup( - sim.state._get_group(self.embedded_particles)) + sim.state._get_group(self.embedded_particles) + ) super()._attach_hook() __all__ = [ - 'AndersenThermostat', - 'CellList', - 'CollisionMethod', - 'StochasticRotationDynamics', + "AndersenThermostat", + "CellList", + "CollisionMethod", + "StochasticRotationDynamics", ] diff --git a/hoomd/mpcd/fill.py b/hoomd/mpcd/fill.py index b3b5e97929..55a483a184 100644 --- a/hoomd/mpcd/fill.py +++ b/hoomd/mpcd/fill.py @@ -84,7 +84,9 @@ class VirtualParticleFiller(Operation): """ __doc__ = __doc__.replace("{inherited}", Operation._doc_inherited) - _doc_inherited = Operation._doc_inherited + """ + _doc_inherited = ( + Operation._doc_inherited + + """ ---------- **Members inherited from** @@ -105,6 +107,7 @@ class VirtualParticleFiller(Operation): Type of particles to fill. `Read more... ` """ + ) def __init__(self, type, density, kT): super().__init__() @@ -146,10 +149,8 @@ class GeometryFiller(VirtualParticleFiller): plates = hoomd.mpcd.geometry.ParallelPlates(separation=6.0) filler = hoomd.mpcd.fill.GeometryFiller( - type="A", - density=5.0, - kT=1.0, - geometry=plates) + type="A", density=5.0, kT=1.0, geometry=plates + ) simulation.operations.integrator.virtual_particle_fillers = [filler] {inherited} @@ -163,14 +164,15 @@ class GeometryFiller(VirtualParticleFiller): (*read only*). 
""" - __doc__ = __doc__.replace("{inherited}", - VirtualParticleFiller._doc_inherited) + __doc__ = __doc__.replace("{inherited}", VirtualParticleFiller._doc_inherited) _cpp_class_map = {} def __init__(self, type, density, kT, geometry): super().__init__(type, density, kT) - param_dict = ParameterDict(geometry=Geometry,) + param_dict = ParameterDict( + geometry=Geometry, + ) param_dict["geometry"] = geometry self._param_dict.update(param_dict) @@ -190,8 +192,7 @@ def _attach_hook(self): if isinstance(sim.device, hoomd.device.GPU): class_info[1] += "GPU" class_ = getattr(*class_info, None) - assert class_ is not None, ("Virtual particle filler for geometry " - "not found") + assert class_ is not None, "Virtual particle filler for geometry " "not found" self._cpp_obj = class_( sim.state._cpp_sys_def, @@ -212,10 +213,9 @@ def _register_cpp_class(cls, geometry, module, cpp_class_name): cls._cpp_class_map[geometry] = (module, cpp_class_name) -GeometryFiller._register_cpp_class(ParallelPlates, _mpcd, - "ParallelPlateGeometryFiller") +GeometryFiller._register_cpp_class(ParallelPlates, _mpcd, "ParallelPlateGeometryFiller") __all__ = [ - 'GeometryFiller', - 'VirtualParticleFiller', + "GeometryFiller", + "VirtualParticleFiller", ] diff --git a/hoomd/mpcd/force.py b/hoomd/mpcd/force.py index 37bc927a1a..874f0f2083 100644 --- a/hoomd/mpcd/force.py +++ b/hoomd/mpcd/force.py @@ -119,8 +119,7 @@ def __init__(self, force, separation=None, width=None): self._param_dict.update(param_dict) def _attach_hook(self): - self._cpp_obj = _mpcd.BlockForce(self.force, self.separation, - self.width) + self._cpp_obj = _mpcd.BlockForce(self.force, self.separation, self.width) super()._attach_hook() @@ -164,7 +163,8 @@ def __init__(self, force): def _attach_hook(self): self._cpp_obj = _mpcd.ConstantForce( - _hoomd.make_scalar3(self.force[0], self.force[1], self.force[2])) + _hoomd.make_scalar3(self.force[0], self.force[1], self.force[2]) + ) super()._attach_hook() @@ -194,8 +194,8 @@ class SineForce(BodyForce): Ly = simulation.state.box.Ly force = hoomd.mpcd.force.SineForce( - amplitude=1.0, - wavenumber=2 * numpy.pi / Ly) + amplitude=1.0, wavenumber=2 * numpy.pi / Ly + ) stream = hoomd.mpcd.stream.Bulk(period=1, mpcd_particle_force=force) simulation.operations.integrator.streaming_method = stream @@ -222,8 +222,9 @@ class SineForce(BodyForce): def __init__(self, amplitude, wavenumber): super().__init__() - param_dict = ParameterDict(amplitude=float(amplitude), - wavenumber=float(wavenumber)) + param_dict = ParameterDict( + amplitude=float(amplitude), wavenumber=float(wavenumber) + ) self._param_dict.update(param_dict) def _attach_hook(self): @@ -232,8 +233,8 @@ def _attach_hook(self): __all__ = [ - 'BlockForce', - 'BodyForce', - 'ConstantForce', - 'SineForce', + "BlockForce", + "BodyForce", + "ConstantForce", + "SineForce", ] diff --git a/hoomd/mpcd/geometry.py b/hoomd/mpcd/geometry.py index 7eba856c75..00abc9e002 100644 --- a/hoomd/mpcd/geometry.py +++ b/hoomd/mpcd/geometry.py @@ -62,7 +62,9 @@ class Geometry(_HOOMDBaseObject): def __init__(self, no_slip): super().__init__() - param_dict = ParameterDict(no_slip=bool(no_slip),) + param_dict = ParameterDict( + no_slip=bool(no_slip), + ) self._param_dict.update(param_dict) @@ -88,8 +90,9 @@ class ConcentricCylinders(Geometry): .. 
code-block:: python - cylinders = hoomd.mpcd.geometry.ConcentricCylinders(inner_radius=2.0, - outer_radius=5.0) + cylinders = hoomd.mpcd.geometry.ConcentricCylinders( + inner_radius=2.0, outer_radius=5.0 + ) stream = hoomd.mpcd.stream.BounceBack(period=1, geometry=cylinders) simulation.operations.integrator.streaming_method = stream @@ -98,7 +101,8 @@ class ConcentricCylinders(Geometry): .. code-block:: python cylinders = hoomd.mpcd.geometry.ConcentricCylinders( - inner_radius=2.0, outer_radius=5.0, no_slip=False) + inner_radius=2.0, outer_radius=5.0, no_slip=False + ) stream = hoomd.mpcd.stream.BounceBack(period=1, geometry=cylinders) simulation.operations.integrator.streaming_method = stream @@ -107,7 +111,11 @@ class ConcentricCylinders(Geometry): .. code-block:: python cylinders = hoomd.mpcd.geometry.ConcentricCylinders( - inner_radius=2.0, outer_radius=5.0, angular_speed=1.0, no_slip=True) + inner_radius=2.0, + outer_radius=5.0, + angular_speed=1.0, + no_slip=True, + ) stream = hoomd.mpcd.stream.BounceBack(period=1, geometry=cylinders) simulation.operations.integrator.streaming_method = stream @@ -131,11 +139,7 @@ class ConcentricCylinders(Geometry): __doc__ = __doc__.replace("{inherited}", Geometry._doc_inherited) - def __init__(self, - inner_radius, - outer_radius, - angular_speed=0.0, - no_slip=True): + def __init__(self, inner_radius, outer_radius, angular_speed=0.0, no_slip=True): super().__init__(no_slip) param_dict = ParameterDict( inner_radius=float(inner_radius), @@ -145,10 +149,9 @@ def __init__(self, self._param_dict.update(param_dict) def _attach_hook(self): - self._cpp_obj = _mpcd.ConcentricCylinders(self.inner_radius, - self.outer_radius, - self.angular_speed, - self.no_slip) + self._cpp_obj = _mpcd.ConcentricCylinders( + self.inner_radius, self.outer_radius, self.angular_speed, self.no_slip + ) super()._attach_hook() @@ -177,9 +180,8 @@ class CosineChannel(Geometry): .. 
code-block:: python channel = hoomd.mpcd.geometry.CosineChannel( - amplitude=2.0, - separation=4.0, - repeat_length=10.0) + amplitude=2.0, separation=4.0, repeat_length=10.0 + ) stream = hoomd.mpcd.stream.BounceBack(period=1, geometry=channel) simulation.operations.integrator.streaming_method = stream @@ -210,8 +212,9 @@ def __init__(self, amplitude, repeat_length, separation, no_slip=True): self._param_dict.update(param_dict) def _attach_hook(self): - self._cpp_obj = _mpcd.CosineChannel(self.amplitude, self.repeat_length, - self.separation, self.no_slip) + self._cpp_obj = _mpcd.CosineChannel( + self.amplitude, self.repeat_length, self.separation, self.no_slip + ) super()._attach_hook() @@ -244,7 +247,8 @@ class CosineExpansionContraction(Geometry): channel = hoomd.mpcd.geometry.CosineExpansionContraction( expansion_separation=6.0, contraction_separation=3.0, - repeat_length=10.0) + repeat_length=10.0, + ) stream = hoomd.mpcd.stream.BounceBack(period=1, geometry=channel) simulation.operations.integrator.streaming_method = stream @@ -266,11 +270,9 @@ class CosineExpansionContraction(Geometry): __doc__ = __doc__.replace("{inherited}", Geometry._doc_inherited) - def __init__(self, - expansion_separation, - contraction_separation, - repeat_length, - no_slip=True): + def __init__( + self, expansion_separation, contraction_separation, repeat_length, no_slip=True + ): super().__init__(no_slip) param_dict = ParameterDict( @@ -282,8 +284,11 @@ def __init__(self, def _attach_hook(self): self._cpp_obj = _mpcd.CosineExpansionContraction( - self.expansion_separation, self.contraction_separation, - self.repeat_length, self.no_slip) + self.expansion_separation, + self.contraction_separation, + self.repeat_length, + self.no_slip, + ) super()._attach_hook() @@ -318,7 +323,8 @@ class ParallelPlates(Geometry): .. code-block:: python plates = hoomd.mpcd.geometry.ParallelPlates( - separation=6.0, no_slip=False) + separation=6.0, no_slip=False + ) stream = hoomd.mpcd.stream.BounceBack(period=1, geometry=plates) simulation.operations.integrator.streaming_method = stream @@ -327,7 +333,8 @@ class ParallelPlates(Geometry): .. 
code-block:: python plates = hoomd.mpcd.geometry.ParallelPlates( - separation=6.0, speed=1.0, no_slip=True) + separation=6.0, speed=1.0, no_slip=True + ) stream = hoomd.mpcd.stream.BounceBack(period=1, geometry=plates) simulation.operations.integrator.streaming_method = stream @@ -358,8 +365,7 @@ def __init__(self, separation, speed=0.0, no_slip=True): self._param_dict.update(param_dict) def _attach_hook(self): - self._cpp_obj = _mpcd.ParallelPlates(self.separation, self.speed, - self.no_slip) + self._cpp_obj = _mpcd.ParallelPlates(self.separation, self.speed, self.no_slip) super()._attach_hook() @@ -413,8 +419,7 @@ def __init__(self, separation, length, no_slip=True): self._param_dict.update(param_dict) def _attach_hook(self): - self._cpp_obj = _mpcd.PlanarPore(self.separation, self.length, - self.no_slip) + self._cpp_obj = _mpcd.PlanarPore(self.separation, self.length, self.no_slip) super()._attach_hook() @@ -462,7 +467,9 @@ class Sphere(Geometry): def __init__(self, radius, no_slip=True): super().__init__(no_slip) - param_dict = ParameterDict(radius=float(radius),) + param_dict = ParameterDict( + radius=float(radius), + ) self._param_dict.update(param_dict) def _attach_hook(self): @@ -471,11 +478,11 @@ def _attach_hook(self): __all__ = [ - 'ConcentricCylinders', - 'CosineChannel', - 'CosineExpansionContraction', - 'Geometry', - 'ParallelPlates', - 'PlanarPore', - 'Sphere', + "ConcentricCylinders", + "CosineChannel", + "CosineExpansionContraction", + "Geometry", + "ParallelPlates", + "PlanarPore", + "Sphere", ] diff --git a/hoomd/mpcd/integrate.py b/hoomd/mpcd/integrate.py index da41f8a834..10793d3eba 100644 --- a/hoomd/mpcd/integrate.py +++ b/hoomd/mpcd/integrate.py @@ -90,13 +90,14 @@ class Integrator(_MDIntegrator): stream = hoomd.mpcd.stream.Bulk(period=1) collide = hoomd.mpcd.collide.StochasticRotationDynamics( - period=1, - angle=130) + period=1, angle=130 + ) integrator = hoomd.mpcd.Integrator( dt=0.1, streaming_method=stream, collision_method=collide, - mpcd_particle_sorter=hoomd.mpcd.tune.ParticleSorter(trigger=20)) + mpcd_particle_sorter=hoomd.mpcd.tune.ParticleSorter(trigger=20), + ) simulation.operations.integrator = integrator MPCD integrator with solutes. @@ -104,15 +105,17 @@ class Integrator(_MDIntegrator): .. code-block:: python dt_md = 0.005 - md_steps_per_collision = 20 # collision time = 0.1 + md_steps_per_collision = 20 # collision time = 0.1 stream = hoomd.mpcd.stream.Bulk(period=md_steps_per_collision) collide = hoomd.mpcd.collide.StochasticRotationDynamics( period=md_steps_per_collision, angle=130, - embedded_particles=hoomd.filter.All()) + embedded_particles=hoomd.filter.All(), + ) solute_method = hoomd.md.methods.ConstantVolume( - filter=collide.embedded_particles) + filter=collide.embedded_particles + ) integrator = hoomd.mpcd.Integrator( dt=dt_md, @@ -120,8 +123,9 @@ class Integrator(_MDIntegrator): streaming_method=stream, collision_method=collide, mpcd_particle_sorter=hoomd.mpcd.tune.ParticleSorter( - trigger=20*md_steps_per_collision) - ) + trigger=20 * md_steps_per_collision + ), + ) simulation.operations.integrator = integrator MPCD integrator with virtual particle filler. 
@@ -131,21 +135,19 @@ class Integrator(_MDIntegrator): plates = hoomd.mpcd.geometry.ParallelPlates(separation=6.0) stream = hoomd.mpcd.stream.BounceBack(period=1, geometry=plates) collide = hoomd.mpcd.collide.StochasticRotationDynamics( - period=1, - angle=130, - kT=1.0) + period=1, angle=130, kT=1.0 + ) filler = hoomd.mpcd.fill.GeometryFiller( - type="A", - density=5.0, - kT=1.0, - geometry=plates) + type="A", density=5.0, kT=1.0, geometry=plates + ) integrator = hoomd.mpcd.Integrator( dt=0.1, streaming_method=stream, collision_method=collide, virtual_particle_fillers=[filler], - mpcd_particle_sorter=hoomd.mpcd.tune.ParticleSorter(trigger=20)) + mpcd_particle_sorter=hoomd.mpcd.tune.ParticleSorter(trigger=20), + ) simulation.operations.integrator = integrator {inherited} @@ -193,8 +195,9 @@ def __init__( self._cell_list = CellList() - virtual_particle_fillers = ([] if virtual_particle_fillers is None else - virtual_particle_fillers) + virtual_particle_fillers = ( + [] if virtual_particle_fillers is None else virtual_particle_fillers + ) self._virtual_particle_fillers = syncedlist.SyncedList( VirtualParticleFiller, syncedlist._PartialGetAttr("_cpp_obj"), @@ -211,7 +214,8 @@ def __init__( streaming_method=streaming_method, collision_method=collision_method, mpcd_particle_sorter=mpcd_particle_sorter, - )) + ) + ) self._param_dict.update(param_dict) @property @@ -244,10 +248,8 @@ def _attach_hook(self): if self.mpcd_particle_sorter is not None: self.mpcd_particle_sorter._attach(self._simulation) - self._cpp_obj = _mpcd.Integrator(self._simulation.state._cpp_sys_def, - self.dt) - self._virtual_particle_fillers._sync(self._simulation, - self._cpp_obj.fillers) + self._cpp_obj = _mpcd.Integrator(self._simulation.state._cpp_sys_def, self.dt) + self._virtual_particle_fillers._sync(self._simulation, self._cpp_obj.fillers) self._cpp_obj.cell_list = self._cell_list._cpp_obj super(_MDIntegrator, self)._attach_hook() @@ -265,8 +267,7 @@ def _detach_hook(self): super()._detach_hook() def _setattr_param(self, attr, value): - if attr in ("streaming_method", "collision_method", - "mpcd_particle_sorter"): + if attr in ("streaming_method", "collision_method", "mpcd_particle_sorter"): cur_value = getattr(self, attr) if value is cur_value: return diff --git a/hoomd/mpcd/methods.py b/hoomd/mpcd/methods.py index 2dde8d1bd7..14e523adbd 100644 --- a/hoomd/mpcd/methods.py +++ b/hoomd/mpcd/methods.py @@ -67,7 +67,8 @@ class BounceBack(Method): plates = hoomd.mpcd.geometry.ParallelPlates(separation=6.0) nve = hoomd.mpcd.methods.BounceBack( - filter=hoomd.filter.All(), geometry=plates) + filter=hoomd.filter.All(), geometry=plates + ) simulation.operations.integrator.methods.append(nve) {inherited} @@ -130,8 +131,7 @@ def _attach_hook(self): assert class_ is not None, "Bounce back method for geometry not found" group = sim.state._get_group(self.filter) - self._cpp_obj = class_(sim.state._cpp_sys_def, group, - self.geometry._cpp_obj) + self._cpp_obj = class_(sim.state._cpp_sys_def, group, self.geometry._cpp_obj) super()._attach_hook() def _detach_hook(self): @@ -144,5 +144,5 @@ def _register_cpp_class(cls, geometry, module, cpp_class_name): __all__ = [ - 'BounceBack', + "BounceBack", ] diff --git a/hoomd/mpcd/pytest/test_collide.py b/hoomd/mpcd/pytest/test_collide.py index 04d3a1dddc..e529494c8c 100644 --- a/hoomd/mpcd/pytest/test_collide.py +++ b/hoomd/mpcd/pytest/test_collide.py @@ -49,7 +49,6 @@ def test_cell_list(small_snap, simulation_factory): ids=["AndersenThermostat", "StochasticRotationDynamics"], ) class 
TestCollisionMethod: - def test_create(self, small_snap, simulation_factory, cls, init_args): sim = simulation_factory(small_snap) cm = cls(period=5, **init_args) @@ -76,16 +75,14 @@ def test_pickling(self, small_snap, simulation_factory, cls, init_args): pickling_check(cm) sim = simulation_factory(small_snap) - sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, - collision_method=cm) + sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, collision_method=cm) sim.run(0) pickling_check(cm) def test_embed(self, small_snap, simulation_factory, cls, init_args): sim = simulation_factory(small_snap) cm = cls(period=1, embedded_particles=hoomd.filter.All(), **init_args) - sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, - collision_method=cm) + sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, collision_method=cm) assert isinstance(cm.embedded_particles, hoomd.filter.All) sim.run(0) @@ -99,8 +96,7 @@ def test_temperature(self, small_snap, simulation_factory, cls, init_args): else: kT_required = True cm = cls(period=1, **init_args) - sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, - collision_method=cm) + sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, collision_method=cm) assert isinstance(cm.kT, hoomd.variant.Constant) assert cm.kT(0) == 1.0 @@ -122,8 +118,7 @@ def test_temperature(self, small_snap, simulation_factory, cls, init_args): def test_run(self, small_snap, simulation_factory, cls, init_args): sim = simulation_factory(small_snap) cm = cls(period=1, **init_args) - sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, - collision_method=cm) + sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, collision_method=cm) # test that one step can run without error with only solvent sim.run(1) @@ -132,5 +127,6 @@ def test_run(self, small_snap, simulation_factory, cls, init_args): if "kT" not in init_args: init_args["kT"] = 1.0 sim.operations.integrator.collision_method = cls( - period=1, embedded_particles=hoomd.filter.All(), **init_args) + period=1, embedded_particles=hoomd.filter.All(), **init_args + ) sim.run(1) diff --git a/hoomd/mpcd/pytest/test_fill.py b/hoomd/mpcd/pytest/test_fill.py index 6c7d997358..a28cfbacd7 100644 --- a/hoomd/mpcd/pytest/test_fill.py +++ b/hoomd/mpcd/pytest/test_fill.py @@ -22,45 +22,41 @@ def snap(): @pytest.mark.parametrize( "cls, init_args", [ - (hoomd.mpcd.geometry.ConcentricCylinders, { - "inner_radius": 4.0, - "outer_radius": 8.0 - }), - (hoomd.mpcd.geometry.CosineChannel, { - "amplitude": 4.0, - "repeat_length": 20.0, - "separation": 2.0 - }), - (hoomd.mpcd.geometry.CosineExpansionContraction, { - "expansion_separation": 8.0, - "contraction_separation": 4.0, - "repeat_length": 20.0, - }), - (hoomd.mpcd.geometry.ParallelPlates, { - "separation": 8.0 - }), - (hoomd.mpcd.geometry.PlanarPore, { - "separation": 8.0, - "length": 10.0 - }), - (hoomd.mpcd.geometry.Sphere, { - "radius": 4.0 - }), + ( + hoomd.mpcd.geometry.ConcentricCylinders, + {"inner_radius": 4.0, "outer_radius": 8.0}, + ), + ( + hoomd.mpcd.geometry.CosineChannel, + {"amplitude": 4.0, "repeat_length": 20.0, "separation": 2.0}, + ), + ( + hoomd.mpcd.geometry.CosineExpansionContraction, + { + "expansion_separation": 8.0, + "contraction_separation": 4.0, + "repeat_length": 20.0, + }, + ), + (hoomd.mpcd.geometry.ParallelPlates, {"separation": 8.0}), + (hoomd.mpcd.geometry.PlanarPore, {"separation": 8.0, "length": 10.0}), + (hoomd.mpcd.geometry.Sphere, {"radius": 4.0}), ], ids=[ - "ConcentricCylinders", "CosineChannel", 
"CosineExpansionContraction", - "ParallelPlates", "PlanarPore", "Sphere" + "ConcentricCylinders", + "CosineChannel", + "CosineExpansionContraction", + "ParallelPlates", + "PlanarPore", + "Sphere", ], ) class TestGeometryFiller: - - def test_create_and_attributes(self, simulation_factory, snap, cls, - init_args): + def test_create_and_attributes(self, simulation_factory, snap, cls, init_args): geom = cls(**init_args) - filler = hoomd.mpcd.fill.GeometryFiller(type="A", - density=5.0, - kT=1.0, - geometry=geom) + filler = hoomd.mpcd.fill.GeometryFiller( + type="A", density=5.0, kT=1.0, geometry=geom + ) assert filler.geometry is geom assert filler.type == "A" @@ -85,10 +81,9 @@ def test_create_and_attributes(self, simulation_factory, snap, cls, assert filler.kT(0) == 2.0 def test_run(self, simulation_factory, snap, cls, init_args): - filler = hoomd.mpcd.fill.GeometryFiller(type="A", - density=5.0, - kT=1.0, - geometry=cls(**init_args)) + filler = hoomd.mpcd.fill.GeometryFiller( + type="A", density=5.0, kT=1.0, geometry=cls(**init_args) + ) sim = simulation_factory(snap) ig = hoomd.mpcd.Integrator(dt=0.1, virtual_particle_fillers=[filler]) sim.operations.integrator = ig @@ -96,10 +91,9 @@ def test_run(self, simulation_factory, snap, cls, init_args): def test_pickling(self, simulation_factory, snap, cls, init_args): geom = cls(**init_args) - filler = hoomd.mpcd.fill.GeometryFiller(type="A", - density=5.0, - kT=1.0, - geometry=geom) + filler = hoomd.mpcd.fill.GeometryFiller( + type="A", density=5.0, kT=1.0, geometry=geom + ) pickling_check(filler) sim = simulation_factory(snap) diff --git a/hoomd/mpcd/pytest/test_geometry.py b/hoomd/mpcd/pytest/test_geometry.py index f1e39c4fd6..631637e7a4 100644 --- a/hoomd/mpcd/pytest/test_geometry.py +++ b/hoomd/mpcd/pytest/test_geometry.py @@ -20,10 +20,10 @@ def snap(): class TestConcentricCylinders: - def test_default_init(self, simulation_factory, snap): - geom = hoomd.mpcd.geometry.ConcentricCylinders(inner_radius=2.0, - outer_radius=4.0) + geom = hoomd.mpcd.geometry.ConcentricCylinders( + inner_radius=2.0, outer_radius=4.0 + ) assert geom.inner_radius == 2.0 assert geom.outer_radius == 4.0 assert geom.angular_speed == 0.0 @@ -37,10 +37,9 @@ def test_default_init(self, simulation_factory, snap): assert geom.no_slip def test_nondefault_init(self, simulation_factory, snap): - geom = hoomd.mpcd.geometry.ConcentricCylinders(inner_radius=2.0, - outer_radius=5.0, - angular_speed=1.0, - no_slip=False) + geom = hoomd.mpcd.geometry.ConcentricCylinders( + inner_radius=2.0, outer_radius=5.0, angular_speed=1.0, no_slip=False + ) assert geom.inner_radius == 2.0 assert geom.outer_radius == 5.0 assert geom.angular_speed == 1.0 @@ -54,8 +53,9 @@ def test_nondefault_init(self, simulation_factory, snap): assert not geom.no_slip def test_pickling(self, simulation_factory, snap): - geom = hoomd.mpcd.geometry.ConcentricCylinders(inner_radius=2.0, - outer_radius=4.0) + geom = hoomd.mpcd.geometry.ConcentricCylinders( + inner_radius=2.0, outer_radius=4.0 + ) pickling_check(geom) sim = simulation_factory(snap) @@ -64,11 +64,10 @@ def test_pickling(self, simulation_factory, snap): class TestCosineChannel: - def test_default_init(self, simulation_factory, snap): - geom = hoomd.mpcd.geometry.CosineChannel(amplitude=4.0, - repeat_length=10.0, - separation=4.0) + geom = hoomd.mpcd.geometry.CosineChannel( + amplitude=4.0, repeat_length=10.0, separation=4.0 + ) assert geom.amplitude == 4.0 assert geom.repeat_length == 10.0 assert geom.separation == 4.0 @@ -82,10 +81,9 @@ def 
test_default_init(self, simulation_factory, snap): assert geom.no_slip def test_nondefault_init(self, simulation_factory, snap): - geom = hoomd.mpcd.geometry.CosineChannel(amplitude=4.0, - repeat_length=10.0, - separation=4.0, - no_slip=False) + geom = hoomd.mpcd.geometry.CosineChannel( + amplitude=4.0, repeat_length=10.0, separation=4.0, no_slip=False + ) assert geom.amplitude == 4.0 assert geom.repeat_length == 10.0 assert geom.separation == 4.0 @@ -99,9 +97,9 @@ def test_nondefault_init(self, simulation_factory, snap): assert not geom.no_slip def test_pickling(self, simulation_factory, snap): - geom = hoomd.mpcd.geometry.CosineChannel(amplitude=4.0, - repeat_length=10.0, - separation=4.0) + geom = hoomd.mpcd.geometry.CosineChannel( + amplitude=4.0, repeat_length=10.0, separation=4.0 + ) pickling_check(geom) sim = simulation_factory(snap) @@ -110,12 +108,10 @@ def test_pickling(self, simulation_factory, snap): class TestCosineExpansionContraction: - def test_default_init(self, simulation_factory, snap): geom = hoomd.mpcd.geometry.CosineExpansionContraction( - expansion_separation=4, - contraction_separation=2, - repeat_length=10.0) + expansion_separation=4, contraction_separation=2, repeat_length=10.0 + ) assert geom.expansion_separation == 4.0 assert geom.contraction_separation == 2.0 @@ -134,7 +130,8 @@ def test_nondefault_init(self, simulation_factory, snap): expansion_separation=4, contraction_separation=2, repeat_length=10.0, - no_slip=False) + no_slip=False, + ) assert geom.expansion_separation == 4.0 assert geom.contraction_separation == 2.0 assert geom.repeat_length == 10.0 @@ -149,9 +146,8 @@ def test_nondefault_init(self, simulation_factory, snap): def test_pickling(self, simulation_factory, snap): geom = hoomd.mpcd.geometry.CosineExpansionContraction( - expansion_separation=4, - contraction_separation=2, - repeat_length=10.0) + expansion_separation=4, contraction_separation=2, repeat_length=10.0 + ) pickling_check(geom) @@ -161,7 +157,6 @@ def test_pickling(self, simulation_factory, snap): class TestParallelPlates: - def test_default_init(self, simulation_factory, snap): geom = hoomd.mpcd.geometry.ParallelPlates(separation=8.0) assert geom.separation == 8.0 @@ -175,9 +170,9 @@ def test_default_init(self, simulation_factory, snap): assert geom.no_slip def test_nondefault_init(self, simulation_factory, snap): - geom = hoomd.mpcd.geometry.ParallelPlates(separation=10.0, - speed=1.0, - no_slip=False) + geom = hoomd.mpcd.geometry.ParallelPlates( + separation=10.0, speed=1.0, no_slip=False + ) assert geom.separation == 10.0 assert geom.speed == 1.0 assert not geom.no_slip @@ -198,7 +193,6 @@ def test_pickling(self, simulation_factory, snap): class TestPlanarPore: - def test_default_init(self, simulation_factory, snap): geom = hoomd.mpcd.geometry.PlanarPore(separation=8.0, length=10.0) assert geom.separation == 8.0 @@ -212,9 +206,9 @@ def test_default_init(self, simulation_factory, snap): assert geom.no_slip def test_nondefault_init(self, simulation_factory, snap): - geom = hoomd.mpcd.geometry.PlanarPore(separation=10.0, - length=8.0, - no_slip=False) + geom = hoomd.mpcd.geometry.PlanarPore( + separation=10.0, length=8.0, no_slip=False + ) assert geom.separation == 10.0 assert geom.length == 8.0 assert not geom.no_slip @@ -235,7 +229,6 @@ def test_pickling(self, simulation_factory, snap): class TestSphere: - def test_default_init(self, simulation_factory, snap): geom = hoomd.mpcd.geometry.Sphere(radius=4.0) assert geom.radius == 4.0 diff --git a/hoomd/mpcd/pytest/test_integrator.py 
b/hoomd/mpcd/pytest/test_integrator.py index c760a3b743..e850dd4893 100644 --- a/hoomd/mpcd/pytest/test_integrator.py +++ b/hoomd/mpcd/pytest/test_integrator.py @@ -9,7 +9,6 @@ @pytest.fixture def make_simulation(simulation_factory): - def _make_simulation(): snap = hoomd.Snapshot() if snap.communicator.rank == 0: @@ -170,7 +169,8 @@ def test_attach_and_detach(make_simulation): # attach with both methods ig.streaming_method = hoomd.mpcd.stream.Bulk(period=1) ig.collision_method = hoomd.mpcd.collide.StochasticRotationDynamics( - period=1, angle=130) + period=1, angle=130 + ) ig.mpcd_particle_sorter = hoomd.mpcd.tune.ParticleSorter(trigger=1) sim.run(0) assert ig.streaming_method._attached diff --git a/hoomd/mpcd/pytest/test_methods.py b/hoomd/mpcd/pytest/test_methods.py index 1cf4373f8b..75ec71f28b 100644 --- a/hoomd/mpcd/pytest/test_methods.py +++ b/hoomd/mpcd/pytest/test_methods.py @@ -25,13 +25,13 @@ def snap(): def integrator(): bb = hoomd.mpcd.methods.BounceBack( filter=hoomd.filter.All(), - geometry=hoomd.mpcd.geometry.ParallelPlates(separation=8.0)) + geometry=hoomd.mpcd.geometry.ParallelPlates(separation=8.0), + ) ig = hoomd.mpcd.Integrator(dt=0.1, methods=[bb]) return ig class TestBounceBack: - def test_pickling(self, simulation_factory, snap, integrator): pickling_check(integrator.methods[0]) @@ -50,31 +50,33 @@ def test_step_noslip(self, simulation_factory, snap, integrator): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.particles.position, - [[-4.95, 3.95, 4.95], [-0.1, -3.9, -0.1]]) + snap.particles.position, [[-4.95, 3.95, 4.95], [-0.1, -3.9, -0.1]] + ) np.testing.assert_array_almost_equal( - snap.particles.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]) + snap.particles.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]] + ) # take another step where one particle will now hit the wall sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.particles.position, - [[-4.95, 3.95, 4.95], [-0.2, -4.0, -0.2]]) + snap.particles.position, [[-4.95, 3.95, 4.95], [-0.2, -4.0, -0.2]] + ) np.testing.assert_array_almost_equal( - snap.particles.velocity, - [[-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0]]) + snap.particles.velocity, [[-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0]] + ) # take another step, reflecting the second particle sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.particles.position, - [[4.95, 3.85, -4.95], [-0.1, -3.9, -0.1]]) + snap.particles.position, [[4.95, 3.85, -4.95], [-0.1, -3.9, -0.1]] + ) np.testing.assert_array_almost_equal( - snap.particles.velocity, [[-1.0, -1.0, 1.0], [1.0, 1.0, 1.0]]) + snap.particles.velocity, [[-1.0, -1.0, 1.0], [1.0, 1.0, 1.0]] + ) def test_step_slip(self, simulation_factory, snap, integrator): """Test step with slip boundary conditions.""" @@ -88,31 +90,33 @@ def test_step_slip(self, simulation_factory, snap, integrator): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.particles.position, - [[-4.95, 3.95, 4.95], [-0.1, -3.9, -0.1]]) + snap.particles.position, [[-4.95, 3.95, 4.95], [-0.1, -3.9, -0.1]] + ) np.testing.assert_array_almost_equal( - snap.particles.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]) + snap.particles.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]] + ) # take another step where one particle will now hit the wall sim.run(1) snap = sim.state.get_snapshot() if 
snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.particles.position, - [[-4.85, 3.95, 4.85], [-0.2, -4.0, -0.2]]) + snap.particles.position, [[-4.85, 3.95, 4.85], [-0.2, -4.0, -0.2]] + ) np.testing.assert_array_almost_equal( - snap.particles.velocity, - [[1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]]) + snap.particles.velocity, [[1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]] + ) # take another step, reflecting perpendicular motion of second particle sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.particles.position, - [[-4.75, 3.85, 4.75], [-0.3, -3.9, -0.3]]) + snap.particles.position, [[-4.75, 3.85, 4.75], [-0.3, -3.9, -0.3]] + ) np.testing.assert_array_almost_equal( - snap.particles.velocity, [[1.0, -1.0, -1.0], [-1.0, 1.0, -1.0]]) + snap.particles.velocity, [[1.0, -1.0, -1.0], [-1.0, 1.0, -1.0]] + ) def test_step_moving_wall(self, simulation_factory, snap, integrator): integrator.dt = 0.3 @@ -128,10 +132,11 @@ def test_step_moving_wall(self, simulation_factory, snap, integrator): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.particles.position, - [[-4.75, 3.85, -4.95], [-0.4, -3.9, -0.1]]) + snap.particles.position, [[-4.75, 3.85, -4.95], [-0.4, -3.9, -0.1]] + ) np.testing.assert_array_almost_equal( - snap.particles.velocity, [[1.0, -1.0, 1.0], [0.0, 1.0, 1.0]]) + snap.particles.velocity, [[1.0, -1.0, 1.0], [0.0, 1.0, 1.0]] + ) def test_accel(self, simulation_factory, snap, integrator): force = hoomd.md.force.Constant(filter=hoomd.filter.All()) @@ -147,14 +152,16 @@ def test_accel(self, simulation_factory, snap, integrator): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.particles.position, - [[0.11, 0.12, -0.11], [-0.095, -0.09, -0.105]]) + snap.particles.position, [[0.11, 0.12, -0.11], [-0.095, -0.09, -0.105]] + ) np.testing.assert_array_almost_equal( - snap.particles.velocity, [[1.2, 1.4, -1.2], [-0.9, -0.8, -1.1]]) + snap.particles.velocity, [[1.2, 1.4, -1.2], [-0.9, -0.8, -1.1]] + ) @pytest.mark.parametrize("H,expected_result", [(4.0, True), (3.8, False)]) - def test_check_particles(self, simulation_factory, snap, integrator, H, - expected_result): + def test_check_particles( + self, simulation_factory, snap, integrator, H, expected_result + ): """Test box validation raises an error on run.""" integrator.methods[0].geometry.separation = 2 * H @@ -168,7 +175,8 @@ def test_md_integrator(self, simulation_factory, snap): """Test we can also attach to a normal MD integrator.""" bb = hoomd.mpcd.methods.BounceBack( filter=hoomd.filter.All(), - geometry=hoomd.mpcd.geometry.ParallelPlates(separation=8.0)) + geometry=hoomd.mpcd.geometry.ParallelPlates(separation=8.0), + ) integrator = hoomd.md.Integrator(dt=0.1, methods=[bb]) sim = simulation_factory(snap) @@ -179,7 +187,8 @@ def test_md_integrator(self, simulation_factory, snap): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.particles.position, - [[-4.95, 3.95, 4.95], [-0.1, -3.9, -0.1]]) + snap.particles.position, [[-4.95, 3.95, 4.95], [-0.1, -3.9, -0.1]] + ) np.testing.assert_array_almost_equal( - snap.particles.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]) + snap.particles.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]] + ) diff --git a/hoomd/mpcd/pytest/test_snapshot.py b/hoomd/mpcd/pytest/test_snapshot.py index b300b797be..46287df477 100644 --- 
a/hoomd/mpcd/pytest/test_snapshot.py +++ b/hoomd/mpcd/pytest/test_snapshot.py @@ -70,10 +70,12 @@ def test_resize(snap): # grow the snapshot by one, and make sure first entry is retained, and # it is padded by zeros snap.mpcd.N = 2 - np.testing.assert_array_equal(snap.mpcd.position, - [test_positions[0], [0, 0, 0]]) - np.testing.assert_array_equal(snap.mpcd.velocity, - [test_velocities[0], [0, 0, 0]]) + np.testing.assert_array_equal( + snap.mpcd.position, [test_positions[0], [0, 0, 0]] + ) + np.testing.assert_array_equal( + snap.mpcd.velocity, [test_velocities[0], [0, 0, 0]] + ) np.testing.assert_array_equal(snap.mpcd.typeid, [test_typeids[0], 0]) # grow the snapshot to the "standard" size and fill it back in @@ -149,10 +151,9 @@ def reap_mpcd_pdata(simulation): vel_i = pdata.getVelocity(i) type_i = pdata.getType(i) tag_i = pdata.getTag(i) - dat += [[ - tag_i, pos_i.x, pos_i.y, pos_i.z, vel_i.x, vel_i.y, vel_i.z, - type_i - ]] + dat += [ + [tag_i, pos_i.x, pos_i.y, pos_i.z, vel_i.x, vel_i.y, vel_i.z, type_i] + ] return np.array(sorted(dat, key=lambda p: p[0])) # set snap values and initialize diff --git a/hoomd/mpcd/pytest/test_stream.py b/hoomd/mpcd/pytest/test_stream.py index 1a8e4d7bd0..d307cdc2b2 100644 --- a/hoomd/mpcd/pytest/test_stream.py +++ b/hoomd/mpcd/pytest/test_stream.py @@ -27,62 +27,62 @@ def snap(): ( hoomd.mpcd.stream.BounceBack, { - "geometry": - hoomd.mpcd.geometry.ConcentricCylinders(inner_radius=2.0, - outer_radius=5.0, - angular_speed=0.0, - no_slip=True), + "geometry": hoomd.mpcd.geometry.ConcentricCylinders( + inner_radius=2.0, outer_radius=5.0, angular_speed=0.0, no_slip=True + ), }, ), ( hoomd.mpcd.stream.BounceBack, { - "geometry": - hoomd.mpcd.geometry.CosineChannel( - amplitude=4.0, repeat_length=20.0, separation=2.0), + "geometry": hoomd.mpcd.geometry.CosineChannel( + amplitude=4.0, repeat_length=20.0, separation=2.0 + ), }, ), ( hoomd.mpcd.stream.BounceBack, { - "geometry": - hoomd.mpcd.geometry.CosineExpansionContraction( - expansion_separation=8.0, - contraction_separation=4.0, - repeat_length=20.0, - no_slip=True) + "geometry": hoomd.mpcd.geometry.CosineExpansionContraction( + expansion_separation=8.0, + contraction_separation=4.0, + repeat_length=20.0, + no_slip=True, + ) }, ), ( hoomd.mpcd.stream.BounceBack, { - "geometry": - hoomd.mpcd.geometry.ParallelPlates( - separation=8.0, speed=0.0, no_slip=True), + "geometry": hoomd.mpcd.geometry.ParallelPlates( + separation=8.0, speed=0.0, no_slip=True + ), }, ), ( hoomd.mpcd.stream.BounceBack, { - "geometry": - hoomd.mpcd.geometry.PlanarPore( - separation=8.0, length=6.0, no_slip=True) + "geometry": hoomd.mpcd.geometry.PlanarPore( + separation=8.0, length=6.0, no_slip=True + ) }, ), ( hoomd.mpcd.stream.BounceBack, - { - "geometry": hoomd.mpcd.geometry.Sphere(radius=4.0, no_slip=True) - }, + {"geometry": hoomd.mpcd.geometry.Sphere(radius=4.0, no_slip=True)}, ), ], ids=[ - "Bulk", "ConcentricCylinders", "CosineChannel", - "CosineExpansionContraction", "ParallelPlates", "PlanarPore", "Sphere" + "Bulk", + "ConcentricCylinders", + "CosineChannel", + "CosineExpansionContraction", + "ParallelPlates", + "PlanarPore", + "Sphere", ], ) class TestStreamingMethod: - def test_create(self, simulation_factory, snap, cls, init_args): sim = simulation_factory(snap) sm = cls(period=5, **init_args) @@ -100,8 +100,7 @@ def test_pickling(self, simulation_factory, snap, cls, init_args): pickling_check(sm) sim = simulation_factory(snap) - sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, - streaming_method=sm) + 
sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, streaming_method=sm) sim.run(0) pickling_check(sm) @@ -115,16 +114,14 @@ def test_pickling(self, simulation_factory, snap, cls, init_args): ], ids=["NoForce", "BlockForce", "ConstantForce", "SineForce"], ) - def test_force_attach(self, simulation_factory, snap, cls, init_args, - force): + def test_force_attach(self, simulation_factory, snap, cls, init_args, force): """Test that force can be attached with various forces.""" sm = cls(period=5, **init_args, mpcd_particle_force=force) assert sm.mpcd_particle_force is force pickling_check(sm) sim = simulation_factory(snap) - sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, - streaming_method=sm) + sim.operations.integrator = hoomd.mpcd.Integrator(dt=0.02, streaming_method=sm) sim.run(0) assert sm.mpcd_particle_force is force @@ -145,9 +142,11 @@ def test_forced_step(self, simulation_factory, snap, cls, init_args): snap.mpcd.velocity[0] = [1, -2, 3] sim = simulation_factory(snap) - sm = cls(period=1, - **init_args, - mpcd_particle_force=hoomd.mpcd.force.ConstantForce((1, 0, -1))) + sm = cls( + period=1, + **init_args, + mpcd_particle_force=hoomd.mpcd.force.ConstantForce((1, 0, -1)), + ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -155,14 +154,13 @@ def test_forced_step(self, simulation_factory, snap, cls, init_args): sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.velocity, - [[1.1, -2.0, 2.9]]) - np.testing.assert_array_almost_equal(snap.mpcd.position, - [[0.105, 3.8, -0.705]]) + np.testing.assert_array_almost_equal(snap.mpcd.velocity, [[1.1, -2.0, 2.9]]) + np.testing.assert_array_almost_equal( + snap.mpcd.position, [[0.105, 3.8, -0.705]] + ) class TestBulk: - def test_bulk_step(self, simulation_factory, snap): if snap.communicator.rank == 0: snap.mpcd.N = 2 @@ -179,21 +177,24 @@ def test_bulk_step(self, simulation_factory, snap): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[1.1, 4.95, 3.1], [-3.1, -4.85, -1.1]]) + snap.mpcd.position, [[1.1, 4.95, 3.1], [-3.1, -4.85, -1.1]] + ) # take another step, wrapping the first particle through the boundary sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[1.2, -4.95, 3.2], [-3.2, -4.95, -1.2]]) + snap.mpcd.position, [[1.2, -4.95, 3.2], [-3.2, -4.95, -1.2]] + ) # take another step, wrapping the second particle through the boundary sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[1.3, -4.85, 3.3], [-3.3, 4.95, -1.3]]) + snap.mpcd.position, [[1.3, -4.85, 3.3], [-3.3, 4.95, -1.3]] + ) # change streaming method to use a different period, and change # integrator step running again should not move the particles since we @@ -204,25 +205,27 @@ def test_bulk_step(self, simulation_factory, snap): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[1.3, -4.85, 3.3], [-3.3, 4.95, -1.3]]) + snap.mpcd.position, [[1.3, -4.85, 3.3], [-3.3, 4.95, -1.3]] + ) # but running one more should move them sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[1.5, -4.65, 3.5], [-3.5, 4.75, -1.5]]) + snap.mpcd.position, [[1.5, -4.65, 3.5], 
[-3.5, 4.75, -1.5]] + ) # then 3 more should do nothing sim.run(3) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[1.5, -4.65, 3.5], [-3.5, 4.75, -1.5]]) + snap.mpcd.position, [[1.5, -4.65, 3.5], [-3.5, 4.75, -1.5]] + ) class TestConcentricCylinders: - def test_step_noslip(self, simulation_factory, snap): """Test step with no-slip boundary conditions.""" if snap.communicator.rank == 0: @@ -232,8 +235,10 @@ def test_step_noslip(self, simulation_factory, snap): sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.ConcentricCylinders(inner_radius=2.0, - outer_radius=5.0)) + geometry=hoomd.mpcd.geometry.ConcentricCylinders( + inner_radius=2.0, outer_radius=5.0 + ), + ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -242,27 +247,33 @@ def test_step_noslip(self, simulation_factory, snap): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[-4.0, -3.0, 0.1], [-2.15, -0.15, 0.00]]) + snap.mpcd.position, [[-4.0, -3.0, 0.1], [-2.15, -0.15, 0.00]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[-1.0, -1.0, 1.0], [1.0, 1.0, -1.0]]) + snap.mpcd.velocity, [[-1.0, -1.0, 1.0], [1.0, 1.0, -1.0]] + ) # take another step where first particle will now hit the outer wall sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[-3.9, -2.9, 0.0], [-2.05, -0.05, -0.10]]) + snap.mpcd.position, [[-3.9, -2.9, 0.0], [-2.05, -0.05, -0.10]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[1.0, 1.0, -1.0], [1.0, 1.0, -1.0]]) + snap.mpcd.velocity, [[1.0, 1.0, -1.0], [1.0, 1.0, -1.0]] + ) # take another step where second particle will now hit the inner wall sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[-3.8, -2.8, -0.1], [-2.05, -0.05, -0.10]]) + snap.mpcd.position, [[-3.8, -2.8, -0.1], [-2.05, -0.05, -0.10]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, 1.0]]) + snap.mpcd.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, 1.0]] + ) def test_step_slip(self, simulation_factory, snap): """Test step with slip boundary conditions.""" @@ -273,9 +284,9 @@ def test_step_slip(self, simulation_factory, snap): sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.ConcentricCylinders(inner_radius=2.0, - outer_radius=5.0, - no_slip=False), + geometry=hoomd.mpcd.geometry.ConcentricCylinders( + inner_radius=2.0, outer_radius=5.0, no_slip=False + ), ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -285,29 +296,33 @@ def test_step_slip(self, simulation_factory, snap): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[-4.0, -3.0, 0.1], [-2.15, -0.15, 0.0]]) + snap.mpcd.position, [[-4.0, -3.0, 0.1], [-2.15, -0.15, 0.0]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[-1.0, -1.0, 1.0], [1.0, 1.0, -1.0]]) + snap.mpcd.velocity, [[-1.0, -1.0, 1.0], [1.0, 1.0, -1.0]] + ) # take another step where first particle will now hit the outer wall sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, 
- [[-3.972, -3.028, 0.2], [-2.05, -0.05, -0.10]]) + snap.mpcd.position, [[-3.972, -3.028, 0.2], [-2.05, -0.05, -0.10]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[0.28, -0.28, 1.0], [1.0, 1.0, -1.0]]) + snap.mpcd.velocity, [[0.28, -0.28, 1.0], [1.0, 1.0, -1.0]] + ) # take another step where second partile will now hit the inner wall sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, - [[-3.944, -3.056, 0.3], [-2.05, 0.05, -0.20]]) + snap.mpcd.position, [[-3.944, -3.056, 0.3], [-2.05, 0.05, -0.20]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[0.28, -0.28, 1.0], [-1.0, 1.0, -1.0]]) + snap.mpcd.velocity, [[0.28, -0.28, 1.0], [-1.0, 1.0, -1.0]] + ) def test_step_moving_wall_no_slip(self, simulation_factory, snap): """Test step with moving wall and no_slip condition.""" @@ -318,10 +333,9 @@ def test_step_moving_wall_no_slip(self, simulation_factory, snap): sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.ConcentricCylinders(inner_radius=2.0, - outer_radius=5.0, - angular_speed=1, - no_slip=True), + geometry=hoomd.mpcd.geometry.ConcentricCylinders( + inner_radius=2.0, outer_radius=5.0, angular_speed=1, no_slip=True + ), ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -331,9 +345,11 @@ def test_step_moving_wall_no_slip(self, simulation_factory, snap): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[-3.60, -3.3, 0.0], [-2.05, -0.05, -0.10]]) + snap.mpcd.position, [[-3.60, -3.3, 0.0], [-2.05, -0.05, -0.10]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[8.0, -6.0, -1.0], [-1.0, -1.0, 1.0]]) + snap.mpcd.velocity, [[8.0, -6.0, -1.0], [-1.0, -1.0, 1.0]] + ) def test_step_moving_wall_slip(self, simulation_factory, snap): """Test step with moving wall and slip condition.""" @@ -344,10 +360,9 @@ def test_step_moving_wall_slip(self, simulation_factory, snap): sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.ConcentricCylinders(inner_radius=2.0, - outer_radius=5.0, - angular_speed=1, - no_slip=False), + geometry=hoomd.mpcd.geometry.ConcentricCylinders( + inner_radius=2.0, outer_radius=5.0, angular_speed=1, no_slip=False + ), ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -357,22 +372,25 @@ def test_step_moving_wall_slip(self, simulation_factory, snap): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, - [[-3.972, -3.028, 0.1], [-2.05, 0.05, -0.20]]) + snap.mpcd.position, [[-3.972, -3.028, 0.1], [-2.05, 0.05, -0.20]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[0.56, -0.56, 1.0], [-1.0, 1.0, -1.0]]) + snap.mpcd.velocity, [[0.56, -0.56, 1.0], [-1.0, 1.0, -1.0]] + ) - @pytest.mark.parametrize("R0, R1, expected_result", [(3, 5, False), - (2, 5, True)]) - def test_check_mpcd_particles(self, simulation_factory, snap, R0, R1, - expected_result): + @pytest.mark.parametrize("R0, R1, expected_result", [(3, 5, False), (2, 5, True)]) + def test_check_mpcd_particles( + self, simulation_factory, snap, R0, R1, expected_result + ): if snap.communicator.rank == 0: snap.mpcd.position[0] = [2.5, 0, 0] sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( period=1, - 
geometry=hoomd.mpcd.geometry.ConcentricCylinders(inner_radius=R0, - outer_radius=R1)) + geometry=hoomd.mpcd.geometry.ConcentricCylinders( + inner_radius=R0, outer_radius=R1 + ), + ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -381,20 +399,19 @@ def test_check_mpcd_particles(self, simulation_factory, snap, R0, R1, class TestCosineChannel: - def _make_particles(self, snap): if snap.communicator.rank == 0: snap.configuration.box = [20, 20, 20, 0, 0, 0] snap.mpcd.N = 3 snap.mpcd.position[:] = [ - [0., 5.85, -3.0], + [0.0, 5.85, -3.0], [1.55, 5.5, 0], [0.0, 2.2, 0.0], ] snap.mpcd.velocity[:] = [ - [0, 1., 0.], - [1., 0., 0.], - [-1., -1., -1.], + [0, 1.0, 0.0], + [1.0, 0.0, 0.0], + [-1.0, -1.0, -1.0], ] return snap @@ -403,10 +420,9 @@ def test_step_noslip(self, simulation_factory, snap): sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.CosineChannel(amplitude=4.0, - repeat_length=20.0, - separation=4.0, - no_slip=True), + geometry=hoomd.mpcd.geometry.CosineChannel( + amplitude=4.0, repeat_length=20.0, separation=4.0, no_slip=True + ), ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -417,9 +433,11 @@ def test_step_noslip(self, simulation_factory, snap): if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( snap.mpcd.position, - [[0, 5.95, -3.0], [1.567225, 5.5, 0.0], [-0.1, 2.1, -0.1]]) + [[0, 5.95, -3.0], [1.567225, 5.5, 0.0], [-0.1, 2.1, -0.1]], + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[0, 1, 0.], [-1, 0, 0], [-1, -1, -1]]) + snap.mpcd.velocity, [[0, 1, 0.0], [-1, 0, 0], [-1, -1, -1]] + ) # particle 0 hits the highest spot and is reflected back sim.run(1) @@ -427,29 +445,32 @@ def test_step_noslip(self, simulation_factory, snap): if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( snap.mpcd.position, - [[0, 5.95, -3.0], [1.467225, 5.5, 0.0], [-0.2, 2.0, -0.2]]) + [[0, 5.95, -3.0], [1.467225, 5.5, 0.0], [-0.2, 2.0, -0.2]], + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[0, -1, 0.], [-1, 0, 0], [-1, -1, -1]]) + snap.mpcd.velocity, [[0, -1, 0.0], [-1, 0, 0], [-1, -1, -1]] + ) # particle 2 collides diagonally sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[0, 5.85, -3.0], [1.367225, 5.5, 0.0], - [-0.11717, 2.08283, -0.11717]]) + snap.mpcd.position, + [[0, 5.85, -3.0], [1.367225, 5.5, 0.0], [-0.11717, 2.08283, -0.11717]], + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[0, -1., 0.], [-1, 0, 0], [1, 1, 1]]) + snap.mpcd.velocity, [[0, -1.0, 0.0], [-1, 0, 0], [1, 1, 1]] + ) def test_step_slip(self, simulation_factory, snap): snap = self._make_particles(snap) sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.CosineChannel(amplitude=4.0, - repeat_length=20.0, - separation=4.0, - no_slip=False), + geometry=hoomd.mpcd.geometry.CosineChannel( + amplitude=4.0, repeat_length=20.0, separation=4.0, no_slip=False + ), ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -460,33 +481,35 @@ def test_step_slip(self, simulation_factory, snap): if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( snap.mpcd.position, - [[0, 5.95, -3.0], [1.62764, 5.463246, 0], [-0.1, 2.1, -0.1]]) + [[0, 5.95, -3.0], [1.62764, 5.463246, 0], [-0.1, 2.1, -0.1]], + ) 
np.testing.assert_array_almost_equal( snap.mpcd.velocity, - [[0, 1, 0.], [0.459737, -0.888055, 0], [-1, -1, -1]]) + [[0, 1, 0.0], [0.459737, -0.888055, 0], [-1, -1, -1]], + ) # take one step,particle 0 hits the wall (same as for no_slip, because # it's vertical) sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [0, 5.95, -3.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], - [0, -1., 0.]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-0.2, 2.0, -0.2]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [-1, -1, -1]) + np.testing.assert_array_almost_equal(snap.mpcd.position[0], [0, 5.95, -3.0]) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], [0, -1.0, 0.0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-0.2, 2.0, -0.2] + ) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], [-1, -1, -1]) # take another step, particle 2 hits the wall sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-0.313714, 2.066657, -0.3]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [-1.150016, 0.823081, -1.]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-0.313714, 2.066657, -0.3] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[2], [-1.150016, 0.823081, -1.0] + ) def test_check_mpcd_particles(self, simulation_factory, snap): """Test particle out of bounds.""" @@ -497,50 +520,55 @@ def test_check_mpcd_particles(self, simulation_factory, snap): ig.streaming_method = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.CosineChannel(amplitude=4.0, - repeat_length=20.0, - separation=4.0), + geometry=hoomd.mpcd.geometry.CosineChannel( + amplitude=4.0, repeat_length=20.0, separation=4.0 + ), ) sim.run(0) assert ig.streaming_method.check_mpcd_particles() ig.streaming_method = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.CosineChannel(amplitude=10.0, - repeat_length=20.0, - separation=4.0), + geometry=hoomd.mpcd.geometry.CosineChannel( + amplitude=10.0, repeat_length=20.0, separation=4.0 + ), ) sim.run(0) assert not ig.streaming_method.check_mpcd_particles() ig.streaming_method = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.CosineChannel(amplitude=4.0, - repeat_length=20.0, - separation=4.0), + geometry=hoomd.mpcd.geometry.CosineChannel( + amplitude=4.0, repeat_length=20.0, separation=4.0 + ), ) assert ig.streaming_method.check_mpcd_particles() ig.streaming_method = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.CosineChannel(amplitude=4.0, - repeat_length=20.0, - separation=2.0), + geometry=hoomd.mpcd.geometry.CosineChannel( + amplitude=4.0, repeat_length=20.0, separation=2.0 + ), ) sim.run(0) assert not ig.streaming_method.check_mpcd_particles() class TestCosineExpansionContraction: - def _make_particles(self, snap): if snap.communicator.rank == 0: snap.configuration.box = [15, 15, 15, 0, 0, 0] snap.mpcd.N = 3 - snap.mpcd.position[:] = [[1., -3.8, -3.0], [3.5, 3., 0.], - [-4.2, -2.2, 5.1]] - snap.mpcd.velocity[:] = [[0., -1., 0.], [1., 0., 0.], - [-1., -1., -1.]] + snap.mpcd.position[:] = [ + [1.0, -3.8, -3.0], + [3.5, 3.0, 0.0], + [-4.2, -2.2, 5.1], + ] + snap.mpcd.velocity[:] = [ + [0.0, -1.0, 0.0], + [1.0, 0.0, 0.0], + [-1.0, -1.0, -1.0], + ] return snap def test_step_noslip(self, simulation_factory, snap): @@ 
-552,7 +580,8 @@ def test_step_noslip(self, simulation_factory, snap): expansion_separation=8.0, contraction_separation=4.0, repeat_length=15.0, - no_slip=True), + no_slip=True, + ), ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -561,18 +590,16 @@ def test_step_noslip(self, simulation_factory, snap): sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [1, -3.9, -3.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], - [0., -1, 0.]) - np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.6, 3.0, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], - [1., 0, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-4.3, -2.3, 5.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [-1., -1., -1.]) + np.testing.assert_array_almost_equal(snap.mpcd.position[0], [1, -3.9, -3.0]) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], [0.0, -1, 0.0]) + np.testing.assert_array_almost_equal(snap.mpcd.position[1], [3.6, 3.0, 0.0]) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], [1.0, 0, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-4.3, -2.3, 5.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[2], [-1.0, -1.0, -1.0] + ) # take another step where particle 1 will now hit the wall vertically # point of wall contact is z=-(cos(2*pi/15.)+3) = -3.913545, remaining @@ -581,18 +608,18 @@ def test_step_noslip(self, simulation_factory, snap): sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [1, -3.82709, -3.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], - [0., 1., 0.]) - np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.7, 3.0, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], - [1., 0., 0.]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-4.4, -2.4, 4.9]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [-1., -1., -1.]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[0], [1, -3.82709, -3.0] + ) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], [0.0, 1.0, 0.0]) + np.testing.assert_array_almost_equal(snap.mpcd.position[1], [3.7, 3.0, 0.0]) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], [1.0, 0.0, 0.0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-4.4, -2.4, 4.9] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[2], [-1.0, -1.0, -1.0] + ) # take another step, where particle 2 will now hit the wall # horizontally dt = 0.05, particle travels exactly 0.05 inside, and then @@ -600,35 +627,39 @@ def test_step_noslip(self, simulation_factory, snap): sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [1, -3.72709, -3.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], - [0., 1., 0.]) - np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.7, 3.0, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], - [-1., 0., 0.]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-4.5, -2.5, 4.8]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [-1., -1., -1.]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[0], [1, -3.72709, -3.0] + ) + 
np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], [0.0, 1.0, 0.0]) + np.testing.assert_array_almost_equal(snap.mpcd.position[1], [3.7, 3.0, 0.0]) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[1], [-1.0, 0.0, 0.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-4.5, -2.5, 4.8] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[2], [-1.0, -1.0, -1.0] + ) # take another step, no particle collides, check for spurious collisions sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [1, -3.62709, -3.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], - [0., 1., 0.]) - np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.6, 3.0, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], - [-1., 0., 0.]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-4.6, -2.6, 4.7]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [-1., -1., -1.]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[0], [1, -3.62709, -3.0] + ) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], [0.0, 1.0, 0.0]) + np.testing.assert_array_almost_equal(snap.mpcd.position[1], [3.6, 3.0, 0.0]) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[1], [-1.0, 0.0, 0.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-4.6, -2.6, 4.7] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[2], [-1.0, -1.0, -1.0] + ) # take another step, last particle collides # wall intersection: -4.636956 4.663044 -2.63696 (calculated with @@ -637,18 +668,18 @@ def test_step_noslip(self, simulation_factory, snap): sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [1, -3.52709, -3.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], - [0., 1., 0.]) - np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.5, 3.0, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], - [-1., 0., 0.]) - np.testing.assert_array_almost_equal( - snap.mpcd.position[2], [-4.573913, -2.573919, 4.726087]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [1., 1., 1.]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[0], [1, -3.52709, -3.0] + ) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], [0.0, 1.0, 0.0]) + np.testing.assert_array_almost_equal(snap.mpcd.position[1], [3.5, 3.0, 0.0]) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[1], [-1.0, 0.0, 0.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-4.573913, -2.573919, 4.726087] + ) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], [1.0, 1.0, 1.0]) def test_step_slip(self, simulation_factory, snap): snap = self._make_particles(snap) @@ -659,7 +690,8 @@ def test_step_slip(self, simulation_factory, snap): expansion_separation=8.0, contraction_separation=4.0, repeat_length=15.0, - no_slip=False), + no_slip=False, + ), ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -668,18 +700,18 @@ def test_step_slip(self, simulation_factory, snap): sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [1, -3.9, -3.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], - [0., -1., 0.]) - 
np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.6, 3.0, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], - [1., 0, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-4.3, -2.3, 5.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [-1., -1., -1.]) + np.testing.assert_array_almost_equal(snap.mpcd.position[0], [1, -3.9, -3.0]) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[0], [0.0, -1.0, 0.0] + ) + np.testing.assert_array_almost_equal(snap.mpcd.position[1], [3.6, 3.0, 0.0]) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], [1.0, 0, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-4.3, -2.3, 5.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[2], [-1.0, -1.0, -1.0] + ) # take another step where particle 1 will now hit the wall vertically # point of contact with wall same test before, but velocity needs to be @@ -693,18 +725,20 @@ def test_step_slip(self, simulation_factory, snap): sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [0.971372, -3.831968, -3.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], - [-0.331135, 0.943583, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.7, 3.0, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], - [1., 0., 0.]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-4.4, -2.4, 4.9]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [-1., -1., -1.]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[0], [0.971372, -3.831968, -3.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[0], [-0.331135, 0.943583, 0.0] + ) + np.testing.assert_array_almost_equal(snap.mpcd.position[1], [3.7, 3.0, 0.0]) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], [1.0, 0.0, 0.0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-4.4, -2.4, 4.9] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[2], [-1.0, -1.0, -1.0] + ) # one more step, second particle collides # B = 0.418879 ( x0 approx 3.7) @@ -714,18 +748,24 @@ def test_step_slip(self, simulation_factory, snap): sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [0.9382585, -3.7376097, -3.0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], - [-0.331135, 0.943583, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.785073, 2.964365, 0.]) - np.testing.assert_array_almost_equal( - snap.mpcd.velocity[1], [0.70146211038, -0.71270674733, 0.]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-4.5, -2.5, 4.8]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [-1., -1., -1.]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[0], [0.9382585, -3.7376097, -3.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[0], [-0.331135, 0.943583, 0.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[1], [3.785073, 2.964365, 0.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[1], [0.70146211038, -0.71270674733, 0.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-4.5, -2.5, 4.8] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[2], [-1.0, -1.0, -1.0] + ) # two more steps, last particle collides # B = 0.390301 (x0 approx -4.6) @@ 
-736,10 +776,11 @@ def test_step_slip(self, simulation_factory, snap): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position[2], [-4.640625, -2.547881, 4.600002]) + snap.mpcd.position[2], [-4.640625, -2.547881, 4.600002] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity[2], - [-0.05819760480217273, 1.4130155833518931, -1.]) + snap.mpcd.velocity[2], [-0.05819760480217273, 1.4130155833518931, -1.0] + ) def test_check_mpcd_particles(self, simulation_factory, snap): """Test particle out of bounds.""" @@ -751,9 +792,8 @@ def test_check_mpcd_particles(self, simulation_factory, snap): ig.streaming_method = hoomd.mpcd.stream.BounceBack( period=1, geometry=hoomd.mpcd.geometry.CosineExpansionContraction( - expansion_separation=8.0, - contraction_separation=4.0, - repeat_length=15.0), + expansion_separation=8.0, contraction_separation=4.0, repeat_length=15.0 + ), ) sim.run(0) assert ig.streaming_method.check_mpcd_particles() @@ -761,9 +801,8 @@ def test_check_mpcd_particles(self, simulation_factory, snap): ig.streaming_method = hoomd.mpcd.stream.BounceBack( period=1, geometry=hoomd.mpcd.geometry.CosineExpansionContraction( - expansion_separation=4.0, - contraction_separation=2.0, - repeat_length=15.0), + expansion_separation=4.0, contraction_separation=2.0, repeat_length=15.0 + ), ) sim.run(0) assert not ig.streaming_method.check_mpcd_particles() @@ -771,9 +810,8 @@ def test_check_mpcd_particles(self, simulation_factory, snap): ig.streaming_method = hoomd.mpcd.stream.BounceBack( period=1, geometry=hoomd.mpcd.geometry.CosineExpansionContraction( - expansion_separation=8.0, - contraction_separation=4.0, - repeat_length=15.0), + expansion_separation=8.0, contraction_separation=4.0, repeat_length=15.0 + ), ) sim.run(0) assert ig.streaming_method.check_mpcd_particles() @@ -781,15 +819,13 @@ def test_check_mpcd_particles(self, simulation_factory, snap): ig.streaming_method = hoomd.mpcd.stream.BounceBack( period=1, geometry=hoomd.mpcd.geometry.CosineExpansionContraction( - expansion_separation=4.0, - contraction_separation=2.0, - repeat_length=15.0), + expansion_separation=4.0, contraction_separation=2.0, repeat_length=15.0 + ), ) assert not ig.streaming_method.check_mpcd_particles() class TestParallelPlates: - def test_step_noslip(self, simulation_factory, snap): """Test step with no-slip boundary conditions.""" if snap.communicator.rank == 0: @@ -798,8 +834,8 @@ def test_step_noslip(self, simulation_factory, snap): snap.mpcd.velocity[:] = [[1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]] sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( - period=1, - geometry=hoomd.mpcd.geometry.ParallelPlates(separation=8.0)) + period=1, geometry=hoomd.mpcd.geometry.ParallelPlates(separation=8.0) + ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -808,27 +844,33 @@ def test_step_noslip(self, simulation_factory, snap): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[-4.95, 3.95, 4.95], [-0.1, -3.9, -0.1]]) + snap.mpcd.position, [[-4.95, 3.95, 4.95], [-0.1, -3.9, -0.1]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]) + snap.mpcd.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]] + ) # take another step where one particle will now hit the wall sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - 
snap.mpcd.position, [[-4.95, 3.95, 4.95], [-0.2, -4.0, -0.2]]) + snap.mpcd.position, [[-4.95, 3.95, 4.95], [-0.2, -4.0, -0.2]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0]]) + snap.mpcd.velocity, [[-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0]] + ) # take another step, reflecting the second particle sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[4.95, 3.85, -4.95], [-0.1, -3.9, -0.1]]) + snap.mpcd.position, [[4.95, 3.85, -4.95], [-0.1, -3.9, -0.1]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[-1.0, -1.0, 1.0], [1.0, 1.0, 1.0]]) + snap.mpcd.velocity, [[-1.0, -1.0, 1.0], [1.0, 1.0, 1.0]] + ) def test_step_slip(self, simulation_factory, snap): """Test step with slip boundary conditions.""" @@ -839,8 +881,7 @@ def test_step_slip(self, simulation_factory, snap): sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.ParallelPlates(separation=8.0, - no_slip=False), + geometry=hoomd.mpcd.geometry.ParallelPlates(separation=8.0, no_slip=False), ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -850,27 +891,33 @@ def test_step_slip(self, simulation_factory, snap): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[-4.95, 3.95, 4.95], [-0.1, -3.9, -0.1]]) + snap.mpcd.position, [[-4.95, 3.95, 4.95], [-0.1, -3.9, -0.1]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]) + snap.mpcd.velocity, [[1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]] + ) # take another step where one particle will now hit the wall sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[-4.85, 3.95, 4.85], [-0.2, -4.0, -0.2]]) + snap.mpcd.position, [[-4.85, 3.95, 4.85], [-0.2, -4.0, -0.2]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]]) + snap.mpcd.velocity, [[1.0, -1.0, -1.0], [-1.0, -1.0, -1.0]] + ) # take another step, reflecting perpendicular motion of second particle sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[-4.75, 3.85, 4.75], [-0.3, -3.9, -0.3]]) + snap.mpcd.position, [[-4.75, 3.85, 4.75], [-0.3, -3.9, -0.3]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[1.0, -1.0, -1.0], [-1.0, 1.0, -1.0]]) + snap.mpcd.velocity, [[1.0, -1.0, -1.0], [-1.0, 1.0, -1.0]] + ) def test_step_moving_wall(self, simulation_factory, snap): """Test step with moving wall. 
@@ -889,9 +936,9 @@ def test_step_moving_wall(self, simulation_factory, snap): sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.ParallelPlates(separation=8.0, - speed=1, - no_slip=True), + geometry=hoomd.mpcd.geometry.ParallelPlates( + separation=8.0, speed=1, no_slip=True + ), ) ig = hoomd.mpcd.Integrator(dt=0.3, streaming_method=sm) sim.operations.integrator = ig @@ -901,19 +948,20 @@ def test_step_moving_wall(self, simulation_factory, snap): snap = sim.state.get_snapshot() if snap.communicator.rank == 0: np.testing.assert_array_almost_equal( - snap.mpcd.position, [[-4.75, 3.85, -4.95], [-0.4, -3.9, -0.1]]) + snap.mpcd.position, [[-4.75, 3.85, -4.95], [-0.4, -3.9, -0.1]] + ) np.testing.assert_array_almost_equal( - snap.mpcd.velocity, [[1.0, -1.0, 1.0], [0.0, 1.0, 1.0]]) + snap.mpcd.velocity, [[1.0, -1.0, 1.0], [0.0, 1.0, 1.0]] + ) @pytest.mark.parametrize("H,expected_result", [(4.0, True), (3.8, False)]) - def test_check_mpcd_particles(self, simulation_factory, snap, H, - expected_result): + def test_check_mpcd_particles(self, simulation_factory, snap, H, expected_result): if snap.communicator.rank == 0: snap.mpcd.position[0] = [0, 3.85, 0] sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( - period=1, - geometry=hoomd.mpcd.geometry.ParallelPlates(separation=2 * H)) + period=1, geometry=hoomd.mpcd.geometry.ParallelPlates(separation=2 * H) + ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -922,7 +970,6 @@ def test_check_mpcd_particles(self, simulation_factory, snap, H, class TestPlanarPore: - def _make_particles(self, snap): if snap.communicator.rank == 0: snap.mpcd.N = 8 @@ -953,9 +1000,9 @@ def test_step_noslip(self, simulation_factory, snap): sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.PlanarPore(separation=8.0, - length=6.0, - no_slip=True), + geometry=hoomd.mpcd.geometry.PlanarPore( + separation=8.0, length=6.0, no_slip=True + ), ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -964,72 +1011,78 @@ def test_step_noslip(self, simulation_factory, snap): sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [-3.05, -4.11, -4]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], - [-1.0, -1.0, 1.0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.05, 4.11, 4]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], - [1.0, 1.0, -1.0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-3.05, 4.11, -2]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [-1.0, 1.0, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[3], - [3.05, -4.11, 2]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[3], - [1.0, -1.0, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[4], - [0, 3.95, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[4], - [0, -1.0, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[5], - [0, -3.95, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[5], - [0, 1.0, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[0], [-3.05, -4.11, -4] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[0], [-1.0, -1.0, 1.0] + ) + np.testing.assert_array_almost_equal(snap.mpcd.position[1], [3.05, 4.11, 4]) + np.testing.assert_array_almost_equal( 
+ snap.mpcd.velocity[1], [1.0, 1.0, -1.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-3.05, 4.11, -2] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[2], [-1.0, 1.0, 0.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[3], [3.05, -4.11, 2] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[3], [1.0, -1.0, 0.0] + ) + np.testing.assert_array_almost_equal(snap.mpcd.position[4], [0, 3.95, 0]) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[4], [0, -1.0, 0]) + np.testing.assert_array_almost_equal(snap.mpcd.position[5], [0, -3.95, 0]) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[5], [0, 1.0, 0]) # hits y = -4 after 0.02, then reverses. # x is 3.01, so reverses to 3.09 - np.testing.assert_array_almost_equal(snap.mpcd.position[6], - [3.09, -3.92, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[6], - [1, 1, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[6], [3.09, -3.92, 0] + ) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[6], [1, 1, 0]) # hits x = 3 after 0.02, then reverses. # y is -3.99, so reverses to -3.91 - np.testing.assert_array_almost_equal(snap.mpcd.position[7], - [3.08, -3.91, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[7], - [1, 1, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[7], [3.08, -3.91, 0] + ) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[7], [1, 1, 0]) # take another step where nothing hits now sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [-3.15, -4.21, -3.9]) - np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.15, 4.21, 3.9]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-3.15, 4.21, -2]) - np.testing.assert_array_almost_equal(snap.mpcd.position[3], - [3.15, -4.21, 2]) - np.testing.assert_array_almost_equal(snap.mpcd.position[4], - [0, 3.85, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[5], - [0, -3.85, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[6], - [3.19, -3.82, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[7], - [3.18, -3.81, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[0], [-3.15, -4.21, -3.9] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[1], [3.15, 4.21, 3.9] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-3.15, 4.21, -2] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[3], [3.15, -4.21, 2] + ) + np.testing.assert_array_almost_equal(snap.mpcd.position[4], [0, 3.85, 0]) + np.testing.assert_array_almost_equal(snap.mpcd.position[5], [0, -3.85, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[6], [3.19, -3.82, 0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[7], [3.18, -3.81, 0] + ) def test_step_slip(self, simulation_factory, snap): snap = self._make_particles(snap) sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( period=1, - geometry=hoomd.mpcd.geometry.PlanarPore(separation=8.0, - length=6.0, - no_slip=False), + geometry=hoomd.mpcd.geometry.PlanarPore( + separation=8.0, length=6.0, no_slip=False + ), ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -1038,63 +1091,69 @@ def test_step_slip(self, simulation_factory, snap): sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - 
np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [-3.05, -4.01, -4.1]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[0], - [-1.0, 1.0, -1.0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.05, 4.01, 4.1]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[1], - [1.0, -1.0, 1.0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-3.05, 4.01, -2]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[2], - [-1.0, -1.0, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[3], - [3.05, -4.01, 2]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[3], - [1.0, 1.0, 0.0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[4], - [0, 3.95, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[4], - [0, -1.0, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[5], - [0, -3.95, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[5], - [0, 1.0, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[0], [-3.05, -4.01, -4.1] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[0], [-1.0, 1.0, -1.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[1], [3.05, 4.01, 4.1] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[1], [1.0, -1.0, 1.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-3.05, 4.01, -2] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.velocity[2], [-1.0, -1.0, 0.0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[3], [3.05, -4.01, 2] + ) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[3], [1.0, 1.0, 0.0]) + np.testing.assert_array_almost_equal(snap.mpcd.position[4], [0, 3.95, 0]) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[4], [0, -1.0, 0]) + np.testing.assert_array_almost_equal(snap.mpcd.position[5], [0, -3.95, 0]) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[5], [0, 1.0, 0]) # hits y = -4 after 0.02, then reverses. # x is not touched because slip - np.testing.assert_array_almost_equal(snap.mpcd.position[6], - [2.93, -3.92, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[6], - [-1, 1, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[6], [2.93, -3.92, 0] + ) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[6], [-1, 1, 0]) # hits x = 3 after 0.02, then reverses. 
# y is not touched because slip - np.testing.assert_array_almost_equal(snap.mpcd.position[7], - [3.08, -4.07, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.velocity[7], - [1, -1, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[7], [3.08, -4.07, 0] + ) + np.testing.assert_array_almost_equal(snap.mpcd.velocity[7], [1, -1, 0]) # take another step where nothing hits now sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: - np.testing.assert_array_almost_equal(snap.mpcd.position[0], - [-3.15, -3.91, -4.2]) - np.testing.assert_array_almost_equal(snap.mpcd.position[1], - [3.15, 3.91, 4.2]) - np.testing.assert_array_almost_equal(snap.mpcd.position[2], - [-3.15, 3.91, -2]) - np.testing.assert_array_almost_equal(snap.mpcd.position[3], - [3.15, -3.91, 2]) - np.testing.assert_array_almost_equal(snap.mpcd.position[4], - [0, 3.85, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[5], - [0, -3.85, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[6], - [2.83, -3.82, 0]) - np.testing.assert_array_almost_equal(snap.mpcd.position[7], - [3.18, -4.17, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[0], [-3.15, -3.91, -4.2] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[1], [3.15, 3.91, 4.2] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[2], [-3.15, 3.91, -2] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[3], [3.15, -3.91, 2] + ) + np.testing.assert_array_almost_equal(snap.mpcd.position[4], [0, 3.85, 0]) + np.testing.assert_array_almost_equal(snap.mpcd.position[5], [0, -3.85, 0]) + np.testing.assert_array_almost_equal( + snap.mpcd.position[6], [2.83, -3.82, 0] + ) + np.testing.assert_array_almost_equal( + snap.mpcd.position[7], [3.18, -4.17, 0] + ) def test_check_mpcd_particles(self, simulation_factory, snap): """Test particle out of bounds.""" @@ -1125,7 +1184,6 @@ def test_check_mpcd_particles(self, simulation_factory, snap): class TestSphere: - def _make_particles(self, snap): if snap.communicator.rank == 0: snap.mpcd.N = 4 @@ -1133,15 +1191,15 @@ def _make_particles(self, snap): # particle 1: Hits the wall in the second streaming step, gets # reflected accordingly snap.mpcd.position[0] = [2.85, 0.895, np.sqrt(6) + 0.075] - snap.mpcd.velocity[0] = [1., 0.7, -0.5] + snap.mpcd.velocity[0] = [1.0, 0.7, -0.5] # particle 2: Always inside the sphere, so no reflection by the BC - snap.mpcd.position[1] = [0., 0., 0.] - snap.mpcd.velocity[1] = [-1., -1., -1.] + snap.mpcd.position[1] = [0.0, 0.0, 0.0] + snap.mpcd.velocity[1] = [-1.0, -1.0, -1.0] # particle 3: Hits the wall normally and gets reflected back. 
- snap.mpcd.position[2] = 0.965 * np.array([-1., -2., np.sqrt(11)]) - snap.mpcd.velocity[2] = 0.25 * np.array([-1., -2., np.sqrt(11)]) + snap.mpcd.position[2] = 0.965 * np.array([-1.0, -2.0, np.sqrt(11)]) + snap.mpcd.velocity[2] = 0.25 * np.array([-1.0, -2.0, np.sqrt(11)]) # particle 4: Lands almost exactly on the sphere surface and needs # to be backtracked one complete step @@ -1154,7 +1212,8 @@ def test_step_noslip(self, simulation_factory, snap): snap = self._make_particles(snap) sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( - period=1, geometry=hoomd.mpcd.geometry.Sphere(radius=4.0)) + period=1, geometry=hoomd.mpcd.geometry.Sphere(radius=4.0) + ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig @@ -1164,18 +1223,16 @@ def test_step_noslip(self, simulation_factory, snap): test_positions = np.zeros((4, 3)) test_positions[0] = [2.95, 0.965, np.sqrt(6) + 0.025] test_positions[1] = [-0.1, -0.1, -0.1] - test_positions[2] = 0.99 * np.array([-1., -2., np.sqrt(11)]) - test_positions[3] = [2., -2., -np.sqrt(8)] - np.testing.assert_array_almost_equal(snap.mpcd.position, - test_positions) + test_positions[2] = 0.99 * np.array([-1.0, -2.0, np.sqrt(11)]) + test_positions[3] = [2.0, -2.0, -np.sqrt(8)] + np.testing.assert_array_almost_equal(snap.mpcd.position, test_positions) test_velocities = np.zeros((4, 3)) - test_velocities[0] = [1., 0.7, -0.5] - test_velocities[1] = [-1., -1., -1.] - test_velocities[2] = 0.25 * np.array([-1., -2., np.sqrt(11)]) + test_velocities[0] = [1.0, 0.7, -0.5] + test_velocities[1] = [-1.0, -1.0, -1.0] + test_velocities[2] = 0.25 * np.array([-1.0, -2.0, np.sqrt(11)]) test_velocities[3] = [0.8, -0.4, -0.5] - np.testing.assert_array_almost_equal(snap.mpcd.velocity, - test_velocities) + np.testing.assert_array_almost_equal(snap.mpcd.velocity, test_velocities) sim.run(1) snap = sim.state.get_snapshot() @@ -1183,18 +1240,16 @@ def test_step_noslip(self, simulation_factory, snap): test_positions = np.zeros((4, 3)) test_positions[0] = [2.95, 0.965, np.sqrt(6) + 0.025] test_positions[1] = [-0.2, -0.2, -0.2] - test_positions[2] = 0.985 * np.array([-1., -2., np.sqrt(11)]) + test_positions[2] = 0.985 * np.array([-1.0, -2.0, np.sqrt(11)]) test_positions[3] = [1.92, -1.96, -np.sqrt(8) + 0.05] - np.testing.assert_array_almost_equal(snap.mpcd.position, - test_positions) + np.testing.assert_array_almost_equal(snap.mpcd.position, test_positions) test_velocities = np.zeros((4, 3)) - test_velocities[0] = [-1., -0.7, 0.5] - test_velocities[1] = [-1., -1., -1.] 
- test_velocities[2] = -0.25 * np.array([-1., -2., np.sqrt(11)]) + test_velocities[0] = [-1.0, -0.7, 0.5] + test_velocities[1] = [-1.0, -1.0, -1.0] + test_velocities[2] = -0.25 * np.array([-1.0, -2.0, np.sqrt(11)]) test_velocities[3] = [-0.8, 0.4, 0.5] - np.testing.assert_array_almost_equal(snap.mpcd.velocity, - test_velocities) + np.testing.assert_array_almost_equal(snap.mpcd.velocity, test_velocities) sim.run(1) snap = sim.state.get_snapshot() @@ -1202,18 +1257,16 @@ def test_step_noslip(self, simulation_factory, snap): test_positions = np.zeros((4, 3)) test_positions[0] = [2.85, 0.895, np.sqrt(6) + 0.075] test_positions[1] = [-0.3, -0.3, -0.3] - test_positions[2] = 0.96 * np.array([-1., -2., np.sqrt(11)]) + test_positions[2] = 0.96 * np.array([-1.0, -2.0, np.sqrt(11)]) test_positions[3] = [1.84, -1.92, -np.sqrt(8) + 0.1] - np.testing.assert_array_almost_equal(snap.mpcd.position, - test_positions) + np.testing.assert_array_almost_equal(snap.mpcd.position, test_positions) test_velocities = np.zeros((4, 3)) - test_velocities[0] = [-1., -0.7, 0.5] - test_velocities[1] = [-1., -1., -1.] - test_velocities[2] = -0.25 * np.array([-1., -2., np.sqrt(11)]) + test_velocities[0] = [-1.0, -0.7, 0.5] + test_velocities[1] = [-1.0, -1.0, -1.0] + test_velocities[2] = -0.25 * np.array([-1.0, -2.0, np.sqrt(11)]) test_velocities[3] = [-0.8, 0.4, 0.5] - np.testing.assert_array_almost_equal(snap.mpcd.velocity, - test_velocities) + np.testing.assert_array_almost_equal(snap.mpcd.velocity, test_velocities) def test_step_slip(self, simulation_factory, snap): """Test step with slip boundary conditions.""" @@ -1232,51 +1285,45 @@ def test_step_slip(self, simulation_factory, snap): test_positions = np.zeros((4, 3)) test_positions[0] = [2.95, 0.965, np.sqrt(6) + 0.025] test_positions[1] = [-0.1, -0.1, -0.1] - test_positions[2] = 0.99 * np.array([-1., -2., np.sqrt(11)]) - test_positions[3] = [2., -2., -np.sqrt(8)] - np.testing.assert_array_almost_equal(snap.mpcd.position, - test_positions) + test_positions[2] = 0.99 * np.array([-1.0, -2.0, np.sqrt(11)]) + test_positions[3] = [2.0, -2.0, -np.sqrt(8)] + np.testing.assert_array_almost_equal(snap.mpcd.position, test_positions) test_velocities = np.zeros((4, 3)) - test_velocities[0] = [1., 0.7, -0.5] - test_velocities[1] = [-1., -1., -1.] - test_velocities[2] = 0.25 * np.array([-1., -2., np.sqrt(11)]) + test_velocities[0] = [1.0, 0.7, -0.5] + test_velocities[1] = [-1.0, -1.0, -1.0] + test_velocities[2] = 0.25 * np.array([-1.0, -2.0, np.sqrt(11)]) test_velocities[3] = [0.8, -0.4, -0.5] - np.testing.assert_array_almost_equal(snap.mpcd.velocity, - test_velocities) + np.testing.assert_array_almost_equal(snap.mpcd.velocity, test_velocities) sim.run(1) snap = sim.state.get_snapshot() if snap.communicator.rank == 0: # calculate change for particle 0 by formulas - r0_before = np.array([3., 1., np.sqrt(6)]) - v0_before = np.array([1., 0.7, -0.5]) - v0_after = v0_before - 1 / 8. * np.dot(v0_before, - r0_before) * r0_before + r0_before = np.array([3.0, 1.0, np.sqrt(6)]) + v0_before = np.array([1.0, 0.7, -0.5]) + v0_after = v0_before - 1 / 8.0 * np.dot(v0_before, r0_before) * r0_before r0_after = r0_before + v0_after * 0.05 # calculate change for particle 3 by formulas - r3_before = np.array([2., -2., -np.sqrt(8)]) + r3_before = np.array([2.0, -2.0, -np.sqrt(8)]) v3_before = np.array([0.8, -0.4, -0.5]) - v3_after = v3_before - 1. / 8. 
* np.dot(v3_before, - r3_before) * r3_before + v3_after = v3_before - 1.0 / 8.0 * np.dot(v3_before, r3_before) * r3_before r3_after = r3_before + v3_after * 0.1 test_positions = np.zeros((4, 3)) test_positions[0] = r0_after test_positions[1] = [-0.2, -0.2, -0.2] - test_positions[2] = 0.985 * np.array([-1., -2., np.sqrt(11)]) + test_positions[2] = 0.985 * np.array([-1.0, -2.0, np.sqrt(11)]) test_positions[3] = r3_after - np.testing.assert_array_almost_equal(snap.mpcd.position, - test_positions) + np.testing.assert_array_almost_equal(snap.mpcd.position, test_positions) test_velocities = np.zeros((4, 3)) test_velocities[0] = v0_after - test_velocities[1] = [-1., -1., -1.] - test_velocities[2] = -0.25 * np.array([-1., -2., np.sqrt(11)]) + test_velocities[1] = [-1.0, -1.0, -1.0] + test_velocities[2] = -0.25 * np.array([-1.0, -2.0, np.sqrt(11)]) test_velocities[3] = v3_after - np.testing.assert_array_almost_equal(snap.mpcd.velocity, - test_velocities) + np.testing.assert_array_almost_equal(snap.mpcd.velocity, test_velocities) sim.run(1) snap = sim.state.get_snapshot() @@ -1288,27 +1335,25 @@ def test_step_slip(self, simulation_factory, snap): test_positions = np.zeros((4, 3)) test_positions[0] = r0_after test_positions[1] = [-0.3, -0.3, -0.3] - test_positions[2] = 0.96 * np.array([-1., -2., np.sqrt(11)]) + test_positions[2] = 0.96 * np.array([-1.0, -2.0, np.sqrt(11)]) test_positions[3] = r3_after - np.testing.assert_array_almost_equal(snap.mpcd.position, - test_positions) + np.testing.assert_array_almost_equal(snap.mpcd.position, test_positions) test_velocities = np.zeros((4, 3)) test_velocities[0] = v0_after - test_velocities[1] = [-1., -1., -1.] - test_velocities[2] = -0.25 * np.array([-1., -2., np.sqrt(11)]) + test_velocities[1] = [-1.0, -1.0, -1.0] + test_velocities[2] = -0.25 * np.array([-1.0, -2.0, np.sqrt(11)]) test_velocities[3] = v3_after - np.testing.assert_array_almost_equal(snap.mpcd.velocity, - test_velocities) + np.testing.assert_array_almost_equal(snap.mpcd.velocity, test_velocities) @pytest.mark.parametrize("R,expected_result", [(4.0, True), (3.8, False)]) - def test_check_mpcd_particles(self, simulation_factory, snap, R, - expected_result): + def test_check_mpcd_particles(self, simulation_factory, snap, R, expected_result): if snap.communicator.rank == 0: snap.mpcd.position[0] = [0, 3.85, 0] sim = simulation_factory(snap) sm = hoomd.mpcd.stream.BounceBack( - period=1, geometry=hoomd.mpcd.geometry.Sphere(radius=R)) + period=1, geometry=hoomd.mpcd.geometry.Sphere(radius=R) + ) ig = hoomd.mpcd.Integrator(dt=0.1, streaming_method=sm) sim.operations.integrator = ig diff --git a/hoomd/mpcd/pytest/test_tune.py b/hoomd/mpcd/pytest/test_tune.py index ddec40026d..49f62a9bb8 100644 --- a/hoomd/mpcd/pytest/test_tune.py +++ b/hoomd/mpcd/pytest/test_tune.py @@ -20,7 +20,6 @@ def snap(): class TestParticleSorter: - def test_create(self, simulation_factory, snap): sim = simulation_factory(snap) @@ -45,6 +44,7 @@ def test_pickling(self, simulation_factory, snap): sim = simulation_factory(snap) sim.operations.integrator = hoomd.mpcd.Integrator( - dt=0.02, mpcd_particle_sorter=sorter) + dt=0.02, mpcd_particle_sorter=sorter + ) sim.run(0) pickling_check(sorter) diff --git a/hoomd/mpcd/stream.py b/hoomd/mpcd/stream.py index b1f0e33ff5..8a2dfc0e8c 100644 --- a/hoomd/mpcd/stream.py +++ b/hoomd/mpcd/stream.py @@ -68,9 +68,12 @@ class StreamingMethod(Operation): `StreamingMethod` is constructed, but its attributes can be modified. 
""" + __doc__ = __doc__.replace("{inherited}", Operation._doc_inherited) - _doc_inherited = Operation._doc_inherited + """ + _doc_inherited = ( + Operation._doc_inherited + + """ ---------- **Members inherited from** @@ -86,6 +89,7 @@ class StreamingMethod(Operation): Body force on MPCD particles. `Read more... ` """ + ) def __init__(self, period, mpcd_particle_force=None): super().__init__() @@ -124,7 +128,8 @@ class Bulk(StreamingMethod): stream = hoomd.mpcd.stream.Bulk( period=1, - mpcd_particle_force=hoomd.mpcd.force.ConstantForce((1, 0, 0))) + mpcd_particle_force=hoomd.mpcd.force.ConstantForce((1, 0, 0)), + ) simulation.operations.integrator.streaming_method = stream """ @@ -157,8 +162,7 @@ def _attach_hook(self): if isinstance(sim.device, hoomd.device.GPU): class_info[1] += "GPU" class_ = getattr(*class_info, None) - assert class_ is not None, ("C++ streaming method could not be " - "determined") + assert class_ is not None, "C++ streaming method could not be " "determined" self._cpp_obj = class_( sim.state._cpp_sys_def, @@ -222,7 +226,9 @@ class BounceBack(StreamingMethod): stream = hoomd.mpcd.stream.BounceBack( period=1, geometry=hoomd.mpcd.geometry.ParallelPlates( - separation=6.0, speed=1.0, no_slip=True)) + separation=6.0, speed=1.0, no_slip=True + ), + ) simulation.operations.integrator.streaming_method = stream Pressure driven flow between parallel plates. @@ -232,8 +238,10 @@ class BounceBack(StreamingMethod): stream = hoomd.mpcd.stream.BounceBack( period=1, geometry=hoomd.mpcd.geometry.ParallelPlates( - separation=6.0, no_slip=True), - mpcd_particle_force=hoomd.mpcd.force.ConstantForce((1, 0, 0))) + separation=6.0, no_slip=True + ), + mpcd_particle_force=hoomd.mpcd.force.ConstantForce((1, 0, 0)), + ) simulation.operations.integrator.streaming_method = stream {inherited} @@ -334,7 +342,7 @@ def _register_cpp_class(cls, geometry, force, module, cpp_class_name): __all__ = [ - 'BounceBack', - 'Bulk', - 'StreamingMethod', + "BounceBack", + "Bulk", + "StreamingMethod", ] diff --git a/hoomd/mpcd/tune.py b/hoomd/mpcd/tune.py index 5f729909e7..3c9c322c43 100644 --- a/hoomd/mpcd/tune.py +++ b/hoomd/mpcd/tune.py @@ -60,10 +60,9 @@ def _attach_hook(self): class_ = _mpcd.SorterGPU else: class_ = _mpcd.Sorter - self._cpp_obj = class_(self._simulation.state._cpp_sys_def, - self.trigger) + self._cpp_obj = class_(self._simulation.state._cpp_sys_def, self.trigger) __all__ = [ - 'ParticleSorter', + "ParticleSorter", ] diff --git a/hoomd/operation.py b/hoomd/operation.py index f55adba20a..b5c5becf8b 100644 --- a/hoomd/operation.py +++ b/hoomd/operation.py @@ -48,8 +48,8 @@ class _HOOMDGetSetAttrBase: that exist due to ``__getattr__`` such as those from ``_param_dict`` or ``_typeparam_dict``. 
""" - _reserved_default_attrs = dict(_param_dict=ParameterDict, - _typeparam_dict=dict) + + _reserved_default_attrs = dict(_param_dict=ParameterDict, _typeparam_dict=dict) _override_setattr = set() _skip_for_equality = set() @@ -103,8 +103,11 @@ def _setattr_typeparam(self, attr, value): for k, v in value.items(): self._typeparam_dict[attr][k] = v except TypeError: - raise ValueError("To set {}, you must use a dictionary " - "with types as keys.".format(attr)) + raise ValueError( + "To set {}, you must use a dictionary " "with types as keys.".format( + attr + ) + ) def __dir__(self): """Expose all attributes for dynamic querying in notebooks and IDEs.""" @@ -179,9 +182,7 @@ def _remove_dependent(self, obj): pass -class _HOOMDBaseObject(_HOOMDGetSetAttrBase, - _DependencyRelation, - metaclass=Loggable): +class _HOOMDBaseObject(_HOOMDGetSetAttrBase, _DependencyRelation, metaclass=Loggable): """Handles attaching/detaching to a simulation. ``_StatefulAttrBase`` handles getting and setting attributes as well as @@ -208,23 +209,28 @@ class _HOOMDBaseObject(_HOOMDGetSetAttrBase, its consumers. This does allow for simple dependency handling outside of the features of `_DependencyRelation`. """ + _reserved_default_attrs = { **_HOOMDGetSetAttrBase._reserved_default_attrs, - '_cpp_obj': None, - '_simulation_': None, - '_dependents': list, - '_dependencies': list, + "_cpp_obj": None, + "_simulation_": None, + "_dependents": list, + "_dependencies": list, # Keeps track of the number of times _attach is called to avoid # premature detaching. "_use_count": int, } _skip_for_equality = { - '_cpp_obj', '_dependents', '_dependencies', '_simulation_', "_use_count" + "_cpp_obj", + "_dependents", + "_dependencies", + "_simulation_", + "_use_count", } # _use_count must be included or attaching and detaching won't work as # expected as _use_count may not equal 0. - _remove_for_pickling = ('_simulation_', '_cpp_obj', "_use_count") + _remove_for_pickling = ("_simulation_", "_cpp_obj", "_use_count") def _detach(self, force=False): """Decrement attach count and destroy C++ object if count == 0. @@ -353,8 +359,8 @@ def _apply_typeparam_dict(self, cpp_obj, simulation): typeparam._attach(cpp_obj, simulation.state) except ValueError as err: raise err.__class__( - f"For {type(self)} in TypeParameter {typeparam.name} " - f"{err!s}") + f"For {type(self)} in TypeParameter {typeparam.name} " f"{err!s}" + ) def _unapply_typeparam_dict(self): for typeparam in self._typeparam_dict.values(): @@ -496,7 +502,7 @@ def is_tuning_complete(self): .. code-block:: python - while (not operation.is_tuning_complete): + while not operation.is_tuning_complete: simulation.run(1000) """ if not self._attached: @@ -518,8 +524,9 @@ def tune_kernel_parameters(self): operation.tune_kernel_parameters() """ if not self._attached: - raise RuntimeError("Call Simulation.run() before " - "tune_kernel_parameters.") + raise RuntimeError( + "Call Simulation.run() before " "tune_kernel_parameters." + ) self._cpp_obj.startAutotuning() @@ -565,7 +572,9 @@ class TriggeredOperation(Operation): __doc__ = __doc__.replace("{inherited}", Operation._doc_inherited) - _doc_inherited = Operation._doc_inherited + """ + _doc_inherited = ( + Operation._doc_inherited + + """ ---------- **Members inherited from** @@ -576,6 +585,7 @@ class TriggeredOperation(Operation): The trigger to activate this operation. `Read more... 
` """ + ) def __init__(self, trigger): trigger_param = ParameterDict(trigger=hoomd.trigger.Trigger) @@ -592,7 +602,8 @@ class Updater(TriggeredOperation): This class should not be instantiated by users. The class can be used for `isinstance` or `issubclass` checks. """ - _cpp_list_name = 'updaters' + + _cpp_list_name = "updaters" __doc__ += TriggeredOperation._doc_inherited @@ -606,7 +617,8 @@ class Writer(TriggeredOperation): This class should not be instantiated by users. The class can be used for `isinstance` or `issubclass` checks. """ - _cpp_list_name = 'analyzers' + + _cpp_list_name = "analyzers" __doc__ += TriggeredOperation._doc_inherited @@ -621,6 +633,7 @@ class Compute(Operation): This class should not be instantiated by users. The class can be used for `isinstance` or `issubclass` checks. """ + __doc__ += Operation._doc_inherited @@ -636,6 +649,7 @@ class Tuner(TriggeredOperation): This class should not be instantiated by users. The class can be used for `isinstance` or `issubclass` checks. """ + __doc__ += TriggeredOperation._doc_inherited @@ -651,6 +665,7 @@ class Integrator(Operation): This class should not be instantiated by users. The class can be used for `isinstance` or `issubclass` checks. """ + __doc__ += Operation._doc_inherited def _attach_hook(self): @@ -661,12 +676,12 @@ def _attach_hook(self): __all__ = [ - 'AutotunedObject', - 'Compute', - 'Integrator', - 'Operation', - 'TriggeredOperation', - 'Tuner', - 'Updater', - 'Writer', + "AutotunedObject", + "Compute", + "Integrator", + "Operation", + "TriggeredOperation", + "Tuner", + "Updater", + "Writer", ] diff --git a/hoomd/operations.py b/hoomd/operations.py index 00f77abd0e..989ad1ebdc 100644 --- a/hoomd/operations.py +++ b/hoomd/operations.py @@ -66,7 +66,7 @@ class Operations(Collection): def __init__(self): self._scheduled = False self._simulation = None - sync_func = syncedlist._PartialGetAttr('_cpp_obj') + sync_func = syncedlist._PartialGetAttr("_cpp_obj") self._updaters = syncedlist.SyncedList(Updater, sync_func) self._writers = syncedlist.SyncedList(Writer, sync_func) self._tuners = syncedlist.SyncedList(Tuner, sync_func) @@ -120,8 +120,10 @@ def add(self, operation): try: container = self._get_proper_container(operation) except TypeError: - raise TypeError(f"Type {type(operation)} is not a valid " - f"type to add to Operations.") + raise TypeError( + f"Type {type(operation)} is not a valid " + f"type to add to Operations." + ) container.append(operation) def __iadd__(self, operation): @@ -167,8 +169,10 @@ def remove(self, operation): try: container = self._get_proper_container(operation) except TypeError: - raise TypeError(f"Type {type(operation)} is not a valid " - f"type to remove from Operations.") + raise TypeError( + f"Type {type(operation)} is not a valid " + f"type to remove from Operations." + ) container.remove(operation) def __isub__(self, operation): @@ -255,8 +259,9 @@ def __iter__(self): pass """ integrator = (self._integrator,) if self._integrator else [] - yield from chain(self._tuners, self._updaters, integrator, - self._writers, self._computes) + yield from chain( + self._tuners, self._updaters, integrator, self._writers, self._computes + ) def __len__(self): """Return the number of operations contained in this collection. 
@@ -296,8 +301,10 @@ def integrator(self): def integrator(self, op): if op is not None: if not isinstance(op, Integrator): - raise TypeError("Cannot set integrator to a type not derived " - "from hoomd.operation.Integrator") + raise TypeError( + "Cannot set integrator to a type not derived " + "from hoomd.operation.Integrator" + ) old_ref = self.integrator self._integrator = op # Handle attaching and detaching integrators dealing with None values @@ -360,7 +367,7 @@ def is_tuning_complete(self): .. code-block:: python - while (not simulation.operations.is_tuning_complete): + while not simulation.operations.is_tuning_complete: simulation.run(1000) """ if not self._scheduled: @@ -371,7 +378,8 @@ def is_tuning_complete(self): return result else: return _hoomd.mpi_allreduce_bcast_and( - result, self._simulation.device._cpp_exec_conf) + result, self._simulation.device._cpp_exec_conf + ) def tune_kernel_parameters(self): """Start tuning kernel parameters in all children. @@ -386,8 +394,9 @@ def tune_kernel_parameters(self): simulation.operations.tune_kernel_parameters() """ if not self._scheduled: - raise RuntimeError("Call Simulation.run() before " - "tune_kernel_parameters.") + raise RuntimeError( + "Call Simulation.run() before " "tune_kernel_parameters." + ) for op in self: op.tune_kernel_parameters() @@ -396,8 +405,8 @@ def __getstate__(self): """Get the current state of the operations container for pickling.""" # ensure that top level changes to self.__dict__ are not propagated state = copy(self.__dict__) - state['_simulation'] = None - state['_scheduled'] = False + state["_simulation"] = None + state["_scheduled"] = False return state @property diff --git a/hoomd/pytest/dummy.py b/hoomd/pytest/dummy.py index 3c4d8887ed..7025d31b77 100644 --- a/hoomd/pytest/dummy.py +++ b/hoomd/pytest/dummy.py @@ -6,7 +6,6 @@ class DummySimulation: - def __init__(self): self.state = DummyState() self.operations = DummyOperations() @@ -15,13 +14,11 @@ def __init__(self): class DummySystem: - def __init__(self): self.dummy_list = [] class DummyState: - def __init__(self): pass @@ -35,7 +32,6 @@ class DummyOperations: class DummyCppObj: - def __init__(self): self._dict = dict() @@ -73,6 +69,7 @@ class DummyOperation(Operation): This is for testing purposes. 
""" + _current_obj_number = 0 def __init__(self): @@ -88,6 +85,5 @@ def __eq__(self, other): class DummyTrigger(Trigger): - def __call__(self, ts): return True diff --git a/hoomd/pytest/test_attr_tuner.py b/hoomd/pytest/test_attr_tuner.py index 76892e49a4..10e19d1ea9 100644 --- a/hoomd/pytest/test_attr_tuner.py +++ b/hoomd/pytest/test_attr_tuner.py @@ -13,11 +13,13 @@ def attr_dict(): @pytest.fixture def attr_definition(attr_dict): - return ManualTuneDefinition(get_y=lambda: attr_dict['y'], - get_x=lambda: attr_dict['x'], - set_x=lambda x: attr_dict.__setitem__('x', x), - target=attr_dict['target'], - domain=attr_dict['domain']) + return ManualTuneDefinition( + get_y=lambda: attr_dict["y"], + get_x=lambda: attr_dict["x"], + set_x=lambda x: attr_dict.__setitem__("x", x), + target=attr_dict["target"], + domain=attr_dict["domain"], + ) @pytest.fixture @@ -26,40 +28,39 @@ def alternate_definition(): get_y=lambda: 46, get_x=lambda: 1293, set_x=lambda x: None, - target='foo', + target="foo", ) class TestManualTuneDefinition: - def test_getting_attrs(self, attr_dict, attr_definition): - assert attr_dict['x'] == attr_definition.x - assert attr_dict['y'] == attr_definition.y - assert attr_dict['target'] == attr_definition.target - assert attr_dict['domain'] == attr_definition.domain + assert attr_dict["x"] == attr_definition.x + assert attr_dict["y"] == attr_definition.y + assert attr_dict["target"] == attr_definition.target + assert attr_dict["domain"] == attr_definition.domain def test_setting_attrs(self, attr_dict, attr_definition): attr_definition.x = 5 - assert attr_dict['x'] == attr_definition.x + assert attr_dict["x"] == attr_definition.x assert attr_definition.x == 5 attr_definition.target = 1 - assert attr_dict['target'] != attr_definition.target + assert attr_dict["target"] != attr_definition.target assert attr_definition.target == 1 attr_definition.domain = (0, None) - assert attr_dict['domain'] != attr_definition.domain + assert attr_dict["domain"] != attr_definition.domain assert attr_definition.domain == (0, None) with pytest.raises(AttributeError): attr_definition.y = 43 def test_domain_wrapping(self, attr_definition): - domain_clamped_pairs = [((0, None), [(1, 1), (2, 2), (-1, 0), - (1000, 1000)]), - ((None, 5), [(-1, -1), (-1000, -1000), - (4.9, 4.9), (5.01, 5)]), - (None, [(1000, 1000), (-1000, -1000)])] + domain_clamped_pairs = [ + ((0, None), [(1, 1), (2, 2), (-1, 0), (1000, 1000)]), + ((None, 5), [(-1, -1), (-1000, -1000), (4.9, 4.9), (5.01, 5)]), + (None, [(1000, 1000), (-1000, -1000)]), + ] for domain, value_pairs in domain_clamped_pairs: attr_definition.domain = domain for x, clamped_x in value_pairs: @@ -73,11 +74,11 @@ def test_setting_x_with_wrapping(self, attr_definition): assert attr_definition.x == 5 def test_in_domain(self, attr_definition): - domain_check_pairs = [((0, None), [(1, True), (2, True), (-1, False), - (1000, True)]), - ((None, 5), [(-1, True), (-1000, True), - (4.9, True), (5.01, False)]), - (None, [(1000, True), (-1000, True)])] + domain_check_pairs = [ + ((0, None), [(1, True), (2, True), (-1, False), (1000, True)]), + ((None, 5), [(-1, True), (-1000, True), (4.9, True), (5.01, False)]), + (None, [(1000, True), (-1000, True)]), + ] for domain, check_pairs in domain_check_pairs: attr_definition.domain = domain for x, in_domain in check_pairs: diff --git a/hoomd/pytest/test_balance.py b/hoomd/pytest/test_balance.py index 418415ec7e..7cd1229e9e 100644 --- a/hoomd/pytest/test_balance.py +++ b/hoomd/pytest/test_balance.py @@ -8,12 +8,9 @@ def 
test_balance_properties(): trigger = hoomd.trigger.Periodic(3) - balance = hoomd.tune.LoadBalancer(trigger, - x=True, - y=True, - z=True, - tolerance=1.125, - max_iterations=1) + balance = hoomd.tune.LoadBalancer( + trigger, x=True, y=True, z=True, tolerance=1.125, max_iterations=1 + ) assert balance.trigger is trigger @@ -43,12 +40,9 @@ def test_attach_detach(simulation_factory, lattice_snapshot_factory): sim = simulation_factory(snapshot) trigger = hoomd.trigger.Periodic(3) - balance = hoomd.tune.LoadBalancer(trigger, - x=True, - y=True, - z=True, - tolerance=1.125, - max_iterations=1) + balance = hoomd.tune.LoadBalancer( + trigger, x=True, y=True, z=True, tolerance=1.125, max_iterations=1 + ) sim.operations.tuners.append(balance) sim.run(0) @@ -80,12 +74,9 @@ def test_attach_detach(simulation_factory, lattice_snapshot_factory): def test_pickling(simulation_factory, two_particle_snapshot_factory): trigger = hoomd.trigger.Periodic(3) - balance = hoomd.tune.LoadBalancer(trigger, - x=True, - y=True, - z=True, - tolerance=1.125, - max_iterations=1) + balance = hoomd.tune.LoadBalancer( + trigger, x=True, y=True, z=True, tolerance=1.125, max_iterations=1 + ) sim = simulation_factory(two_particle_snapshot_factory()) operation_pickling_check(balance, sim) diff --git a/hoomd/pytest/test_box.py b/hoomd/pytest/test_box.py index 9e80e5b009..4825a0a3a7 100644 --- a/hoomd/pytest/test_box.py +++ b/hoomd/pytest/test_box.py @@ -29,15 +29,14 @@ def base_box(box_dict): def test_cpp_python_correspondence(base_box): cpp_obj = base_box._cpp_obj cpp_L = cpp_obj.getL() - assert base_box.Lx == cpp_L.x and base_box.Ly == cpp_L.y \ - and base_box.Lz == cpp_L.z + assert base_box.Lx == cpp_L.x and base_box.Ly == cpp_L.y and base_box.Lz == cpp_L.z assert base_box.xy == cpp_obj.getTiltFactorXY() assert base_box.xz == cpp_obj.getTiltFactorXZ() assert base_box.yz == cpp_obj.getTiltFactorYZ() def test_setting_lengths(base_box): - for attr in ['Lx', 'Ly', 'Lz']: + for attr in ["Lx", "Ly", "Lz"]: for L in np.linspace(1, 100, 10): setattr(base_box, attr, L) assert getattr(base_box, attr) == L @@ -49,7 +48,7 @@ def test_setting_lengths(base_box): def test_setting_tilts(base_box): - for attr in ['xy', 'xz', 'yz']: + for attr in ["xy", "xz", "yz"]: for tilt in np.linspace(1, 100, 10): setattr(base_box, attr, tilt) assert getattr(base_box, attr) == tilt @@ -118,14 +117,17 @@ def test_periodic(base_box): @fixture def expected_matrix(box_dict): - return np.array([ + return np.array( [ - box_dict['Lx'], box_dict['Ly'] * box_dict['xy'], - box_dict['Lz'] * box_dict['xz'] - ], - [0, box_dict['Ly'], box_dict['Lz'] * box_dict['yz']], - [0, 0, box_dict['Lz']], - ]) + [ + box_dict["Lx"], + box_dict["Ly"] * box_dict["xy"], + box_dict["Lz"] * box_dict["xz"], + ], + [0, box_dict["Ly"], box_dict["Lz"] * box_dict["yz"]], + [0, 0, box_dict["Lz"]], + ] + ) def test_matrix(base_box, expected_matrix): @@ -142,8 +144,7 @@ def test_matrix(base_box, expected_matrix): def new_box_matrix_dict(): Lx, Ly, Lz = 2, 4, 8 xy, xz, yz = 1, 3, 5 - new_box_matrix = np.array([[Lx, Ly * xy, Lz * xz], [0, Ly, Lz * yz], - [0, 0, Lz]]) + new_box_matrix = np.array([[Lx, Ly * xy, Lz * xz], [0, Ly, Lz * yz], [0, 0, Lz]]) return dict(Lx=Lx, Ly=Ly, Lz=Lz, xy=xy, xz=xz, yz=yz, matrix=new_box_matrix) @@ -162,27 +163,41 @@ def test_square(): def test_from_matrix(new_box_matrix_dict): - box = Box.from_matrix(new_box_matrix_dict['matrix']) - assert np.allclose(new_box_matrix_dict['matrix'], box.to_matrix()) - assert np.allclose(box.L, [ - new_box_matrix_dict['Lx'], 
new_box_matrix_dict['Ly'], - new_box_matrix_dict['Lz'] - ]) - assert np.allclose(box.tilts, [ - new_box_matrix_dict['xy'], new_box_matrix_dict['xz'], - new_box_matrix_dict['yz'] - ]) - - -@pytest.mark.parametrize("theta", - [np.pi, np.pi / 2, np.pi / 3, np.pi / 4, np.pi * 1.23]) + box = Box.from_matrix(new_box_matrix_dict["matrix"]) + assert np.allclose(new_box_matrix_dict["matrix"], box.to_matrix()) + assert np.allclose( + box.L, + [ + new_box_matrix_dict["Lx"], + new_box_matrix_dict["Ly"], + new_box_matrix_dict["Lz"], + ], + ) + assert np.allclose( + box.tilts, + [ + new_box_matrix_dict["xy"], + new_box_matrix_dict["xz"], + new_box_matrix_dict["yz"], + ], + ) + + +@pytest.mark.parametrize( + "theta", [np.pi, np.pi / 2, np.pi / 3, np.pi / 4, np.pi * 1.23] +) def test_from_basis_vectors_non_triangular(theta): - box_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], - [np.sin(theta), np.cos(theta), 0], [0, 0, 1]]) + box_matrix = np.array( + [ + [np.cos(theta), -np.sin(theta), 0], + [np.sin(theta), np.cos(theta), 0], + [0, 0, 1], + ] + ) box, rotation = Box.from_basis_vectors(box_matrix.T) - assert np.allclose([box.Lx, box.Ly, box.Lz, box.xy, box.xz, box.yz], - [1, 1, 1, 0, 0, 0], - atol=1e-6) + assert np.allclose( + [box.Lx, box.Ly, box.Lz, box.xy, box.xz, box.yz], [1, 1, 1, 0, 0, 0], atol=1e-6 + ) rotated_matrix = box.to_matrix() rotated_points = rotation @ box_matrix assert np.allclose(rotated_matrix, rotated_points) @@ -194,11 +209,17 @@ def test_from_matrix_two_dimensional(): assert box.is2D and box.dimensions == 2 -@pytest.mark.parametrize("theta", - [np.pi, np.pi / 2, np.pi / 3, np.pi / 4, np.pi * 1.23]) +@pytest.mark.parametrize( + "theta", [np.pi, np.pi / 2, np.pi / 3, np.pi / 4, np.pi * 1.23] +) def test_rotation_matrix_from_basis_vectors_two_dimensional(theta): - box_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], - [np.sin(theta), np.cos(theta), 0], [0, 0, 0]]) + box_matrix = np.array( + [ + [np.cos(theta), -np.sin(theta), 0], + [np.sin(theta), np.cos(theta), 0], + [0, 0, 0], + ] + ) box, rotation = Box.from_basis_vectors(box_matrix.T) rotated_matrix = box.to_matrix() rotated_points = rotation @ box_matrix @@ -209,6 +230,7 @@ def test_rotation_matrix_from_basis_vectors_two_dimensional(theta): def test_invalid_from_basis_vectors_two_dimensional(): box_matrix = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 0]]) import pytest + with pytest.raises(ValueError): Box.from_basis_vectors(box_matrix) diff --git a/hoomd/pytest/test_box_resize.py b/hoomd/pytest/test_box_resize.py index 90376072df..c12fd370df 100644 --- a/hoomd/pytest/test_box_resize.py +++ b/hoomd/pytest/test_box_resize.py @@ -24,16 +24,17 @@ def fractional_coordinates(n=_n_points): _box = ( [ - [1., 2., 1., 1., 0., 3.], # Initial box, 3D - [10., 12., 20., 0., 1., 2.] + [1.0, 2.0, 1.0, 1.0, 0.0, 3.0], # Initial box, 3D + [10.0, 12.0, 20.0, 0.0, 1.0, 2.0], ], # Final box, 3D [ - [1., 2., 0., 1., 0., 0.], # Initial box, 2D - [10., 12., 0., 0., 0., 0.] - ]) # Final box, 2D + [1.0, 2.0, 0.0, 1.0, 0.0, 0.0], # Initial box, 2D + [10.0, 12.0, 0.0, 0.0, 0.0, 0.0], + ], +) # Final box, 2D -@pytest.fixture(scope="function", params=_box, ids=['sys_3d', 'sys_2d']) +@pytest.fixture(scope="function", params=_box, ids=["sys_3d", "sys_2d"]) def sys(request, fractional_coordinates): """System box sizes and particle positions. 
@@ -47,10 +48,13 @@ def sys(request, fractional_coordinates): box_start = request.param[0] box_end = request.param[1] - return (make_system(fractional_coordinates, - box_start), lambda power: make_sys_halfway( - fractional_coordinates, box_start, box_end, power), - make_system(fractional_coordinates, box_end)) + return ( + make_system(fractional_coordinates, box_start), + lambda power: make_sys_halfway( + fractional_coordinates, box_start, box_end, power + ), + make_system(fractional_coordinates, box_end), + ) def make_system(fractional_coordinates, box): @@ -64,7 +68,7 @@ def make_system(fractional_coordinates, box): _t_mid = _t_start + _t_ramp // 2 -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def trigger(): return hoomd.trigger.After(_t_mid - 1) @@ -74,14 +78,14 @@ def make_sys_halfway(fractional_coordinates, box_start, box_end, power): box_end = np.array(box_end) intermediate_t = (_t_mid - _t_start) / _t_ramp # set to halfway, 0.5 - box_mid = hoomd.Box.from_box(box_start + (box_end - box_start) - * intermediate_t**power) + box_mid = hoomd.Box.from_box( + box_start + (box_end - box_start) * intermediate_t**power + ) return make_system(fractional_coordinates, box_mid) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def get_snapshot(sys, device): - def make_shapshot(): box1, points1 = sys[0] s = hoomd.snapshot.Snapshot(device.communicator) @@ -89,7 +93,7 @@ def make_shapshot(): s.configuration.box = box1 s.particles.N = points1.shape[0] s.particles.typeid[:] = [0] * points1.shape[0] - s.particles.types = ['A'] + s.particles.types = ["A"] s.particles.position[:] = points1 return s @@ -99,15 +103,15 @@ def make_shapshot(): _power = 2 -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def box_variant(sys): sys1, _, sys2 = sys return hoomd.variant.box.Interpolate( - sys1[0], sys2[0], hoomd.variant.Power(0., 1., _power, _t_start, - _t_ramp)) + sys1[0], sys2[0], hoomd.variant.Power(0.0, 1.0, _power, _t_start, _t_ramp) + ) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def box_resize(trigger, box_variant): b = hoomd.update.BoxResize(box=box_variant, trigger=trigger) return b @@ -117,15 +121,13 @@ def assert_positions(sim, reference_points, filter=None): with sim.state.cpu_local_snapshot as data: if filter is not None: filter_tags = np.copy(filter(sim.state)).astype(int) - is_particle_local = np.isin(data.particles.tag, - filter_tags, - assume_unique=True) - reference_point = reference_points[ - data.particles.tag[is_particle_local]] + is_particle_local = np.isin( + data.particles.tag, filter_tags, assume_unique=True + ) + reference_point = reference_points[data.particles.tag[is_particle_local]] pos = data.particles.position[is_particle_local] else: - pos = data.particles.position[data.particles.rtag[ - data.particles.tag]] + pos = data.particles.position[data.particles.rtag[data.particles.tag]] reference_point = reference_points[data.particles.tag] npt.assert_allclose(pos, reference_point) @@ -163,13 +165,14 @@ def test_update(simulation_factory, get_snapshot, sys): assert_positions(sim, sys2[1]) -_filter = ([[hoomd.filter.All(), hoomd.filter.Null()], - [hoomd.filter.Null(), hoomd.filter.All()], - [ - hoomd.filter.Tags([0, 5]), - hoomd.filter.SetDifference(hoomd.filter.Tags([0]), - hoomd.filter.All()) - ]]) +_filter = [ + [hoomd.filter.All(), hoomd.filter.Null()], + [hoomd.filter.Null(), hoomd.filter.All()], + [ + hoomd.filter.Tags([0, 5]), + hoomd.filter.SetDifference(hoomd.filter.Tags([0]), 
hoomd.filter.All()), + ], +] @pytest.fixture(scope="function", params=_filter, ids=["All", "None", "Tags"]) @@ -177,15 +180,16 @@ def filters(request): return request.param -def test_position_scale(device, get_snapshot, sys, box_variant, trigger, - filters, simulation_factory): +def test_position_scale( + device, get_snapshot, sys, box_variant, trigger, filters, simulation_factory +): filter_scale, filter_noscale = filters sys1, make_sys_halfway, sys2 = sys sys_halfway = make_sys_halfway(_power) - box_resize = hoomd.update.BoxResize(box=box_variant, - trigger=trigger, - filter=filter_scale) + box_resize = hoomd.update.BoxResize( + box=box_variant, trigger=trigger, filter=filter_scale + ) sim = simulation_factory(get_snapshot()) sim.operations.updaters.append(box_resize) @@ -205,9 +209,9 @@ def test_position_scale(device, get_snapshot, sys, box_variant, trigger, def test_get_filter(device, get_snapshot, box_variant, trigger, filters): filter_scale, _ = filters - box_resize = hoomd.update.BoxResize(box=box_variant, - trigger=trigger, - filter=filter_scale) + box_resize = hoomd.update.BoxResize( + box=box_variant, trigger=trigger, filter=filter_scale + ) assert box_resize.filter == filter_scale @@ -227,9 +231,9 @@ def test_mutability_error(simulation_factory, two_particle_snapshot_factory): box1 = hoomd.Box.cube(L=10) box2 = hoomd.Box.cube(L=12) var = hoomd.variant.Ramp(10, 12, 0, 100) - box_op = hoomd.update.BoxResize(box=hoomd.variant.box.Interpolate( - box1, box2, var), - trigger=trig) + box_op = hoomd.update.BoxResize( + box=hoomd.variant.box.Interpolate(box1, box2, var), trigger=trig + ) sim.operations.add(box_op) assert len(sim.operations.updaters) == 1 sim.run(0) @@ -241,14 +245,14 @@ def test_mutability_error(simulation_factory, two_particle_snapshot_factory): def test_new_api(simulation_factory, two_particle_snapshot_factory): sim = simulation_factory(two_particle_snapshot_factory()) inverse_volume_ramp = hoomd.variant.box.InverseVolumeRamp( - initial_box=hoomd.Box.cube(6), - final_volume=100, - t_start=1_000, - t_ramp=21_000) - - box_resize = hoomd.update.BoxResize(trigger=hoomd.trigger.Periodic(1), - filter=hoomd.filter.All(), - box=inverse_volume_ramp) + initial_box=hoomd.Box.cube(6), final_volume=100, t_start=1_000, t_ramp=21_000 + ) + + box_resize = hoomd.update.BoxResize( + trigger=hoomd.trigger.Periodic(1), + filter=hoomd.filter.All(), + box=inverse_volume_ramp, + ) sim.operations.updaters.append(box_resize) sim.run(0) diff --git a/hoomd/pytest/test_box_variant.py b/hoomd/pytest/test_box_variant.py index d369758d5f..672dc2d057 100644 --- a/hoomd/pytest/test_box_variant.py +++ b/hoomd/pytest/test_box_variant.py @@ -18,49 +18,48 @@ def box_to_array(box): scalar_variant2 = hoomd.variant.Ramp(0, 1, 10, 30) valid_constructors = [ - (hoomd.variant.box.Constant, { - 'box': test_box1 - }), - (hoomd.variant.box.Interpolate, { - 'initial_box': test_box1, - 'final_box': test_box2, - 'variant': scalar_variant1 - }), - (hoomd.variant.box.InverseVolumeRamp, { - 'initial_box': test_box2, - 'final_volume': 1000, - 't_start': 10, - 't_ramp': 50 - }), + (hoomd.variant.box.Constant, {"box": test_box1}), + ( + hoomd.variant.box.Interpolate, + {"initial_box": test_box1, "final_box": test_box2, "variant": scalar_variant1}, + ), + ( + hoomd.variant.box.InverseVolumeRamp, + {"initial_box": test_box2, "final_volume": 1000, "t_start": 10, "t_ramp": 50}, + ), ] # variant: dict(attr: [val1, val2,...]) valid_attrs = [ - (hoomd.variant.box.Constant, { - 'box': [test_box1, test_box2] - }), - 
(hoomd.variant.box.Interpolate, { - 'initial_box': [test_box1, test_box2], - 'final_box': [test_box2, test_box1], - 'variant': [scalar_variant1, scalar_variant2] - }), - (hoomd.variant.box.InverseVolumeRamp, { - 'initial_box': [test_box1, test_box2], - 'final_volume': [1000, 300], - 't_start': [0, 10], - 't_ramp': [10, 50, 100] - }), + (hoomd.variant.box.Constant, {"box": [test_box1, test_box2]}), + ( + hoomd.variant.box.Interpolate, + { + "initial_box": [test_box1, test_box2], + "final_box": [test_box2, test_box1], + "variant": [scalar_variant1, scalar_variant2], + }, + ), + ( + hoomd.variant.box.InverseVolumeRamp, + { + "initial_box": [test_box1, test_box2], + "final_volume": [1000, 300], + "t_start": [0, 10], + "t_ramp": [10, 50, 100], + }, + ), ] -@pytest.mark.parametrize('cls, kwargs', valid_constructors) +@pytest.mark.parametrize("cls, kwargs", valid_constructors) def test_construction(cls, kwargs): variant = cls(**kwargs) for key, value in kwargs.items(): assert getattr(variant, key) == value -@pytest.mark.parametrize('cls, attrs', valid_attrs) +@pytest.mark.parametrize("cls, attrs", valid_attrs) def test_setattr(cls, attrs): kwargs = {k: v[0] for k, v in attrs.items()} variant = cls(**kwargs) @@ -71,17 +70,17 @@ def test_setattr(cls, attrs): class VolumeRampBoxVariant(hoomd.variant.box.BoxVariant): - def __init__(self, box1, final_volume, t_start, t_ramp): self._initial_volume = box1.volume self._box1 = box1 - self._volume_variant = hoomd.variant.Ramp(box1.volume, final_volume, - t_start, t_ramp) + self._volume_variant = hoomd.variant.Ramp( + box1.volume, final_volume, t_start, t_ramp + ) hoomd.variant.box.BoxVariant.__init__(self) def __call__(self, timestep): current_volume = self._volume_variant(timestep) - scale_L = (current_volume / self._initial_volume)**(1 / 3) + scale_L = (current_volume / self._initial_volume) ** (1 / 3) return np.concatenate((self._box1.L * scale_L, self._box1.tilts)) def __eq__(self, other): @@ -93,17 +92,31 @@ def test_custom(): # the expected values final_volume = test_box1.volume * 2 - test_box = hoomd.Box(test_box1.Lx, test_box1.Ly, test_box1.Lz, test_box1.xy, - test_box1.xz, test_box1.yz) + test_box = hoomd.Box( + test_box1.Lx, + test_box1.Ly, + test_box1.Lz, + test_box1.xy, + test_box1.xz, + test_box1.yz, + ) custom_variant = VolumeRampBoxVariant(test_box1, final_volume, 100, 100) def box_t(custom_variant, timestep): - return hoomd._hoomd._test_vector_variant_box_call( - custom_variant, timestep) - - for t, f in ((0, 0), (42, 0), (100, 0), (101, 0.01), (150, 0.5), - (175, 0.75), (199, 0.99), (200, 1.0), (250, 1.0), (123456789, - 1.0)): + return hoomd._hoomd._test_vector_variant_box_call(custom_variant, timestep) + + for t, f in ( + (0, 0), + (42, 0), + (100, 0), + (101, 0.01), + (150, 0.5), + (175, 0.75), + (199, 0.99), + (200, 1.0), + (250, 1.0), + (123456789, 1.0), + ): test_box.volume = (1 - f) * test_box1.volume + f * final_volume npt.assert_allclose(box_t(custom_variant, t), box_to_array(test_box)) @@ -112,33 +125,34 @@ def test_interpolate_evaluation(): t_start = 50 t_ramp = 100 scalar_variant = hoomd.variant.Ramp(0, 1, t_start, t_ramp) - box_variant = hoomd.variant.box.Interpolate(test_box1, test_box2, - scalar_variant) + box_variant = hoomd.variant.box.Interpolate(test_box1, test_box2, scalar_variant) npt.assert_allclose(box_variant(0), box_to_array(test_box1)) npt.assert_allclose(box_variant(25), box_to_array(test_box1)) npt.assert_allclose(box_variant(t_start), box_to_array(test_box1)) npt.assert_allclose( - box_variant(51), - 0.99 * 
box_to_array(test_box1) + 0.01 * box_to_array(test_box2)) + box_variant(51), 0.99 * box_to_array(test_box1) + 0.01 * box_to_array(test_box2) + ) npt.assert_allclose( - box_variant(75), - 0.75 * box_to_array(test_box1) + 0.25 * box_to_array(test_box2)) + box_variant(75), 0.75 * box_to_array(test_box1) + 0.25 * box_to_array(test_box2) + ) npt.assert_allclose( - box_variant(100), - 0.5 * box_to_array(test_box1) + 0.5 * box_to_array(test_box2)) + box_variant(100), 0.5 * box_to_array(test_box1) + 0.5 * box_to_array(test_box2) + ) npt.assert_allclose( box_variant(125), - 0.25 * box_to_array(test_box1) + 0.75 * box_to_array(test_box2)) + 0.25 * box_to_array(test_box1) + 0.75 * box_to_array(test_box2), + ) npt.assert_allclose( box_variant(149), - 0.01 * box_to_array(test_box1) + 0.99 * box_to_array(test_box2)) + 0.01 * box_to_array(test_box1) + 0.99 * box_to_array(test_box2), + ) npt.assert_allclose(box_variant(t_start + t_ramp), box_to_array(test_box2)) - npt.assert_allclose(box_variant(t_start + t_ramp + 100), - box_to_array(test_box2)) - npt.assert_allclose(box_variant(t_start + t_ramp + 1000000), - box_to_array(test_box2)) + npt.assert_allclose(box_variant(t_start + t_ramp + 100), box_to_array(test_box2)) + npt.assert_allclose( + box_variant(t_start + t_ramp + 1000000), box_to_array(test_box2) + ) def test_inverse_volume_ramp_evaluation(): @@ -146,8 +160,7 @@ def test_inverse_volume_ramp_evaluation(): final_volume = 500 t_start = 10 t_ramp = 100 - variant = hoomd.variant.box.InverseVolumeRamp(box1, final_volume, t_start, - t_ramp) + variant = hoomd.variant.box.InverseVolumeRamp(box1, final_volume, t_start, t_ramp) def get_volume(variant, timestep): return hoomd.Box(*variant(timestep)).volume @@ -156,12 +169,15 @@ def get_volume(variant, timestep): assert get_volume(variant, 5) == box1.volume assert get_volume(variant, 10) == box1.volume assert get_volume(variant, 11) != box1.volume - npt.assert_allclose(get_volume(variant, 35), - (0.75 / box1.volume + 0.25 / final_volume)**-1) - npt.assert_allclose(get_volume(variant, 60), - (0.5 / box1.volume + 0.5 / final_volume)**-1) - npt.assert_allclose(get_volume(variant, 85), - (0.25 / box1.volume + 0.75 / final_volume)**-1) + npt.assert_allclose( + get_volume(variant, 35), (0.75 / box1.volume + 0.25 / final_volume) ** -1 + ) + npt.assert_allclose( + get_volume(variant, 60), (0.5 / box1.volume + 0.5 / final_volume) ** -1 + ) + npt.assert_allclose( + get_volume(variant, 85), (0.25 / box1.volume + 0.75 / final_volume) ** -1 + ) npt.assert_allclose(get_volume(variant, 110), final_volume) npt.assert_allclose(get_volume(variant, 1010), final_volume) # make sure tilts don't change diff --git a/hoomd/pytest/test_collections.py b/hoomd/pytest/test_collections.py index b22718f89e..80e2ef9c06 100644 --- a/hoomd/pytest/test_collections.py +++ b/hoomd/pytest/test_collections.py @@ -12,7 +12,6 @@ class MockRoot: - def __init__(self, schema, data): self._data = data validator = typeconverter.to_type_converter(schema) @@ -37,7 +36,6 @@ def _read(self, obj): class TestHoomdList(BaseListTest): - @pytest.fixture(autouse=True, params=("ints", "floats", "strs")) def current_list(self, request): self._current_list = request.param @@ -51,10 +49,7 @@ def generate_plain_collection(self): elif self._current_list == "strs": def generate_one(): - return [ - self.generator.str() - for _ in range(3 + self.generator.int(10)) - ] + return [self.generator.str() for _ in range(3 + self.generator.int(10))] def generate(n): return [generate_one() for _ in range(n)] @@ -65,26 +60,17 @@ 
def is_equal(self, a, b): return a == b def final_check(self, test_list): - assert test_list.to_base() == self._data._data["lists"][ - self._current_list] + assert test_list.to_base() == self._data._data["lists"][self._current_list] if self._current_list == "strs": assert all( - isinstance(t_item, collections._HOOMDList) - for t_item in test_list) + isinstance(t_item, collections._HOOMDList) for t_item in test_list + ) @pytest.fixture def empty_collection(self): self._data = MockRoot( - {"lists": { - "ints": [int], - "floats": [float], - "strs": [[str]] - }}, - {"lists": { - "ints": [], - "floats": [], - "strs": [] - }}, + {"lists": {"ints": [int], "floats": [float], "strs": [[str]]}}, + {"lists": {"ints": [], "floats": [], "strs": []}}, ) with self._data._sync_data["lists"]._suspend_read_and_write: return self._data._sync_data["lists"][self._current_list] @@ -115,16 +101,16 @@ def test_iadd(self, populated_collection): class TestHoomdTuple(BaseSequenceTest): - @pytest.fixture def generate_plain_collection(self): - def generate(n): - strings = [ - self.generator.str() for _ in range(self.generator.int(10)) - ] - return (self.generator.int(), strings, self.generator.float(), - self.generator.ndarray((None, 3))) + strings = [self.generator.str() for _ in range(self.generator.int(10))] + return ( + self.generator.int(), + strings, + self.generator.float(), + self.generator.ndarray((None, 3)), + ) return generate @@ -145,12 +131,17 @@ def empty_collection(self): @pytest.fixture def populated_collection(self, plain_collection): - self._data = MockRoot( { - "tuple": (int, [str], float, - typeconverter.NDArrayValidator("float64", (None, 3))) - }, {"tuple": plain_collection}) + "tuple": ( + int, + [str], + float, + typeconverter.NDArrayValidator("float64", (None, 3)), + ) + }, + {"tuple": plain_collection}, + ) return self._data._sync_data["tuple"], plain_collection @@ -159,15 +150,18 @@ class TestHoomdDict(BaseMappingTest): @pytest.fixture def generate_plain_collection(self): - def generate(n): - tuples = [(self.generator.int(), [ - self.generator.str() for _ in range(3 + self.generator.int(10)) - ]) for _ in range(self.generator.int(10) + 3)] + tuples = [ + ( + self.generator.int(), + [self.generator.str() for _ in range(3 + self.generator.int(10))], + ) + for _ in range(self.generator.int(10) + 3) + ] data = { "sigma": self.generator.float(), "epsilon": self.generator.float(), - "notes": tuples + "notes": tuples, } return data @@ -182,22 +176,19 @@ def final_check(self, test_mapping): return assert isinstance(test_mapping["notes"], collections._HOOMDList) assert all( - isinstance(t, collections._HOOMDTuple) - for t in test_mapping["notes"]) + isinstance(t, collections._HOOMDTuple) for t in test_mapping["notes"] + ) assert all( isinstance(str_list, collections._HOOMDList) - for i, str_list in test_mapping["notes"]) + for i, str_list in test_mapping["notes"] + ) @pytest.fixture def empty_collection(self): self._data = MockRoot( - { - "params": { - "epsilon": float, - "sigma": float, - "notes": [(int, [str])] - } - }, {"params": {}}) + {"params": {"epsilon": float, "sigma": float, "notes": [(int, [str])]}}, + {"params": {}}, + ) return self._data._sync_data["params"] @pytest.fixture(params=(True, False)) diff --git a/hoomd/pytest/test_communicator.py b/hoomd/pytest/test_communicator.py index 372e1c45d1..07cb53f491 100644 --- a/hoomd/pytest/test_communicator.py +++ b/hoomd/pytest/test_communicator.py @@ -4,14 +4,17 @@ import hoomd import pytest import time + try: from mpi4py import MPI + 
mpi4py_available = True except ImportError: mpi4py_available = False -skip_mpi4py = pytest.mark.skipif(not mpi4py_available, - reason='mpi4py could not be imported.') +skip_mpi4py = pytest.mark.skipif( + not mpi4py_available, reason="mpi4py could not be imported." +) def test_communicator_methods(): @@ -69,8 +72,7 @@ def test_commuicator_walltime(): @skip_mpi4py -@pytest.mark.skipif(not hoomd.version.mpi_enabled, - reason='This test requires MPI') +@pytest.mark.skipif(not hoomd.version.mpi_enabled, reason="This test requires MPI") def test_communicator_mpi4py(): """Check that Communicator can be initialized with mpi4py.""" world_communicator = hoomd.communicator.Communicator() diff --git a/hoomd/pytest/test_custom_writer.py b/hoomd/pytest/test_custom_writer.py index 302c9c94c6..5ace34516b 100644 --- a/hoomd/pytest/test_custom_writer.py +++ b/hoomd/pytest/test_custom_writer.py @@ -28,8 +28,7 @@ class TestCustomWriter: wrapping the class in a custom operation instance. """ - def test_attach_detach(self, simulation_factory, - two_particle_snapshot_factory): + def test_attach_detach(self, simulation_factory, two_particle_snapshot_factory): sim = simulation_factory(two_particle_snapshot_factory()) writer = hoomd.write.CustomWriter(2, WriteTimestep()) sim.operations += writer @@ -42,8 +41,7 @@ def test_attach_detach(self, simulation_factory, assert not writer.action._attached assert not writer._attached - @pytest.mark.skipif(not hoomd.version.md_built, - reason="BUILD_MD=on required") + @pytest.mark.skipif(not hoomd.version.md_built, reason="BUILD_MD=on required") def test_flags(self, simulation_factory, two_particle_snapshot_factory): sim = simulation_factory(two_particle_snapshot_factory()) action = WriteTimestep() @@ -55,7 +53,8 @@ def test_flags(self, simulation_factory, two_particle_snapshot_factory): sim.operations += hoomd.md.Integrator( 0.005, methods=[hoomd.md.methods.Langevin(hoomd.filter.All(), kT=1.0)], - forces=[gauss]) + forces=[gauss], + ) # WriteTimestep is not run so pressure is not available sim.run(1) virials = gauss.virials @@ -69,17 +68,19 @@ def test_flags(self, simulation_factory, two_particle_snapshot_factory): def test_logging(self): expected_namespace = ("pytest", "test_custom_writer") conftest.logging_check( - WriteTimestep, ("pytest", "test_custom_writer"), { + WriteTimestep, + ("pytest", "test_custom_writer"), + { "fourty_two": { "category": hoomd.logging.LoggerCategories.scalar, - "default": True + "default": True, } - }) + }, + ) writer = hoomd.write.CustomWriter(2, WriteTimestep()) # Check namespace log_quantity = writer._export_dict["fourty_two"] - assert log_quantity.namespace == (*expected_namespace, - WriteTimestep.__name__) + assert log_quantity.namespace == (*expected_namespace, WriteTimestep.__name__) assert log_quantity.default assert log_quantity.category == hoomd.logging.LoggerCategories.scalar diff --git a/hoomd/pytest/test_dcd.py b/hoomd/pytest/test_dcd.py index ecd5dd0a02..b931d042a5 100644 --- a/hoomd/pytest/test_dcd.py +++ b/hoomd/pytest/test_dcd.py @@ -11,8 +11,7 @@ def test_attach(simulation_factory, two_particle_snapshot_factory, tmp_path): filename = tmp_path / "temporary_test_file.dcd" sim = simulation_factory(two_particle_snapshot_factory()) - dcd_dump = hoomd.write.DCD(filename=filename, - trigger=hoomd.trigger.Periodic(1)) + dcd_dump = hoomd.write.DCD(filename=filename, trigger=hoomd.trigger.Periodic(1)) sim.operations.add(dcd_dump) sim.run(10) @@ -25,8 +24,7 @@ def test_write(simulation_factory, two_particle_snapshot_factory, tmp_path): 
dcd_reader = garnett.reader.DCDFileReader() filename = tmp_path / "temporary_test_file.dcd" sim = simulation_factory(two_particle_snapshot_factory()) - dcd_dump = hoomd.write.DCD(filename=filename, - trigger=hoomd.trigger.Periodic(1)) + dcd_dump = hoomd.write.DCD(filename=filename, trigger=hoomd.trigger.Periodic(1)) sim.operations.add(dcd_dump) positions = [] @@ -39,7 +37,7 @@ def test_write(simulation_factory, two_particle_snapshot_factory, tmp_path): sim.run(1) if sim.device.communicator.rank == 0: - with open(filename, 'rb') as dcdfile: + with open(filename, "rb") as dcdfile: traj = dcd_reader.read(dcdfile) traj.load() for i in range(len(traj)): @@ -50,13 +48,11 @@ def test_write(simulation_factory, two_particle_snapshot_factory, tmp_path): def test_pickling(simulation_factory, two_particle_snapshot_factory, tmp_path): filename = tmp_path / "temporary_test_file.dcd" sim = simulation_factory(two_particle_snapshot_factory()) - dcd_dump = hoomd.write.DCD(filename=filename, - trigger=hoomd.trigger.Periodic(1)) + dcd_dump = hoomd.write.DCD(filename=filename, trigger=hoomd.trigger.Periodic(1)) operation_pickling_check(dcd_dump, sim) -def test_mutability_error(simulation_factory, two_particle_snapshot_factory, - tmp_path): +def test_mutability_error(simulation_factory, two_particle_snapshot_factory, tmp_path): sim = simulation_factory(two_particle_snapshot_factory()) trig = hoomd.trigger.Periodic(1) diff --git a/hoomd/pytest/test_device.py b/hoomd/pytest/test_device.py index 6f10fbe82d..ec0e3f4fdd 100644 --- a/hoomd/pytest/test_device.py +++ b/hoomd/pytest/test_device.py @@ -7,7 +7,6 @@ @pytest.mark.gpu def test_gpu_profile(device): - print(device) with device.enable_profiling(): @@ -32,11 +31,10 @@ def test_common_properties(device, tmp_path): # now make a device with non-default arguments device_type = type(device) - dev = device_type(message_filename=str(tmp_path / "example2.txt"), - notice_level=10) - _assert_common_properties(dev, - notice_level=10, - message_filename=str(tmp_path / "example2.txt")) + dev = device_type(message_filename=str(tmp_path / "example2.txt"), notice_level=10) + _assert_common_properties( + dev, notice_level=10, message_filename=str(tmp_path / "example2.txt") + ) @pytest.mark.gpu @@ -122,7 +120,6 @@ def test_device_notice(device, tmp_path): def test_noticefile(device, tmp_path): - # Message file declared. Should output in specified file. device.message_filename = str(tmp_path / "str_message") msg = "This message should output.\n" diff --git a/hoomd/pytest/test_filter.py b/hoomd/pytest/test_filter.py index f4f25551c9..1fd4647c39 100644 --- a/hoomd/pytest/test_filter.py +++ b/hoomd/pytest/test_filter.py @@ -2,8 +2,16 @@ # Part of HOOMD-blue, released under the BSD 3-Clause License. 
import pytest -from hoomd.filter import (Type, Tags, SetDifference, Union, Intersection, All, - Null, Rigid) +from hoomd.filter import ( + Type, + Tags, + SetDifference, + Union, + Intersection, + All, + Null, + Rigid, +) from hoomd.snapshot import Snapshot from copy import deepcopy from itertools import combinations @@ -13,8 +21,7 @@ @pytest.fixture(scope="function") def make_filter_snapshot(device): - - def filter_snapshot(n=10, particle_types=['A']): + def filter_snapshot(n=10, particle_types=["A"]): s = Snapshot(device.communicator) if s.communicator.rank == 0: s.configuration.box = [20, 20, 20, 0, 0, 0] @@ -28,7 +35,7 @@ def filter_snapshot(n=10, particle_types=['A']): @pytest.mark.serial def test_all_filter(make_filter_snapshot, simulation_factory): - particle_types = ['A'] + particle_types = ["A"] N = 10 filter_snapshot = make_filter_snapshot(n=N, particle_types=particle_types) sim = simulation_factory(filter_snapshot) @@ -37,7 +44,7 @@ def test_all_filter(make_filter_snapshot, simulation_factory): def test_null_filter(make_filter_snapshot, simulation_factory): - particle_types = ['A'] + particle_types = ["A"] N = 10 filter_snapshot = make_filter_snapshot(n=N, particle_types=particle_types) sim = simulation_factory(filter_snapshot) @@ -50,9 +57,11 @@ def set_types(s, indices, particle_types, particle_type): s.particles.typeid[i] = particle_types.index(particle_type) -_type_indices = [([0, 3, 4, 8], [1, 2, 5, 6, 7, 9]), - ([2, 3, 5, 6, 7, 8, 9], [0, 1, 4]), - ([3, 7], [0, 1, 2, 4, 5, 6, 8, 9])] +_type_indices = [ + ([0, 3, 4, 8], [1, 2, 5, 6, 7, 9]), + ([2, 3, 5, 6, 7, 8, 9], [0, 1, 4]), + ([3, 7], [0, 1, 2, 4, 5, 6, 8, 9]), +] @pytest.fixture(scope="function", params=_type_indices) @@ -62,7 +71,7 @@ def type_indices(request): @pytest.mark.serial def test_type_filter(make_filter_snapshot, simulation_factory, type_indices): - particle_types = ['A', 'B'] + particle_types = ["A", "B"] N = 10 filter_snapshot = make_filter_snapshot(n=N, particle_types=particle_types) sim = simulation_factory(filter_snapshot) @@ -91,8 +100,14 @@ def test_type_filter(make_filter_snapshot, simulation_factory, type_indices): assert AB_filter(sim.state) == list(range(N)) -_tag_indices = [[0, 3, 4, 8], [1, 2, 5, 6, 7, 9], [2, 3, 5, 6, 7, 8, 9], - [0, 1, 4], [3, 7], [0, 1, 2, 4, 5, 6, 8, 9]] +_tag_indices = [ + [0, 3, 4, 8], + [1, 2, 5, 6, 7, 9], + [2, 3, 5, 6, 7, 8, 9], + [0, 1, 4], + [3, 7], + [0, 1, 2, 4, 5, 6, 8, 9], +] @pytest.fixture(scope="function", params=_tag_indices) @@ -101,7 +116,7 @@ def tag_indices(request): def test_tags_filter(make_filter_snapshot, simulation_factory, tag_indices): - particle_types = ['A'] + particle_types = ["A"] N = 10 filter_snapshot = make_filter_snapshot(n=N, particle_types=particle_types) sim = simulation_factory(filter_snapshot) @@ -110,9 +125,11 @@ def test_tags_filter(make_filter_snapshot, simulation_factory, tag_indices): assert tag_filter(sim.state) == indices -_set_indices = [([0, 3, 8], [1, 6, 7, 9], [2, 4, 5]), - ([2, 3, 5, 7, 8], [0, 1, 4], [6, 9]), - ([3], [0, 7, 8], [1, 2, 4, 5, 6, 9])] +_set_indices = [ + ([0, 3, 8], [1, 6, 7, 9], [2, 4, 5]), + ([2, 3, 5, 7, 8], [0, 1, 4], [6, 9]), + ([3], [0, 7, 8], [1, 2, 4, 5, 6, 9]), +] @pytest.fixture(scope="function", params=_set_indices) @@ -137,7 +154,7 @@ def type_not_in_combo(combo, particle_types): def test_intersection(make_filter_snapshot, simulation_factory, set_indices): - particle_types = ['A', 'B', 'C'] + particle_types = ["A", "B", "C"] N = 10 filter_snapshot = make_filter_snapshot(n=N, 
particle_types=particle_types) sim = simulation_factory(filter_snapshot) @@ -162,7 +179,7 @@ def test_intersection(make_filter_snapshot, simulation_factory, set_indices): def test_union(make_filter_snapshot, simulation_factory, set_indices): - particle_types = ['A', 'B', 'C'] + particle_types = ["A", "B", "C"] N = 10 filter_snapshot = make_filter_snapshot(n=N, particle_types=particle_types) sim = simulation_factory(filter_snapshot) @@ -183,7 +200,7 @@ def test_union(make_filter_snapshot, simulation_factory, set_indices): def test_difference(make_filter_snapshot, simulation_factory, set_indices): - particle_types = ['A', 'B', 'C'] + particle_types = ["A", "B", "C"] N = 10 filter_snapshot = make_filter_snapshot(n=N, particle_types=particle_types) sim = simulation_factory(filter_snapshot) @@ -223,18 +240,19 @@ def test_difference(make_filter_snapshot, simulation_factory, set_indices): _constructor_args = [ (), ([1, 2, 3],), - ({'a', 'b'},), - (('center', 'free'),), - (Tags([1, 4, 5]), Type({'a'})), - (Tags([1, 4, 5]), Type({'a'})), - (Tags([1, 4, 5]), Type({'a'})), + ({"a", "b"},), + (("center", "free"),), + (Tags([1, 4, 5]), Type({"a"})), + (Tags([1, 4, 5]), Type({"a"})), + (Tags([1, 4, 5]), Type({"a"})), ] -@pytest.mark.parametrize('constructor, args', - zip(_filter_classes, _constructor_args), - ids=lambda x: None - if isinstance(x, tuple) else x.__name__) +@pytest.mark.parametrize( + "constructor, args", + zip(_filter_classes, _constructor_args), + ids=lambda x: None if isinstance(x, tuple) else x.__name__, +) def test_pickling(constructor, args): filter_ = constructor(*args) pickled_filter = pickle.loads(pickle.dumps(filter_)) diff --git a/hoomd/pytest/test_filter_updater.py b/hoomd/pytest/test_filter_updater.py index 6326caf638..e0951eab6c 100644 --- a/hoomd/pytest/test_filter_updater.py +++ b/hoomd/pytest/test_filter_updater.py @@ -10,9 +10,7 @@ @pytest.fixture def filter_list(): - class NewFilter(hoomd.filter.CustomFilter): - def __call__(self, state): return np.array([]) @@ -26,7 +24,7 @@ def __eq__(self, other): hoomd.filter.All(), hoomd.filter.Tags([1, 2, 3]), hoomd.filter.Type(["A"]), - NewFilter() + NewFilter(), ] @@ -53,8 +51,7 @@ def filter_updater(filter_list): @pytest.fixture(scope="function") def simulation(lattice_snapshot_factory, simulation_factory, filter_list): - sim = simulation_factory( - lattice_snapshot_factory(particle_types=["A", "B"])) + sim = simulation_factory(lattice_snapshot_factory(particle_types=["A", "B"])) # place filters in state list manually to enable updating the particle # groups. for filter_ in filter_list: @@ -115,4 +112,5 @@ def test_pickling(simulation): hoomd.filter.Type(["A"]), ] hoomd.conftest.operation_pickling_check( - hoomd.update.FilterUpdater(1, filters), simulation) + hoomd.update.FilterUpdater(1, filters), simulation + ) diff --git a/hoomd/pytest/test_local_snapshot.py b/hoomd/pytest/test_local_snapshot.py index 6e2df248fe..780281e955 100644 --- a/hoomd/pytest/test_local_snapshot.py +++ b/hoomd/pytest/test_local_snapshot.py @@ -8,6 +8,7 @@ from hoomd.data.array import HOOMDGPUArray import numpy as np import pytest + try: # This try block is purely to allow testing locally without mpi4py. We could # require it for testing, and simplify the logic here. 
The CI containers all @@ -18,8 +19,7 @@ else: skip_mpi4py = False -skip_mpi4py = pytest.mark.skipif(skip_mpi4py, - reason='mpi4py could not be imported.') +skip_mpi4py = pytest.mark.skipif(skip_mpi4py, reason="mpi4py could not be imported.") try: # We use the CUPY_IMPORTED variable to allow for local GPU testing without @@ -27,6 +27,7 @@ # requiring its installation for testing. The CI containers already have # CuPy installed when build for the GPU. import cupy + CUPY_IMPORTED = True except ImportError: CUPY_IMPORTED = False @@ -39,174 +40,213 @@ Np = 5 _particle_data = dict( _N=Np, - position=dict(np_type=np.floating, - value=[[-1, -1, -1], [-1, -1, 0], [-1, 0, 0], [1, 1, 1], - [1, 0, 0]], - new_value=[[5, 5, 5]] * Np, - shape=(Np, 3)), - velocity=dict(np_type=np.floating, - value=np.linspace(-4, 4, Np * 3).reshape((Np, 3)), - new_value=np.linspace(4, 8, Np * 3).reshape((Np, 3)), - shape=(Np, 3)), - acceleration=dict(np_type=np.floating, - value=np.linspace(-4, 4, Np * 3).reshape((Np, 3)), - new_value=np.linspace(4, 8, Np * 3).reshape((Np, 3)), - shape=(Np, 3)), - angmom=dict(np_type=np.floating, - value=np.linspace(-3, 6, Np * 4).reshape((Np, 4)), - new_value=np.linspace(1, 3, Np * 4).reshape((Np, 4)), - shape=(Np, 4)), - moment_inertia=dict(np_type=np.floating, - value=np.linspace(3, 12, Np * 3).reshape((Np, 3)), - new_value=np.linspace(0, 20, Np * 3).reshape((Np, 3)), - shape=(Np, 3)), + position=dict( + np_type=np.floating, + value=[[-1, -1, -1], [-1, -1, 0], [-1, 0, 0], [1, 1, 1], [1, 0, 0]], + new_value=[[5, 5, 5]] * Np, + shape=(Np, 3), + ), + velocity=dict( + np_type=np.floating, + value=np.linspace(-4, 4, Np * 3).reshape((Np, 3)), + new_value=np.linspace(4, 8, Np * 3).reshape((Np, 3)), + shape=(Np, 3), + ), + acceleration=dict( + np_type=np.floating, + value=np.linspace(-4, 4, Np * 3).reshape((Np, 3)), + new_value=np.linspace(4, 8, Np * 3).reshape((Np, 3)), + shape=(Np, 3), + ), + angmom=dict( + np_type=np.floating, + value=np.linspace(-3, 6, Np * 4).reshape((Np, 4)), + new_value=np.linspace(1, 3, Np * 4).reshape((Np, 4)), + shape=(Np, 4), + ), + moment_inertia=dict( + np_type=np.floating, + value=np.linspace(3, 12, Np * 3).reshape((Np, 3)), + new_value=np.linspace(0, 20, Np * 3).reshape((Np, 3)), + shape=(Np, 3), + ), # We don't care about a valid body specification here just that we can # retrieve and set it correctly. 
- body=dict(np_type=np.uint32, - value=np.linspace(4294967295, 10, Np, dtype=np.uint32), - new_value=np.linspace(1, 20, Np, dtype=np.uint32), - shape=(Np,)), + body=dict( + np_type=np.uint32, + value=np.linspace(4294967295, 10, Np, dtype=np.uint32), + new_value=np.linspace(1, 20, Np, dtype=np.uint32), + shape=(Np,), + ), # typeid is a signed integer in C++ despite always being nonnegative - typeid=dict(np_type=np.int32, - value=[0, 0, 0, 1, 1], - new_value=[1, 1, 1, 0, 0], - shape=(Np,)), - mass=dict(np_type=np.floating, - value=[5, 4, 3, 2, 1], - new_value=[1, 2, 3, 4, 5], - shape=(Np,)), - charge=dict(np_type=np.floating, - value=[1, 2, 3, 2, 1], - new_value=[-1, -1, -3, -2, -1], - shape=(Np,)), - diameter=dict(np_type=np.floating, - value=[5, 2, 3, 2, 5], - new_value=[2, 1, 0.5, 1, 2], - shape=(Np,)), - image=dict(np_type=np.int32, - value=np.linspace(-10, 20, Np * 3, - dtype=np.int32).reshape(Np, 3), - new_value=np.linspace(-20, 10, Np * 3, - dtype=np.int32).reshape(Np, 3), - shape=(Np, 3)), + typeid=dict( + np_type=np.int32, value=[0, 0, 0, 1, 1], new_value=[1, 1, 1, 0, 0], shape=(Np,) + ), + mass=dict( + np_type=np.floating, + value=[5, 4, 3, 2, 1], + new_value=[1, 2, 3, 4, 5], + shape=(Np,), + ), + charge=dict( + np_type=np.floating, + value=[1, 2, 3, 2, 1], + new_value=[-1, -1, -3, -2, -1], + shape=(Np,), + ), + diameter=dict( + np_type=np.floating, + value=[5, 2, 3, 2, 5], + new_value=[2, 1, 0.5, 1, 2], + shape=(Np,), + ), + image=dict( + np_type=np.int32, + value=np.linspace(-10, 20, Np * 3, dtype=np.int32).reshape(Np, 3), + new_value=np.linspace(-20, 10, Np * 3, dtype=np.int32).reshape(Np, 3), + shape=(Np, 3), + ), tag=dict(np_type=np.uint32, value=None, shape=(Np,)), - _types=['p1', 'p2']) + _types=["p1", "p2"], +) _particle_local_data = dict( - net_force=dict(np_type=np.floating, - value=np.linspace(0.5, 4.5, Np * 3).reshape((Np, 3)), - new_value=np.linspace(6, 12, Np * 3).reshape((Np, 3)), - shape=(Np, 3)), - net_torque=dict(np_type=np.floating, - value=np.linspace(-0.5, 2.5, Np * 3).reshape((Np, 3)), - new_value=np.linspace(12.75, 25, Np * 3).reshape((Np, 3)), - shape=(Np, 3)), - net_virial=dict(np_type=np.floating, - value=np.linspace(-1.5, 6.5, Np * 6).reshape((Np, 6)), - new_value=np.linspace(9.75, 13.12, Np * 6).reshape((Np, 6)), - shape=(Np, 6)), - net_energy=dict(np_type=np.floating, - value=np.linspace(0.5, 3.5, Np), - new_value=np.linspace(0, 4.2, Np), - shape=(Np,)), + net_force=dict( + np_type=np.floating, + value=np.linspace(0.5, 4.5, Np * 3).reshape((Np, 3)), + new_value=np.linspace(6, 12, Np * 3).reshape((Np, 3)), + shape=(Np, 3), + ), + net_torque=dict( + np_type=np.floating, + value=np.linspace(-0.5, 2.5, Np * 3).reshape((Np, 3)), + new_value=np.linspace(12.75, 25, Np * 3).reshape((Np, 3)), + shape=(Np, 3), + ), + net_virial=dict( + np_type=np.floating, + value=np.linspace(-1.5, 6.5, Np * 6).reshape((Np, 6)), + new_value=np.linspace(9.75, 13.12, Np * 6).reshape((Np, 6)), + shape=(Np, 6), + ), + net_energy=dict( + np_type=np.floating, + value=np.linspace(0.5, 3.5, Np), + new_value=np.linspace(0, 4.2, Np), + shape=(Np,), + ), ) Nb = 2 -_bond_data = dict(_N=Nb, - typeid=dict(np_type=np.unsignedinteger, - value=[0, 1], - new_value=[1, 0], - shape=(Nb,)), - group=dict(np_type=np.unsignedinteger, - value=[[0, 1], [2, 3]], - new_value=[[1, 0], [3, 2]], - shape=(Nb, 2)), - tag=dict(np_type=np.unsignedinteger, value=None, shape=(Nb,)), - _types=['b1', 'b2']) +_bond_data = dict( + _N=Nb, + typeid=dict( + np_type=np.unsignedinteger, value=[0, 1], new_value=[1, 0], 
shape=(Nb,) + ), + group=dict( + np_type=np.unsignedinteger, + value=[[0, 1], [2, 3]], + new_value=[[1, 0], [3, 2]], + shape=(Nb, 2), + ), + tag=dict(np_type=np.unsignedinteger, value=None, shape=(Nb,)), + _types=["b1", "b2"], +) Na = 2 -_angle_data = dict(_N=Na, - typeid=dict(np_type=np.unsignedinteger, - value=[1, 0], - new_value=[0, 1], - shape=(Na,)), - group=dict(np_type=np.unsignedinteger, - value=[[0, 1, 2], [2, 3, 4]], - new_value=[[1, 3, 4], [0, 2, 4]], - shape=(Na, 3)), - tag=dict(np_type=np.unsignedinteger, value=None, - shape=(Na,)), - _types=['a1', 'a2']) +_angle_data = dict( + _N=Na, + typeid=dict( + np_type=np.unsignedinteger, value=[1, 0], new_value=[0, 1], shape=(Na,) + ), + group=dict( + np_type=np.unsignedinteger, + value=[[0, 1, 2], [2, 3, 4]], + new_value=[[1, 3, 4], [0, 2, 4]], + shape=(Na, 3), + ), + tag=dict(np_type=np.unsignedinteger, value=None, shape=(Na,)), + _types=["a1", "a2"], +) Nd = 2 -_dihedral_data = dict(_N=Nd, - typeid=dict(np_type=np.unsignedinteger, - value=[1, 0], - new_value=[0, 1], - shape=(Nd,)), - group=dict(np_type=np.unsignedinteger, - value=[[0, 1, 2, 3], [1, 2, 3, 4]], - new_value=[[4, 3, 2, 1], [2, 4, 0, 1]], - shape=(Nd, 4)), - tag=dict(np_type=np.unsignedinteger, - value=None, - shape=(Nd,)), - _types=['d1', 'd2']) +_dihedral_data = dict( + _N=Nd, + typeid=dict( + np_type=np.unsignedinteger, value=[1, 0], new_value=[0, 1], shape=(Nd,) + ), + group=dict( + np_type=np.unsignedinteger, + value=[[0, 1, 2, 3], [1, 2, 3, 4]], + new_value=[[4, 3, 2, 1], [2, 4, 0, 1]], + shape=(Nd, 4), + ), + tag=dict(np_type=np.unsignedinteger, value=None, shape=(Nd,)), + _types=["d1", "d2"], +) Ni = 2 -_improper_data = dict(_N=Ni, - typeid=dict(np_type=np.unsignedinteger, - value=[0, 0], - shape=(Ni,)), - group=dict(np_type=np.unsignedinteger, - value=[[3, 2, 1, 0], [1, 2, 3, 4]], - new_value=[[1, 2, 3, 0], [4, 2, 3, 1]], - shape=(Ni, 4)), - tag=dict(np_type=np.unsignedinteger, - value=None, - shape=(Ni,)), - _types=['i1']) +_improper_data = dict( + _N=Ni, + typeid=dict(np_type=np.unsignedinteger, value=[0, 0], shape=(Ni,)), + group=dict( + np_type=np.unsignedinteger, + value=[[3, 2, 1, 0], [1, 2, 3, 4]], + new_value=[[1, 2, 3, 0], [4, 2, 3, 1]], + shape=(Ni, 4), + ), + tag=dict(np_type=np.unsignedinteger, value=None, shape=(Ni,)), + _types=["i1"], +) Nc = 3 _constraint_data = dict( _N=Nc, - value=dict(np_type=np.floating, - value=[2.5, 0.5, 2.], - new_value=[3., 1.5, 1.], - shape=(Nc,)), - group=dict(np_type=np.unsignedinteger, - value=[[0, 1], [2, 3], [1, 3]], - new_value=[[4, 1], [3, 1], [2, 4]], - shape=(Nc, 2)), + value=dict( + np_type=np.floating, + value=[2.5, 0.5, 2.0], + new_value=[3.0, 1.5, 1.0], + shape=(Nc,), + ), + group=dict( + np_type=np.unsignedinteger, + value=[[0, 1], [2, 3], [1, 3]], + new_value=[[4, 1], [3, 1], [2, 4]], + shape=(Nc, 2), + ), tag=dict(np_type=np.unsignedinteger, value=None, shape=(Nc,)), ) Npa = 2 -_pair_data = dict(_N=Npa, - typeid=dict(np_type=np.unsignedinteger, - value=[0, 1], - new_value=[1, 0], - shape=(Npa,)), - group=dict(np_type=np.unsignedinteger, - value=[[0, 1], [2, 3]], - new_value=[[4, 1], [0, 3]], - shape=(Npa, 2)), - tag=dict(np_type=np.unsignedinteger, value=None, - shape=(Npa,)), - _types=['p1', 'p2']) - -_global_dict = dict(rtag=dict( - particles=dict(np_type=np.unsignedinteger, value=None, shape=(Np,)), - bonds=dict(np_type=np.unsignedinteger, value=None, shape=(Nb,)), - angles=dict(np_type=np.unsignedinteger, value=None, shape=(Na,)), - dihedrals=dict(np_type=np.unsignedinteger, value=None, shape=(Nd,)), - 
impropers=dict(np_type=np.unsignedinteger, value=None, shape=(Ni,)), - constraints=dict(np_type=np.unsignedinteger, value=None, shape=(Nc,)), - pairs=dict(np_type=np.unsignedinteger, value=None, shape=(Npa,)), -)) - - -@pytest.fixture(scope='session') +_pair_data = dict( + _N=Npa, + typeid=dict( + np_type=np.unsignedinteger, value=[0, 1], new_value=[1, 0], shape=(Npa,) + ), + group=dict( + np_type=np.unsignedinteger, + value=[[0, 1], [2, 3]], + new_value=[[4, 1], [0, 3]], + shape=(Npa, 2), + ), + tag=dict(np_type=np.unsignedinteger, value=None, shape=(Npa,)), + _types=["p1", "p2"], +) + +_global_dict = dict( + rtag=dict( + particles=dict(np_type=np.unsignedinteger, value=None, shape=(Np,)), + bonds=dict(np_type=np.unsignedinteger, value=None, shape=(Nb,)), + angles=dict(np_type=np.unsignedinteger, value=None, shape=(Na,)), + dihedrals=dict(np_type=np.unsignedinteger, value=None, shape=(Nd,)), + impropers=dict(np_type=np.unsignedinteger, value=None, shape=(Ni,)), + constraints=dict(np_type=np.unsignedinteger, value=None, shape=(Nc,)), + pairs=dict(np_type=np.unsignedinteger, value=None, shape=(Npa,)), + ) +) + + +@pytest.fixture(scope="session") def base_snapshot(device): """Defines a snapshot using the data given above.""" @@ -214,66 +254,77 @@ def set_snapshot(snap, data, base): """Sets individual sections of snapshot (e.g. particles).""" snap_section = getattr(snap, base) for k in data: - if k.startswith('_'): + if k.startswith("_"): setattr(snap_section, k[1:], data[k]) continue - elif data[k]['value'] is None: + elif data[k]["value"] is None: continue try: array = getattr(snap_section, k) - array[:] = data[k]['value'] + array[:] = data[k]["value"] except TypeError: - setattr(snap_section, k, data[k]['value']) + setattr(snap_section, k, data[k]["value"]) snapshot = hoomd.Snapshot(device.communicator) if snapshot.communicator.rank == 0: snapshot.configuration.box = [2.1, 2.1, 2.1, 0, 0, 0] - set_snapshot(snapshot, _particle_data, 'particles') - set_snapshot(snapshot, _bond_data, 'bonds') - set_snapshot(snapshot, _angle_data, 'angles') - set_snapshot(snapshot, _dihedral_data, 'dihedrals') - set_snapshot(snapshot, _improper_data, 'impropers') - set_snapshot(snapshot, _constraint_data, 'constraints') - set_snapshot(snapshot, _pair_data, 'pairs') + set_snapshot(snapshot, _particle_data, "particles") + set_snapshot(snapshot, _bond_data, "bonds") + set_snapshot(snapshot, _angle_data, "angles") + set_snapshot(snapshot, _dihedral_data, "dihedrals") + set_snapshot(snapshot, _improper_data, "impropers") + set_snapshot(snapshot, _constraint_data, "constraints") + set_snapshot(snapshot, _pair_data, "pairs") return snapshot -@pytest.fixture(params=[ - 'particles', 'bonds', 'angles', 'dihedrals', 'impropers', 'constraints', - 'pairs' -]) +@pytest.fixture( + params=[ + "particles", + "bonds", + "angles", + "dihedrals", + "impropers", + "constraints", + "pairs", + ] +) def snapshot_section(request): return request.param -@pytest.fixture(scope="function", - params=[(section_name, prop_name, prop_dict) - for prop_name, global_prop_dict in _global_dict.items() - for section_name, prop_dict in global_prop_dict.items() - ], - ids=lambda x: x[0] + '-' + x[1]) +@pytest.fixture( + scope="function", + params=[ + (section_name, prop_name, prop_dict) + for prop_name, global_prop_dict in _global_dict.items() + for section_name, prop_dict in global_prop_dict.items() + ], + ids=lambda x: x[0] + "-" + x[1], +) def global_property(request): return request.param @pytest.fixture( - scope='function', - params=[(name, 
prop_name, prop_dict) - for name, section_dict in [('particles', { - **_particle_data, - **_particle_local_data - }), ('bonds', _bond_data), ( - 'angles', _angle_data), ( - 'dihedrals', - _dihedral_data), ( - 'impropers', - _improper_data), ( - 'constraints', - _constraint_data), ('pairs', _pair_data)] - for prop_name, prop_dict in section_dict.items() - if not prop_name.startswith('_')], - ids=lambda x: x[0] + '-' + x[1]) + scope="function", + params=[ + (name, prop_name, prop_dict) + for name, section_dict in [ + ("particles", {**_particle_data, **_particle_local_data}), + ("bonds", _bond_data), + ("angles", _angle_data), + ("dihedrals", _dihedral_data), + ("impropers", _improper_data), + ("constraints", _constraint_data), + ("pairs", _pair_data), + ] + for prop_name, prop_dict in section_dict.items() + if not prop_name.startswith("_") + ], + ids=lambda x: x[0] + "-" + x[1], +) def section_name_dict(request): """Parameterization of expected values for local_snapshot properties. @@ -284,9 +335,9 @@ def section_name_dict(request): return deepcopy(request.param) -@pytest.fixture(scope='function', - params=['', 'ghost_', '_with_ghost'], - ids=lambda x: x.strip('_')) +@pytest.fixture( + scope="function", params=["", "ghost_", "_with_ghost"], ids=lambda x: x.strip("_") +) def affix(request): """Parameterizes over the different variations of a local_snapshot property. @@ -297,9 +348,9 @@ def affix(request): def get_property_name_from_affix(name, affix): - if affix.startswith('_'): + if affix.startswith("_"): return name + affix - elif affix.endswith('_'): + elif affix.endswith("_"): return affix + name else: return name @@ -320,7 +371,7 @@ def general_array_equality(arr1, arr2): def check_type(data, prop_dict, tags): """Check that the expected dtype is found for local snapshots.""" - assert np.issubdtype(data.dtype, prop_dict['np_type']) + assert np.issubdtype(data.dtype, prop_dict["np_type"]) def check_shape(data, prop_dict, tags): @@ -330,9 +381,9 @@ def check_shape(data, prop_dict, tags): if len(tags) == 0: assert data.shape == (0,) else: - assert data.shape == (len(tags),) + prop_dict['shape'][1:] + assert data.shape == (len(tags),) + prop_dict["shape"][1:] else: - assert data.shape == (len(tags),) + prop_dict['shape'][1:] + assert data.shape == (len(tags),) + prop_dict["shape"][1:] def check_getting(data, prop_dict, tags): @@ -340,13 +391,13 @@ def check_getting(data, prop_dict, tags): # Check to end test early if isinstance(data, HOOMDGPUArray) and not CUPY_IMPORTED: pytest.skip("Not available for HOOMDGPUArray without CuPy.") - if len(tags) == 0 or prop_dict['value'] is None: + if len(tags) == 0 or prop_dict["value"] is None: return None if isinstance(data, HOOMDGPUArray): - expected_values = cupy.array(prop_dict['value']) + expected_values = cupy.array(prop_dict["value"]) else: - expected_values = np.array(prop_dict['value']) + expected_values = np.array(prop_dict["value"]) assert general_array_equality(data, expected_values[tags.tolist()]) @@ -360,13 +411,13 @@ def check_setting(data, prop_dict, tags): # Test if test should be skipped or just return if isinstance(data, HOOMDGPUArray) and not CUPY_IMPORTED: pytest.skip("Not available for HOOMDGPUArray without CuPy.") - if 'new_value' not in prop_dict: + if "new_value" not in prop_dict: return None if isinstance(data, HOOMDGPUArray): - new_values = cupy.array(prop_dict['new_value'])[tags.tolist()] + new_values = cupy.array(prop_dict["new_value"])[tags.tolist()] else: - new_values = np.array(prop_dict['new_value'])[tags] + new_values = 
np.array(prop_dict["new_value"])[tags] if data.read_only: with pytest.raises(ValueError): @@ -376,8 +427,9 @@ def check_setting(data, prop_dict, tags): assert general_array_equality(data, new_values) -@pytest.fixture(scope='function', - params=[check_type, check_shape, check_getting, check_setting]) +@pytest.fixture( + scope="function", params=[check_type, check_shape, check_getting, check_setting] +) def property_check(request): """Parameterizes differnt types of checks on local_snapshot properties.""" return request.param @@ -401,8 +453,7 @@ def test_box(self, base_simulation, base_snapshot): sim = base_simulation() for lcl_snapshot_attr in self.get_snapshot_attr(sim): with getattr(sim.state, lcl_snapshot_attr) as data: - self.check_box(data, sim.state.box, - sim.device.communicator.num_ranks) + self.check_box(data, sim.state.box, sim.device.communicator.num_ranks) @staticmethod def check_tag_shape(base_snapshot, local_snapshot, group, ranks): @@ -415,9 +466,10 @@ def check_tag_shape(base_snapshot, local_snapshot, group, ranks): N = mpi_comm.bcast(N, root=0) # check particles tag size - if group == 'particles': - total_len = mpi_comm.allreduce(len(local_snapshot.particles.tag), - op=MPI.SUM) + if group == "particles": + total_len = mpi_comm.allreduce( + len(local_snapshot.particles.tag), op=MPI.SUM + ) assert total_len == N else: local_snapshot_section = getattr(local_snapshot, group) @@ -437,25 +489,30 @@ def test_tags_shape(self, base_simulation, base_snapshot, snapshot_section): sim = base_simulation() for lcl_snapshot_attr in self.get_snapshot_attr(sim): with getattr(sim.state, lcl_snapshot_attr) as data: - self.check_tag_shape(base_snapshot, data, snapshot_section, - sim.device.communicator.num_ranks) + self.check_tag_shape( + base_snapshot, + data, + snapshot_section, + sim.device.communicator.num_ranks, + ) @staticmethod def check_global_properties(prop, global_property_dict, N): - assert prop.shape == global_property_dict['shape'] - assert np.issubdtype(prop.dtype, global_property_dict['np_type']) + assert prop.shape == global_property_dict["shape"] + assert np.issubdtype(prop.dtype, global_property_dict["np_type"]) if isinstance(prop, HOOMDGPUArray) and not CUPY_IMPORTED: return else: - if global_property_dict['value'] is not None: - general_array_equality(prop, global_property_dict['value']) + if global_property_dict["value"] is not None: + general_array_equality(prop, global_property_dict["value"]) with pytest.raises(ValueError): prop[:] = 1 @skip_mpi4py @pytest.mark.cupy_optional - def test_cpu_global_properties(self, base_simulation, base_snapshot, - global_property): + def test_cpu_global_properties( + self, base_simulation, base_snapshot, global_property + ): section_name, prop_name, prop_dict = global_property sim = base_simulation() snapshot = sim.state.get_snapshot() @@ -469,11 +526,13 @@ def test_cpu_global_properties(self, base_simulation, base_snapshot, N = mpi_comm.bcast(N, root=0) with sim.state.cpu_local_snapshot as data: self.check_global_properties( - getattr(getattr(data, section_name), prop_name), prop_dict, N) + getattr(getattr(data, section_name), prop_name), prop_dict, N + ) @pytest.mark.cupy_optional - def test_arrays_properties(self, base_simulation, section_name_dict, affix, - property_check): + def test_arrays_properties( + self, base_simulation, section_name_dict, affix, property_check + ): """This test makes extensive use of parameterizing in pytest. 
This test tests the type, shape, getting, and setting of array values in @@ -482,7 +541,7 @@ def test_arrays_properties(self, base_simulation, section_name_dict, affix, """ name, property_name, property_dict = section_name_dict property_name = get_property_name_from_affix(property_name, affix) - tag_name = get_property_name_from_affix('tag', affix) + tag_name = get_property_name_from_affix("tag", affix) sim = base_simulation() for lcl_snapshot_attr in self.get_snapshot_attr(sim): @@ -514,10 +573,10 @@ def base_simulation(self, simulation_factory, base_snapshot): def factory(): sim = simulation_factory(base_snapshot) with sim.state.cpu_local_snapshot as snap: - particle_data = getattr(snap, 'particles') + particle_data = getattr(snap, "particles") tags = snap.particles.tag for attr, inner_dict in _particle_local_data.items(): - arr_values = np.array(inner_dict['value'])[tags] + arr_values = np.array(inner_dict["value"])[tags] getattr(particle_data, attr)[:] = arr_values return sim @@ -525,7 +584,7 @@ def factory(): def get_snapshot_attr(self, sim): if isinstance(sim.device, hoomd.device.CPU): - yield 'cpu_local_snapshot' + yield "cpu_local_snapshot" else: - yield 'cpu_local_snapshot' - yield 'gpu_local_snapshot' + yield "cpu_local_snapshot" + yield "gpu_local_snapshot" diff --git a/hoomd/pytest/test_logging.py b/hoomd/pytest/test_logging.py index 62ca8ccb6a..804f0a83ac 100644 --- a/hoomd/pytest/test_logging.py +++ b/hoomd/pytest/test_logging.py @@ -3,9 +3,15 @@ from hoomd.conftest import pickling_check from pytest import raises, fixture, mark -from hoomd.logging import (_LoggerQuantity, _NamespaceFilter, - _SafeNamespaceDict, Logger, Loggable, - LoggerCategories, log) +from hoomd.logging import ( + _LoggerQuantity, + _NamespaceFilter, + _SafeNamespaceDict, + Logger, + Loggable, + LoggerCategories, + log, +) from hoomd.util import _dict_map @@ -13,48 +19,51 @@ class DummyNamespace: pass -@fixture(scope='module') +@fixture(scope="module") def dummy_namespace(): - return ('pytest', 'test_logging', 'DummyNamespace') + return ("pytest", "test_logging", "DummyNamespace") # ------- Test _LoggerQuantity class TestLoggerQuantity: - def test_initialization(self, dummy_namespace): - logquant = _LoggerQuantity('foo', DummyNamespace, category='particle') - assert logquant.category == LoggerCategories['particle'] - assert logquant.name == 'foo' + logquant = _LoggerQuantity("foo", DummyNamespace, category="particle") + assert logquant.category == LoggerCategories["particle"] + assert logquant.name == "foo" assert logquant.namespace == dummy_namespace def test_yield_names(self, dummy_namespace): - name = 'foo' + name = "foo" quantity = _LoggerQuantity(name=name, cls=DummyNamespace) for i, given_namespace in enumerate(quantity.yield_names()): if i == 0: assert given_namespace == (*dummy_namespace, name) elif i < 100: - assert given_namespace[-2].endswith('_' + str(i)) and \ - given_namespace[:-2] == dummy_namespace[:-1] and \ - given_namespace[-2].split('_')[0] == \ - dummy_namespace[-1] and given_namespace[-1] == name + assert ( + given_namespace[-2].endswith("_" + str(i)) + and given_namespace[:-2] == dummy_namespace[:-1] + and given_namespace[-2].split("_")[0] == dummy_namespace[-1] + and given_namespace[-1] == name + ) else: break - user_defined_namespace = next(quantity.yield_names('USER')) - assert user_defined_namespace == dummy_namespace[:-1] + ('USER', name) + user_defined_namespace = next(quantity.yield_names("USER")) + assert user_defined_namespace == dummy_namespace[:-1] + ("USER", name) def 
test_generate_namespace(self): - assert _LoggerQuantity._generate_namespace(TestLoggerQuantity) == \ - ('pytest', 'test_logging', 'TestLoggerQuantity') + assert _LoggerQuantity._generate_namespace(TestLoggerQuantity) == ( + "pytest", + "test_logging", + "TestLoggerQuantity", + ) class DummyLoggable(metaclass=Loggable): - @log def prop(self): return 1 - @log(category='sequence') + @log(category="sequence") def proplist(self): return [1, 2, 3] @@ -66,12 +75,10 @@ def __eq__(self, other): return isinstance(other, type(self)) -class TestLoggableMetaclass(): - +class TestLoggableMetaclass: dummy_loggable = DummyLoggable class InherentedDummyLoggable(DummyLoggable): - @log def propinherented(self): return None @@ -79,7 +86,6 @@ def propinherented(self): dummy_loggable_inher = InherentedDummyLoggable class NotInherentedDummy(metaclass=Loggable): - @log def propnotinherented(self): return True @@ -87,12 +93,10 @@ def propnotinherented(self): not_dummy_loggable_inher = NotInherentedDummy def test_logger_functor_application(self): - loggable_list = ['prop', 'proplist', "prop_nondefault"] - assert set( - self.dummy_loggable._export_dict.keys()) == set(loggable_list) - expected_namespace = _LoggerQuantity._generate_namespace( - self.dummy_loggable) - expected_categories = ['scalar', 'sequence'] + loggable_list = ["prop", "proplist", "prop_nondefault"] + assert set(self.dummy_loggable._export_dict.keys()) == set(loggable_list) + expected_namespace = _LoggerQuantity._generate_namespace(self.dummy_loggable) + expected_categories = ["scalar", "sequence"] for loggable, category in zip(loggable_list, expected_categories): log_quantity = self.dummy_loggable._export_dict[loggable] assert log_quantity.namespace == expected_namespace @@ -100,30 +104,32 @@ def test_logger_functor_application(self): assert log_quantity.name == loggable def test_loggable_inherentence(self): - inherented_list = ['prop', 'proplist', 'propinherented'] - assert 'propinherented' not in self.dummy_loggable._export_dict.keys() - assert all([ - p in self.dummy_loggable_inher._export_dict.keys() - for p in inherented_list - ]) - assert all([ - p not in self.not_dummy_loggable_inher._export_dict.keys() - for p in inherented_list - ]) - assert 'propnotinherented' in \ - self.not_dummy_loggable_inher._export_dict.keys() + inherented_list = ["prop", "proplist", "propinherented"] + assert "propinherented" not in self.dummy_loggable._export_dict.keys() + assert all( + [ + p in self.dummy_loggable_inher._export_dict.keys() + for p in inherented_list + ] + ) + assert all( + [ + p not in self.not_dummy_loggable_inher._export_dict.keys() + for p in inherented_list + ] + ) + assert "propnotinherented" in self.not_dummy_loggable_inher._export_dict.keys() def test_loggables(self): dummy_obj = self.dummy_loggable() assert dummy_obj.loggables == { - 'prop': 'scalar', - 'proplist': 'sequence', - 'prop_nondefault': 'string' + "prop": "scalar", + "proplist": "sequence", + "prop_nondefault": "string", } class TestNamespaceFilter: - def test_remove_name(self): filter_ = _NamespaceFilter(remove_names={"foo", "bar"}) assert ("baz",) == tuple(filter_(("foo", "bar", "baz"))) @@ -166,7 +172,6 @@ def expected_mapped_dict(): def test_dict_map(base_dict, expected_mapped_dict): - def func(x): return 1 @@ -187,14 +192,23 @@ def blank_namespace_dict(): @fixture def good_keys(): - return [('a',), ('a', 'b'), ('a', 'b', 'c'), ('a', 'd'), ('e'), ('f'), - ('f', 'g'), 'a', 'e', 'f'] + return [ + ("a",), + ("a", "b"), + ("a", "b", "c"), + ("a", "d"), + ("e"), + ("f"), + ("f", 
"g"), + "a", + "e", + "f", + ] class TestSafeNamespaceDict: - def test_contains(self, namespace_dict, good_keys): - bad_keys = [('z', 'q'), dict(), ('f', 'g', 'h')] + bad_keys = [("z", "q"), dict(), ("f", "g", "h")] for key in good_keys: assert key in namespace_dict assert key in namespace_dict @@ -204,51 +218,46 @@ def test_contains(self, namespace_dict, good_keys): def test_setitem(self, blank_namespace_dict): nsdict = blank_namespace_dict - nsdict['a'] = 5 - nsdict[('b', 'c')] = None - assert nsdict._dict['a'] == 5 - assert isinstance(nsdict._dict['b'], dict) - assert nsdict._dict['b']['c'] is None + nsdict["a"] = 5 + nsdict[("b", "c")] = None + assert nsdict._dict["a"] == 5 + assert isinstance(nsdict._dict["b"], dict) + assert nsdict._dict["b"]["c"] is None def test_delitem(self, namespace_dict): - keys = [('a', 'b', 'c'), 'a'] - keys_left = [('e',), ('f',), ('f', 'g'), 'e', 'f'] + keys = [("a", "b", "c"), "a"] + keys_left = [("e",), ("f",), ("f", "g"), "e", "f"] for key in keys: assert key in namespace_dict del namespace_dict[key] assert key not in namespace_dict - assert ('a', 'b') not in namespace_dict + assert ("a", "b") not in namespace_dict for key in keys_left: assert key in namespace_dict def test_len(self, namespace_dict, blank_namespace_dict): assert len(namespace_dict) == 4 assert len(blank_namespace_dict) == 0 - blank_namespace_dict['a'] = 1 + blank_namespace_dict["a"] = 1 assert len(blank_namespace_dict) == 1 # ------ Test Logger -@fixture(params=( - {}, - { - "only_default": False - }, - { - "categories": LoggerCategories.scalar | LoggerCategories.string - }, - { - "only_default": False, - "categories": ("scalar",) - }, -)) +@fixture( + params=( + {}, + {"only_default": False}, + {"categories": LoggerCategories.scalar | LoggerCategories.string}, + {"only_default": False, "categories": ("scalar",)}, + ) +) def blank_logger(request): return Logger(**request.param) @fixture def log_quantity(): - return _LoggerQuantity('example', DummyNamespace) + return _LoggerQuantity("example", DummyNamespace) @fixture @@ -258,7 +267,7 @@ def logged_obj(): @fixture def base_namespace(): - return ('pytest', 'test_logging', 'DummyLoggable') + return ("pytest", "test_logging", "DummyLoggable") def nested_getitem(obj, namespace): @@ -268,18 +277,16 @@ def nested_getitem(obj, namespace): class TestLogger: - def get_filter(self, logger, overwrite_default=False): - def filter(loggable): with_default = not logger.only_default or loggable.default - return (loggable.category in logger.categories - and (with_default or overwrite_default)) + return loggable.category in logger.categories and ( + with_default or overwrite_default + ) return filter def test_setitem(self, blank_logger): - def check(logger, namespace, loggable): if LoggerCategories[loggable[-1]] not in logger.categories: with raises(ValueError): @@ -294,18 +301,18 @@ def check(logger, namespace, loggable): assert log_quantity.category == LoggerCategories[loggable[-1]] # Valid values with potentially incompatible categories - check(blank_logger, 'a', (5, '__eq__', 'scalar')) - check(blank_logger, ('b', 'c'), (5, '__eq__', 'scalar')) - check(blank_logger, 'c', (lambda: [1, 2, 3], 'sequence')) + check(blank_logger, "a", (5, "__eq__", "scalar")) + check(blank_logger, ("b", "c"), (5, "__eq__", "scalar")) + check(blank_logger, "c", (lambda: [1, 2, 3], "sequence")) # Invalid values for value in [dict(), list(), None, 5, (5, 2), (5, 2, 1)]: with raises(ValueError): - blank_logger[('c', 'd')] = value + blank_logger[("c", "d")] = value # Existent 
key extant_key = next(iter(blank_logger.keys())) # Requires that scalar category accepted or raises a ValueError with raises(KeyError): - blank_logger[extant_key] = (lambda: 1, 'scalar') + blank_logger[extant_key] = (lambda: 1, "scalar") def test_add_single_quantity(self, blank_logger, log_quantity): # Assumes "scalar" is always accepted @@ -317,14 +324,15 @@ def test_add_single_quantity(self, blank_logger, log_quantity): assert log_value.attr == log_quantity.name assert log_value.category == log_quantity.category blank_logger._add_single_quantity([], log_quantity, None) - namespace = log_quantity.namespace[:-1] + \ - (log_quantity.namespace[-1] + '_1', log_quantity.name) + namespace = log_quantity.namespace[:-1] + ( + log_quantity.namespace[-1] + "_1", + log_quantity.name, + ) assert namespace in blank_logger def test_get_loggables_by_names(self, blank_logger, logged_obj): # Check when quantities is None - log_quantities = list( - blank_logger._get_loggables_by_name(logged_obj, None)) + log_quantities = list(blank_logger._get_loggables_by_name(logged_obj, None)) log_filter = self.get_filter(blank_logger) logged_names = [ loggable.name @@ -332,37 +340,43 @@ def test_get_loggables_by_names(self, blank_logger, logged_obj): if log_filter(loggable) ] assert len(log_quantities) == len(logged_names) - assert all([ - log_quantity.name in logged_names for log_quantity in log_quantities - ]) + assert all( + [log_quantity.name in logged_names for log_quantity in log_quantities] + ) # Check when quantities is given - accepted_quantities = ['proplist', "prop_nondefault"] + accepted_quantities = ["proplist", "prop_nondefault"] log_filter = self.get_filter(blank_logger, overwrite_default=True) log_quantities = list( - blank_logger._get_loggables_by_name(logged_obj, - accepted_quantities)) + blank_logger._get_loggables_by_name(logged_obj, accepted_quantities) + ) logged_names = [ loggable.name for loggable in logged_obj._export_dict.values() if loggable.name in accepted_quantities and log_filter(loggable) ] assert len(log_quantities) == len(logged_names) - assert all([ - log_quantity.name in logged_names for log_quantity in log_quantities - ]) + assert all( + [log_quantity.name in logged_names for log_quantity in log_quantities] + ) # Check when quantities has a bad value - bad_quantities = ['bad', 'quant'] + bad_quantities = ["bad", "quant"] with raises(ValueError): a = blank_logger._get_loggables_by_name(logged_obj, bad_quantities) list(a) - @mark.parametrize("quantities", ([], [ - "prop", - ], ['prop', 'proplist', "prop_nondefault"])) + @mark.parametrize( + "quantities", + ( + [], + [ + "prop", + ], + ["prop", "proplist", "prop_nondefault"], + ), + ) def test_add(self, blank_logger, logged_obj, base_namespace, quantities): - if len(quantities) != 0: blank_logger.add(logged_obj, quantities) log_filter = self.get_filter(blank_logger, overwrite_default=True) @@ -370,12 +384,15 @@ def test_add(self, blank_logger, logged_obj, base_namespace, quantities): blank_logger.add(logged_obj) log_filter = self.get_filter(blank_logger) - expected_namespaces = [(*base_namespace, loggable.name) - for loggable in logged_obj._export_dict.values() - if log_filter(loggable)] + expected_namespaces = [ + (*base_namespace, loggable.name) + for loggable in logged_obj._export_dict.values() + if log_filter(loggable) + ] if len(quantities) != 0: expected_namespaces = [ - name for name in expected_namespaces + name + for name in expected_namespaces if any(name[-1] in q for q in quantities) ] assert all(ns in blank_logger for ns in 
expected_namespaces) @@ -384,16 +401,15 @@ def test_add(self, blank_logger, logged_obj, base_namespace, quantities): def test_add_with_user_names(self, logged_obj, base_namespace): logger = Logger() # Test adding a user specified identifier into the namespace - user_name = 'UserName' + user_name = "UserName" logger.add(logged_obj, user_name=user_name) - assert base_namespace[:-1] + (user_name, 'prop') in logger - assert base_namespace[:-1] + (user_name, 'proplist') in logger + assert base_namespace[:-1] + (user_name, "prop") in logger + assert base_namespace[:-1] + (user_name, "proplist") in logger def test_remove(self, logged_obj, base_namespace): - # Test removing all properties - prop_namespace = (*base_namespace, 'prop') - list_namespace = (*base_namespace, 'proplist') + prop_namespace = (*base_namespace, "prop") + list_namespace = (*base_namespace, "proplist") log = Logger() log.add(logged_obj) log.remove(logged_obj) @@ -404,20 +420,20 @@ def test_remove(self, logged_obj, base_namespace): # Test removing select properties log = Logger() log.add(logged_obj) - log.remove(logged_obj, 'prop') + log.remove(logged_obj, "prop") assert len(log) == 1 assert prop_namespace not in log assert list_namespace in log log = Logger() log.add(logged_obj) - log.remove(logged_obj, ['prop', 'proplist']) + log.remove(logged_obj, ["prop", "proplist"]) assert len(log) == 0 assert prop_namespace not in log assert list_namespace not in log # Test remove just given namespaces - prop_namespace = (*base_namespace, 'prop') + prop_namespace = (*base_namespace, "prop") log = Logger() log.add(logged_obj) log.remove(quantities=[prop_namespace]) @@ -427,23 +443,25 @@ def test_remove(self, logged_obj, base_namespace): # Test remove when not in initial namespace log = Logger() - log[prop_namespace] = (lambda: None, '__call__', 'scalar') + log[prop_namespace] = (lambda: None, "__call__", "scalar") log.add(logged_obj) assert len(log) == 3 - log.remove(logged_obj, 'prop') + log.remove(logged_obj, "prop") assert len(log) == 2 assert prop_namespace in log assert list_namespace in log - assert prop_namespace[:-2] + (prop_namespace[-2] + '_1', - prop_namespace[-1]) not in log + assert ( + prop_namespace[:-2] + (prop_namespace[-2] + "_1", prop_namespace[-1]) + not in log + ) def test_remove_with_user_name(self, logged_obj, base_namespace): # Test remove using a user specified namespace identifier logger = Logger() - user_name = 'UserName' + user_name = "UserName" logger.add(logged_obj, user_name=user_name) - assert base_namespace[:-1] + (user_name, 'prop') in logger - assert base_namespace[:-1] + (user_name, 'proplist') in logger + assert base_namespace[:-1] + (user_name, "prop") in logger + assert base_namespace[:-1] + (user_name, "proplist") in logger def test_iadd(self, blank_logger, logged_obj): blank_logger.add(logged_obj) @@ -460,24 +478,23 @@ def test_iadd(self, blank_logger, logged_obj): assert len(blank_logger) == len(expected_loggables) def test_isub(self, logged_obj, base_namespace): - # Test when given string log = Logger() log += logged_obj - log -= 'pytest' + log -= "pytest" assert len(log) == 0 log += logged_obj - log -= 'eg' + log -= "eg" assert len(log) == 2 # Test when given a namespace - log -= (*base_namespace, 'prop') - assert (*base_namespace, 'prop') not in log + log -= (*base_namespace, "prop") + assert (*base_namespace, "prop") not in log assert len(log) == 1 # Test with list of namespaces log += logged_obj - rm_nsp = [(*base_namespace, name) for name in ['prop', 'proplist']] + rm_nsp = 
[(*base_namespace, name) for name in ["prop", "proplist"]] log -= rm_nsp for nsp in rm_nsp: assert nsp not in log @@ -492,9 +509,9 @@ def test_log(self, logged_obj): log = Logger() log += logged_obj logged = log.log() - inner_dict = logged['pytest']['test_logging']['DummyLoggable'] - assert inner_dict['prop'] == (logged_obj.prop, 'scalar') - assert inner_dict['proplist'] == (logged_obj.proplist, 'sequence') + inner_dict = logged["pytest"]["test_logging"]["DummyLoggable"] + assert inner_dict["prop"] == (logged_obj.prop, "scalar") + assert inner_dict["proplist"] == (logged_obj.proplist, "sequence") def test_pickling(self, blank_logger, logged_obj): blank_logger.add(logged_obj) diff --git a/hoomd/pytest/test_mesh.py b/hoomd/pytest/test_mesh.py index 0c2c05c05b..84e1e49874 100644 --- a/hoomd/pytest/test_mesh.py +++ b/hoomd/pytest/test_mesh.py @@ -8,10 +8,9 @@ from hoomd.error import DataAccessError, MutabilityError -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def mesh_snapshot_factory(device): - - def make_snapshot(d=1.0, phi_deg=45, particle_types=['A'], L=20): + def make_snapshot(d=1.0, phi_deg=45, particle_types=["A"], L=20): phi_rad = phi_deg * (numpy.pi / 180) # the central particles are along the x-axis, so phi is determined from # the angle in the yz plane. @@ -24,14 +23,12 @@ def make_snapshot(d=1.0, phi_deg=45, particle_types=['A'], L=20): s.particles.N = N s.particles.types = particle_types # shift particle positions slightly in z so MPI tests pass - s.particles.position[:] = [[ - 0.0, d * numpy.cos(phi_rad / 2), - d * numpy.sin(phi_rad / 2) + 0.1 - ], [0.0, 0.0, 0.1], [d, 0.0, 0.1], - [ - d, d * numpy.cos(phi_rad / 2), - -d * numpy.sin(phi_rad / 2) + 0.1 - ]] + s.particles.position[:] = [ + [0.0, d * numpy.cos(phi_rad / 2), d * numpy.sin(phi_rad / 2) + 0.1], + [0.0, 0.0, 0.1], + [d, 0.0, 0.1], + [d, d * numpy.cos(phi_rad / 2), -d * numpy.sin(phi_rad / 2) + 0.1], + ] return s @@ -39,7 +36,6 @@ def make_snapshot(d=1.0, phi_deg=45, particle_types=['A'], L=20): def test_empty_mesh(simulation_factory, two_particle_snapshot_factory): - sim = simulation_factory(two_particle_snapshot_factory(d=2.0)) mesh = Mesh() @@ -111,4 +107,5 @@ def test_mesh_setter_attached(simulation_factory, mesh_snapshot_factory): assert numpy.array_equal(mesh.triangles, mesh_triangles) assert numpy.array_equal(mesh.type_ids, mesh_type_ids) assert numpy.array_equal( - mesh.bonds, numpy.array([[0, 1], [1, 2], [0, 2], [2, 3], [1, 3]])) + mesh.bonds, numpy.array([[0, 1], [1, 2], [0, 2], [2, 3], [1, 3]]) + ) diff --git a/hoomd/pytest/test_operation.py b/hoomd/pytest/test_operation.py index ee8c851948..f15738eac6 100644 --- a/hoomd/pytest/test_operation.py +++ b/hoomd/pytest/test_operation.py @@ -36,28 +36,30 @@ def test_adding_params(base_op, params): @pytest.fixture() def type_param(): - return TypeParameter(name='type_param', - type_kind='particle_types', - param_dict=TypeParameterDict(foo=1, - bar=identity, - len_keys=1)) + return TypeParameter( + name="type_param", + type_kind="particle_types", + param_dict=TypeParameterDict(foo=1, bar=identity, len_keys=1), + ) def test_adding_typeparams(type_param, base_op): base_op._add_typeparam(type_param) - assert 'type_param' in base_op._typeparam_dict.keys() + assert "type_param" in base_op._typeparam_dict.keys() expected_dict = {"foo": 1, "bar": RequiredArg} - assert base_op._typeparam_dict['type_param']['A'] == expected_dict + assert base_op._typeparam_dict["type_param"]["A"] == expected_dict def test_extending_typeparams(base_op): - type_params = 
(TypeParameter('foo', 'particle', {"a": int}), - TypeParameter('bar', 'particle', {"a": str}), - TypeParameter('baz', 'particle', {"a": 2.5})) + type_params = ( + TypeParameter("foo", "particle", {"a": int}), + TypeParameter("bar", "particle", {"a": str}), + TypeParameter("baz", "particle", {"a": 2.5}), + ) base_op._extend_typeparam(type_params) keys = set(base_op._typeparam_dict.keys()) - expected_keys = {'foo', 'bar', 'baz'} + expected_keys = {"foo", "bar", "baz"} # That keys are the same assert keys.union(expected_keys) == keys and keys - expected_keys == set() # That each value is the same @@ -75,7 +77,7 @@ def full_op(base_op, params, type_param): def test_getattr(full_op, params, type_param): assert type(full_op.type_param) is TypeParameter - assert full_op.type_param['A'] == type_param["A"] + assert full_op.type_param["A"] == type_param["A"] for key, param in params.items(): assert getattr(full_op, key) == param @@ -83,26 +85,16 @@ def test_getattr(full_op, params, type_param): def test_setattr_type_param(full_op): new_dict = {"foo": 2, "bar": 3} full_op.type_param = {"A": new_dict, "B": new_dict} - assert full_op.type_param['A'] == new_dict - assert full_op.type_param['B'] == new_dict + assert full_op.type_param["A"] == new_dict + assert full_op.type_param["B"] == new_dict new_new_dict = {"foo": 3, "bar": None} - full_op.type_param['A'] = new_new_dict - assert full_op.type_param['A'] == new_new_dict + full_op.type_param["A"] = new_new_dict + assert full_op.type_param["A"] == new_new_dict @pytest.fixture() def type_param_non_default(): - return { - "A": { - "bar": "world" - }, - "B": { - "bar": "hello" - }, - "C": { - "bar": "hello world" - } - } + return {"A": {"bar": "world"}, "B": {"bar": "hello"}, "C": {"bar": "hello world"}} def test_apply_typeparam_dict(full_op, type_param_non_default): @@ -141,8 +133,8 @@ def attached(full_op, type_param_non_default): def test_attached_setattr(attached): - attached.type_param['A'] = dict(foo=5., bar='world') - assert attached._cpp_obj.getTypeParam('A') == dict(foo=5., bar='world') + attached.type_param["A"] = dict(foo=5.0, bar="world") + assert attached._cpp_obj.getTypeParam("A") == dict(foo=5.0, bar="world") attached.param1 = 4 assert attached._cpp_obj.param1 == 4 @@ -169,14 +161,17 @@ def test_pickling(full_op, attached): def test_operation_lifetime(simulation_factory, two_particle_snapshot_factory): - def drop_sim(attach=False): sim = simulation_factory(two_particle_snapshot_factory()) # Use operation available regardless of build box_resize = hoomd.update.BoxResize( 10, - hoomd.variant.box.Interpolate(hoomd.Box.cube(4), hoomd.Box.cube(5), - hoomd.variant.Ramp(0, 1, 0, 10_000))) + hoomd.variant.box.Interpolate( + hoomd.Box.cube(4), + hoomd.Box.cube(5), + hoomd.variant.Ramp(0, 1, 0, 10_000), + ), + ) sim.operations.updaters.append(box_resize) if attach: sim.run(0) diff --git a/hoomd/pytest/test_operations.py b/hoomd/pytest/test_operations.py index 7dce993c3f..5480e22da0 100644 --- a/hoomd/pytest/test_operations.py +++ b/hoomd/pytest/test_operations.py @@ -17,7 +17,8 @@ def test_len(): operations.integrator = FakeIntegrator() operations.updaters.append( - hoomd.update.FilterUpdater(1, [hoomd.filter.Type(["A"])])) + hoomd.update.FilterUpdater(1, [hoomd.filter.Type(["A"])]) + ) operations.writers.append(hoomd.write.GSD(1, "filename.gsd")) assert len(operations) == 3 @@ -29,11 +30,13 @@ def test_iter(): assert len(list(operations)) == 1 operations.updaters.append( - hoomd.update.FilterUpdater(1, [hoomd.filter.Type(["A"])])) + 
hoomd.update.FilterUpdater(1, [hoomd.filter.Type(["A"])]) + ) operations.writers.append(hoomd.write.GSD(1, "filename.gsd")) - expected_list = (operations._tuners[:] + operations._updaters[:] - + operations._writers[:]) + expected_list = ( + operations._tuners[:] + operations._updaters[:] + operations._writers[:] + ) assert list(operations) == expected_list operations.integrator = FakeIntegrator() diff --git a/hoomd/pytest/test_parameter_dict.py b/hoomd/pytest/test_parameter_dict.py index 78c1199930..ceee9b4542 100644 --- a/hoomd/pytest/test_parameter_dict.py +++ b/hoomd/pytest/test_parameter_dict.py @@ -15,7 +15,6 @@ def identity(x): class DummyCppObj: - def __init__(self): self._dict = dict() @@ -46,29 +45,26 @@ def n(self, request): def spec(self, n): self._n = n if n == 1: - spec = { - "int": int, - "list[int]": [int], - "(float, str)": (float, str) - } + spec = {"int": int, "list[int]": [int], "(float, str)": (float, str)} elif n == 2: spec = {"dict": {"str": str}, "filter": hoomd.filter.ParticleFilter} else: spec = { "(float, float, float)": (float, float, float), - "list[dict[str, int]]": [{ - "foo": int, - "bar": int - }], - "(float, str)": (float, str) + "list[dict[str, int]]": [{"foo": int, "bar": int}], + "(float, str)": (float, str), } self._spec = spec return spec def filter(self): return self.generator( - Options(hoomd.filter.All(), hoomd.filter.Type(("A", "B")), - hoomd.filter.Tags([1, 2, 25]))) + Options( + hoomd.filter.All(), + hoomd.filter.Type(("A", "B")), + hoomd.filter.Tags([1, 2, 25]), + ) + ) def _generate_value(self): if self._n == 2: @@ -82,7 +78,6 @@ def _generate_value(self): @pytest.fixture def generate_plain_collection(self): - def generate(n): return self._generate_value() @@ -106,8 +101,7 @@ def check_equivalent(self, test_mapping, other): for key, value in other.items(): assert test_mapping[key] == value - @pytest.fixture(params=(True, False), - ids=lambda x: "in_map" if x else "out_map") + @pytest.fixture(params=(True, False), ids=lambda x: "in_map" if x else "out_map") def setitem_key_value(self, n, request): value = self._generate_value() keys = list(value) @@ -169,7 +163,6 @@ def test_isolation(self, populated_collection, n): class TestSpecialTypes: - def test_variants(self): mapping = ParameterDict(variant=hoomd.variant.Variant) mapping["variant"] = 4.0 @@ -197,7 +190,6 @@ def test_filters(self): assert mapping["filter"] == tag_100 class NewFilter(hoomd.filter.CustomFilter): - def __call__(self, state): return np.array([], dtype=np.uint64) @@ -237,41 +229,31 @@ def test_str(self): mapping["str"] = "abc" assert mapping["str"] == "abc" - @pytest.mark.parametrize("box", ([10, 15, 25, 0, -0.5, 2], { - "Lx": 10, - "Ly": 15, - "Lz": 25, - "xy": 0, - "xz": -0.5, - "yz": 2 - }, { - "Lx": 10, - "Ly": 15, - "Lz": 25 - }, { - "Lx": 10, - "Ly": 15 - }, { - "Lx": 10, - "Ly": 15, - "xy": 0 - }, [10, 15])) + @pytest.mark.parametrize( + "box", + ( + [10, 15, 25, 0, -0.5, 2], + {"Lx": 10, "Ly": 15, "Lz": 25, "xy": 0, "xz": -0.5, "yz": 2}, + {"Lx": 10, "Ly": 15, "Lz": 25}, + {"Lx": 10, "Ly": 15}, + {"Lx": 10, "Ly": 15, "xy": 0}, + [10, 15], + ), + ) def test_box_valid(self, box): mapping = ParameterDict(box=hoomd.Box) mapping["box"] = box assert mapping["box"] == hoomd.Box.from_box(box) - @pytest.mark.parametrize("box", ([10, 15, 25, 0, -0.5], { - "Ly": 15, - "Lz": 25, - "xy": 0, - "xz": -0.5, - "yz": 2 - }, { - "Lx": 10, - "Ly": 15, - "xz": 1 - }, [10])) + @pytest.mark.parametrize( + "box", + ( + [10, 15, 25, 0, -0.5], + {"Ly": 15, "Lz": 25, "xy": 0, "xz": -0.5, "yz": 
2}, + {"Lx": 10, "Ly": 15, "xz": 1}, + [10], + ), + ) def test_box_invalid(self, box): mapping = ParameterDict(box=hoomd.Box) with pytest.raises(hoomd.error.TypeConversionError): diff --git a/hoomd/pytest/test_remove_drift.py b/hoomd/pytest/test_remove_drift.py index a57b805667..8d5cf6bb45 100644 --- a/hoomd/pytest/test_remove_drift.py +++ b/hoomd/pytest/test_remove_drift.py @@ -11,18 +11,19 @@ # note: The parameterized tests validate parameters so we can't pass in values # here that require preprocessing valid_constructor_args = [ - dict(trigger=hoomd.trigger.Periodic(10), - reference_positions=[(0, 0, 0), (1, 0, 1)]), - dict(trigger=hoomd.trigger.After(10), - reference_positions=[(0, 0, 0), (1, 0, 1)]), - dict(trigger=hoomd.trigger.Before(10), - reference_positions=[(0, 0, 0), (1, 0, 1)]) + dict( + trigger=hoomd.trigger.Periodic(10), reference_positions=[(0, 0, 0), (1, 0, 1)] + ), + dict(trigger=hoomd.trigger.After(10), reference_positions=[(0, 0, 0), (1, 0, 1)]), + dict(trigger=hoomd.trigger.Before(10), reference_positions=[(0, 0, 0), (1, 0, 1)]), ] -valid_attrs = [('trigger', hoomd.trigger.Periodic(10000)), - ('trigger', hoomd.trigger.After(100)), - ('trigger', hoomd.trigger.Before(12345)), - ('reference_positions', [(0, 0, 0), (1, 0, 1)])] +valid_attrs = [ + ("trigger", hoomd.trigger.Periodic(10000)), + ("trigger", hoomd.trigger.After(100)), + ("trigger", hoomd.trigger.Before(12345)), + ("reference_positions", [(0, 0, 0), (1, 0, 1)]), +] @pytest.mark.parametrize("constructor_args", valid_constructor_args) @@ -36,13 +37,14 @@ def test_valid_construction(constructor_args): @pytest.mark.parametrize("constructor_args", valid_constructor_args) -def test_valid_construction_and_attach(simulation_factory, - two_particle_snapshot_factory, - constructor_args): +def test_valid_construction_and_attach( + simulation_factory, two_particle_snapshot_factory, constructor_args +): """Test that RemoveDrift can be attached with valid arguments.""" remove_drift = hoomd.update.RemoveDrift(**constructor_args) sim = simulation_factory( - two_particle_snapshot_factory(particle_types=['A', 'B'], d=2, L=50)) + two_particle_snapshot_factory(particle_types=["A", "B"], d=2, L=50) + ) sim.operations.updaters.append(remove_drift) sim.run(0) @@ -55,23 +57,25 @@ def test_valid_construction_and_attach(simulation_factory, @pytest.mark.parametrize("attr,value", valid_attrs) def test_valid_setattr(attr, value): """Test that RemoveDrift can get and set attributes.""" - remove_drift = hoomd.update.RemoveDrift(trigger=hoomd.trigger.Periodic(10), - reference_positions=[(0, 0, 1), - (-1, 0, 1)]) + remove_drift = hoomd.update.RemoveDrift( + trigger=hoomd.trigger.Periodic(10), reference_positions=[(0, 0, 1), (-1, 0, 1)] + ) setattr(remove_drift, attr, value) assert np.all(getattr(remove_drift, attr) == value) @pytest.mark.parametrize("attr,value", valid_attrs) -def test_valid_setattr_attached(attr, value, simulation_factory, - two_particle_snapshot_factory): +def test_valid_setattr_attached( + attr, value, simulation_factory, two_particle_snapshot_factory +): """Test that RemoveDrift can get and set attributes while attached.""" - remove_drift = hoomd.update.RemoveDrift(trigger=hoomd.trigger.Periodic(10), - reference_positions=[(0, 0, 1), - (-1, 0, 1)]) + remove_drift = hoomd.update.RemoveDrift( + trigger=hoomd.trigger.Periodic(10), reference_positions=[(0, 0, 1), (-1, 0, 1)] + ) sim = simulation_factory( - two_particle_snapshot_factory(particle_types=['A', 'B'], L=50)) + two_particle_snapshot_factory(particle_types=["A", "B"], 
L=50) + ) sim.operations.updaters.append(remove_drift) sim.run(0) @@ -83,10 +87,16 @@ def test_valid_setattr_attached(attr, value, simulation_factory, def test_remove_drift(simulation_factory, lattice_snapshot_factory): """Test that RemoveDrift modifies positions correctly.""" # reference positions in a simple cubic lattice with a=1 - reference_positions = [[-0.5, -0.5, -0.5], [-0.5, -0.5, 0.5], - [0.5, -0.5, -0.5], [0.5, -0.5, - 0.5], [-0.5, 0.5, -0.5], - [-0.5, 0.5, 0.5], [0.5, 0.5, -0.5], [0.5, 0.5, 0.5]] + reference_positions = [ + [-0.5, -0.5, -0.5], + [-0.5, -0.5, 0.5], + [0.5, -0.5, -0.5], + [0.5, -0.5, 0.5], + [-0.5, 0.5, -0.5], + [-0.5, 0.5, 0.5], + [0.5, 0.5, -0.5], + [0.5, 0.5, 0.5], + ] # initialize simulation with randomized positions (off lattice) snap = lattice_snapshot_factory(dimensions=3, n=2, a=1, r=0.1) @@ -94,8 +104,8 @@ def test_remove_drift(simulation_factory, lattice_snapshot_factory): # add remove drift updater and run remove_drift = hoomd.update.RemoveDrift( - trigger=hoomd.trigger.Periodic(1), - reference_positions=reference_positions) + trigger=hoomd.trigger.Periodic(1), reference_positions=reference_positions + ) sim.operations.updaters.append(remove_drift) sim.run(1) @@ -110,7 +120,7 @@ def test_remove_drift(simulation_factory, lattice_snapshot_factory): def test_pickling(simulation_factory, two_particle_snapshot_factory): """Test that RemoveDrift objects are picklable.""" sim = simulation_factory(two_particle_snapshot_factory()) - remove_drift = hoomd.update.RemoveDrift(trigger=hoomd.trigger.Periodic(5), - reference_positions=[(0, 0, 1), - (-1, 0, 1)]) + remove_drift = hoomd.update.RemoveDrift( + trigger=hoomd.trigger.Periodic(5), reference_positions=[(0, 0, 1), (-1, 0, 1)] + ) operation_pickling_check(remove_drift, sim) diff --git a/hoomd/pytest/test_simulation.py b/hoomd/pytest/test_simulation.py index 39ca39176a..c1f4234a38 100644 --- a/hoomd/pytest/test_simulation.py +++ b/hoomd/pytest/test_simulation.py @@ -10,18 +10,18 @@ from hoomd.error import MutabilityError from hoomd.logging import LoggerCategories from hoomd.conftest import logging_check, ListWriter + try: import gsd.hoomd + skip_gsd = False except ImportError: skip_gsd = True -skip_gsd = pytest.mark.skipif(skip_gsd, - reason="gsd Python package was not found.") +skip_gsd = pytest.mark.skipif(skip_gsd, reason="gsd Python package was not found.") class SleepUpdater(hoomd.custom.Action): - def act(self, timestep): time.sleep(1e-6 * timestep) @@ -33,15 +33,21 @@ def wrapped(cls): def make_gsd_frame(hoomd_snapshot): s = gsd.hoomd.Frame() for attr in dir(hoomd_snapshot): - if attr[0] != '_' and attr not in [ - 'exists', 'replicate', 'communicator', 'mpcd' + if attr[0] != "_" and attr not in [ + "exists", + "replicate", + "communicator", + "mpcd", ]: if hoomd_snapshot.communicator.rank == 0: for prop in dir(getattr(hoomd_snapshot, attr)): - if prop[0] != '_': + if prop[0] != "_": # s.attr.prop = hoomd_snapshot.attr.prop - setattr(getattr(s, attr), prop, - getattr(getattr(hoomd_snapshot, attr), prop)) + setattr( + getattr(s, attr), + prop, + getattr(getattr(hoomd_snapshot, attr), prop), + ) return s @@ -59,36 +65,36 @@ def update_positions(snap): var = noise * noise cov = np.diag([var, var, var]) shape = snap.particles.position.shape - snap.particles.position[:] += rs.multivariate_normal(mean, - cov, - size=shape[:-1]) + snap.particles.position[:] += rs.multivariate_normal(mean, cov, size=shape[:-1]) return snap def assert_equivalent_snapshots(gsd_snap, hoomd_snap): if hoomd_snap.communicator.rank == 0: 
- for attr in dir(hoomd_snap): - if attr[0] == '_' or attr in [ - 'exists', 'replicate', 'communicator', 'mpcd' + if attr[0] == "_" or attr in [ + "exists", + "replicate", + "communicator", + "mpcd", ]: continue for prop in dir(getattr(hoomd_snap, attr)): - if prop[0] == '_': + if prop[0] == "_": continue - elif prop == 'types': - assert getattr(getattr(gsd_snap, attr), prop) == \ - getattr(getattr(hoomd_snap, attr), prop) + elif prop == "types": + assert getattr(getattr(gsd_snap, attr), prop) == getattr( + getattr(hoomd_snap, attr), prop + ) else: np.testing.assert_allclose( getattr(getattr(gsd_snap, attr), prop), - getattr(getattr(hoomd_snap, attr), prop)) + getattr(getattr(hoomd_snap, attr), prop), + ) def random_inds(n): - return np.random.choice(np.arange(n), - size=int(n * np.random.rand()), - replace=False) + return np.random.choice(np.arange(n), size=int(n * np.random.rand()), replace=False) def test_initialization(simulation_factory): @@ -136,8 +142,8 @@ def test_tps(simulation_factory, two_particle_snapshot_factory): list_writer = ListWriter(sim, "tps") sim.operations.writers.append( - hoomd.write.CustomWriter(action=list_writer, - trigger=hoomd.trigger.Periodic(1))) + hoomd.write.CustomWriter(action=list_writer, trigger=hoomd.trigger.Periodic(1)) + ) sim.operations += SleepUpdater.wrapped() sim.run(10) tps = list_writer.data @@ -153,8 +159,8 @@ def test_walltime(simulation_factory, two_particle_snapshot_factory): list_writer = ListWriter(sim, "walltime") sim.operations.writers.append( - hoomd.write.CustomWriter(action=list_writer, - trigger=hoomd.trigger.Periodic(1))) + hoomd.write.CustomWriter(action=list_writer, trigger=hoomd.trigger.Periodic(1)) + ) sim.operations += SleepUpdater.wrapped() sim.run(10) walltime = list_writer.data @@ -188,8 +194,7 @@ def test_run_with_timestep(simulation_factory, lattice_snapshot_factory): assert sim.timestep == sum(n_step_list) -_state_args = [((10, ['A']), 10), ((5, ['A', 'B']), 20), - ((8, ['A', 'B', 'C']), 4)] +_state_args = [((10, ["A"]), 10), ((5, ["A", "B"]), 20), ((8, ["A", "B", "C"]), 4)] @pytest.fixture(scope="function", params=_state_args) @@ -198,19 +203,20 @@ def state_args(request): @skip_gsd -def test_state_from_gsd(device, simulation_factory, lattice_snapshot_factory, - state_args, tmp_path): +def test_state_from_gsd( + device, simulation_factory, lattice_snapshot_factory, state_args, tmp_path +): snap_params, nsteps = state_args d = tmp_path / "sub" d.mkdir() filename = d / "temporary_test_file.gsd" if device.communicator.rank == 0: - f = gsd.hoomd.open(name=filename, mode='w') + f = gsd.hoomd.open(name=filename, mode="w") sim = simulation_factory( - lattice_snapshot_factory(n=snap_params[0], - particle_types=snap_params[1])) + lattice_snapshot_factory(n=snap_params[0], particle_types=snap_params[1]) + ) snap = sim.state.get_snapshot() snapshot_dict = {} snapshot_dict[0] = snap @@ -222,8 +228,7 @@ def test_state_from_gsd(device, simulation_factory, lattice_snapshot_factory, for step in range(1, nsteps): particle_type = np.random.choice(snap_params[1]) snap = update_positions(sim.state.get_snapshot()) - set_types(snap, random_inds(snap_params[0]), snap_params[1], - particle_type) + set_types(snap, random_inds(snap_params[0]), snap_params[1], particle_type) if device.communicator.rank == 0: f.append(make_gsd_frame(snap)) @@ -243,9 +248,9 @@ def test_state_from_gsd(device, simulation_factory, lattice_snapshot_factory, @skip_gsd -def test_state_from_gsd_box_dims(device, simulation_factory, - lattice_snapshot_factory, tmp_path): - 
+def test_state_from_gsd_box_dims( + device, simulation_factory, lattice_snapshot_factory, tmp_path +): def modify_gsd_snap(gsd_snap): """Add nonzero z values to gsd box for testing.""" new_box = list(gsd_snap.configuration.box) @@ -259,10 +264,11 @@ def modify_gsd_snap(gsd_snap): d.mkdir() filename = d / "temporary_test_file.gsd" if device.communicator.rank == 0: - f = gsd.hoomd.open(name=filename, mode='w') + f = gsd.hoomd.open(name=filename, mode="w") sim = simulation_factory( - lattice_snapshot_factory(n=10, particle_types=["A", "B"], dimensions=2)) + lattice_snapshot_factory(n=10, particle_types=["A", "B"], dimensions=2) + ) snap = sim.state.get_snapshot() checks = range(3) @@ -281,13 +287,14 @@ def modify_gsd_snap(gsd_snap): @skip_gsd -def test_state_from_gsd_frame(simulation_factory, lattice_snapshot_factory, - device, state_args, tmp_path): +def test_state_from_gsd_frame( + simulation_factory, lattice_snapshot_factory, device, state_args, tmp_path +): snap_params, nsteps = state_args sim = simulation_factory( - lattice_snapshot_factory(n=snap_params[0], - particle_types=snap_params[1])) + lattice_snapshot_factory(n=snap_params[0], particle_types=snap_params[1]) + ) snap = sim.state.get_snapshot() snap = make_gsd_frame(snap) gsd_snapshot_list = [snap] @@ -295,8 +302,7 @@ def test_state_from_gsd_frame(simulation_factory, lattice_snapshot_factory, for _ in range(1, nsteps): particle_type = np.random.choice(snap_params[1]) snap = update_positions(sim.state.get_snapshot()) - set_types(snap, random_inds(snap_params[0]), snap_params[1], - particle_type) + set_types(snap, random_inds(snap_params[0]), snap_params[1], particle_type) snap = make_gsd_frame(snap) gsd_snapshot_list.append(snap) @@ -311,7 +317,6 @@ def test_writer_order(simulation_factory, two_particle_snapshot_factory): """Ensure that writers run at the end of the loop step.""" class StepRecorder(hoomd.custom.Action): - def __init__(self): self.steps = [] @@ -331,12 +336,10 @@ def act(self, timestep): assert record.steps == [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000] -def test_writer_order_initial(simulation_factory, - two_particle_snapshot_factory): +def test_writer_order_initial(simulation_factory, two_particle_snapshot_factory): """Ensure that writers optionally run at the beginning of the loop.""" class StepRecorder(hoomd.custom.Action): - def __init__(self): self.steps = [] @@ -414,32 +417,30 @@ def test_run_limit(simulation_factory, lattice_snapshot_factory): def test_seed(device, lattice_snapshot_factory): - sim = hoomd.Simulation(device) assert sim.seed is None sim.seed = 42 assert sim.seed == 42 - sim.seed = 0x123456789abcdef - assert sim.seed == 0xcdef + sim.seed = 0x123456789ABCDEF + assert sim.seed == 0xCDEF sim.create_state_from_snapshot(lattice_snapshot_factory()) - assert sim.seed == 0xcdef + assert sim.seed == 0xCDEF sim.seed = 20 assert sim.seed == 20 def test_seed_constructor_out_of_range(device, lattice_snapshot_factory): - sim = hoomd.Simulation(device, seed=0x123456789abcdef) + sim = hoomd.Simulation(device, seed=0x123456789ABCDEF) sim.create_state_from_snapshot(lattice_snapshot_factory()) - assert sim.seed == 0xcdef + assert sim.seed == 0xCDEF -def test_operations_setting(tmp_path, simulation_factory, - lattice_snapshot_factory): +def test_operations_setting(tmp_path, simulation_factory, lattice_snapshot_factory): sim = simulation_factory() sim.create_state_from_snapshot(lattice_snapshot_factory()) @@ -458,13 +459,14 @@ def check_operation_setting(sim, old_operations, new_operations): operations = 
hoomd.Operations() # Add some operations to test the setting - operations += hoomd.update.BoxResize(trigger=40, - box=hoomd.variant.box.Interpolate( - hoomd.Box.cube(10), - hoomd.Box.cube(20), - hoomd.variant.Ramp(0, 1, 0, 100))) + operations += hoomd.update.BoxResize( + trigger=40, + box=hoomd.variant.box.Interpolate( + hoomd.Box.cube(10), hoomd.Box.cube(20), hoomd.variant.Ramp(0, 1, 0, 100) + ), + ) operations += hoomd.write.GSD(filename=tmp_path / "foo.gsd", trigger=10) - operations += hoomd.write.Table(10, logger=hoomd.logging.Logger(['scalar'])) + operations += hoomd.write.Table(10, logger=hoomd.logging.Logger(["scalar"])) operations.tuners.clear() # Check setting before scheduling check_operation_setting(sim, sim.operations, operations) @@ -474,17 +476,16 @@ def check_operation_setting(sim, old_operations, new_operations): new_operations = hoomd.Operations() new_operations += hoomd.update.BoxResize( trigger=80, - box=hoomd.variant.box.Interpolate(hoomd.Box.cube(300), - hoomd.Box.cube(20), - hoomd.variant.Ramp(0, 1, 0, 100))) + box=hoomd.variant.box.Interpolate( + hoomd.Box.cube(300), hoomd.Box.cube(20), hoomd.variant.Ramp(0, 1, 0, 100) + ), + ) new_operations += hoomd.write.GSD(filename=tmp_path / "bar.gsd", trigger=20) - new_operations += hoomd.write.Table(20, - logger=hoomd.logging.Logger(['scalar'])) + new_operations += hoomd.write.Table(20, logger=hoomd.logging.Logger(["scalar"])) check_operation_setting(sim, sim.operations, new_operations) -def test_mutability_error(simulation_factory, two_particle_snapshot_factory, - tmp_path): +def test_mutability_error(simulation_factory, two_particle_snapshot_factory, tmp_path): filt = hoomd.filter.All() sim = simulation_factory(two_particle_snapshot_factory()) trig = hoomd.trigger.Periodic(1) @@ -500,25 +501,13 @@ def test_mutability_error(simulation_factory, two_particle_snapshot_factory, def test_logging(): logging_check( - hoomd.Simulation, (), { - 'final_timestep': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'seed': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'timestep': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'tps': { - 'category': LoggerCategories.scalar, - 'default': True - }, - 'walltime': { - 'category': LoggerCategories.scalar, - 'default': True - } - }) + hoomd.Simulation, + (), + { + "final_timestep": {"category": LoggerCategories.scalar, "default": True}, + "seed": {"category": LoggerCategories.scalar, "default": True}, + "timestep": {"category": LoggerCategories.scalar, "default": True}, + "tps": {"category": LoggerCategories.scalar, "default": True}, + "walltime": {"category": LoggerCategories.scalar, "default": True}, + }, + ) diff --git a/hoomd/pytest/test_snapshot.py b/hoomd/pytest/test_snapshot.py index 0f4c6a855b..901262c9ba 100644 --- a/hoomd/pytest/test_snapshot.py +++ b/hoomd/pytest/test_snapshot.py @@ -6,14 +6,15 @@ import numpy import pytest from hoomd.pytest.test_simulation import make_gsd_frame + try: import gsd.hoomd # noqa: F401 - need to know if the import fails + skip_gsd = False except ImportError: skip_gsd = True -skip_gsd = pytest.mark.skipif(skip_gsd, - reason="gsd Python package was not found.") +skip_gsd = pytest.mark.skipif(skip_gsd, reason="gsd Python package was not found.") def assert_equivalent_snapshots(gsd_snap, hoomd_snap): @@ -27,24 +28,22 @@ def assert_equivalent_snapshots(gsd_snap, hoomd_snap): if not hoomd_snap.communicator.rank == 0: return True for attr in dir(hoomd_snap): - if attr[0] == '_' or attr in [ - 'exists', 
'replicate', 'communicator', 'mpcd' - ]: + if attr[0] == "_" or attr in ["exists", "replicate", "communicator", "mpcd"]: continue for prop in dir(getattr(hoomd_snap, attr)): - if prop[0] == '_': + if prop[0] == "_": continue - elif prop == 'types': + elif prop == "types": x = getattr(getattr(gsd_snap, attr), prop) y = getattr(getattr(hoomd_snap, attr), prop) if x is None: assert y == [] else: assert x == y - elif prop == 'dimensions': + elif prop == "dimensions": x = getattr(getattr(gsd_snap, attr), prop) y = getattr(getattr(hoomd_snap, attr), prop) - x_box = getattr(getattr(gsd_snap, attr), 'box') + x_box = getattr(getattr(gsd_snap, attr), "box") if x_box is None or x_box.all() == 0: # if the box is all zeros, the dimensions won't match # hoomd dimensions will be 2 and gsd will be 3 @@ -53,7 +52,7 @@ def assert_equivalent_snapshots(gsd_snap, hoomd_snap): assert y == [] else: assert x == y - elif prop == 'acceleration' or prop == 'is_accel_set': + elif prop == "acceleration" or prop == "is_accel_set": continue else: x = getattr(getattr(gsd_snap, attr), prop) @@ -64,15 +63,16 @@ def assert_equivalent_snapshots(gsd_snap, hoomd_snap): numpy.testing.assert_allclose(x, y) -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def s(): return Snapshot() def test_empty_snapshot(s): if s.communicator.rank == 0: - numpy.testing.assert_allclose(s.configuration.box, [0, 0, 0, 0, 0, 0], - atol=1e-7) + numpy.testing.assert_allclose( + s.configuration.box, [0, 0, 0, 0, 0, 0], atol=1e-7 + ) assert s.configuration.dimensions == 3 assert s.particles.N == 0 @@ -123,8 +123,7 @@ def test_empty_snapshot(s): def test_configuration(s): if s.communicator.rank == 0: s.configuration.box = [10, 12, 7, 0.1, 0.4, 0.2] - numpy.testing.assert_allclose(s.configuration.box, - [10, 12, 7, 0.1, 0.4, 0.2]) + numpy.testing.assert_allclose(s.configuration.box, [10, 12, 7, 0.1, 0.4, 0.2]) with pytest.raises(AttributeError): s.configuration.dimensions = 2 @@ -136,7 +135,8 @@ def generate_outside(box, interior_points, unwrap_images, initial_images): box = Box.from_box(box) matrix = box.to_matrix() input_points = numpy.zeros( - (len(interior_points), len(unwrap_images), len(initial_images), 3)) + (len(interior_points), len(unwrap_images), len(initial_images), 3) + ) check_points = numpy.zeros_like(input_points) input_images = numpy.zeros_like(input_points, dtype=int) check_images = numpy.zeros_like(input_points, dtype=int) @@ -147,112 +147,160 @@ def generate_outside(box, interior_points, unwrap_images, initial_images): check_points[i, j, k, :] = inside_point input_images[i, j, k, :] = initial_image check_images[i, j, k, :] = initial_image + unwrap_image - return input_points.reshape((-1, 3)), check_points.reshape( - (-1, 3)), input_images.reshape((-1, 3)), check_images.reshape((-1, 3)) + return ( + input_points.reshape((-1, 3)), + check_points.reshape((-1, 3)), + input_images.reshape((-1, 3)), + check_images.reshape((-1, 3)), + ) def run_box_type(s, box, interior_points, unwrap_images, initial_images): - (input_points, check_points, input_images, - check_images) = generate_outside(box, interior_points, unwrap_images, - initial_images) + (input_points, check_points, input_images, check_images) = generate_outside( + box, interior_points, unwrap_images, initial_images + ) s.configuration.box = box s.particles.N = len(input_points) s.particles.position[:] = input_points s.particles.image[:] = input_images s.wrap() - numpy.testing.assert_allclose(s.particles.position, - check_points, - atol=1e-12) + 
numpy.testing.assert_allclose(s.particles.position, check_points, atol=1e-12) numpy.testing.assert_array_equal(s.particles.image, check_images) # Multiples of lattice vectors to add to interior points -unwrap_images = numpy.array([ - [0, 0, 0], - [1, 0, 0], - [0, 1, 0], - [0, 0, 1], - [-1, 0, 0], - [0, -1, 0], - [0, 0, -1], - [-1, -1, -1], - [-5, 24, 13], - [3, -4, 5], - [3, 4, -5], - [100, 101, 102], - [-50, -50, 50], -]) +unwrap_images = numpy.array( + [ + [0, 0, 0], + [1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [-1, 0, 0], + [0, -1, 0], + [0, 0, -1], + [-1, -1, -1], + [-5, 24, 13], + [3, -4, 5], + [3, 4, -5], + [100, 101, 102], + [-50, -50, 50], + ] +) test_images = unwrap_images -unwrap_images_2d = numpy.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [-1, 0, 0], - [0, -1, 0], [-1, -1, 0], [1, 1, 0], - [-10, 20, 0]]) +unwrap_images_2d = numpy.array( + [ + [0, 0, 0], + [1, 0, 0], + [0, 1, 0], + [-1, 0, 0], + [0, -1, 0], + [-1, -1, 0], + [1, 1, 0], + [-10, 20, 0], + ] +) def test_wrap_cubic(s): if s.communicator.rank == 0: - run_box_type(s, - box=[1, 1, 1, 0, 0, 0], - interior_points=[[0, 0, 0], [-0.5, 0.0, -0.2], - [0.0, 0.3, -0.1], [0.3, 0.2, -0.1], - [-0.5, 0.2, -0.2]], - unwrap_images=unwrap_images, - initial_images=test_images) + run_box_type( + s, + box=[1, 1, 1, 0, 0, 0], + interior_points=[ + [0, 0, 0], + [-0.5, 0.0, -0.2], + [0.0, 0.3, -0.1], + [0.3, 0.2, -0.1], + [-0.5, 0.2, -0.2], + ], + unwrap_images=unwrap_images, + initial_images=test_images, + ) def test_wrap_triclinic(s): if s.communicator.rank == 0: - run_box_type(s, - box=[10, 12, 7, 0.1, 0.4, 0.2], - interior_points=[[0, 0, 0], [-0.5, 0.0, -0.2], - [0.0, 0.3, -0.1], [0.3, 0.2, -0.1], - [-0.5, 0.2, -0.2], [0, 0, -3.5], - [-6.5, -6.5, -3.5]], - unwrap_images=unwrap_images, - initial_images=test_images) + run_box_type( + s, + box=[10, 12, 7, 0.1, 0.4, 0.2], + interior_points=[ + [0, 0, 0], + [-0.5, 0.0, -0.2], + [0.0, 0.3, -0.1], + [0.3, 0.2, -0.1], + [-0.5, 0.2, -0.2], + [0, 0, -3.5], + [-6.5, -6.5, -3.5], + ], + unwrap_images=unwrap_images, + initial_images=test_images, + ) def test_wrap_2d(s): if s.communicator.rank == 0: - run_box_type(s, - box=[5, 11, 0, 0, 0, 0], - interior_points=[[1, 0, 0], [2.4, 5, 0], [-2.5, 0, 0], - [-2.5, -5.5, 0]], - unwrap_images=unwrap_images_2d, - initial_images=unwrap_images_2d) + run_box_type( + s, + box=[5, 11, 0, 0, 0, 0], + interior_points=[[1, 0, 0], [2.4, 5, 0], [-2.5, 0, 0], [-2.5, -5.5, 0]], + unwrap_images=unwrap_images_2d, + initial_images=unwrap_images_2d, + ) def test_wrap_tetragonal(s): if s.communicator.rank == 0: - run_box_type(s, - box=[7, 7, 4, 0, 0, 0], - interior_points=[[0, 0, 0], [-0.5, 0.0, -0.2], - [0.0, 0.3, -0.1], [0.3, 0.2, -0.1], - [-0.5, 0.2, -0.2], [-3.5, -3.5, -2]], - unwrap_images=unwrap_images, - initial_images=test_images) + run_box_type( + s, + box=[7, 7, 4, 0, 0, 0], + interior_points=[ + [0, 0, 0], + [-0.5, 0.0, -0.2], + [0.0, 0.3, -0.1], + [0.3, 0.2, -0.1], + [-0.5, 0.2, -0.2], + [-3.5, -3.5, -2], + ], + unwrap_images=unwrap_images, + initial_images=test_images, + ) def test_wrap_orthorhombic(s): if s.communicator.rank == 0: - run_box_type(s, - box=[8, 6, 4, 0, 0, 0], - interior_points=[[0, 0, 0], [-0.5, 0.0, -0.2], - [0.0, 0.3, -0.1], [0.3, 0.2, -0.1], - [-0.5, 0.2, -0.2], [-4, -3, -2]], - unwrap_images=unwrap_images, - initial_images=test_images) + run_box_type( + s, + box=[8, 6, 4, 0, 0, 0], + interior_points=[ + [0, 0, 0], + [-0.5, 0.0, -0.2], + [0.0, 0.3, -0.1], + [0.3, 0.2, -0.1], + [-0.5, 0.2, -0.2], + [-4, -3, -2], + ], + unwrap_images=unwrap_images, + 
initial_images=test_images, + ) def test_wrap_monoclinic(s): if s.communicator.rank == 0: - run_box_type(s, - box=[7, 4, 8, 0, 0.25, 0], - interior_points=[[-2, 1, -1], [-4, 0, -3], [2, 1, 1], - [-1, 0, -4], [-4.5, -2, -4]], - unwrap_images=unwrap_images, - initial_images=test_images) + run_box_type( + s, + box=[7, 4, 8, 0, 0.25, 0], + interior_points=[ + [-2, 1, -1], + [-4, 0, -3], + [2, 1, 1], + [-1, 0, -4], + [-4.5, -2, -4], + ], + unwrap_images=unwrap_images, + initial_images=test_images, + ) def test_particles(s): @@ -273,8 +321,8 @@ def test_particles(s): assert len(s.particles.moment_inertia) == 5 assert len(s.particles.angmom) == 5 - s.particles.types = ['A', 'B'] - assert s.particles.types == ['A', 'B'] + s.particles.types = ["A", "B"] + assert s.particles.types == ["A", "B"] assert s.particles.position.dtype == numpy.float64 assert s.particles.position.shape == (5, 3) @@ -310,8 +358,8 @@ def test_bonds(s): assert len(s.bonds.typeid) == 3 assert len(s.bonds.group) == 3 - s.bonds.types = ['A', 'B'] - assert s.bonds.types == ['A', 'B'] + s.bonds.types = ["A", "B"] + assert s.bonds.types == ["A", "B"] assert s.bonds.typeid.shape == (3,) assert s.bonds.typeid.dtype == numpy.uint32 @@ -327,8 +375,8 @@ def test_angles(s): assert len(s.angles.typeid) == 3 assert len(s.angles.group) == 3 - s.angles.types = ['A', 'B'] - assert s.angles.types == ['A', 'B'] + s.angles.types = ["A", "B"] + assert s.angles.types == ["A", "B"] assert s.angles.typeid.shape == (3,) assert s.angles.typeid.dtype == numpy.uint32 @@ -344,8 +392,8 @@ def test_dihedrals(s): assert len(s.dihedrals.typeid) == 3 assert len(s.dihedrals.group) == 3 - s.dihedrals.types = ['A', 'B'] - assert s.dihedrals.types == ['A', 'B'] + s.dihedrals.types = ["A", "B"] + assert s.dihedrals.types == ["A", "B"] assert s.dihedrals.typeid.shape == (3,) assert s.dihedrals.typeid.dtype == numpy.uint32 @@ -361,8 +409,8 @@ def test_impropers(s): assert len(s.impropers.typeid) == 3 assert len(s.impropers.group) == 3 - s.impropers.types = ['A', 'B'] - assert s.impropers.types == ['A', 'B'] + s.impropers.types = ["A", "B"] + assert s.impropers.types == ["A", "B"] assert s.impropers.typeid.shape == (3,) assert s.impropers.typeid.dtype == numpy.uint32 @@ -378,8 +426,8 @@ def test_pairs(s): assert len(s.pairs.typeid) == 3 assert len(s.pairs.group) == 3 - s.pairs.types = ['A', 'B'] - assert s.pairs.types == ['A', 'B'] + s.pairs.types = ["A", "B"] + assert s.pairs.types == ["A", "B"] assert s.pairs.typeid.shape == (3,) assert s.pairs.typeid.dtype == numpy.uint32 @@ -412,27 +460,43 @@ def test_from_gsd_frame_empty(s, device): def test_from_gsd_frame_populated(s, device): if s.communicator.rank == 0: s.configuration.box = [10, 12, 7, 0.1, 0.4, 0.2] - for section in ('particles', 'bonds', 'angles', 'dihedrals', - 'impropers', 'pairs'): - setattr(getattr(s, section), 'N', 5) - setattr(getattr(s, section), 'types', ['A', 'B']) - - for prop in ('angmom', 'body', 'charge', 'diameter', 'image', 'mass', - 'moment_inertia', 'orientation', 'position', 'typeid', - 'velocity'): + for section in ( + "particles", + "bonds", + "angles", + "dihedrals", + "impropers", + "pairs", + ): + setattr(getattr(s, section), "N", 5) + setattr(getattr(s, section), "types", ["A", "B"]) + + for prop in ( + "angmom", + "body", + "charge", + "diameter", + "image", + "mass", + "moment_inertia", + "orientation", + "position", + "typeid", + "velocity", + ): attr = getattr(s.particles, prop) if attr.dtype == numpy.float64: attr[:] = numpy.random.rand(*attr.shape) else: attr[:] = 
numpy.random.randint(3, size=attr.shape) - for section in ('bonds', 'angles', 'dihedrals', 'impropers', 'pairs'): - for prop in ('group', 'typeid'): + for section in ("bonds", "angles", "dihedrals", "impropers", "pairs"): + for prop in ("group", "typeid"): attr = getattr(getattr(s, section), prop) attr[:] = numpy.random.randint(3, size=attr.shape) s.constraints.N = 3 - for prop in ('group', 'value'): + for prop in ("group", "value"): attr = getattr(s.constraints, prop) if attr.dtype == numpy.float64: attr[:] = numpy.random.rand(*attr.shape) @@ -446,7 +510,7 @@ def test_from_gsd_frame_populated(s, device): def test_invalid_particle_typeids(simulation_factory, lattice_snapshot_factory): """Test that using invalid particle typeids raises an error.""" - snap = lattice_snapshot_factory(particle_types=['A', 'B']) + snap = lattice_snapshot_factory(particle_types=["A", "B"]) # assign invalid type ids if snap.communicator.rank == 0: @@ -465,10 +529,9 @@ def test_no_particle_types(simulation_factory, lattice_snapshot_factory): @pytest.mark.serial -def test_no_duplicate_particle_types(simulation_factory, - lattice_snapshot_factory): +def test_no_duplicate_particle_types(simulation_factory, lattice_snapshot_factory): """Test that initialization fails when there are duplicate types.""" - snap = lattice_snapshot_factory(particle_types=['A', 'B', 'C', 'A']) + snap = lattice_snapshot_factory(particle_types=["A", "B", "C", "A"]) # Run test in serial as only rank 0 raises the runtime error. with pytest.raises(RuntimeError): @@ -476,14 +539,12 @@ def test_no_duplicate_particle_types(simulation_factory, @pytest.mark.serial -@pytest.mark.parametrize('bond', - ['bonds', 'angles', 'dihedrals', 'impropers', 'pairs']) -def test_no_duplicate_bond_types(simulation_factory, lattice_snapshot_factory, - bond): +@pytest.mark.parametrize("bond", ["bonds", "angles", "dihedrals", "impropers", "pairs"]) +def test_no_duplicate_bond_types(simulation_factory, lattice_snapshot_factory, bond): """Test that initialization fails when there are duplicate types.""" - snap = lattice_snapshot_factory(particle_types=['A']) + snap = lattice_snapshot_factory(particle_types=["A"]) - getattr(snap, bond).types = ['A', 'B', 'B', 'C'] + getattr(snap, bond).types = ["A", "B", "B", "C"] # Run test in serial as only rank 0 raises the runtime error. 
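For reference, the wrap behavior exercised above can be reproduced directly with hoomd.Snapshot; the sketch below is not part of the patch (every call mirrors the tests above) and places one particle a full box length outside a unit cube, then checks that wrap() folds it back and increments the image flag.

import numpy
import hoomd

s = hoomd.Snapshot()
if s.communicator.rank == 0:
    s.configuration.box = [1, 1, 1, 0, 0, 0]
    s.particles.N = 1
    s.particles.types = ["A"]
    # interior point -0.4 shifted by +1 lattice vector along x
    s.particles.position[:] = [[0.6, 0.0, 0.0]]
    s.particles.image[:] = [[0, 0, 0]]
    s.wrap()
    # the particle folds back to x = -0.4 and the image records the +x crossing
    numpy.testing.assert_allclose(s.particles.position[0], [-0.4, 0.0, 0.0], atol=1e-12)
    numpy.testing.assert_array_equal(s.particles.image[0], [1, 0, 0])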
with pytest.raises(RuntimeError): @@ -498,13 +559,16 @@ def test_zero_particle_system(simulation_factory, lattice_snapshot_factory): simulation_factory(snap) -@pytest.mark.parametrize("group_name,group_size", [ - ("bonds", 2), - ("angles", 3), - ("dihedrals", 4), - ("impropers", 4), - ("pairs", 2), -]) +@pytest.mark.parametrize( + "group_name,group_size", + [ + ("bonds", 2), + ("angles", 3), + ("dihedrals", 4), + ("impropers", 4), + ("pairs", 2), + ], +) def test_invalid_bond_typeids( group_name, group_size, @@ -517,7 +581,7 @@ def test_invalid_bond_typeids( # assign invalid type ids if snap.communicator.rank == 0: group = getattr(snap, group_name) - group.types = ['A'] + group.types = ["A"] group.N = 1 group.group[0] = range(group_size) group.typeid[:] = 2 diff --git a/hoomd/pytest/test_state.py b/hoomd/pytest/test_state.py index f36bf92645..285e61d179 100644 --- a/hoomd/pytest/test_state.py +++ b/hoomd/pytest/test_state.py @@ -7,7 +7,7 @@ import pytest -@pytest.fixture(scope='function') +@pytest.fixture(scope="function") def snap(device): s = Snapshot(device.communicator) N = 1000 @@ -25,83 +25,80 @@ def snap(device): s.particles.orientation[:] = numpy.random.uniform(-1, 1, size=(N, 4)) s.particles.moment_inertia[:] = numpy.random.uniform(1, 5, size=(N, 3)) s.particles.angmom[:] = numpy.random.uniform(-1, 1, size=(N, 4)) - s.particles.types = ['A', 'B', 'C', 'D'] + s.particles.types = ["A", "B", "C", "D"] s.bonds.N = N - 1 for i in range(s.bonds.N): s.bonds.group[i, :] = [i, i + 1] s.bonds.typeid[:] = numpy.random.randint(0, 3, size=s.bonds.N) - s.bonds.types = ['bondA', 'bondB', 'bondC', 'bondD'] + s.bonds.types = ["bondA", "bondB", "bondC", "bondD"] s.angles.N = N - 2 for i in range(s.angles.N): s.angles.group[i, :] = [i, i + 1, i + 2] s.angles.typeid[:] = numpy.random.randint(0, 3, size=s.angles.N) - s.angles.types = ['angleA', 'angleB', 'angleC', 'angleD'] + s.angles.types = ["angleA", "angleB", "angleC", "angleD"] s.dihedrals.N = N - 3 for i in range(s.dihedrals.N): s.dihedrals.group[i, :] = [i, i + 1, i + 2, i + 3] s.dihedrals.typeid[:] = numpy.random.randint(0, 3, size=s.dihedrals.N) - s.dihedrals.types = ['dihedralA', 'dihedralB', 'dihedralC', 'dihedralD'] + s.dihedrals.types = ["dihedralA", "dihedralB", "dihedralC", "dihedralD"] s.impropers.N = N - 3 for i in range(s.impropers.N): s.impropers.group[i, :] = [i, i + 1, i + 2, i + 3] s.impropers.typeid[:] = numpy.random.randint(0, 3, size=s.impropers.N) - s.impropers.types = ['improperA', 'improperB', 'improperC', 'improperD'] + s.impropers.types = ["improperA", "improperB", "improperC", "improperD"] s.pairs.N = N - 1 for i in range(s.pairs.N): s.pairs.group[i, :] = [i, i + 1] s.pairs.typeid[:] = numpy.random.randint(0, 3, size=s.pairs.N) - s.pairs.types = ['pairA', 'pairB', 'pairC', 'pairD'] + s.pairs.types = ["pairA", "pairB", "pairC", "pairD"] s.constraints.N = N - 1 for i in range(s.constraints.N): s.constraints.group[i, :] = [i, i + 1] - s.constraints.value[:] = numpy.random.uniform(1, - 10, - size=s.constraints.N) + s.constraints.value[:] = numpy.random.uniform(1, 10, size=s.constraints.N) return s def assert_snapshots_equal(s1, s2): if s1.communicator.rank == 0: - numpy.testing.assert_allclose(s1.configuration.box, - s2.configuration.box) - numpy.testing.assert_allclose(s1.configuration.dimensions, - s2.configuration.dimensions) + numpy.testing.assert_allclose(s1.configuration.box, s2.configuration.box) + numpy.testing.assert_allclose( + s1.configuration.dimensions, s2.configuration.dimensions + ) assert s1.particles.N == 
s2.particles.N assert s1.particles.types == s2.particles.types - numpy.testing.assert_allclose(s1.particles.position, - s2.particles.position) - numpy.testing.assert_allclose(s1.particles.velocity, - s2.particles.velocity) - numpy.testing.assert_allclose(s1.particles.acceleration, - s2.particles.acceleration) + numpy.testing.assert_allclose(s1.particles.position, s2.particles.position) + numpy.testing.assert_allclose(s1.particles.velocity, s2.particles.velocity) + numpy.testing.assert_allclose( + s1.particles.acceleration, s2.particles.acceleration + ) numpy.testing.assert_equal(s1.particles.typeid, s2.particles.typeid) numpy.testing.assert_allclose(s1.particles.mass, s2.particles.mass) numpy.testing.assert_allclose(s1.particles.charge, s2.particles.charge) - numpy.testing.assert_allclose(s1.particles.diameter, - s2.particles.diameter) + numpy.testing.assert_allclose(s1.particles.diameter, s2.particles.diameter) numpy.testing.assert_equal(s1.particles.image, s2.particles.image) numpy.testing.assert_equal(s1.particles.body, s2.particles.body) - numpy.testing.assert_allclose(s1.particles.orientation, - s2.particles.orientation) - numpy.testing.assert_allclose(s1.particles.moment_inertia, - s2.particles.moment_inertia) + numpy.testing.assert_allclose( + s1.particles.orientation, s2.particles.orientation + ) + numpy.testing.assert_allclose( + s1.particles.moment_inertia, s2.particles.moment_inertia + ) numpy.testing.assert_allclose(s1.particles.angmom, s2.particles.angmom) - numpy.testing.assert_allclose(s1.particles.diameter, - s2.particles.diameter) + numpy.testing.assert_allclose(s1.particles.diameter, s2.particles.diameter) assert s1.bonds.N == s2.bonds.N assert s1.bonds.types == s2.bonds.types @@ -129,8 +126,7 @@ def assert_snapshots_equal(s1, s2): numpy.testing.assert_equal(s1.pairs.group, s2.pairs.group) assert s1.constraints.N == s2.constraints.N - numpy.testing.assert_allclose(s1.constraints.value, - s2.constraints.value) + numpy.testing.assert_allclose(s1.constraints.value, s2.constraints.value) numpy.testing.assert_equal(s1.constraints.group, s2.constraints.group) @@ -176,8 +172,7 @@ def test_modify_snapshot(simulation_factory, snap): assert_snapshots_equal(snap, snap2) -def test_thermalize_particle_velocity(simulation_factory, - lattice_snapshot_factory): +def test_thermalize_particle_velocity(simulation_factory, lattice_snapshot_factory): snap = lattice_snapshot_factory() sim = simulation_factory(snap) sim.state.thermalize_particle_momenta(filter=hoomd.filter.All(), kT=1.5) @@ -191,15 +186,14 @@ def test_thermalize_particle_velocity(simulation_factory, numpy.testing.assert_allclose(p_com, [0, 0, 0], atol=1e-14) - K = numpy.sum(1 / 2 * m * (v[:, 0]**2 + v[:, 1]**2 + v[:, 2]**2)) + K = numpy.sum(1 / 2 * m * (v[:, 0] ** 2 + v[:, 1] ** 2 + v[:, 2] ** 2)) # check that K is somewhat close to the target - the fluctuations are # too large for an allclose check. 
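The comparison that follows relies on equipartition: with the center-of-mass momentum removed, the expected translational kinetic energy is (3N - 3)/2 * kT, and only a loose bracket is meaningful because of thermal fluctuations. A standalone sketch of the same check, assuming `sim` is an existing hoomd.Simulation whose state already holds particles:

import numpy
import hoomd

sim.state.thermalize_particle_momenta(filter=hoomd.filter.All(), kT=1.5)
snap = sim.state.get_snapshot()
if snap.communicator.rank == 0:
    v = snap.particles.velocity
    m = snap.particles.mass
    K = numpy.sum(0.5 * m * numpy.sum(v * v, axis=1))
    expected_K = (3 * snap.particles.N - 3) / 2 * 1.5
    # loose bracket; exact agreement is not expected for a single draw
    assert expected_K * 3 / 4 < K < expected_K * 4 / 3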
expected_K = (3 * snap.particles.N - 3) / 2 * 1.5 assert K > expected_K * 3 / 4 and K < expected_K * 4 / 3 -def test_thermalize_angular_momentum(simulation_factory, - lattice_snapshot_factory): +def test_thermalize_angular_momentum(simulation_factory, lattice_snapshot_factory): snap = lattice_snapshot_factory() I = [1, 2, 3] # noqa: E741 - allow ambiguous variable name @@ -216,7 +210,8 @@ def test_thermalize_angular_momentum(simulation_factory, L = snapshot.particles.angmom[:, 1:4] / 2 K = numpy.sum( - 1 / 2 * (L[:, 0]**2 / I[0] + L[:, 1]**2 / I[1] + L[:, 2]**2 / I[2])) + 1 / 2 * (L[:, 0] ** 2 / I[0] + L[:, 1] ** 2 / I[1] + L[:, 2] ** 2 / I[2]) + ) # check that K is somewhat close to the target - the fluctuations are # too large for an allclose check. expected_K = (3 * snap.particles.N) / 2 * 1.5 @@ -228,10 +223,9 @@ def test_thermalize_body_particle_momenta(simulation_factory): if snapshot.communicator.rank == 0: snapshot.configuration.box = (10, 10, 10, 0, 0, 0) snapshot.particles.N = 6 - snapshot.particles.types = ['A'] + snapshot.particles.types = ["A"] snapshot.particles.body[:] = [0, 1, -2, 0, 1, -2] - snapshot.particles.moment_inertia[:] = [[1, 1, 1] - ] * snapshot.particles.N + snapshot.particles.moment_inertia[:] = [[1, 1, 1]] * snapshot.particles.N sim = simulation_factory(snapshot) sim.state.thermalize_particle_momenta(filter=hoomd.filter.All(), kT=1.0) @@ -267,24 +261,26 @@ def test_replicate(simulation_factory, lattice_snapshot_factory): initial_snapshot.replicate(2, 2, 2) if initial_snapshot.communicator.rank == 0: - numpy.testing.assert_allclose(initial_snapshot.particles.position, [ - [-5, -5, -5], - [-5, -5, 5], - [-5, 5, -5], - [-5, 5, 5], - [5, -5, -5], - [5, -5, 5], - [5, 5, -5], - [5, 5, 5], - ]) + numpy.testing.assert_allclose( + initial_snapshot.particles.position, + [ + [-5, -5, -5], + [-5, -5, 5], + [-5, 5, -5], + [-5, 5, 5], + [5, -5, -5], + [5, -5, 5], + [5, 5, -5], + [5, 5, 5], + ], + ) sim.state.replicate(2, 2, 2) new_snapshot = sim.state.get_snapshot() assert_snapshots_equal(initial_snapshot, new_snapshot) -def test_domain_decomposition(device, simulation_factory, - lattice_snapshot_factory): +def test_domain_decomposition(device, simulation_factory, lattice_snapshot_factory): snapshot = lattice_snapshot_factory() if device.communicator.num_ranks == 1: @@ -312,11 +308,10 @@ def test_domain_decomposition(device, simulation_factory, assert sim.state.domain_decomposition == (1, 2, 1) assert sim.state.domain_decomposition_split_fractions == ([], [0.5], []) - sim = simulation_factory(snapshot, - domain_decomposition=(None, None, [0.25, - 0.75])) + sim = simulation_factory( + snapshot, domain_decomposition=(None, None, [0.25, 0.75]) + ) assert sim.state.domain_decomposition == (1, 1, 2) - assert sim.state.domain_decomposition_split_fractions == ([], [], - [0.25]) + assert sim.state.domain_decomposition_split_fractions == ([], [], [0.25]) else: raise RuntimeError("Test only supports 1 and 2 ranks") diff --git a/hoomd/pytest/test_syncedlist.py b/hoomd/pytest/test_syncedlist.py index 53267dba87..b709fe1456 100644 --- a/hoomd/pytest/test_syncedlist.py +++ b/hoomd/pytest/test_syncedlist.py @@ -5,13 +5,14 @@ import pytest import hoomd -from hoomd.conftest import (BaseListTest, pickling_check) +from hoomd.conftest import BaseListTest, pickling_check from hoomd.pytest.dummy import DummyOperation, DummySimulation from hoomd.data.syncedlist import SyncedList class OpInt(int): """Used to test SyncedList where item equality checks are needed.""" + _cpp_obj = False def 
_attach(self, simulation): @@ -57,9 +58,7 @@ def generate(n): else: def generate(n): - return [ - OpInt(self.generator.int(100_000_000)) for _ in range(n) - ] + return [OpInt(self.generator.int(100_000_000)) for _ in range(n)] return generate @@ -86,11 +85,9 @@ def final_check(self, test_list): assert self.is_equal(item, synced_item) assert self._synced_list is test_list._synced_list if not test_list._attach_members: - assert not any( - getattr(item, "_attached", False) for item in test_list) + assert not any(getattr(item, "_attached", False) for item in test_list) def test_init(self, generate_plain_collection, item_cls): - # Test automatic to_synced_list function generation synced_list = SyncedList(validation=item_cls) assert item_cls in synced_list._validate.types @@ -103,9 +100,9 @@ def cpp_identity(x): # Test full initialziation plain_list = generate_plain_collection(5) - synced_list = SyncedList(validation=item_cls, - to_synced_list=cpp_identity, - iterable=plain_list) + synced_list = SyncedList( + validation=item_cls, to_synced_list=cpp_identity, iterable=plain_list + ) assert synced_list._to_synced_list_conversion == cpp_identity op._cpp_obj = 2 assert synced_list._to_synced_list_conversion(op) == 2 @@ -122,8 +119,9 @@ def test_synced(self): def test_register_item(self, empty_collection, item_cls): op = item_cls() empty_collection._register_item(op) - assert op._attached == (empty_collection._synced - and empty_collection._attach_members) + assert op._attached == ( + empty_collection._synced and empty_collection._attach_members + ) def test_validate_or_error(self, empty_collection, item_cls): with pytest.raises(ValueError): @@ -147,25 +145,26 @@ def test_unsync(self, populated_collection): def test_synced_iter(self, empty_collection): empty_collection._sync(None, [3, 2, 1]) empty_collection._synced_list = [1, 2, 3] - assert all([ - i == j for i, j in zip(range(1, 4), empty_collection._synced_iter()) - ]) + assert all( + [i == j for i, j in zip(range(1, 4), empty_collection._synced_iter())] + ) def test_pickling(self, populated_collection): test_list, _ = populated_collection pickling_check(test_list) - def test_sim_weakref(self, simulation_factory, - two_particle_snapshot_factory): - + def test_sim_weakref(self, simulation_factory, two_particle_snapshot_factory): def drop_sim(attach=False): sim = simulation_factory(two_particle_snapshot_factory()) # Use operation available regardless of build box_resize = hoomd.update.BoxResize( 10, hoomd.variant.box.Interpolate( - hoomd.Box.cube(4), hoomd.Box.cube(5), - hoomd.variant.Ramp(0, 1, 0, 10_000))) + hoomd.Box.cube(4), + hoomd.Box.cube(5), + hoomd.variant.Ramp(0, 1, 0, 10_000), + ), + ) sim.operations.updaters.append(box_resize) if attach: sim.run(0) diff --git a/hoomd/pytest/test_table.py b/hoomd/pytest/test_table.py index 5814425aed..e77b13ff92 100644 --- a/hoomd/pytest/test_table.py +++ b/hoomd/pytest/test_table.py @@ -13,6 +13,7 @@ try: from mpi4py import MPI + skip_mpi = False except ImportError: skip_mpi = True @@ -21,7 +22,6 @@ class Identity: - def __init__(self, x): self.x = x @@ -34,26 +34,25 @@ def __eq__(self, other): @pytest.fixture def logger(): - logger = hoomd.logging.Logger(categories=['scalar', "string"]) - logger[('dummy', 'loggable', 'int')] = (Identity(42000000), 'scalar') - logger[('dummy', 'loggable', 'float')] = (Identity(3.1415), 'scalar') - logger[('dummy', 'loggable', 'small_float')] = (Identity(0.0000001), - 'scalar') - logger[('dummy', 'loggable', 'zero_float')] = (Identity(0.0), 'scalar') - logger[('dummy', 
'loggable', 'zero_int')] = (Identity(0), 'scalar') - logger[('dummy', 'loggable', 'string')] = (Identity("foobarbaz"), 'string') + logger = hoomd.logging.Logger(categories=["scalar", "string"]) + logger[("dummy", "loggable", "int")] = (Identity(42000000), "scalar") + logger[("dummy", "loggable", "float")] = (Identity(3.1415), "scalar") + logger[("dummy", "loggable", "small_float")] = (Identity(0.0000001), "scalar") + logger[("dummy", "loggable", "zero_float")] = (Identity(0.0), "scalar") + logger[("dummy", "loggable", "zero_int")] = (Identity(0), "scalar") + logger[("dummy", "loggable", "string")] = (Identity("foobarbaz"), "string") return logger @pytest.fixture def expected_values(): return { - 'dummy.loggable.int': 42000000, - 'dummy.loggable.float': 3.1415, - 'dummy.loggable.string': "foobarbaz", - 'dummy.loggable.small_float': 0.0000001, - 'dummy.loggable.zero_float': 0.0, - 'dummy.loggable.zero_int': 0, + "dummy.loggable.int": 42000000, + "dummy.loggable.float": 3.1415, + "dummy.loggable.string": "foobarbaz", + "dummy.loggable.small_float": 0.0000001, + "dummy.loggable.zero_float": 0.0, + "dummy.loggable.zero_int": 0, } @@ -76,22 +75,24 @@ def test_header_generation(device, logger): for i in range(10): table_writer.write() output_str = output.getvalue() - lines = output_str.split('\n') + lines = output_str.split("\n") headers = lines[0].split() expected_headers = [ - 'dummy.loggable.int', 'dummy.loggable.float', 'dummy.loggable.string', - 'dummy.loggable.small_float' + "dummy.loggable.int", + "dummy.loggable.float", + "dummy.loggable.string", + "dummy.loggable.small_float", ] assert all(hdr in headers for hdr in expected_headers) for i in range(1, 10): values = lines[i].split() assert not any(v in expected_headers for v in values) - table_writer.logger[('new', 'quantity')] = (lambda: 53, 'scalar') + table_writer.logger[("new", "quantity")] = (lambda: 53, "scalar") table_writer.write() output_str = output.getvalue() - lines = output_str.split('\n') + lines = output_str.split("\n") headers = lines[-3].split() - expected_headers.append('new.quantity') + expected_headers.append("new.quantity") assert all(hdr in headers for hdr in expected_headers) @@ -102,7 +103,7 @@ def test_values(device, logger, expected_values): table_writer._comm = device.communicator for i in range(10): table_writer.write() - lines = output.getvalue().split('\n') + lines = output.getvalue().split("\n") headers = lines[0].split() def test_equality(expected, given): @@ -133,71 +134,63 @@ def test_mpi_write_only(device, logger): comm = MPI.COMM_WORLD if comm.rank == 0: - assert output.getvalue() != '' + assert output.getvalue() != "" else: - assert output.getvalue() == '' + assert output.getvalue() == "" @pytest.mark.serial def test_header_attributes(device, logger): output = StringIO("") - table_writer = hoomd.write.Table(1, - logger, - output, - header_sep='-', - max_header_len=13) + table_writer = hoomd.write.Table( + 1, logger, output, header_sep="-", max_header_len=13 + ) table_writer._comm = device.communicator table_writer.write() - lines = output.getvalue().split('\n') + lines = output.getvalue().split("\n") headers = lines[0].split() - expected_headers = ['loggable-int', 'loggable-float', 'string'] + expected_headers = ["loggable-int", "loggable-float", "string"] assert all(hdr in headers for hdr in expected_headers) @pytest.mark.serial def test_delimiter(device, logger): output = StringIO("") - table_writer = hoomd.write.Table(1, logger, output, delimiter=',') + table_writer = hoomd.write.Table(1, logger, 
output, delimiter=",") table_writer._comm = device.communicator table_writer.write() - lines = output.getvalue().split('\n') - assert all(len(row.split(',')) == len(logger) for row in lines[:-1]) + lines = output.getvalue().split("\n") + assert all(len(row.split(",")) == len(logger) for row in lines[:-1]) @pytest.mark.serial def test_max_precision(device, logger): output = StringIO("") - table_writer = hoomd.write.Table(1, - logger, - output, - pretty=False, - max_precision=5) + table_writer = hoomd.write.Table(1, logger, output, pretty=False, max_precision=5) table_writer._comm = device.communicator for i in range(10): table_writer.write() - smaller_lines = output.getvalue().split('\n') + smaller_lines = output.getvalue().split("\n") output = StringIO("") - table_writer = hoomd.write.Table(1, - logger, - output, - pretty=False, - max_precision=15) + table_writer = hoomd.write.Table(1, logger, output, pretty=False, max_precision=15) table_writer._comm = device.communicator for i in range(10): table_writer.write() - longer_lines = output.getvalue().split('\n') + longer_lines = output.getvalue().split("\n") for long_row, short_row in zip(longer_lines[1:-1], smaller_lines[1:-1]): assert all( len(long_) >= len(short) - for long_, short in zip(long_row.split(), short_row.split())) + for long_, short in zip(long_row.split(), short_row.split()) + ) assert any( len(long_) > len(short) - for long_, short in zip(long_row.split(), short_row.split())) + for long_, short in zip(long_row.split(), short_row.split()) + ) def test_only_string_and_scalar_quantities(device): @@ -205,7 +198,7 @@ def test_only_string_and_scalar_quantities(device): output = StringIO("") with pytest.raises(ValueError): hoomd.write.Table(1, logger, output) - logger = hoomd.logging.Logger(categories=['sequence']) + logger = hoomd.logging.Logger(categories=["sequence"]) with pytest.raises(ValueError): hoomd.write.Table(1, logger, output) @@ -216,13 +209,12 @@ def test_pickling(simulation_factory, two_particle_snapshot_factory, logger): operation_pickling_check(table, sim) -test_categories = re.split(r'[.|]', str(hoomd.logging.LoggerCategories.ALL))[1:] +test_categories = re.split(r"[.|]", str(hoomd.logging.LoggerCategories.ALL))[1:] # Generate a set for each invalid permutation of the input logger categories # Sets that don't fail are covered by test_only_string_and_scalar_quantities combinations = [ set(combo) - for i in range(1, - len(test_categories) + 1) + for i in range(1, len(test_categories) + 1) for combo in itertools.combinations(test_categories, i) if set(combo) - {"string", "scalar"} != set() ] @@ -243,7 +235,8 @@ def test_invalid_permutations(device, combination): hoomd.write.Table(1, logger, output) # Ensure that correct error message is sent assert "Table Logger may only have scalar or string categories set." in str( - ve.value) + ve.value + ) # Now ensure category formatting operates correctly for category in combination - {"string", "scalar"}: assert category in str(ve.value) diff --git a/hoomd/pytest/test_trigger.py b/hoomd/pytest/test_trigger.py index 99e1954f34..382d8d98e0 100644 --- a/hoomd/pytest/test_trigger.py +++ b/hoomd/pytest/test_trigger.py @@ -2,6 +2,7 @@ # Part of HOOMD-blue, released under the BSD 3-Clause License. 
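Outside of the preceding tests, the Table writer is normally attached to a simulation rather than driven by hand; a minimal sketch, assuming `sim` is an existing hoomd.Simulation (the `operations.writers` list and the chosen quantities are assumptions, not part of this patch):

from io import StringIO

import hoomd

logger = hoomd.logging.Logger(categories=["scalar", "string"])
logger.add(obj=sim, quantities=["tps", "walltime"])
output = StringIO("")
table = hoomd.write.Table(1, logger, output, delimiter=",")
sim.operations.writers.append(table)
sim.run(10)
print(output.getvalue())  # comma-delimited header followed by one row per write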
"""Test the Trigger classes.""" + import itertools from inspect import isclass import pickle @@ -13,12 +14,11 @@ class CustomTrigger(hoomd.trigger.Trigger): - def __init__(self): hoomd.trigger.Trigger.__init__(self) def compute(self, timestep): - return (timestep**(1 / 2)).is_integer() + return (timestep ** (1 / 2)).is_integer() def __str__(self): return "CustomTrigger()" @@ -29,30 +29,37 @@ def __eq__(self, other): # List of trigger classes _classes = [ - hoomd.trigger.Periodic, hoomd.trigger.Before, hoomd.trigger.After, - hoomd.trigger.On, hoomd.trigger.Not, hoomd.trigger.And, hoomd.trigger.Or, - CustomTrigger + hoomd.trigger.Periodic, + hoomd.trigger.Before, + hoomd.trigger.After, + hoomd.trigger.On, + hoomd.trigger.Not, + hoomd.trigger.And, + hoomd.trigger.Or, + CustomTrigger, ] # List of kwargs for the class constructors -_kwargs = [{ - 'period': (456, 10000000000), - 'phase': (18, 60000000000) -}, { - 'timestep': (100, 10000000000) -}, { - 'timestep': (100, 10000000000) -}, { - 'timestep': (100, 10000000000) -}, { - 'trigger': (hoomd.trigger.Periodic(10, 1), hoomd.trigger.Before(100)) -}, { - 'triggers': ((hoomd.trigger.Periodic(10, 1), hoomd.trigger.Before(100)), - (hoomd.trigger.After(100), hoomd.trigger.On(101))) -}, { - 'triggers': ((hoomd.trigger.Periodic(10, 1), hoomd.trigger.Before(100)), - (hoomd.trigger.After(100), hoomd.trigger.On(101))) -}, {}] +_kwargs = [ + {"period": (456, 10000000000), "phase": (18, 60000000000)}, + {"timestep": (100, 10000000000)}, + {"timestep": (100, 10000000000)}, + {"timestep": (100, 10000000000)}, + {"trigger": (hoomd.trigger.Periodic(10, 1), hoomd.trigger.Before(100))}, + { + "triggers": ( + (hoomd.trigger.Periodic(10, 1), hoomd.trigger.Before(100)), + (hoomd.trigger.After(100), hoomd.trigger.On(101)), + ) + }, + { + "triggers": ( + (hoomd.trigger.Periodic(10, 1), hoomd.trigger.Before(100)), + (hoomd.trigger.After(100), hoomd.trigger.On(101)), + ) + }, + {}, +] def _cartesian(grid): @@ -71,21 +78,31 @@ def _test_name(arg): # Go over all class and constructor pairs -@pytest.mark.parametrize('cls, kwargs', - ((cls, kwarg) - for cls, kwargs in zip(_classes, _kwargs) - for kwarg in _cartesian(kwargs)), - ids=_test_name) +@pytest.mark.parametrize( + "cls, kwargs", + ( + (cls, kwarg) + for cls, kwargs in zip(_classes, _kwargs) + for kwarg in _cartesian(kwargs) + ), + ids=_test_name, +) def test_properties(cls, kwargs): instance = cls(**kwargs) for key, value in kwargs.items(): assert getattr(instance, key) == value -_strings_beginning = ("hoomd.trigger.Periodic(", "hoomd.trigger.Before(", - "hoomd.trigger.After(", "hoomd.trigger.On(", - "hoomd.trigger.Not(", "hoomd.trigger.And(", - "hoomd.trigger.Or(", "CustomTrigger()") +_strings_beginning = ( + "hoomd.trigger.Periodic(", + "hoomd.trigger.Before(", + "hoomd.trigger.After(", + "hoomd.trigger.On(", + "hoomd.trigger.Not(", + "hoomd.trigger.And(", + "hoomd.trigger.Or(", + "CustomTrigger()", +) # Trigger instanace for the first arguments in _kwargs @@ -94,9 +111,9 @@ def triggers(): return (cls(**kwargs) for cls, kwargs in zip(_classes, _single_kwargs)) -@pytest.mark.parametrize('trigger, instance_string', - zip(triggers(), _strings_beginning), - ids=_test_name) +@pytest.mark.parametrize( + "trigger, instance_string", zip(triggers(), _strings_beginning), ids=_test_name +) def test_str(trigger, instance_string): assert str(trigger).startswith(instance_string) @@ -109,13 +126,13 @@ def test_str(trigger, instance_string): lambda x: not (x - 1) % 10 == 0, # not lambda x: (x - 1) % 10 == 0 and x < 100, # and 
lambda x: (x - 1) % 10 == 0 or x < 100, # or - lambda x: (x**(1 / 2)).is_integer() + lambda x: (x ** (1 / 2)).is_integer(), ] -@pytest.mark.parametrize('trigger, eval_func', - zip(triggers(), _eval_funcs), - ids=_test_name) +@pytest.mark.parametrize( + "trigger, eval_func", zip(triggers(), _eval_funcs), ids=_test_name +) def test_eval(trigger, eval_func): for i in range(10000): assert trigger(i) == eval_func(i) @@ -124,7 +141,7 @@ def test_eval(trigger, eval_func): assert trigger(i) == eval_func(i) -@pytest.mark.parametrize('trigger', triggers(), ids=_test_name) +@pytest.mark.parametrize("trigger", triggers(), ids=_test_name) def test_pickling(trigger): pkled_trigger = pickle.loads(pickle.dumps(trigger)) assert trigger == pkled_trigger diff --git a/hoomd/pytest/test_tune_solve.py b/hoomd/pytest/test_tune_solve.py index 3062dcec62..b36f868164 100644 --- a/hoomd/pytest/test_tune_solve.py +++ b/hoomd/pytest/test_tune_solve.py @@ -4,8 +4,7 @@ import pytest from hoomd.tune import ManualTuneDefinition -from hoomd.tune import (ScaleSolver, SecantSolver, GradientDescent, - GridOptimizer) +from hoomd.tune import ScaleSolver, SecantSolver, GradientDescent, GridOptimizer import hoomd.variant @@ -22,11 +21,9 @@ def test_solving(self, solver, equation): cnt += 1 if cnt >= 500: err = self.get_y_error_mag(equation) - raise RuntimeError( - "Expected conversion earlier: err={}.".format(err)) + raise RuntimeError("Expected conversion earlier: err={}.".format(err)) err = equation.y - equation.target - assert self.get_y_error_mag(equation) <= getattr( - solver, "tol", self.Y_TOL) + assert self.get_y_error_mag(equation) <= getattr(solver, "tol", self.Y_TOL) assert self.get_x_error_mag(equation) <= self.X_TOL def get_x_error_mag(self, equation): @@ -39,8 +36,10 @@ def get_y_error_mag(self, equation): class TestRootSolvers(SolverTestBase): SOLUTIONS = (-1, 1) - @pytest.fixture(params=[ScaleSolver(), SecantSolver()], - ids=lambda solver: solver.__class__.__name__) + @pytest.fixture( + params=[ScaleSolver(), SecantSolver()], + ids=lambda solver: solver.__class__.__name__, + ) def solver(self, request): return request.param @@ -48,12 +47,13 @@ def solver(self, request): def equation(self): """Evaluate: x^2 - 1, x = (1, -1).""" equation = dict(x=4) - equation['y'] = lambda: equation['x']**2 + equation["y"] = lambda: equation["x"] ** 2 return ManualTuneDefinition( - get_x=lambda: equation['x'], - set_x=lambda x: equation.__setitem__('x', x), - get_y=lambda: equation['y'](), - target=1) + get_x=lambda: equation["x"], + set_x=lambda x: equation.__setitem__("x", x), + get_y=lambda: equation["y"](), + target=1, + ) class TestOptimizers(SolverTestBase): @@ -61,9 +61,10 @@ class TestOptimizers(SolverTestBase): Y_TOL = 1e-2 X_TOL = 2e-3 - @pytest.fixture(params=[GradientDescent(), - GridOptimizer(n_rounds=10)], - ids=lambda solver: solver.__class__.__name__) + @pytest.fixture( + params=[GradientDescent(), GridOptimizer(n_rounds=10)], + ids=lambda solver: solver.__class__.__name__, + ) def solver(self, request): return request.param @@ -71,14 +72,15 @@ def solver(self, request): def equation(self): """Evaluate: max(4 - (x - 2)^2), x = 2, y = 4.""" equation = dict(x=3) - equation['y'] = lambda: 4 - (equation['x'] - 2)**2 + equation["y"] = lambda: 4 - (equation["x"] - 2) ** 2 # We use target for the expect y maximum return ManualTuneDefinition( - get_x=lambda: equation['x'], - set_x=lambda x: equation.__setitem__('x', x), - get_y=lambda: equation['y'](), + get_x=lambda: equation["x"], + set_x=lambda x: 
equation.__setitem__("x", x), + get_y=lambda: equation["y"](), domain=(0, 4), - target=4) + target=4, + ) def test_gradient_descent_alpha(): diff --git a/hoomd/pytest/test_type_parameter_dict.py b/hoomd/pytest/test_type_parameter_dict.py index 2d61c56131..6177ae7e10 100644 --- a/hoomd/pytest/test_type_parameter_dict.py +++ b/hoomd/pytest/test_type_parameter_dict.py @@ -41,8 +41,8 @@ def _generate_keys(self, n): else: yield from set( tuple(sorted(key)) - for key in itertools.combinations_with_replacement( - self.alphabet[:n], 2)) + for key in itertools.combinations_with_replacement(self.alphabet[:n], 2) + ) def _generate_value(self): if self._spec == "int": @@ -63,11 +63,8 @@ def _generate_value(self): @pytest.fixture def generate_plain_collection(self, len_keys): - def generate(n): - return { - key: self._generate_value() for key in self._generate_keys(n) - } + return {key: self._generate_value() for key in self._generate_keys(n)} return generate @@ -79,20 +76,14 @@ def is_equal(self, test_type_param, mapping): """ if isinstance(test_type_param, (str, tuple)) or self._spec == "int": return test_type_param == mapping - return all( - test_type_param[key] == value for key, value in mapping.items()) + return all(test_type_param[key] == value for key, value in mapping.items()) @pytest.fixture def empty_collection(self, len_keys, spec): """Return an empty type parameter.""" validator = int if spec == "dict": - validator = { - "foo": 1, - "bar": identity, - "baz": "hello", - "gar": [int] - } + validator = {"foo": 1, "bar": identity, "baz": "hello", "gar": [int]} return TypeParameterDict(validator, len_keys=len_keys) def check_equivalent(self, test_mapping, other): @@ -109,11 +100,12 @@ def random_keys(self): if self._len_keys == 1: yield from super().random_keys() else: - yield from (tuple(sorted(k)) for k in zip(super().random_keys(), - super().random_keys())) + yield from ( + tuple(sorted(k)) + for k in zip(super().random_keys(), super().random_keys()) + ) - @pytest.fixture(params=(True, False), - ids=lambda x: "in_map" if x else "out_map") + @pytest.fixture(params=(True, False), ids=lambda x: "in_map" if x else "out_map") def setitem_key_value(self, n, request): keys = list(self._generate_keys(n)) value = self._generate_value() @@ -143,9 +135,9 @@ def yield_key_pairs(len_keys): if len(types) > 4: mid_point = len(types) // 2 key_spec = (types[:mid_point], types[mid_point:]) - keys = {(t1, t2) - for t2 in types[mid_point:] - for t1 in types[mid_point:]} + keys = { + (t1, t2) for t2 in types[mid_point:] for t1 in types[mid_point:] + } yield key_spec, keys return yield_key_pairs @@ -174,8 +166,9 @@ def invalid_key_specs(self, n): # we use populated_collection to ensure that types are correctly captured # for TestTypeParameterDictAttached. 
- def test_invalid_keys(self, populated_collection, valid_key_specs, - invalid_key_specs): + def test_invalid_keys( + self, populated_collection, valid_key_specs, invalid_key_specs + ): test_mapping, _ = populated_collection for key_spec, expected_keys in valid_key_specs(self._len_keys): assert set(test_mapping._indexer(key_spec)) == expected_keys @@ -238,9 +231,9 @@ class TestTypeParameterDictAttached(TestTypeParameterDict): @pytest.fixture def populated_collection(self, empty_collection, plain_collection, n): empty_collection.update(plain_collection) - empty_collection._attach(DummyCppObj(), - param_name="type_param", - types=self.alphabet[:n]) + empty_collection._attach( + DummyCppObj(), param_name="type_param", types=self.alphabet[:n] + ) return empty_collection, plain_collection def _generate_value(self): @@ -268,16 +261,15 @@ def test_detach(self, populated_collection): def test_premature_attaching(self, empty_collection, plain_collection, n): for key, value in plain_collection.items(): with pytest.raises(IncompleteSpecificationError): - empty_collection._attach(DummyCppObj(), - param_name="type_param", - types=self.alphabet[:n]) + empty_collection._attach( + DummyCppObj(), param_name="type_param", types=self.alphabet[:n] + ) empty_collection[key] = value - empty_collection._attach(DummyCppObj(), - param_name="type_param", - types=self.alphabet[:n]) + empty_collection._attach( + DummyCppObj(), param_name="type_param", types=self.alphabet[:n] + ) - def test_unspecified_sequence_errors(self, empty_collection, - plain_collection, n): + def test_unspecified_sequence_errors(self, empty_collection, plain_collection, n): if self._spec != "dict": return last_key, last_value = plain_collection.popitem() @@ -286,12 +278,12 @@ def test_unspecified_sequence_errors(self, empty_collection, last_value.pop("gar") empty_collection[last_key] = last_value with pytest.raises(IncompleteSpecificationError): - empty_collection._attach(DummyCppObj(), - param_name="type_param", - types=self.alphabet[:n]) + empty_collection._attach( + DummyCppObj(), param_name="type_param", types=self.alphabet[:n] + ) def _invalid_key_specs(self, n): - invalid_types = list(self.alphabet[n:n + n]) + invalid_types = list(self.alphabet[n : n + n]) def yield_invalid_key(len_keys): yield from super(type(self), self)._invalid_key_specs(n)(len_keys) diff --git a/hoomd/pytest/test_typeparam.py b/hoomd/pytest/test_typeparam.py index 20c85df374..885d48e692 100644 --- a/hoomd/pytest/test_typeparam.py +++ b/hoomd/pytest/test_typeparam.py @@ -18,11 +18,11 @@ def typedict(default_value): return TypeParameterDict(**default_value, len_keys=1) -@fixture(scope='function') +@fixture(scope="function") def typeparam(typedict): - return TypeParameter(name='type_param', - type_kind='particle_types', - param_dict=typedict) + return TypeParameter( + name="type_param", type_kind="particle_types", param_dict=typedict + ) @fixture() @@ -37,41 +37,41 @@ def detached(attached): return attached -@fixture(scope='function', params=['typeparam', 'attached', 'detached']) +@fixture(scope="function", params=["typeparam", "attached", "detached"]) def all_(request, typeparam, attached, detached): - if request.param == 'typeparam': + if request.param == "typeparam": return typeparam - elif request.param == 'attached': + elif request.param == "attached": return attached else: return detached def test_set_get_item(all_, default_value): - all_['A'] = {"bar": 2} - all_['B'] = {"bar": 5} - assert all_['A'] == {**default_value, "bar": 2} - assert all_['B'] == 
{**default_value, "bar": 5} - assert all_['A']['bar'] == 2 - assert all_['B']['bar'] == 5 + all_["A"] = {"bar": 2} + all_["B"] = {"bar": 5} + assert all_["A"] == {**default_value, "bar": 2} + assert all_["B"] == {**default_value, "bar": 5} + assert all_["A"]["bar"] == 2 + assert all_["B"]["bar"] == 5 def test_setitem_attached(attached, default_value): new_value = {"bar": 2} - attached['A'] = new_value + attached["A"] = new_value assert attached._cpp_obj.getTypeParam("A") == {**default_value, **new_value} def test_default(all_): assert all_.default == all_.param_dict.default - all_.default = dict(bar=10.) + all_.default = dict(bar=10.0) assert all_.default == all_.param_dict.default - assert all_.default['bar'] == 10. - assert all_.default['foo'] == 1 + assert all_.default["bar"] == 10.0 + assert all_.default["foo"] == 1 def test_type_checking(all_): - bad_inputs = [dict(), dict(A=4), ['A', 4]] + bad_inputs = [dict(), dict(A=4), ["A", 4]] for input_ in bad_inputs: with raises(KeyError): all_[input_] @@ -79,9 +79,9 @@ def test_type_checking(all_): def test_attached_type_checking(attached): with raises(KeyError): - _ = attached['D'] + _ = attached["D"] with raises(KeyError): - attached['D'] = dict(bar=2) + attached["D"] = dict(bar=2) def test_pickling(all_): diff --git a/hoomd/pytest/test_variant.py b/hoomd/pytest/test_variant.py index 9413635e39..780ea00fee 100644 --- a/hoomd/pytest/test_variant.py +++ b/hoomd/pytest/test_variant.py @@ -12,40 +12,40 @@ import pytest _classes = [ - hoomd.variant.Constant, hoomd.variant.Ramp, hoomd.variant.Cycle, - hoomd.variant.Power + hoomd.variant.Constant, + hoomd.variant.Ramp, + hoomd.variant.Cycle, + hoomd.variant.Power, ] _test_kwargs = [ # Constant: first args value=1 - { - 'value': np.linspace(1, 10, 3) - }, + {"value": np.linspace(1, 10, 3)}, # Ramp: first args A=1, B=3, t_start=0, t_ramp=10 { - 'A': np.linspace(1, 10, 3), - 'B': np.linspace(3, 10, 3), - 't_start': (0, 10, 10000000000), - 't_ramp': (10, 20, 2000000000000) + "A": np.linspace(1, 10, 3), + "B": np.linspace(3, 10, 3), + "t_start": (0, 10, 10000000000), + "t_ramp": (10, 20, 2000000000000), }, # Cycle: first args A=2, B=5, t_start=0, t_A=10, t_AB=15, t_B=10, t_BA_20 { - 'A': np.linspace(2, 10, 3), - 'B': np.linspace(5, 10, 3), - 't_start': (0, 10, 10000000000), - 't_A': (10, 20, 2000000000000), - 't_AB': (15, 30, 40000000000), - 't_B': (10, 20, 2000000000000), - 't_BA': (20, 40, 560000000000) + "A": np.linspace(2, 10, 3), + "B": np.linspace(5, 10, 3), + "t_start": (0, 10, 10000000000), + "t_A": (10, 20, 2000000000000), + "t_AB": (15, 30, 40000000000), + "t_B": (10, 20, 2000000000000), + "t_BA": (20, 40, 560000000000), }, # Power: first args A=1, B=10, t_start=0, t_ramp=10 { - 'A': np.linspace(1, 10, 3), - 'B': np.linspace(10, 100, 3), - 'power': np.linspace(2, 5, 3), - 't_start': (0, 10, 10000000000), - 't_ramp': (10, 20, 2000000000000) - } + "A": np.linspace(1, 10, 3), + "B": np.linspace(10, 100, 3), + "power": np.linspace(2, 5, 3), + "t_start": (0, 10, 10000000000), + "t_ramp": (10, 20, 2000000000000), + }, ] @@ -65,11 +65,15 @@ def _test_id(value): return None -@pytest.mark.parametrize('cls, kwargs', - ((cls, kwarg) - for cls, kwargs in zip(_classes, _test_kwargs) - for kwarg in _to_kwargs(kwargs)), - ids=_test_id) +@pytest.mark.parametrize( + "cls, kwargs", + ( + (cls, kwarg) + for cls, kwargs in zip(_classes, _test_kwargs) + for kwarg in _to_kwargs(kwargs) + ), + ids=_test_id, +) def test_construction(cls, kwargs): variant = cls(**kwargs) for key, value in kwargs.items(): @@ -77,10 
+81,10 @@ def test_construction(cls, kwargs): _expected_min_max = [ - (1., 1.), - (1., 3.), - (2., 5.), - (1., 10.), + (1.0, 1.0), + (1.0, 3.0), + (2.0, 5.0), + (1.0, 10.0), ] _single_kwargs = [next(_to_kwargs(kwargs)) for kwargs in _test_kwargs] @@ -90,20 +94,24 @@ def variants(): return (cls(**kwargs) for cls, kwargs in zip(_classes, _single_kwargs)) -@pytest.mark.parametrize('variant, expected_min_max', - zip(variants(), _expected_min_max), - ids=_test_id) +@pytest.mark.parametrize( + "variant, expected_min_max", zip(variants(), _expected_min_max), ids=_test_id +) def test_min_max(variant, expected_min_max): assert np.isclose(variant.min, expected_min_max[0]) assert np.isclose(variant.max, expected_min_max[1]) assert np.allclose(variant.range, expected_min_max) -@pytest.mark.parametrize('variant, attrs', - ((variant, kwarg) - for variant, kwargs in zip(variants(), _test_kwargs) - for kwarg in _to_kwargs(kwargs)), - ids=_test_id) +@pytest.mark.parametrize( + "variant, attrs", + ( + (variant, kwarg) + for variant, kwargs in zip(variants(), _test_kwargs) + for kwarg in _to_kwargs(kwargs) + ), + ids=_test_id, +) def test_setattr(variant, attrs): for attr, value in attrs.items(): setattr(variant, attr, value) @@ -111,7 +119,6 @@ def test_setattr(variant, attrs): def constant_eval(value): - def expected_value(timestep): return value @@ -119,14 +126,13 @@ def expected_value(timestep): def power_eval(A, B, power, t_start, t_ramp): - def expected_value(timestep): if timestep < t_start: return A elif timestep < t_start + t_ramp: - inv_a, inv_b = (A**(1 / power)), (B**(1 / power)) + inv_a, inv_b = (A ** (1 / power)), (B ** (1 / power)) frac = (timestep - t_start) / t_ramp - return ((inv_b * frac) + ((1 - frac) * inv_a))**power + return ((inv_b * frac) + ((1 - frac) * inv_a)) ** power else: return B @@ -134,7 +140,6 @@ def expected_value(timestep): def ramp_eval(A, B, t_start, t_ramp): - def expected_value(timestep): if timestep < t_start: return A @@ -169,12 +174,17 @@ def expected_value(timestep): _eval_constructors = [constant_eval, ramp_eval, cycle_eval, power_eval] -@pytest.mark.parametrize('variant, evaluator, kwargs', - ((variant, evaluator, kwarg) - for variant, evaluator, kwargs in zip( - variants(), _eval_constructors, _test_kwargs) - for kwarg in _to_kwargs(kwargs)), - ids=_test_id) +@pytest.mark.parametrize( + "variant, evaluator, kwargs", + ( + (variant, evaluator, kwarg) + for variant, evaluator, kwargs in zip( + variants(), _eval_constructors, _test_kwargs + ) + for kwarg in _to_kwargs(kwargs) + ), + ids=_test_id, +) def test_evaulation(variant, evaluator, kwargs): for attr, value in kwargs.items(): setattr(variant, attr, value) @@ -186,13 +196,12 @@ def test_evaulation(variant, evaluator, kwargs): class CustomVariant(hoomd.variant.Variant): - def __init__(self): hoomd.variant.Variant.__init__(self) self._a = 1 def __call__(self, timestep): - return (float(timestep)**(1 / 2)) + return float(timestep) ** (1 / 2) def _min(self): return 0.0 @@ -205,9 +214,10 @@ def __eq__(self, other): @pytest.mark.parametrize( - 'variant', + "variant", (variant for variant in itertools.chain(variants(), (CustomVariant(),))), - ids=_test_id) + ids=_test_id, +) def test_pickling(variant): # This also tests equality of objects with the same attributes pickling_check(variant) @@ -219,15 +229,14 @@ def test_custom(): # test that the custom variant can be called from c++ for i in range(10000): - assert hoomd._hoomd._test_variant_call(c, i) == float(i)**(1 / 2) + assert hoomd._hoomd._test_variant_call(c, i) 
== float(i) ** (1 / 2) for i in range(10000000000, 10000010000): - assert hoomd._hoomd._test_variant_call(c, i) == float(i)**(1 / 2) + assert hoomd._hoomd._test_variant_call(c, i) == float(i) ** (1 / 2) assert hoomd._hoomd._test_variant_min(c) == 0.0 assert hoomd._hoomd._test_variant_max(c) == 1.0 pkled_variant = pickle.loads(pickle.dumps(c)) for i in range(0, 10000, 100): - assert (hoomd._hoomd._test_variant_call(pkled_variant, - i) == float(i)**(1 / 2)) + assert hoomd._hoomd._test_variant_call(pkled_variant, i) == float(i) ** (1 / 2) diff --git a/hoomd/pytest_plugin_validate.py b/hoomd/pytest_plugin_validate.py index d261010ce8..9df93ac729 100644 --- a/hoomd/pytest_plugin_validate.py +++ b/hoomd/pytest_plugin_validate.py @@ -25,12 +25,13 @@ def skip_validate(request): Pass the command line option --validate to enable these tests. """ - if request.node.get_closest_marker('validate'): + if request.node.get_closest_marker("validate"): if not request.config.getoption("validate"): - pytest.skip('Validation tests not requested.') + pytest.skip("Validation tests not requested.") def pytest_configure(config): """Define the ``validate`` marker.""" config.addinivalue_line( - "markers", "validate: Tests that perform long-running validations.") + "markers", "validate: Tests that perform long-running validations." + ) diff --git a/hoomd/simulation.py b/hoomd/simulation.py index b25db38a8c..828166aa46 100644 --- a/hoomd/simulation.py +++ b/hoomd/simulation.py @@ -17,6 +17,7 @@ logger = hoomd.logging.Logger() """ + import inspect import hoomd._hoomd as _hoomd @@ -72,8 +73,9 @@ def device(self): @device.setter def device(self, value): - raise ValueError("Device cannot be removed or replaced once in " - "Simulation object.") + raise ValueError( + "Device cannot be removed or replaced once in " "Simulation object." + ) @log def timestep(self): @@ -87,10 +89,10 @@ def timestep(self): to override values from ``create_`` methods:: sim.timestep = 5000 - sim.create_state_from_gsd('gsd_at_step_10000000.gsd') + sim.create_state_from_gsd("gsd_at_step_10000000.gsd") assert sim.timestep == 5000 """ - if not hasattr(self, '_cpp_sys'): + if not hasattr(self, "_cpp_sys"): return self._timestep else: return self._cpp_sys.getCurrentTimeStep() @@ -136,7 +138,8 @@ def seed(self, v): v_int = v_int & SEED_MAX self.device._cpp_msg.warning( f"Provided seed {v} is larger than {SEED_MAX}. " - f"Truncating to {v_int}.\n") + f"Truncating to {v_int}.\n" + ) self._seed = v_int @@ -166,10 +169,12 @@ def _init_communicator(self): # create the c++ Communicator if isinstance(self.device, hoomd.device.CPU): cpp_communicator = _hoomd.Communicator( - self.state._cpp_sys_def, decomposition) + self.state._cpp_sys_def, decomposition + ) else: cpp_communicator = _hoomd.CommunicatorGPU( - self.state._cpp_sys_def, decomposition) + self.state._cpp_sys_def, decomposition + ) # set Communicator in C++ System and SystemDefinition self._cpp_sys.setCommunicator(cpp_communicator) @@ -183,12 +188,12 @@ def _init_communicator(self): def _warn_if_seed_unset(self): if self.seed is None: self.device._cpp_msg.warning( - "Simulation.seed is not set, using default seed=0\n") + "Simulation.seed is not set, using default seed=0\n" + ) - def create_state_from_gsd(self, - filename, - frame=-1, - domain_decomposition=(None, None, None)): + def create_state_from_gsd( + self, filename, frame=-1, domain_decomposition=(None, None, None) + ): """Create the simulation state from a GSD file. 
Args: @@ -227,10 +232,12 @@ def create_state_from_gsd(self, raise RuntimeError("Cannot initialize more than once\n") filename = _hoomd.mpi_bcast_str(filename, self.device._cpp_exec_conf) # Grab snapshot and timestep - reader = _hoomd.GSDReader(self.device._cpp_exec_conf, filename, - abs(frame), frame < 0) - snapshot = Snapshot._from_cpp_snapshot(reader.getSnapshot(), - self.device.communicator) + reader = _hoomd.GSDReader( + self.device._cpp_exec_conf, filename, abs(frame), frame < 0 + ) + snapshot = Snapshot._from_cpp_snapshot( + reader.getSnapshot(), self.device.communicator + ) step = reader.getTimeStep() if self.timestep is None else self.timestep self._state = State(self, snapshot, domain_decomposition) @@ -239,9 +246,9 @@ def create_state_from_gsd(self, self._init_system(step) - def create_state_from_snapshot(self, - snapshot, - domain_decomposition=(None, None, None)): + def create_state_from_snapshot( + self, snapshot, domain_decomposition=(None, None, None) + ): """Create the simulation state from a `Snapshot`. Args: @@ -290,14 +297,12 @@ def create_state_from_snapshot(self, if isinstance(snapshot, Snapshot): # snapshot is hoomd.Snapshot self._state = State(self, snapshot, domain_decomposition) - elif _match_class_path(snapshot, 'gsd.hoomd.Frame'): + elif _match_class_path(snapshot, "gsd.hoomd.Frame"): # snapshot is gsd.hoomd.Frame (gsd 2.8+, 3.x) - snapshot = Snapshot.from_gsd_frame(snapshot, - self._device.communicator) + snapshot = Snapshot.from_gsd_frame(snapshot, self._device.communicator) self._state = State(self, snapshot, domain_decomposition) else: - raise TypeError( - "Snapshot must be a hoomd.Snapshot or gsd.hoomd.Frame") + raise TypeError("Snapshot must be a hoomd.Snapshot or gsd.hoomd.Frame") step = 0 if self.timestep is not None: @@ -333,7 +338,8 @@ def operations(self, operations): if operations._scheduled or operations._simulation is not None: raise RuntimeError( "Cannot add `hoomd.Operations` object that belongs to " - "another `hoomd.Simulation` object.") + "another `hoomd.Simulation` object." + ) # Switch out `hoomd.Operations` objects. reschedule = False if self._operations._scheduled: @@ -369,7 +375,7 @@ def tps(self): .. code-block:: python - logger.add(obj=simulation, quantities=['tps']) + logger.add(obj=simulation, quantities=["tps"]) """ if self._state is None: return None @@ -394,7 +400,7 @@ def walltime(self): .. code-block:: python - logger.add(obj=simulation, quantities=['walltime']) + logger.add(obj=simulation, quantities=["walltime"]) """ if self._state is None: return 0.0 @@ -412,7 +418,7 @@ def final_timestep(self): .. code-block:: python - logger.add(obj=simulation, quantities=['final_timestep']) + logger.add(obj=simulation, quantities=["final_timestep"]) """ if self._state is None: return self.timestep @@ -430,7 +436,7 @@ def initial_timestep(self): .. code-block:: python - logger.add(obj=simulation, quantities=['initial_timestep']) + logger.add(obj=simulation, quantities=["initial_timestep"]) """ if self._state is None: return self.timestep @@ -458,17 +464,17 @@ def always_compute_pressure(self): simulation.always_compute_pressure = True """ - if not hasattr(self, '_cpp_sys'): + if not hasattr(self, "_cpp_sys"): return False else: return self._cpp_sys.getPressureFlag() @always_compute_pressure.setter def always_compute_pressure(self, value): - if not hasattr(self, '_cpp_sys'): + if not hasattr(self, "_cpp_sys"): # TODO make this work when not attached by automatically setting # flag when state object is instantiated. 
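Pulled together, the docstrings above describe the usual initialization order: construct the Simulation, optionally preset `timestep`, create the state from a GSD file, then set state-dependent flags and run. A sketch of that path, in which the device/seed arguments and the GSD filename are illustrative only:

import hoomd

sim = hoomd.Simulation(device=hoomd.device.CPU(), seed=1)
sim.timestep = 5000  # takes precedence over the timestep stored in the file
sim.create_state_from_gsd("gsd_at_step_10000000.gsd")
assert sim.timestep == 5000
sim.always_compute_pressure = True  # valid only once the state exists
sim.run(0)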
- raise RuntimeError('Cannot set flag without state') + raise RuntimeError("Cannot set flag without state") else: self._cpp_sys.setPressureFlag(value) @@ -540,18 +546,18 @@ def run(self, steps, write_at_start=False): simulation.run(1_000) """ # check if initialization has occurred - if not hasattr(self, '_cpp_sys'): - raise RuntimeError('Cannot run before state is set.') + if not hasattr(self, "_cpp_sys"): + raise RuntimeError("Cannot run before state is set.") if self._state._in_context_manager: raise RuntimeError( - "Cannot call run inside of a local snapshot context manager.") + "Cannot call run inside of a local snapshot context manager." + ) if not self.operations._scheduled: self.operations._schedule() steps_int = int(steps) if steps_int < 0 or steps_int > TIMESTEP_MAX - 1: - raise ValueError(f"steps must be in the range [0, " - f"{TIMESTEP_MAX - 1}]") + raise ValueError(f"steps must be in the range [0, " f"{TIMESTEP_MAX - 1}]") self._cpp_sys.run(steps_int, write_at_start) @@ -563,5 +569,7 @@ def __del__(self): def _match_class_path(obj, *matches): - return any(cls.__module__ + '.' + cls.__name__ in matches - for cls in inspect.getmro(type(obj))) + return any( + cls.__module__ + "." + cls.__name__ in matches + for cls in inspect.getmro(type(obj)) + ) diff --git a/hoomd/snapshot.py b/hoomd/snapshot.py index 987f7a8b56..62239cdb86 100644 --- a/hoomd/snapshot.py +++ b/hoomd/snapshot.py @@ -16,7 +16,6 @@ class _ConfigurationData: - def __init__(self, cpp_obj): self._cpp_obj = cpp_obj @@ -28,8 +27,14 @@ def dimensions(self): def box(self): b = self._cpp_obj._global_box L = b.getL() - return (L.x, L.y, L.z, b.getTiltFactorXY(), b.getTiltFactorXZ(), - b.getTiltFactorYZ()) + return ( + L.x, + L.y, + L.z, + b.getTiltFactorXY(), + b.getTiltFactorXZ(), + b.getTiltFactorYZ(), + ) @box.setter def box(self, box): @@ -38,7 +43,8 @@ def box(self, box): except Exception: raise ValueError( f"{box} is not convertible to a hoomd.Box object using " - "hoomd.Box.from_box.") + "hoomd.Box.from_box." 
+ ) self._cpp_obj._dimensions = new_box.dimensions self._cpp_obj._global_box = new_box._cpp_obj @@ -182,7 +188,7 @@ def particles(self): if self.communicator.rank == 0: return self._cpp_obj.particles else: - raise RuntimeError('Snapshot data is only present on rank 0') + raise RuntimeError("Snapshot data is only present on rank 0") @property def bonds(self): @@ -209,13 +215,13 @@ def bonds(self): if snapshot.communicator.rank == 0: snapshot.bonds.N = 2 snapshot.bonds.group[:] = [[0, 1], [2, 3]] - snapshot.bonds.types = ['A-B'] + snapshot.bonds.types = ["A-B"] snapshot.bonds.typeid[:] = [0, 0] """ if self.communicator.rank == 0: return self._cpp_obj.bonds else: - raise RuntimeError('Snapshot data is only present on rank 0') + raise RuntimeError("Snapshot data is only present on rank 0") @property def angles(self): @@ -242,13 +248,13 @@ def angles(self): if snapshot.communicator.rank == 0: snapshot.angles.N = 1 snapshot.angles.group[:] = [[0, 1, 2]] - snapshot.angles.types = ['A-B-B'] + snapshot.angles.types = ["A-B-B"] snapshot.angles.typeid[:] = [0] """ if self.communicator.rank == 0: return self._cpp_obj.angles else: - raise RuntimeError('Snapshot data is only present on rank 0') + raise RuntimeError("Snapshot data is only present on rank 0") @property def dihedrals(self): @@ -275,13 +281,13 @@ def dihedrals(self): if snapshot.communicator.rank == 0: snapshot.dihedrals.N = 1 snapshot.dihedrals.group[:] = [[0, 1, 2, 3]] - snapshot.dihedrals.types = ['A-B-B-A'] + snapshot.dihedrals.types = ["A-B-B-A"] snapshot.dihedrals.typeid[:] = [0] """ if self.communicator.rank == 0: return self._cpp_obj.dihedrals else: - raise RuntimeError('Snapshot data is only present on rank 0') + raise RuntimeError("Snapshot data is only present on rank 0") @property def impropers(self): @@ -306,13 +312,13 @@ def impropers(self): if snapshot.communicator.rank == 0: snapshot.impropers.N = 1 snapshot.impropers.group[:] = [[0, 1, 2, 3]] - snapshot.impropers.types = ['A-B-B-A'] + snapshot.impropers.types = ["A-B-B-A"] snapshot.impropers.typeid[:] = [0] """ if self.communicator.rank == 0: return self._cpp_obj.impropers else: - raise RuntimeError('Snapshot data is only present on rank 0') + raise RuntimeError("Snapshot data is only present on rank 0") @property def pairs(self): @@ -339,13 +345,13 @@ def pairs(self): if snapshot.communicator.rank == 0: snapshot.pairs.N = 2 snapshot.pairs.group[:] = [[0, 1], [2, 3]] - snapshot.pairs.types = ['A-B'] + snapshot.pairs.types = ["A-B"] snapshot.pairs.typeid[:] = [0, 0] """ if self.communicator.rank == 0: return self._cpp_obj.pairs else: - raise RuntimeError('Snapshot data is only present on rank 0') + raise RuntimeError("Snapshot data is only present on rank 0") @property def constraints(self): @@ -375,7 +381,7 @@ def constraints(self): if self.communicator.rank == 0: return self._cpp_obj.constraints else: - raise RuntimeError('Snapshot data is only present on rank 0') + raise RuntimeError("Snapshot data is only present on rank 0") @property def mpcd(self): @@ -483,8 +489,9 @@ def from_gsd_frame(cls, gsd_snap, communicator): """ snap = cls(communicator=communicator) - def set_properties(snap_section, gsd_snap_section, properties, - array_properties): + def set_properties( + snap_section, gsd_snap_section, properties, array_properties + ): for prop in properties: gsd_prop = getattr(gsd_snap_section, prop, None) if gsd_prop is not None: @@ -495,22 +502,38 @@ def set_properties(snap_section, gsd_snap_section, properties, getattr(snap_section, prop)[:] = gsd_prop if 
communicator.rank == 0: - gsd_snap.validate() - set_properties(snap.particles, gsd_snap.particles, ('N', 'types'), - ('angmom', 'body', 'charge', 'diameter', 'image', - 'mass', 'moment_inertia', 'orientation', 'position', - 'typeid', 'velocity')) - - for section in ('angles', 'bonds', 'dihedrals', 'impropers', - 'pairs'): - set_properties(getattr(snap, - section), getattr(gsd_snap, section), - ('N', 'types'), ('group', 'typeid')) - - set_properties(snap.constraints, gsd_snap.constraints, ('N',), - ('group', 'value')) + set_properties( + snap.particles, + gsd_snap.particles, + ("N", "types"), + ( + "angmom", + "body", + "charge", + "diameter", + "image", + "mass", + "moment_inertia", + "orientation", + "position", + "typeid", + "velocity", + ), + ) + + for section in ("angles", "bonds", "dihedrals", "impropers", "pairs"): + set_properties( + getattr(snap, section), + getattr(gsd_snap, section), + ("N", "types"), + ("group", "typeid"), + ) + + set_properties( + snap.constraints, gsd_snap.constraints, ("N",), ("group", "value") + ) # Set box attribute if gsd_snap.configuration.box is not None: diff --git a/hoomd/state.py b/hoomd/state.py index 091f54d546..68992d9674 100644 --- a/hoomd/state.py +++ b/hoomd/state.py @@ -10,6 +10,7 @@ simulation = hoomd.util.make_example_simulation() """ + import weakref from collections import defaultdict @@ -31,8 +32,10 @@ def _create_domain_decomposition(device, box, domain_decomposition): domain_decomposition: See Simulation.create_state_from_* for a description. """ - if (not isinstance(domain_decomposition, collections.abc.Sequence) - or len(domain_decomposition) != 3): + if ( + not isinstance(domain_decomposition, collections.abc.Sequence) + or len(domain_decomposition) != 3 + ): raise TypeError("domain_decomposition must be a length 3 sequence") initialize_grid = False @@ -61,15 +64,15 @@ def _create_domain_decomposition(device, box, domain_decomposition): return None if initialize_fractions: - fractions = [ - v[:-1] if v is not None else [] for v in domain_decomposition - ] - result = _hoomd.DomainDecomposition(device._cpp_exec_conf, box.getL(), - *fractions) + fractions = [v[:-1] if v is not None else [] for v in domain_decomposition] + result = _hoomd.DomainDecomposition( + device._cpp_exec_conf, box.getL(), *fractions + ) else: grid = [v if v is not None else 0 for v in domain_decomposition] - result = _hoomd.DomainDecomposition(device._cpp_exec_conf, box.getL(), - *grid, False) + result = _hoomd.DomainDecomposition( + device._cpp_exec_conf, box.getL(), *grid, False + ) return result @@ -252,16 +255,17 @@ def __init__(self, simulation, snapshot, domain_decomposition): self._simulation = simulation snapshot._broadcast_box() decomposition = _create_domain_decomposition( - simulation.device, snapshot._cpp_obj._global_box, - domain_decomposition) + simulation.device, snapshot._cpp_obj._global_box, domain_decomposition + ) if decomposition is not None: self._cpp_sys_def = _hoomd.SystemDefinition( - snapshot._cpp_obj, simulation.device._cpp_exec_conf, - decomposition) + snapshot._cpp_obj, simulation.device._cpp_exec_conf, decomposition + ) else: self._cpp_sys_def = _hoomd.SystemDefinition( - snapshot._cpp_obj, simulation.device._cpp_exec_conf) + snapshot._cpp_obj, simulation.device._cpp_exec_conf + ) # Necessary for local snapshot API. This is used to ensure two local # snapshots are not contexted at once. 
@@ -306,8 +310,9 @@ def get_snapshot(self): snapshot = simulation.state.get_snapshot() """ cpp_snapshot = self._cpp_sys_def.takeSnapshot_double() - return Snapshot._from_cpp_snapshot(cpp_snapshot, - self._simulation.device.communicator) + return Snapshot._from_cpp_snapshot( + cpp_snapshot, self._simulation.device.communicator + ) def set_snapshot(self, snapshot): """Restore the state of the simulation from a snapshot. @@ -343,7 +348,8 @@ def set_snapshot(self, snapshot): """ if self._in_context_manager: raise RuntimeError( - "Cannot set state to new snapshot inside local snapshot.") + "Cannot set state to new snapshot inside local snapshot." + ) if self._simulation.device.communicator.rank == 0: if snapshot.particles.types != self.particle_types: raise RuntimeError("Particle types must remain the same") @@ -441,12 +447,14 @@ def types(self): `dihedral_types`, `improper_types`, and `special_pair_types` into a dictionary with keys matching the property names. """ - return dict(particle_types=self.particle_types, - bond_types=self.bond_types, - angle_types=self.angle_types, - dihedral_types=self.dihedral_types, - improper_types=self.improper_types, - special_pair_types=self.special_pair_types) + return dict( + particle_types=self.particle_types, + bond_types=self.bond_types, + angle_types=self.angle_types, + dihedral_types=self.dihedral_types, + improper_types=self.improper_types, + special_pair_types=self.special_pair_types, + ) @property def N_particles(self): # noqa: N802 - allow N in name @@ -563,17 +571,23 @@ def set_box(self, box): """ if self._in_context_manager: raise RuntimeError( - "Cannot set system box within local snapshot context manager.") + "Cannot set system box within local snapshot context manager." + ) try: box = Box.from_box(box) except Exception: - raise ValueError('{} is not convertible to hoomd.Box using ' - 'hoomd.Box.from_box'.format(box)) + raise ValueError( + "{} is not convertible to hoomd.Box using " "hoomd.Box.from_box".format( + box + ) + ) if box.dimensions != self._cpp_sys_def.getNDimensions(): self._simulation.device._cpp_msg.warning( - "Box changing dimensions from {} to {}." - "".format(self._cpp_sys_def.getNDimensions(), box.dimensions)) + "Box changing dimensions from {} to {}." "".format( + self._cpp_sys_def.getNDimensions(), box.dimensions + ) + ) self._cpp_sys_def.setNDimensions(box.dimensions) self._cpp_sys_def.getParticleData().setGlobalBox(box._cpp_obj) @@ -616,8 +630,8 @@ def _get_group(self, filter_): else: if isinstance(filter_, hoomd.filter.CustomFilter): group = _hoomd.ParticleGroup( - self._cpp_sys_def, - _hoomd.ParticleFilterCustom(filter_, self)) + self._cpp_sys_def, _hoomd.ParticleFilterCustom(filter_, self) + ) else: group = _hoomd.ParticleGroup(self._cpp_sys_def, filter_) group_cache[cls][filter_] = group @@ -694,7 +708,8 @@ def cpu_local_snapshot(self): if self._in_context_manager: raise RuntimeError( "Cannot enter cpu_local_snapshot context manager inside " - "another local_snapshot context manager.") + "another local_snapshot context manager." + ) return LocalSnapshot(self) @property @@ -747,12 +762,12 @@ def gpu_local_snapshot(self): single value is of order :math:`O(1)`. 
""" if not isinstance(self._simulation.device, hoomd.device.GPU): - raise RuntimeError( - "Cannot access gpu_snapshot with a non GPU device.") + raise RuntimeError("Cannot access gpu_snapshot with a non GPU device.") elif self._in_context_manager: raise RuntimeError( "Cannot enter gpu_local_snapshot context manager inside " - "another local_snapshot context manager.") + "another local_snapshot context manager." + ) else: return LocalSnapshotGPU(self) @@ -792,8 +807,8 @@ def thermalize_particle_momenta(self, filter, kT): .. code-block:: python simulation.state.thermalize_particle_momenta( - filter=hoomd.filter.All(), - kT=1.5) + filter=hoomd.filter.All(), kT=1.5 + ) """ self._simulation._warn_if_seed_unset() group = self._get_group(filter) @@ -806,14 +821,20 @@ def domain_decomposition_split_fractions(self): """ particle_data = self._cpp_sys_def.getParticleData() - if (not hoomd.version.mpi_enabled - or particle_data.getDomainDecomposition() is None): + if ( + not hoomd.version.mpi_enabled + or particle_data.getDomainDecomposition() is None + ): return ([], [], []) - return tuple([ - list(particle_data.getDomainDecomposition().getCumulativeFractions( - dir))[1:-1] for dir in range(3) - ]) + return tuple( + [ + list( + particle_data.getDomainDecomposition().getCumulativeFractions(dir) + )[1:-1] + for dir in range(3) + ] + ) @property def domain_decomposition(self): @@ -822,14 +843,19 @@ def domain_decomposition(self): """ particle_data = self._cpp_sys_def.getParticleData() - if (not hoomd.version.mpi_enabled - or particle_data.getDomainDecomposition() is None): + if ( + not hoomd.version.mpi_enabled + or particle_data.getDomainDecomposition() is None + ): return (1, 1, 1) - return tuple([ - len(particle_data.getDomainDecomposition().getCumulativeFractions( - dir)) - 1 for dir in range(3) - ]) + return tuple( + [ + len(particle_data.getDomainDecomposition().getCumulativeFractions(dir)) + - 1 + for dir in range(3) + ] + ) @property def _simulation(self): diff --git a/hoomd/trigger.py b/hoomd/trigger.py index 7bb98bcfbe..ec8f2f1288 100644 --- a/hoomd/trigger.py +++ b/hoomd/trigger.py @@ -49,12 +49,11 @@ class Trigger(_hoomd.Trigger): .. 
code-block:: python class CustomTrigger(hoomd.trigger.Trigger): - def __init__(self): hoomd.trigger.Trigger.__init__(self) def compute(self, timestep): - return (timestep**(1 / 2)).is_integer() + return (timestep ** (1 / 2)).is_integer() Methods: __call__(timestep): @@ -143,13 +142,15 @@ def __init__(self, period, phase=0): def __str__(self): """Human readable representation of the trigger as a string.""" - return f"hoomd.trigger.Periodic(period={self.period}, " \ - f"phase={self.phase})" + return f"hoomd.trigger.Periodic(period={self.period}, " f"phase={self.phase})" def __eq__(self, other): """Test for equivalent triggers.""" - return (isinstance(other, Periodic) and self.period == other.period - and self.phase == other.phase) + return ( + isinstance(other, Periodic) + and self.period == other.period + and self.phase == other.phase + ) class Before(_hoomd.BeforeTrigger, Trigger): @@ -460,13 +461,13 @@ def __eq__(self, other): """ __all__ = [ - 'After', - 'And', - 'Before', - 'Not', - 'On', - 'Or', - 'Periodic', - 'Trigger', - 'trigger_like', + "After", + "And", + "Before", + "Not", + "On", + "Or", + "Periodic", + "Trigger", + "trigger_like", ] diff --git a/hoomd/tune/__init__.py b/hoomd/tune/__init__.py index 296277baab..363a0994fc 100644 --- a/hoomd/tune/__init__.py +++ b/hoomd/tune/__init__.py @@ -44,19 +44,26 @@ from hoomd.tune.balance import LoadBalancer from hoomd.tune.custom_tuner import CustomTuner from hoomd.tune.attr_tuner import ManualTuneDefinition -from hoomd.tune.solve import (GridOptimizer, GradientDescent, Optimizer, - RootSolver, ScaleSolver, SecantSolver, SolverStep) +from hoomd.tune.solve import ( + GridOptimizer, + GradientDescent, + Optimizer, + RootSolver, + ScaleSolver, + SecantSolver, + SolverStep, +) __all__ = [ - 'CustomTuner', - 'GradientDescent', - 'GridOptimizer', - 'LoadBalancer', - 'ManualTuneDefinition', - 'Optimizer', - 'ParticleSorter', - 'RootSolver', - 'ScaleSolver', - 'SecantSolver', - 'SolverStep', + "CustomTuner", + "GradientDescent", + "GridOptimizer", + "LoadBalancer", + "ManualTuneDefinition", + "Optimizer", + "ParticleSorter", + "RootSolver", + "ScaleSolver", + "SecantSolver", + "SolverStep", ] diff --git a/hoomd/tune/attr_tuner.py b/hoomd/tune/attr_tuner.py index 209469d713..5e066ac2bd 100644 --- a/hoomd/tune/attr_tuner.py +++ b/hoomd/tune/attr_tuner.py @@ -33,8 +33,9 @@ def in_domain(self, value): return True else: lower_bound, upper_bound = self.domain - return ((lower_bound is None or lower_bound <= value) - and (upper_bound is None or value <= upper_bound)) + return (lower_bound is None or lower_bound <= value) and ( + upper_bound is None or value <= upper_bound + ) def clamp_into_domain(self, value): """Return the closest value within the domain. 
@@ -217,12 +218,15 @@ def _set_target(self, value): def __hash__(self): """Compute a hash of the tune definition.""" - return hash((self._user_get_x, self._user_set_x, self._user_get_y, - self._target)) + return hash( + (self._user_get_x, self._user_set_x, self._user_get_y, self._target) + ) def __eq__(self, other): """Test for equality.""" - return (self._user_get_x == other._user_get_x - and self._user_set_x == other._user_set_x - and self._user_get_y == other._user_get_y - and self._target == other._target) + return ( + self._user_get_x == other._user_get_x + and self._user_set_x == other._user_set_x + and self._user_get_y == other._user_get_y + and self._target == other._target + ) diff --git a/hoomd/tune/balance.py b/hoomd/tune/balance.py index a8dd6926cb..f272c4ae7a 100644 --- a/hoomd/tune/balance.py +++ b/hoomd/tune/balance.py @@ -91,33 +91,24 @@ class LoadBalancer(Tuner): __doc__ = __doc__.replace("{inherited}", Tuner._doc_inherited) - def __init__(self, - trigger, - x=True, - y=True, - z=True, - tolerance=1.02, - max_iterations=1): + def __init__( + self, trigger, x=True, y=True, z=True, tolerance=1.02, max_iterations=1 + ): super().__init__(trigger) - defaults = dict(x=x, - y=y, - z=z, - tolerance=tolerance, - max_iterations=max_iterations) - load_balancer_params = ParameterDict(x=bool, - y=bool, - z=bool, - max_iterations=int, - tolerance=float) + defaults = dict( + x=x, y=y, z=z, tolerance=tolerance, max_iterations=max_iterations + ) + load_balancer_params = ParameterDict( + x=bool, y=bool, z=bool, max_iterations=int, tolerance=float + ) self._param_dict.update(load_balancer_params) self._param_dict.update(defaults) def _attach_hook(self): if isinstance(self._simulation.device, hoomd.device.GPU): - cpp_cls = getattr(_hoomd, 'LoadBalancerGPU') + cpp_cls = getattr(_hoomd, "LoadBalancerGPU") else: - cpp_cls = getattr(_hoomd, 'LoadBalancer') + cpp_cls = getattr(_hoomd, "LoadBalancer") - self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, - self.trigger) + self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, self.trigger) diff --git a/hoomd/tune/custom_tuner.py b/hoomd/tune/custom_tuner.py index 4c90c2b7d0..674ae91145 100644 --- a/hoomd/tune/custom_tuner.py +++ b/hoomd/tune/custom_tuner.py @@ -13,13 +13,12 @@ def act(self, timestep): custom_action = ExampleAction() """ -from hoomd.custom import (Action, CustomOperation) +from hoomd.custom import Action, CustomOperation from hoomd.custom.custom_operation import _InternalCustomOperation from hoomd.operation import Tuner class _TunerProperty: - @property def tuner(self): return self._action @@ -29,8 +28,7 @@ def tuner(self, tuner): if isinstance(tuner, Action): self._action = tuner else: - raise ValueError( - "updater must be an instance of hoomd.custom.Action") + raise ValueError("updater must be an instance of hoomd.custom.Action") class CustomTuner(CustomOperation, _TunerProperty, Tuner): @@ -54,7 +52,8 @@ class CustomTuner(CustomOperation, _TunerProperty, Tuner): custom_tuner = hoomd.tune.CustomTuner( action=custom_action, - trigger=hoomd.trigger.Periodic(1000)) + trigger=hoomd.trigger.Periodic(1000), + ) simulation.operations.tuners.append(custom_tuner) See Also: @@ -64,12 +63,13 @@ class CustomTuner(CustomOperation, _TunerProperty, Tuner): `hoomd.write.CustomWriter` """ - _cpp_list_name = 'tuners' - _cpp_class_name = 'PythonTuner' + + _cpp_list_name = "tuners" + _cpp_class_name = "PythonTuner" __doc__ += CustomOperation._doc_inherited class _InternalCustomTuner(_InternalCustomOperation, Tuner): - _cpp_list_name = 
'tuners' - _cpp_class_name = 'PythonTuner' + _cpp_list_name = "tuners" + _cpp_class_name = "PythonTuner" _operation_func = "tune" diff --git a/hoomd/tune/solve.py b/hoomd/tune/solve.py index 9fc5d08c64..2fc65f474a 100644 --- a/hoomd/tune/solve.py +++ b/hoomd/tune/solve.py @@ -88,6 +88,7 @@ class RootSolver(SolverStep): For solving for a non-zero value, :math:`f(x) - y_t = 0` is solved. """ + pass @@ -131,11 +132,7 @@ class ScaleSolver(RootSolver): This solver is only usable when quantities are strictly positive. """ - def __init__(self, - max_scale=2.0, - gamma=2.0, - correlation="positive", - tol=1e-5): + def __init__(self, max_scale=2.0, gamma=2.0, correlation="positive", tol=1e-5): self.max_scale = max_scale self.gamma = gamma self.correlation = correlation.lower() @@ -178,7 +175,8 @@ def __eq__(self, other): return False return all( getattr(self, attr) == getattr(other, attr) - for attr in ("max_scale", "gamma", "correlation", "tol")) + for attr in ("max_scale", "gamma", "correlation", "tol") + ) class _GradientHelper: @@ -309,11 +307,13 @@ def __eq__(self, other): return False return all( getattr(self, attr) == getattr(other, attr) - for attr in ("gamma", "tol", "_counters", "_previous_pair")) + for attr in ("gamma", "tol", "_counters", "_previous_pair") + ) class Optimizer(SolverStep): """Abstract base class for optimizing :math:`f(x)`.""" + pass @@ -372,12 +372,14 @@ class GradientDescent(Optimizer, _GradientHelper): _max_allowable_counter = 3 - def __init__(self, - alpha: float = 0.1, - kappa: typing.Optional[np.ndarray] = None, - tol: float = 1e-5, - maximize: bool = True, - max_delta: typing.Optional[float] = None): + def __init__( + self, + alpha: float = 0.1, + kappa: typing.Optional[np.ndarray] = None, + tol: float = 1e-5, + maximize: bool = True, + max_delta: typing.Optional[float] = None, + ): self.alpha = alpha self.kappa = None if kappa is None else kappa if self.kappa is not None: @@ -426,8 +428,7 @@ def alpha(self, new_alpha: hoomd.variant.variant_like): elif isinstance(new_alpha, hoomd.variant.Variant): self._alpha = new_alpha else: - raise TypeError( - "Expected either a hoomd.variant.variant_like object.") + raise TypeError("Expected either a hoomd.variant.variant_like object.") def solve_one(self, tunable): """Solve one step.""" @@ -495,12 +496,11 @@ def __eq__(self, other): return False return all( getattr(self, attr) == getattr(other, attr) - for attr in ("alpha", "tol", "_previous_pair")) and np.array_equal( - self.kappa, other.kappa) + for attr in ("alpha", "tol", "_previous_pair") + ) and np.array_equal(self.kappa, other.kappa) class _Repeater: - def __init__(self, value): self._a = value @@ -532,10 +532,7 @@ class GridOptimizer(Optimizer): function (defaults to ``True``). 
""" - def __init__(self, - n_bins: int = 5, - n_rounds: int = 1, - maximize: bool = True): + def __init__(self, n_bins: int = 5, n_rounds: int = 1, maximize: bool = True): self._n_bins = n_bins self._n_rounds = n_rounds self._opt = max if maximize else min @@ -557,7 +554,7 @@ def solve_one(self, tunable): # Need to increment round or finish optimizing if len(bin_y) == self._n_bins: index = bin_y.index(self._opt(bin_y)) - boundaries = self._bins[tunable][index:index + 2] + boundaries = self._bins[tunable][index : index + 2] if self._round[tunable] == self._n_rounds: center = sum(boundaries) / 2 tunable.x = center @@ -591,14 +588,13 @@ def _initial_binning(self, tunable): """Get the initial bin boundaries for a tunable.""" min_, max_ = tunable.domain if max_ is None or min_ is None: - raise ValueError( - "GridOptimizer requires max and min x value to tune.") + raise ValueError("GridOptimizer requires max and min x value to tune.") self._bins[tunable] = np.linspace(min_, max_, self._n_bins + 1) def _get_bin_center(self, tunable, index): """Get the bin center for a given tunable and bin index.""" min_, max_ = tunable.domain - return sum(self._bins[tunable][index:index + 2]) / 2 + return sum(self._bins[tunable][index : index + 2]) / 2 def __eq__(self, other): """Test for equality.""" @@ -606,9 +602,13 @@ def __eq__(self, other): return NotImplemented if not isinstance(other, type(self)): return False - return (all( - getattr(self, attr) == getattr(other, attr) - for attr in ("_n_bins", "_n_rounds", "_round", "_solved", "_bin_y")) - and self._bins.keys() == other._bins.keys() and all( - np.array_equal(a, other._bins[key]) - for a, key in self._bins.items())) + return ( + all( + getattr(self, attr) == getattr(other, attr) + for attr in ("_n_bins", "_n_rounds", "_round", "_solved", "_bin_y") + ) + and self._bins.keys() == other._bins.keys() + and all( + np.array_equal(a, other._bins[key]) for a, key in self._bins.items() + ) + ) diff --git a/hoomd/tune/sorter.py b/hoomd/tune/sorter.py index 4e7d975218..1f716bb22c 100644 --- a/hoomd/tune/sorter.py +++ b/hoomd/tune/sorter.py @@ -50,16 +50,19 @@ class ParticleSorter(Tuner): def __init__(self, trigger=200, grid=None): super().__init__(trigger) sorter_params = ParameterDict( - grid=OnlyTypes(int, - postprocess=ParticleSorter._to_power_of_two, - preprocess=ParticleSorter._natural_number, - allow_none=True)) + grid=OnlyTypes( + int, + postprocess=ParticleSorter._to_power_of_two, + preprocess=ParticleSorter._natural_number, + allow_none=True, + ) + ) self._param_dict.update(sorter_params) self.grid = grid @staticmethod def _to_power_of_two(value): - return int(2.**ceil(log2(value))) + return int(2.0 ** ceil(log2(value))) @staticmethod def _natural_number(value): @@ -73,8 +76,7 @@ def _natural_number(value): def _attach_hook(self): if isinstance(self._simulation.device, hoomd.device.GPU): - cpp_cls = getattr(_hoomd, 'SFCPackTunerGPU') + cpp_cls = getattr(_hoomd, "SFCPackTunerGPU") else: - cpp_cls = getattr(_hoomd, 'SFCPackTuner') - self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, - self.trigger) + cpp_cls = getattr(_hoomd, "SFCPackTuner") + self._cpp_obj = cpp_cls(self._simulation.state._cpp_sys_def, self.trigger) diff --git a/hoomd/update/__init__.py b/hoomd/update/__init__.py index e1128dd89b..2fe843d3a0 100644 --- a/hoomd/update/__init__.py +++ b/hoomd/update/__init__.py @@ -16,4 +16,4 @@ from hoomd.update.custom_updater import CustomUpdater from hoomd.update.particle_filter import FilterUpdater -__all__ = ['BoxResize', 'CustomUpdater', 
'FilterUpdater', 'RemoveDrift'] +__all__ = ["BoxResize", "CustomUpdater", "FilterUpdater", "RemoveDrift"] diff --git a/hoomd/update/box_resize.py b/hoomd/update/box_resize.py index e283344bd1..177e163060 100644 --- a/hoomd/update/box_resize.py +++ b/hoomd/update/box_resize.py @@ -76,8 +76,10 @@ class BoxResize(Updater): .. code-block:: python - box_resize = hoomd.update.BoxResize(trigger=hoomd.trigger.Periodic(10), - box=inverse_volume_ramp) + box_resize = hoomd.update.BoxResize( + trigger=hoomd.trigger.Periodic(10), + box=inverse_volume_ramp, + ) simulation.operations.updaters.append(box_resize) {inherited} @@ -108,15 +110,14 @@ class BoxResize(Updater): __doc__ = __doc__.replace("{inherited}", Updater._doc_inherited) def __init__( - self, - trigger, - box, - filter=All(), + self, + trigger, + box, + filter=All(), ): - params = ParameterDict(box=hoomd.variant.box.BoxVariant, - filter=ParticleFilter) + params = ParameterDict(box=hoomd.variant.box.BoxVariant, filter=ParticleFilter) - params.update({'box': box, 'filter': filter}) + params.update({"box": box, "filter": filter}) self._param_dict.update(params) super().__init__(trigger) @@ -124,12 +125,12 @@ def _attach_hook(self): group = self._simulation.state._get_group(self.filter) if isinstance(self._simulation.device, hoomd.device.CPU): self._cpp_obj = _hoomd.BoxResizeUpdater( - self._simulation.state._cpp_sys_def, self.trigger, self.box, - group) + self._simulation.state._cpp_sys_def, self.trigger, self.box, group + ) else: self._cpp_obj = _hoomd.BoxResizeUpdaterGPU( - self._simulation.state._cpp_sys_def, self.trigger, self.box, - group) + self._simulation.state._cpp_sys_def, self.trigger, self.box, group + ) def get_box(self, timestep): """Get the box for a given timestep. @@ -170,18 +171,18 @@ def update(state, box, filter=All()): .. 
code-block:: python - hoomd.update.BoxResize.update(state=simulation.state, - box=box) + hoomd.update.BoxResize.update(state=simulation.state, box=box) """ group = state._get_group(filter) box_variant = hoomd.variant.box.Constant(box) if isinstance(state._simulation.device, hoomd.device.CPU): - updater = _hoomd.BoxResizeUpdater(state._cpp_sys_def, Periodic(1), - box_variant, group) + updater = _hoomd.BoxResizeUpdater( + state._cpp_sys_def, Periodic(1), box_variant, group + ) else: - updater = _hoomd.BoxResizeUpdaterGPU(state._cpp_sys_def, - Periodic(1), box_variant, - group) + updater = _hoomd.BoxResizeUpdaterGPU( + state._cpp_sys_def, Periodic(1), box_variant, group + ) updater.update(state._simulation.timestep) diff --git a/hoomd/update/custom_updater.py b/hoomd/update/custom_updater.py index 92cef94179..cd18593499 100644 --- a/hoomd/update/custom_updater.py +++ b/hoomd/update/custom_updater.py @@ -13,13 +13,12 @@ def act(self, timestep): custom_action = ExampleAction() """ -from hoomd.custom import (CustomOperation, Action) +from hoomd.custom import CustomOperation, Action from hoomd.custom.custom_operation import _InternalCustomOperation from hoomd.operation import Updater class _UpdaterProperty: - @property def updater(self): return self._action @@ -29,8 +28,7 @@ def updater(self, updater): if isinstance(updater, Action): self._action = updater else: - raise ValueError( - "updater must be an instance of hoomd.custom.Action") + raise ValueError("updater must be an instance of hoomd.custom.Action") class CustomUpdater(CustomOperation, _UpdaterProperty, Updater): @@ -53,7 +51,8 @@ class CustomUpdater(CustomOperation, _UpdaterProperty, Updater): custom_updater = hoomd.update.CustomUpdater( action=custom_action, - trigger=hoomd.trigger.Periodic(1000)) + trigger=hoomd.trigger.Periodic(1000), + ) simulation.operations.updaters.append(custom_updater) See Also: @@ -63,12 +62,13 @@ class CustomUpdater(CustomOperation, _UpdaterProperty, Updater): `hoomd.write.CustomWriter` """ - _cpp_list_name = 'updaters' - _cpp_class_name = 'PythonUpdater' + + _cpp_list_name = "updaters" + _cpp_class_name = "PythonUpdater" __doc__ += CustomOperation._doc_inherited class _InternalCustomUpdater(_InternalCustomOperation, Updater): - _cpp_list_name = 'updaters' - _cpp_class_name = 'PythonUpdater' + _cpp_list_name = "updaters" + _cpp_class_name = "PythonUpdater" _operation_func = "update" diff --git a/hoomd/update/particle_filter.py b/hoomd/update/particle_filter.py index b0e022707f..bba22f619b 100644 --- a/hoomd/update/particle_filter.py +++ b/hoomd/update/particle_filter.py @@ -72,7 +72,8 @@ class FilterUpdater(hoomd.operation.Updater): filter_updater = hoomd.update.FilterUpdater( trigger=hoomd.trigger.Periodic(1_000), - filters=[filter1, filter2]) + filters=[filter1, filter2], + ) {inherited} @@ -81,8 +82,7 @@ class FilterUpdater(hoomd.operation.Updater): **Members defined in** `FilterUpdater`: """ - __doc__ = __doc__.replace("{inherited}", - hoomd.operation.Updater._doc_inherited) + __doc__ = __doc__.replace("{inherited}", hoomd.operation.Updater._doc_inherited) def __init__(self, trigger, filters): super().__init__(trigger) @@ -90,7 +90,8 @@ def __init__(self, trigger, filters): hoomd.filter.ParticleFilter, iterable=filters, to_synced_list=_GroupConverter(), - attach_members=False) + attach_members=False, + ) @property def filters(self): @@ -115,7 +116,8 @@ def _attach_hook(self): # query groups from filters. 
self._filters._to_synced_list_conversion._attach(self._simulation) self._cpp_obj = hoomd._hoomd.ParticleFilterUpdater( - self._simulation.state._cpp_sys_def, self.trigger) + self._simulation.state._cpp_sys_def, self.trigger + ) self._filters._sync(self._simulation, self._cpp_obj.groups) def __eq__(self, other): diff --git a/hoomd/update/remove_drift.py b/hoomd/update/remove_drift.py index e9aaf2c453..1b59e3c632 100644 --- a/hoomd/update/remove_drift.py +++ b/hoomd/update/remove_drift.py @@ -50,7 +50,8 @@ class RemoveDrift(Updater): .. code-block:: python remove_drift = hoomd.update.RemoveDrift( - reference_positions=[(0,0,0), (1,0,0)]) + reference_positions=[(0, 0, 0), (1, 0, 0)] + ) simulation.operations.updaters.append(remove_drift) {inherited} @@ -67,7 +68,10 @@ class RemoveDrift(Updater): .. code-block:: python - remove_drift.reference_positions = [(0,0,0), (1,0,0)] + remove_drift.reference_positions = [ + (0, 0, 0), + (1, 0, 0), + ] """ __doc__ = __doc__.replace("{inherited}", Updater._doc_inherited) @@ -75,16 +79,18 @@ class RemoveDrift(Updater): def __init__(self, reference_positions, trigger=1): super().__init__(trigger) self._param_dict.update( - ParameterDict({ - "reference_positions": NDArrayValidator(np.float64, (None, 3)) - })) + ParameterDict( + {"reference_positions": NDArrayValidator(np.float64, (None, 3))} + ) + ) self.reference_positions = reference_positions def _attach_hook(self): if isinstance(self._simulation.device, hoomd.device.GPU): self._simulation.device._cpp_msg.warning( - "Falling back on CPU. No GPU implementation available.\n") + "Falling back on CPU. No GPU implementation available.\n" + ) self._cpp_obj = _hoomd.UpdaterRemoveDrift( - self._simulation.state._cpp_sys_def, self.trigger, - self.reference_positions) + self._simulation.state._cpp_sys_def, self.trigger, self.reference_positions + ) diff --git a/hoomd/util.py b/hoomd/util.py index 0ddaacdf5d..3f5ad1a7b6 100644 --- a/hoomd/util.py +++ b/hoomd/util.py @@ -15,7 +15,7 @@ def _to_camel_case(string): This currently capitalizes the first word which is not correct camelcase. """ - return string.replace('_', ' ').title().replace(' ', '') + return string.replace("_", " ").title().replace(" ", "") def _is_iterable(obj): @@ -60,7 +60,7 @@ def _dict_fold(dict_, func, init_value, use_keys=False): .. code-block:: python - mapping = {'a': 0, 'b': 1, 'c': 2} + mapping = {"a": 0, "b": 1, "c": 2} accumulated_value = 0 func = lambda x, y: x + y for value in mapping.values(): @@ -246,16 +246,18 @@ class _SafeNamespaceDict(_NamespaceDict): def __setitem__(self, namespace, value): if namespace in self: - raise KeyError("Namespace {} is being used. Remove before " - "replacing.".format(namespace)) + raise KeyError( + "Namespace {} is being used. Remove before " "replacing.".format( + namespace + ) + ) else: super().__setitem__(namespace, value) -def make_example_simulation(device=None, - dimensions=3, - particle_types=['A'], - mpcd_types=None): +def make_example_simulation( + device=None, dimensions=3, particle_types=["A"], mpcd_types=None +): """Make an example Simulation object. The simulation state contains two particles at positions (-1, 0, 0) and @@ -317,5 +319,5 @@ def make_example_simulation(device=None, __all__ = [ - 'make_example_simulation', + "make_example_simulation", ] diff --git a/hoomd/variant/__init__.py b/hoomd/variant/__init__.py index 46491c1600..7e6206b8f1 100644 --- a/hoomd/variant/__init__.py +++ b/hoomd/variant/__init__.py @@ -21,16 +21,15 @@ object. 
""" -from hoomd.variant.scalar import (Variant, Constant, Ramp, Cycle, Power, - variant_like) +from hoomd.variant.scalar import Variant, Constant, Ramp, Cycle, Power, variant_like from hoomd.variant import box __all__ = [ - 'Constant', - 'Cycle', - 'Power', - 'Ramp', - 'Variant', - 'box', - 'variant_like', + "Constant", + "Cycle", + "Power", + "Ramp", + "Variant", + "box", + "variant_like", ] diff --git a/hoomd/variant/box.py b/hoomd/variant/box.py index 7e6ff457e6..e61c434c44 100644 --- a/hoomd/variant/box.py +++ b/hoomd/variant/box.py @@ -26,7 +26,7 @@ def __init__(self): hoomd.variant.box.BoxVariant.__init__(self) def __call__(self, timestep): - return [10 + timestep/1e6, 10, 10, 0, 0, 0] + return [10 + timestep / 1e6, 10, 10, 0, 0, 0] .. py:method:: __call__(timestep) @@ -57,8 +57,8 @@ def _private_eq(self, other): if not isinstance(other, type(self)): return False return all( - getattr(self, attr) == getattr(other, attr) - for attr in self._eq_attrs) + getattr(self, attr) == getattr(other, attr) for attr in self._eq_attrs + ) class Constant(_hoomd.VectorVariantBoxConstant, BoxVariant): @@ -76,6 +76,7 @@ class Constant(_hoomd.VectorVariantBoxConstant, BoxVariant): **Members defined in** `Constant`: """ + _eq_attrs = ("box",) __eq__ = BoxVariant._private_eq __doc__ = __doc__.replace("{inherited}", BoxVariant._doc_inherited) @@ -139,6 +140,7 @@ class Interpolate(_hoomd.VectorVariantBoxInterpolate, BoxVariant): variant (hoomd.variant.Variant): A variant used to interpolate between the two boxes. """ + _eq_attrs = ( "initial_box", "final_box", @@ -151,8 +153,9 @@ def __init__(self, initial_box, final_box, variant): box2 = hoomd.data.typeconverter.box_preprocessing(final_box) variant = hoomd.data.typeconverter.variant_preprocessing(variant) BoxVariant.__init__(self) - _hoomd.VectorVariantBoxInterpolate.__init__(self, box1._cpp_obj, - box2._cpp_obj, variant) + _hoomd.VectorVariantBoxInterpolate.__init__( + self, box1._cpp_obj, box2._cpp_obj, variant + ) def __reduce__(self): """Reduce values to picklable format.""" @@ -218,6 +221,7 @@ class InverseVolumeRamp(_hoomd.VectorVariantBoxInverseVolumeRamp, BoxVariant): t_start (int): The time step at the start of the ramp. t_ramp (int): The length of the ramp. 
""" + _eq_attrs = ("initial_box", "final_volume", "t_start", "t_ramp") __eq__ = BoxVariant._private_eq __doc__ = __doc__.replace("{inherited}", BoxVariant._doc_inherited) @@ -226,12 +230,15 @@ def __init__(self, initial_box, final_volume, t_start, t_ramp): BoxVariant.__init__(self) box = hoomd.data.typeconverter.box_preprocessing(initial_box) _hoomd.VectorVariantBoxInverseVolumeRamp.__init__( - self, box._cpp_obj, final_volume, t_start, t_ramp) + self, box._cpp_obj, final_volume, t_start, t_ramp + ) def __reduce__(self): """Reduce values to picklable format.""" - return (type(self), (self.initial_box, self.final_volume, self.t_start, - self.t_ramp)) + return ( + type(self), + (self.initial_box, self.final_volume, self.t_start, self.t_ramp), + ) @property def initial_box(self): @@ -245,8 +252,8 @@ def initial_box(self, box): __all__ = [ - 'BoxVariant', - 'Constant', - 'Interpolate', - 'InverseVolumeRamp', + "BoxVariant", + "Constant", + "Interpolate", + "InverseVolumeRamp", ] diff --git a/hoomd/variant/scalar.py b/hoomd/variant/scalar.py index ca7ea09ffc..cdb7ec6ee7 100644 --- a/hoomd/variant/scalar.py +++ b/hoomd/variant/scalar.py @@ -24,13 +24,13 @@ def __init__(self): hoomd.variant.Variant.__init__(self) def __call__(self, timestep): - return (float(timestep)**(1 / 2)) + return float(timestep) ** (1 / 2) def _min(self): return 0.0 def _max(self): - return float('inf') + return float("inf") Note: Provide the minimum and maximum values in the ``_min`` and ``_max`` @@ -94,8 +94,8 @@ def _private_eq(self, other): if not isinstance(other, type(self)): return False return all( - getattr(self, attr) == getattr(other, attr) - for attr in self._eq_attrs) + getattr(self, attr) == getattr(other, attr) for attr in self._eq_attrs + ) class Constant(_hoomd.VariantConstant, Variant): @@ -121,6 +121,7 @@ class Constant(_hoomd.VariantConstant, Variant): Attributes: value (float): The value. """ + _eq_attrs = ("value",) def __init__(self, value): @@ -150,10 +151,9 @@ class Ramp(_hoomd.VariantRamp, Variant): .. code-block:: python - variant = hoomd.variant.Ramp(A=1.0, - B=2.0, - t_start=10_000, - t_ramp=100_000) + variant = hoomd.variant.Ramp( + A=1.0, B=2.0, t_start=10_000, t_ramp=100_000 + ) {inherited} @@ -167,6 +167,7 @@ class Ramp(_hoomd.VariantRamp, Variant): t_start (int): The start time step. t_ramp (int): The length of the ramp. """ + _eq_attrs = ("A", "B", "t_start", "t_ramp") __doc__ = __doc__.replace("{inherited}", Variant._doc_inherited) @@ -202,13 +203,15 @@ class Cycle(_hoomd.VariantCycle, Variant): .. code-block:: python - variant = hoomd.variant.Cycle(A=1.0, - B=2.0, - t_start=10_000, - t_A=100_000, - t_AB=1_000_000, - t_B=200_000, - t_BA=2_000_000) + variant = hoomd.variant.Cycle( + A=1.0, + B=2.0, + t_start=10_000, + t_A=100_000, + t_AB=1_000_000, + t_B=200_000, + t_BA=2_000_000, + ) {inherited} @@ -225,6 +228,7 @@ class Cycle(_hoomd.VariantCycle, Variant): t_B (int): The holding time at B. t_BA (int): The time spent ramping from B to A. """ + _eq_attrs = ("A", "B", "t_start", "t_A", "t_AB", "t_B", "t_BA") __doc__ = __doc__.replace("{inherited}", Variant._doc_inherited) @@ -256,10 +260,9 @@ class Power(_hoomd.VariantPower, Variant): .. code-block:: python - variant = hoomd.variant.Power(A=2, - B=8, - power=1 / 10, - t_start=10, t_ramp=20) + variant = hoomd.variant.Power( + A=2, B=8, power=1 / 10, t_start=10, t_ramp=20 + ) {inherited} @@ -274,6 +277,7 @@ class Power(_hoomd.VariantPower, Variant): t_start (int): The start time step. t_ramp (int): The length of the ramp. 
""" + _eq_attrs = ("A", "B", "power", "t_start", "t_ramp") __doc__ = __doc__.replace("{inherited}", Variant._doc_inherited) diff --git a/hoomd/version.py b/hoomd/version.py index 040c2a0159..fe5e2397c2 100644 --- a/hoomd/version.py +++ b/hoomd/version.py @@ -50,6 +50,7 @@ version (str): HOOMD-blue package version, following semantic versioning. """ + from hoomd import _hoomd from hoomd.version_config import ( @@ -75,22 +76,22 @@ floating_point_precision = _hoomd.BuildInfo.getFloatingPointPrecision() __all__ = [ - 'build_dir', - 'compile_date', - 'compile_flags', - 'cxx_compiler', - 'floating_point_precision', - 'git_branch', - 'git_sha1', - 'gpu_api_version', - 'gpu_enabled', - 'gpu_platform', - 'hpmc_built', - 'install_dir', - 'md_built', - 'metal_built', - 'mpcd_built', - 'mpi_enabled', - 'source_dir', - 'version', + "build_dir", + "compile_date", + "compile_flags", + "cxx_compiler", + "floating_point_precision", + "git_branch", + "git_sha1", + "gpu_api_version", + "gpu_enabled", + "gpu_platform", + "hpmc_built", + "install_dir", + "md_built", + "metal_built", + "mpcd_built", + "mpi_enabled", + "source_dir", + "version", ] diff --git a/hoomd/wall.py b/hoomd/wall.py index b0d95cdedd..82c77666a6 100644 --- a/hoomd/wall.py +++ b/hoomd/wall.py @@ -43,8 +43,10 @@ def to_dict(self): def _setattr_param(self, attr, value): """Make WallGeometry objects effectively immutable.""" - raise ValueError(f"Cannot set {attr} after construction as " - f"{self.__class__} objects are immutable") + raise ValueError( + f"Cannot set {attr} after construction as " + f"{self.__class__} objects are immutable" + ) class Sphere(WallGeometry): @@ -101,10 +103,9 @@ class Sphere(WallGeometry): """ def __init__(self, radius, origin=(0.0, 0.0, 0.0), inside=True, open=True): - param_dict = ParameterDict(radius=float, - origin=(float, float, float), - inside=bool, - open=bool) + param_dict = ParameterDict( + radius=float, origin=(float, float, float), inside=bool, open=bool + ) param_dict["radius"] = radius param_dict["origin"] = origin param_dict["inside"] = inside @@ -130,7 +131,7 @@ def to_dict(self): "radius": self.radius, "origin": self.origin, "inside": self.inside, - "open": self.open + "open": self.open, } @@ -181,7 +182,7 @@ class Cylinder(WallGeometry): .. code-block:: python - cylinder = hoomd.wall.Cylinder(radius=10.0, axis=(0,0,1)) + cylinder = hoomd.wall.Cylinder(radius=10.0, axis=(0, 0, 1)) Attributes: radius (float): @@ -200,17 +201,14 @@ class Cylinder(WallGeometry): ``True`` means do not include the surface. 
""" - def __init__(self, - radius, - axis, - origin=(0.0, 0.0, 0.0), - inside=True, - open=True): - param_dict = ParameterDict(radius=float, - origin=(float, float, float), - axis=(float, float, float), - inside=bool, - open=bool) + def __init__(self, radius, axis, origin=(0.0, 0.0, 0.0), inside=True, open=True): + param_dict = ParameterDict( + radius=float, + origin=(float, float, float), + axis=(float, float, float), + inside=bool, + open=bool, + ) param_dict["radius"] = radius param_dict["origin"] = origin param_dict["axis"] = axis @@ -238,7 +236,7 @@ def to_dict(self): "origin": self.origin, "axis": self.axis, "inside": self.inside, - "open": self.open + "open": self.open, } @@ -291,9 +289,9 @@ class Plane(WallGeometry): """ def __init__(self, origin, normal, open=True): - param_dict = ParameterDict(origin=(float, float, float), - normal=(float, float, float), - open=bool) + param_dict = ParameterDict( + origin=(float, float, float), normal=(float, float, float), open=bool + ) param_dict["origin"] = origin param_dict["normal"] = normal param_dict["open"] = open @@ -374,14 +372,9 @@ def __init__(self, walls=None, to_cpp=identity): self._walls = [] self._backend_list_index = [] self._backend_lists = { - Sphere: - SyncedList(Sphere, to_synced_list=to_cpp, attach_members=False), - Cylinder: - SyncedList(Cylinder, - to_synced_list=to_cpp, - attach_members=False), - Plane: - SyncedList(Plane, to_synced_list=to_cpp, attach_members=False) + Sphere: SyncedList(Sphere, to_synced_list=to_cpp, attach_members=False), + Cylinder: SyncedList(Cylinder, to_synced_list=to_cpp, attach_members=False), + Plane: SyncedList(Plane, to_synced_list=to_cpp, attach_members=False), } if walls is None: @@ -406,8 +399,7 @@ def __setitem__(self, index, wall): self._backend_lists[new_type][old_backend_index.index] = wall return - new_backend_index = self._get_obj_backend_index(index + 1, new_type, - old_type) + new_backend_index = self._get_obj_backend_index(index + 1, new_type, old_type) self._backend_list_index[index] = new_backend_index # Add/remove the new/old walls from their respective backend lists @@ -499,7 +491,7 @@ def _get_obj_backend_index(self, frontend_index, new_type, old_type=None): if backend_index is not None: return backend_index - for bi in self._backend_list_index[frontend_index - 1::-1]: + for bi in self._backend_list_index[frontend_index - 1 :: -1]: if bi.type == new_type: backend_index = copy(bi) backend_index.index += 1 @@ -511,8 +503,8 @@ def _get_obj_backend_index(self, frontend_index, new_type, old_type=None): __all__ = [ - 'Cylinder', - 'Plane', - 'Sphere', - 'WallGeometry', + "Cylinder", + "Plane", + "Sphere", + "WallGeometry", ] diff --git a/hoomd/write/__init__.py b/hoomd/write/__init__.py index 9124926003..a958d757b1 100644 --- a/hoomd/write/__init__.py +++ b/hoomd/write/__init__.py @@ -34,10 +34,10 @@ from hoomd.write.hdf5 import HDF5Log __all__ = [ - 'DCD', - 'GSD', - 'Burst', - 'CustomWriter', - 'HDF5Log', - 'Table', + "DCD", + "GSD", + "Burst", + "CustomWriter", + "HDF5Log", + "Table", ] diff --git a/hoomd/write/custom_writer.py b/hoomd/write/custom_writer.py index 10d91f37c6..12effbc35f 100644 --- a/hoomd/write/custom_writer.py +++ b/hoomd/write/custom_writer.py @@ -13,13 +13,12 @@ def act(self, timestep): custom_action = ExampleAction() """ -from hoomd.custom import (Action, CustomOperation) +from hoomd.custom import Action, CustomOperation from hoomd.custom.custom_operation import _InternalCustomOperation from hoomd.operation import Writer class _WriterProperty: - @property def 
writer(self): return self._action @@ -29,8 +28,7 @@ def writer(self, analyzer): if isinstance(analyzer, Action): self._action = analyzer else: - raise ValueError( - "analyzer must be an instance of hoomd.custom.Action") + raise ValueError("analyzer must be an instance of hoomd.custom.Action") class CustomWriter(CustomOperation, _WriterProperty, Writer): @@ -54,7 +52,8 @@ class CustomWriter(CustomOperation, _WriterProperty, Writer): custom_writer = hoomd.write.CustomWriter( action=custom_action, - trigger=hoomd.trigger.Periodic(1000)) + trigger=hoomd.trigger.Periodic(1000), + ) simulation.operations.writers.append(custom_writer) See Also: @@ -64,12 +63,13 @@ class CustomWriter(CustomOperation, _WriterProperty, Writer): `hoomd.tune.CustomTuner` """ - _cpp_list_name = 'analyzers' - _cpp_class_name = 'PythonAnalyzer' + + _cpp_list_name = "analyzers" + _cpp_class_name = "PythonAnalyzer" __doc__ += CustomOperation._doc_inherited class _InternalCustomWriter(_InternalCustomOperation, Writer): - _cpp_list_name = 'analyzers' - _cpp_class_name = 'PythonAnalyzer' + _cpp_list_name = "analyzers" + _cpp_class_name = "PythonAnalyzer" _operation_func = "write" diff --git a/hoomd/write/dcd.py b/hoomd/write/dcd.py index 139d650a8c..08d70e6439 100644 --- a/hoomd/write/dcd.py +++ b/hoomd/write/dcd.py @@ -57,8 +57,10 @@ class DCD(Writer): .. code-block:: python - dcd = hoomd.write.DCD(trigger=hoomd.trigger.Periodic(1_000_000), - filename=dcd_filename) + dcd = hoomd.write.DCD( + trigger=hoomd.trigger.Periodic(1_000_000), + filename=dcd_filename, + ) simulation.operations.writers.append(dcd) {inherited} @@ -131,28 +133,37 @@ class DCD(Writer): __doc__ = __doc__.replace("{inherited}", Writer._doc_inherited) - def __init__(self, - trigger, - filename, - filter=All(), - overwrite=False, - unwrap_full=False, - unwrap_rigid=False, - angle_z=False): - + def __init__( + self, + trigger, + filename, + filter=All(), + overwrite=False, + unwrap_full=False, + unwrap_rigid=False, + angle_z=False, + ): # initialize base class super().__init__(trigger) self._param_dict.update( - ParameterDict(filename=str(filename), - filter=ParticleFilter, - overwrite=bool(overwrite), - unwrap_full=bool(unwrap_full), - unwrap_rigid=bool(unwrap_rigid), - angle_z=bool(angle_z))) + ParameterDict( + filename=str(filename), + filter=ParticleFilter, + overwrite=bool(overwrite), + unwrap_full=bool(unwrap_full), + unwrap_rigid=bool(unwrap_rigid), + angle_z=bool(angle_z), + ) + ) self.filter = filter def _attach_hook(self): group = self._simulation.state._get_group(self.filter) self._cpp_obj = _hoomd.DCDDumpWriter( - self._simulation.state._cpp_sys_def, self.trigger, self.filename, - int(self.trigger.period), group, self.overwrite) + self._simulation.state._cpp_sys_def, + self.trigger, + self.filename, + int(self.trigger.period), + group, + self.overwrite, + ) diff --git a/hoomd/write/gsd.py b/hoomd/write/gsd.py index 63bade94f4..d67c074922 100644 --- a/hoomd/write/gsd.py +++ b/hoomd/write/gsd.py @@ -43,8 +43,8 @@ def _array_to_strings(value): string_list = [] for string in value: string_list.append( - string.view( - dtype='|S{}'.format(value.shape[1])).decode('UTF-8')) + string.view(dtype="|S{}".format(value.shape[1])).decode("UTF-8") + ) return string_list else: return value @@ -175,8 +175,10 @@ class GSD(Writer): .. 
code-block:: python - gsd = hoomd.write.GSD(trigger=hoomd.trigger.Periodic(1_000_000), - filename=gsd_filename) + gsd = hoomd.write.GSD( + trigger=hoomd.trigger.Periodic(1_000_000), + filename=gsd_filename, + ) simulation.operations.writers.append(gsd) {inherited} @@ -227,17 +229,19 @@ class GSD(Writer): .. code-block:: python - gsd.dynamic = ['property'] + gsd.dynamic = ["property"] .. code-block:: python - gsd.dynamic = ['property', 'momentum'] + gsd.dynamic = ["property", "momentum"] .. code-block:: python - gsd.dynamic = ['property', - 'particles/image', - 'particles/typeid'] + gsd.dynamic = [ + "property", + "particles/image", + "particles/typeid", + ] write_diameter (bool): When `False`, do not write ``particles/diameter``. Set to `True` to write non-default particle @@ -261,68 +265,79 @@ class GSD(Writer): __doc__ = __doc__.replace("{inherited}", Writer._doc_inherited) - def __init__(self, - trigger, - filename, - filter=All(), - mode='ab', - truncate=False, - dynamic=None, - logger=None): - + def __init__( + self, + trigger, + filename, + filter=All(), + mode="ab", + truncate=False, + dynamic=None, + logger=None, + ): super().__init__(trigger) - dynamic_validation = OnlyFrom([ - 'attribute', - 'property', - 'momentum', - 'topology', - 'configuration/box', - 'particles/N', - 'particles/position', - 'particles/orientation', - 'particles/velocity', - 'particles/angmom', - 'particles/image', - 'particles/types', - 'particles/typeid', - 'particles/mass', - 'particles/charge', - 'particles/diameter', - 'particles/body', - 'particles/moment_inertia', - ], - preprocess=_array_to_strings) - - dynamic = ['property'] if dynamic is None else dynamic + dynamic_validation = OnlyFrom( + [ + "attribute", + "property", + "momentum", + "topology", + "configuration/box", + "particles/N", + "particles/position", + "particles/orientation", + "particles/velocity", + "particles/angmom", + "particles/image", + "particles/types", + "particles/typeid", + "particles/mass", + "particles/charge", + "particles/diameter", + "particles/body", + "particles/moment_inertia", + ], + preprocess=_array_to_strings, + ) + + dynamic = ["property"] if dynamic is None else dynamic self._param_dict.update( - ParameterDict(filename=str(filename), - filter=ParticleFilter, - mode=str(mode), - truncate=bool(truncate), - dynamic=[dynamic_validation], - write_diameter=False, - maximum_write_buffer_size=64 * 1024 * 1024, - _defaults=dict(filter=filter, dynamic=dynamic))) + ParameterDict( + filename=str(filename), + filter=ParticleFilter, + mode=str(mode), + truncate=bool(truncate), + dynamic=[dynamic_validation], + write_diameter=False, + maximum_write_buffer_size=64 * 1024 * 1024, + _defaults=dict(filter=filter, dynamic=dynamic), + ) + ) self._logger = None if logger is None else _GSDLogWriter(logger) def _attach_hook(self): self._cpp_obj = _hoomd.GSDDumpWriter( - self._simulation.state._cpp_sys_def, self.trigger, self.filename, - self._simulation.state._get_group(self.filter), self.mode, - self.truncate) + self._simulation.state._cpp_sys_def, + self.trigger, + self.filename, + self._simulation.state._get_group(self.filter), + self.mode, + self.truncate, + ) self._cpp_obj.log_writer = self.logger # Maintain a list of open gsd writers weak_writer = weakref.ref(self) _open_gsd_writers.append(weak_writer) - self._finalizer = weakref.finalize(self, _finalize_gsd, weak_writer, - self._cpp_obj), + self._finalizer = ( + weakref.finalize(self, _finalize_gsd, weak_writer, self._cpp_obj), + ) @staticmethod - def write(state, filename, 
filter=All(), mode='wb', logger=None): + def write(state, filename, filter=All(), mode="wb", logger=None): """Write the given simulation state out to a GSD file. Args: @@ -334,12 +349,17 @@ def write(state, filename, filter=All(), mode='wb', logger=None): The valid file modes for `write` are ``'wb'`` and ``'xb'``. """ - if mode != 'wb' and mode != 'xb': + if mode != "wb" and mode != "xb": raise ValueError(f"Invalid GSD.write file mode: {mode}") - writer = _hoomd.GSDDumpWriter(state._cpp_sys_def, Periodic(1), - str(filename), state._get_group(filter), - mode, False) + writer = _hoomd.GSDDumpWriter( + state._cpp_sys_def, + Periodic(1), + str(filename), + state._get_group(filter), + mode, + False, + ) if logger is not None: writer.log_writer = _GSDLogWriter(logger) @@ -376,12 +396,14 @@ def flush(self): Flush all write buffers:: for writer in simulation.operations.writers: - if hasattr(writer, 'flush'): + if hasattr(writer, "flush"): writer.flush() """ if not self._attached: - raise RuntimeError("The GSD file is unavailable until the" - "simulation runs for 0 or more steps.") + raise RuntimeError( + "The GSD file is unavailable until the" + "simulation runs for 0 or more steps." + ) self._cpp_obj.flush() @@ -393,8 +415,11 @@ def _iterable_is_incomplete(iterable): ensure that no RequiredArg values exist at any depth in a state loggable key. Otherwise, the GSD backend will fail in its conversion to NumPy arrays. """ - if (not isinstance(iterable, Collection) or isinstance(iterable, str) - or len(iterable) == 0): + if ( + not isinstance(iterable, Collection) + or isinstance(iterable, str) + or len(iterable) == 0 + ): return False incomplete = False @@ -426,14 +451,14 @@ class _GSDLogWriter: _global_prepend (`str`): a str that gets prepending into the namespace of each logged quantity. """ - _per_categories = LoggerCategories.any([ - 'angle', 'bond', 'constraint', 'dihedral', 'improper', 'pair', - 'particle' - ]) - _convert_categories = LoggerCategories.any(['string', 'strings']) - _skip_categories = LoggerCategories['object'] - _special_keys = ['type_shapes'] - _global_prepend = 'log' + + _per_categories = LoggerCategories.any( + ["angle", "bond", "constraint", "dihedral", "improper", "pair", "particle"] + ) + _convert_categories = LoggerCategories.any(["string", "strings"]) + _skip_categories = LoggerCategories["object"] + _special_keys = ["type_shapes"] + _global_prepend = "log" def __init__(self, logger): self.logger = logger @@ -442,7 +467,7 @@ def log(self): """Get the flattened dictionary for consumption by GSD object.""" log = dict() for key, value in _dict_flatten(self.logger.log()).items(): - if 'state' in key and _iterable_is_incomplete(value[0]): + if "state" in key and _iterable_is_incomplete(value[0]): pass log_value, type_category = value type_category = LoggerCategories[type_category] @@ -461,16 +486,20 @@ def log(self): # log/particles/{remaining namespace}. This preserves OVITO # intergration. 
if type_category in self._per_categories: - log['/'.join( - (self._global_prepend, type_category.name + 's', - *key))] = log_value + log[ + "/".join( + (self._global_prepend, type_category.name + "s", *key) + ) + ] = log_value elif type_category in self._convert_categories: self._log_convert_value( - log, '/'.join((self._global_prepend, *key)), - type_category, log_value) + log, + "/".join((self._global_prepend, *key)), + type_category, + log_value, + ) else: - log['/'.join((self._global_prepend, *key))] = \ - log_value + log["/".join((self._global_prepend, *key))] = log_value else: pass return log @@ -481,25 +510,25 @@ def _log_special(self, dict_, key, value): When adding a key to this make sure this is the only option. In general, special cases like this should be avoided if possible. """ - if key == 'type_shapes': + if key == "type_shapes": shape_list = [ - bytes(json.dumps(type_shape) + '\0', 'UTF-8') - for type_shape in value + bytes(json.dumps(type_shape) + "\0", "UTF-8") for type_shape in value ] max_len = np.max([len(shape) for shape in shape_list]) num_shapes = len(shape_list) str_array = np.array(shape_list) - dict_['particles/type_shapes'] = \ - str_array.view(dtype=np.int8).reshape(num_shapes, max_len) + dict_["particles/type_shapes"] = str_array.view(dtype=np.int8).reshape( + num_shapes, max_len + ) def _log_convert_value(self, dict_, key, category, value): """Convert loggable types that cannot be directly stored by GSD.""" if category == LoggerCategories.string: - value = bytes(value, 'UTF-8') + value = bytes(value, "UTF-8") value = np.array([value], dtype=np.dtype((bytes, len(value) + 1))) value = value.view(dtype=np.int8) elif category == LoggerCategories.strings: - value = [bytes(v + '\0', 'UTF-8') for v in value] + value = [bytes(v + "\0", "UTF-8") for v in value] max_len = np.max([len(string) for string in value]) num_strings = len(value) value = np.array(value) diff --git a/hoomd/write/gsd_burst.py b/hoomd/write/gsd_burst.py index d01c6af9bf..1ec7e40e4b 100644 --- a/hoomd/write/gsd_burst.py +++ b/hoomd/write/gsd_burst.py @@ -56,10 +56,12 @@ class Burst(GSD): .. 
code-block:: python - burst = hoomd.write.Burst(trigger=hoomd.trigger.Periodic(1_000), - filename=burst_filename, - max_burst_size=100, - write_at_start=True) + burst = hoomd.write.Burst( + trigger=hoomd.trigger.Periodic(1_000), + filename=burst_filename, + max_burst_size=100, + write_at_start=True, + ) simulation.operations.writers.append(burst) See Also: @@ -104,38 +106,50 @@ class Burst(GSD): __doc__ = __doc__.replace("{inherited}", GSD._doc_inherited) - def __init__(self, - trigger, - filename, - filter=All(), - mode='ab', - dynamic=None, - logger=None, - max_burst_size=-1, - write_at_start=False, - clear_whole_buffer_after_dump=True): - super().__init__(trigger=trigger, - filename=filename, - filter=filter, - mode=mode, - dynamic=dynamic, - logger=logger) + def __init__( + self, + trigger, + filename, + filter=All(), + mode="ab", + dynamic=None, + logger=None, + max_burst_size=-1, + write_at_start=False, + clear_whole_buffer_after_dump=True, + ): + super().__init__( + trigger=trigger, + filename=filename, + filter=filter, + mode=mode, + dynamic=dynamic, + logger=logger, + ) self._param_dict.pop("truncate") + self._param_dict.update(ParameterDict(max_burst_size=int, write_at_start=bool)) self._param_dict.update( - ParameterDict(max_burst_size=int, write_at_start=bool)) - self._param_dict.update({ - "max_burst_size": max_burst_size, - "write_at_start": write_at_start, - "clear_whole_buffer_after_dump": clear_whole_buffer_after_dump - }) + { + "max_burst_size": max_burst_size, + "write_at_start": write_at_start, + "clear_whole_buffer_after_dump": clear_whole_buffer_after_dump, + } + ) def _attach_hook(self): sim = self._simulation self._cpp_obj = _hoomd.GSDDequeWriter( - sim.state._cpp_sys_def, self.trigger, self.filename, - sim.state._get_group(self.filter), self.logger, self.max_burst_size, - self.mode, self.write_at_start, self.clear_whole_buffer_after_dump, - sim.timestep) + sim.state._cpp_sys_def, + self.trigger, + self.filename, + sim.state._get_group(self.filter), + self.logger, + self.max_burst_size, + self.mode, + self.write_at_start, + self.clear_whole_buffer_after_dump, + sim.timestep, + ) def dump(self, start=0, end=-1): """Write stored frames in range to the file and empties the buffer. 
diff --git a/hoomd/write/hdf5.py b/hoomd/write/hdf5.py index de0e169e36..ac7a60dfe9 100644 --- a/hoomd/write/hdf5.py +++ b/hoomd/write/hdf5.py @@ -47,12 +47,10 @@ class _SkipIfNone: - def __init__(self, attr): self._attr = attr def __call__(self, method): - @functools.wraps(method) def func(s, *args, **kwargs): if getattr(s, self._attr, None) is None: @@ -68,9 +66,7 @@ def func(s, *args, **kwargs): class _HDF5LogInternal(_InternalAction): """A HDF5 HOOMD logging backend.""" - _skip_for_equality = _InternalAction._skip_for_equality | { - "_fh", "_attached_" - } + _skip_for_equality = _InternalAction._skip_for_equality | {"_fh", "_attached_"} flags = ( custom.Action.Flags.ROTATIONAL_KINETIC_ENERGY, @@ -78,11 +74,13 @@ class _HDF5LogInternal(_InternalAction): custom.Action.Flags.EXTERNAL_FIELD_VIRIAL, ) - _reject_categories = logging.LoggerCategories.any(( - logging.LoggerCategories.object, - logging.LoggerCategories.strings, - logging.LoggerCategories.string, - )) + _reject_categories = logging.LoggerCategories.any( + ( + logging.LoggerCategories.object, + logging.LoggerCategories.strings, + logging.LoggerCategories.string, + ) + ) accepted_categories = ~_reject_categories @@ -92,19 +90,17 @@ class _HDF5LogInternal(_InternalAction): def __init__(self, filename, logger, mode="a"): if h5py is None: raise ImportError(f"{type(self)} requires the h5py package.") - param_dict = ParameterDict(filename=typeconverter.OnlyTypes( - (str, PurePath)), - logger=logging.Logger, - mode=str) - if (rejects := self._reject_categories - & logger.categories) != logging.LoggerCategories["NONE"]: + param_dict = ParameterDict( + filename=typeconverter.OnlyTypes((str, PurePath)), + logger=logging.Logger, + mode=str, + ) + if ( + rejects := self._reject_categories & logger.categories + ) != logging.LoggerCategories["NONE"]: reject_str = logging.LoggerCategories._get_string_list(rejects) raise ValueError(f"Cannot have {reject_str} in logger categories.") - param_dict.update({ - "filename": filename, - "logger": logger, - "mode": mode - }) + param_dict.update({"filename": filename, "logger": logger, "mode": mode}) self._param_dict = param_dict self._fh = None self._attached_ = False @@ -152,8 +148,7 @@ def act(self, timestep): continue str_key = "/".join(("hoomd-data", *key)) if str_key not in self._fh: - raise RuntimeError( - "The logged quantities cannot change within a file.") + raise RuntimeError("The logged quantities cannot change within a file.") dataset = self._fh[str_key] dataset.resize(self._frame + 1, axis=0) dataset[self._frame, ...] = value @@ -177,7 +172,7 @@ def flush(self): .. code-block:: python for writer in simulation.operations.writers: - if hasattr(writer, 'flush'): + if hasattr(writer, "flush"): writer.flush() """ self._fh.flush() @@ -220,11 +215,12 @@ def _initialize_datasets(self, log_dict): value = np.asarray(value) data_shape = (1, *value.shape) dtype = value.dtype - chunk_size = (max( - self._MULTIFRAME_ARRAY_CHUNK_MAXIMUM // value.nbytes, - 1),) + data_shape[1:] - self._create_dataset("/".join(("hoomd-data", *key)), data_shape, - dtype, chunk_size) + chunk_size = ( + max(self._MULTIFRAME_ARRAY_CHUNK_MAXIMUM // value.nbytes, 1), + ) + data_shape[1:] + self._create_dataset( + "/".join(("hoomd-data", *key)), data_shape, dtype, chunk_size + ) @_skip_fh def _find_frame(self): @@ -285,12 +281,12 @@ class HDF5Log(_InternalCustomWriter): .. 
code-block:: python - logger = hoomd.logging.Logger( - hoomd.write.HDF5Log.accepted_categories) + logger = hoomd.logging.Logger(hoomd.write.HDF5Log.accepted_categories) hdf5_log = hoomd.write.HDF5Log( trigger=hoomd.trigger.Periodic(10_000), filename=hdf5_filename, - logger=logger) + logger=logger, + ) simulation.operations.writers.append(hdf5_log) {inherited} @@ -336,6 +332,7 @@ class HDF5Log(_InternalCustomWriter): mode = hdf5_log.mode """ + _internal_class = _HDF5LogInternal _wrap_methods = ("flush",) __doc__ = __doc__.replace("{inherited}", Writer._doc_inherited) diff --git a/hoomd/write/table.py b/hoomd/write/table.py index 9a744ae0ea..957d1335a4 100644 --- a/hoomd/write/table.py +++ b/hoomd/write/table.py @@ -80,12 +80,9 @@ class _Formatter: centered ('^'). """ - def __init__(self, - pretty=True, - max_precision=15, - max_decimals_pretty=5, - pad=" ", - align="^"): + def __init__( + self, pretty=True, max_precision=15, max_decimals_pretty=5, pad=" ", align="^" + ): self.generate_fmt_strings(pad, align) self.pretty = pretty self.precision = max_precision - 1 @@ -133,8 +130,7 @@ def format_num(self, value, column_width): if not min_len_repr < 6 or min_len_repr > column_width: # Determine the number of decimals to use if self.pretty: - decimals = min(max(column_width - 6, 1), - self.max_decimals_pretty) + decimals = min(max(column_width - 6, 1), self.max_decimals_pretty) else: decimals = max(self.precision, 0) type_fmt = "." + str(decimals) + "e" @@ -142,32 +138,33 @@ def format_num(self, value, column_width): else: # Determine the number of decimals to use if self.pretty: - decimals = min(max(column_width - min_len_repr - 2, 1), - self.max_decimals_pretty) + decimals = min( + max(column_width - min_len_repr - 2, 1), + self.max_decimals_pretty, + ) else: decimals = max(self.precision - min_len_repr + 1, 0) type_fmt = "." + str(decimals) + "f" - return self._num_format.format(value, - width=column_width, - type=type_fmt) + return self._num_format.format(value, width=column_width, type=type_fmt) def format_str(self, value, column_width): if self.pretty and len(value) > column_width: truncate_to = max(1, column_width - 2) - return self._str_format.format(value[-truncate_to:], - width=column_width) + return self._str_format.format(value[-truncate_to:], width=column_width) else: return self._str_format.format(value, width=column_width) def __eq__(self, other): if not isinstance(other, _Formatter): return NotImplemented - return (self.pretty == other.pretty - and self.precision == other.precision - and self.max_decimals_pretty == other.max_decimals_pretty - and self._num_format == other._num_format - and self._str_format == other._str_format) + return ( + self.pretty == other.pretty + and self.precision == other.precision + and self.max_decimals_pretty == other.max_decimals_pretty + and self._num_format == other._num_format + and self._str_format == other._str_format + ) class _TableInternal(_InternalAction): @@ -179,66 +176,83 @@ class _TableInternal(_InternalAction): logged quantities, but would be more fragile. 
""" - _invalid_logger_categories = LoggerCategories.any([ - 'sequence', 'object', 'particle', 'bond', 'angle', 'dihedral', - 'improper', 'pair', 'constraint', 'strings' - ]) + _invalid_logger_categories = LoggerCategories.any( + [ + "sequence", + "object", + "particle", + "bond", + "angle", + "dihedral", + "improper", + "pair", + "constraint", + "strings", + ] + ) flags = [ - Action.Flags.ROTATIONAL_KINETIC_ENERGY, Action.Flags.PRESSURE_TENSOR, - Action.Flags.EXTERNAL_FIELD_VIRIAL + Action.Flags.ROTATIONAL_KINETIC_ENERGY, + Action.Flags.PRESSURE_TENSOR, + Action.Flags.EXTERNAL_FIELD_VIRIAL, ] _skip_for_equality = {"_comm"} - def __init__(self, - logger, - output=stdout, - header_sep='.', - delimiter=' ', - pretty=True, - max_precision=10, - max_header_len=None): - - param_dict = ParameterDict(header_sep=str, - delimiter=str, - min_column_width=int, - max_header_len=OnlyTypes(int, - allow_none=True), - pretty=bool, - max_precision=int, - output=OnlyTypes( - _OutputWriter, - postprocess=_ensure_writable), - logger=Logger) + def __init__( + self, + logger, + output=stdout, + header_sep=".", + delimiter=" ", + pretty=True, + max_precision=10, + max_header_len=None, + ): + param_dict = ParameterDict( + header_sep=str, + delimiter=str, + min_column_width=int, + max_header_len=OnlyTypes(int, allow_none=True), + pretty=bool, + max_precision=int, + output=OnlyTypes(_OutputWriter, postprocess=_ensure_writable), + logger=Logger, + ) param_dict.update( - dict(header_sep=header_sep, - delimiter=delimiter, - min_column_width=max(10, max_precision + 6), - max_header_len=max_header_len, - max_precision=max_precision, - pretty=pretty, - output=output, - logger=logger)) + dict( + header_sep=header_sep, + delimiter=delimiter, + min_column_width=max(10, max_precision + 6), + max_header_len=max_header_len, + max_precision=max_precision, + pretty=pretty, + output=output, + logger=logger, + ) + ) self._param_dict = param_dict # internal variables that are not part of the state. # Generate LoggerCategories for valid and invalid categories _valid_categories = LoggerCategories.any( - [LoggerCategories.scalar, LoggerCategories.string]) + [LoggerCategories.scalar, LoggerCategories.string] + ) _invalid_inputs = logger.categories & self._invalid_logger_categories # Ensure that only scalar and string categories are set for the logger if logger.categories == LoggerCategories.NONE: pass - elif (_valid_categories ^ LoggerCategories.ALL - ) & logger.categories == LoggerCategories.NONE: + elif ( + _valid_categories ^ LoggerCategories.ALL + ) & logger.categories == LoggerCategories.NONE: pass else: raise ValueError( "Table Logger may only have scalar or string categories set. 
\ - Use hoomd.write.GSD for {}.".format(_invalid_inputs)) + Use hoomd.write.GSD for {}.".format(_invalid_inputs) + ) self._cur_headers_with_width = dict() self._fmt = _Formatter(pretty, max_precision) @@ -257,8 +271,7 @@ def detach(self): def _get_log_dict(self): """Get a flattened dict for writing to output.""" return { - key: value[0] - for key, value in _dict_flatten(self.logger.log()).items() + key: value[0] for key, value in _dict_flatten(self.logger.log()).items() } def _update_headers(self, new_keys): @@ -273,16 +286,19 @@ def _update_headers(self, new_keys): header_output_list = [] header_dict = {} for namespace in new_keys: - header = self._determine_header(namespace, self.header_sep, - self.max_header_len) + header = self._determine_header( + namespace, self.header_sep, self.max_header_len + ) column_size = max(len(header), self.min_column_width) header_dict[namespace] = column_size header_output_list.append((header, column_size)) self._cur_headers_with_width = header_dict self.output.write( - self.delimiter.join((self._fmt.format_str(hdr, width) - for hdr, width in header_output_list))) - self.output.write('\n') + self.delimiter.join( + (self._fmt.format_str(hdr, width) for hdr, width in header_output_list) + ) + ) + self.output.write("\n") @staticmethod def _determine_header(namespace, sep, max_len): @@ -302,9 +318,9 @@ def _write_row(self, data): """Write a row of data to output.""" headers = self._cur_headers_with_width self.output.write( - self.delimiter.join( - (self._fmt(data[k], headers[k]) for k in headers))) - self.output.write('\n') + self.delimiter.join((self._fmt(data[k], headers[k]) for k in headers)) + ) + self.output.write("\n") def act(self, timestep=None): """Write row to designated output. @@ -328,27 +344,28 @@ def act(self, timestep=None): def __getstate__(self): state = copy.copy(self.__dict__) - state.pop('_comm', None) + state.pop("_comm", None) # This is to handle when the output specified is just stdout. By default # file objects like this are not picklable, so we need to handle it # differently. We let `None` represent stdout in the state dictionary. # Most other file like objects will simply fail to be pickled here. if self.output == stdout: param_dict = ParameterDict() - param_dict.update(state['_param_dict']) - state['_param_dict'] = param_dict - state['_param_dict']._dict['output'] = None - state['_param_dict']['output'] + param_dict.update(state["_param_dict"]) + state["_param_dict"] = param_dict + state["_param_dict"]._dict["output"] = None + state["_param_dict"]["output"] return state else: return super().__getstate__() def __setstate__(self, state): - if state['_param_dict']['output'] is None: - del state['_param_dict']['output'] - state['_param_dict']['output'] = stdout - state['_param_dict']._type_converter['output'] = OnlyTypes( - _OutputWriter, postprocess=_ensure_writable), + if state["_param_dict"]["output"] is None: + del state["_param_dict"]["output"] + state["_param_dict"]["output"] = stdout + state["_param_dict"]._type_converter["output"] = ( + OnlyTypes(_OutputWriter, postprocess=_ensure_writable), + ) self.__dict__ = state @@ -397,9 +414,11 @@ class Table(_InternalCustomWriter): .. 
code-block:: python - logger = hoomd.logging.Logger(categories=['scalar', 'string']) - table = hoomd.write.Table(trigger=hoomd.trigger.Periodic(10_000), - logger=logger) + logger = hoomd.logging.Logger(categories=["scalar", "string"]) + table = hoomd.write.Table( + trigger=hoomd.trigger.Periodic(10_000), + logger=logger, + ) {inherited} @@ -482,6 +501,7 @@ class Table(_InternalCustomWriter): min_column_width = table.min_column_width """ + _internal_class = _TableInternal __doc__ = __doc__.replace("{inherited}", Writer._doc_inherited) diff --git a/ruff.toml b/ruff.toml index e2193959f8..b40187cf9a 100644 --- a/ruff.toml +++ b/ruff.toml @@ -46,4 +46,5 @@ convention = "google" [format] indent-style = "space" line-ending = "auto" +docstring-code-line-length = 72 docstring-code-format = true diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 143bc37b87..0000000000 --- a/setup.cfg +++ /dev/null @@ -1,88 +0,0 @@ -[flake8] -# include messages from: -select = - E,F,W, # flake8 - N, # pep8-naming - D, # flake8-docstrings - RST # flake8-rst-docstrings -ignore = - # line breaks should occur before the binary operator - W503, - # allow occasional use of uppercase variable and argument names (e.g. N) - N806,N803, - # match yapf formatting style - E126,E133, - # do not document __init__ separately from the class - D107, - # Google Python style is not RST until after processed by Napoleon - # See https://github.com/peterjc/flake8-rst-docstrings/issues/17 - RST201,RST203,RST301, - # Allow :nowrap: in math directives - RST307 - # Allow backslashes in docstrings so we can have both line continuations and LaTeX math - D301 - # Allow no summary line. The rendered Sphinx documentation is cleaner without them. - D205,D415, - -# do not require docstrings in unit test files -# F401 ignore unused imports in __init__.py files (these are for users) -# D214 ignore overindented sections in Trigger - this is Google napoleon formatting -# N816 ignore mixed case kT variables -# D10* howto guides do not need docstrings -per-file-ignores = - */pytest/*.py:D100,D101,D102,D103,D104,D105,D106 - sphinx-doc/howto/*.py:D100,D101,D102,D103,D104,D105,D106,N816 - */__init__.py: F401 - hoomd/version.py: F401 - hoomd/trigger.py: D214 - -# TODO: These should be removed as files are ported or removed -# These also need to be removed in .pre-commit-config.yaml -exclude = .git, - __pycache__, - build, - hoomd/extern, - hoomd/metal, -max_line_length = 88 -max_doc_length = 88 -hang_closing = True -docstring-convention = google -rst-directives = - autodata, - code-blocks, - deprecated, - py:attribute, - py:method, - seealso, - versionadded, - versionchanged, - todo, -rst-roles = - attr, - class, - doc, - file, - func, - meth, - mod, - py:mod, - py:class, - py:meth, - py:func, - py:exc, - py:attr, - ref, - -[pydocstyle] -convention = google -# do not document __init__ separately from the class -add_ignore = D107 - -[yapf] -based_on_style = google -align_closing_bracket_with_visual_indent = True -split_before_arithmetic_operator = True -split_before_bitwise_operator = True -split_before_logical_operator = True -blank_line_before_module_docstring = True -split_before_dot = True diff --git a/sphinx-doc/conf.py b/sphinx-doc/conf.py index 21b10b48ab..5533a55b0e 100644 --- a/sphinx-doc/conf.py +++ b/sphinx-doc/conf.py @@ -12,23 +12,28 @@ # allows typing objects like variant_like to be documented correctly. 
# See: https://github.com/sphinx-doc/sphinx/issues/9560 -PythonDomain.object_types['class'].roles = ('class', 'exc', 'data', 'obj') -PythonDomain.object_types['data'].roles = ('data', 'class', 'obj') +PythonDomain.object_types["class"].roles = ("class", "exc", "data", "obj") +PythonDomain.object_types["data"].roles = ("data", "class", "obj") -sphinx_ver = tuple(map(int, sphinx.__version__.split('.'))) +sphinx_ver = tuple(map(int, sphinx.__version__.split("."))) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('..')) +sys.path.insert(0, os.path.abspath("..")) -os.environ['SPHINX'] = '1' +os.environ["SPHINX"] = "1" extensions = [ - 'nbsphinx', 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', - 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx', 'sphinx.ext.mathjax', - 'sphinx.ext.todo', 'IPython.sphinxext.ipython_console_highlighting', - 'sphinx_copybutton' + "nbsphinx", + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.napoleon", + "sphinx.ext.intersphinx", + "sphinx.ext.mathjax", + "sphinx.ext.todo", + "IPython.sphinxext.ipython_console_highlighting", + "sphinx_copybutton", ] if os.getenv("READTHEDOCS"): @@ -38,53 +43,53 @@ napoleon_include_special_with_doc = True intersphinx_mapping = { - 'python': ('https://docs.python.org/3', None), - 'numpy': ('https://numpy.org/doc/stable', None), - 'gsd': ('https://gsd.readthedocs.io/en/stable/', None) + "python": ("https://docs.python.org/3", None), + "numpy": ("https://numpy.org/doc/stable", None), + "gsd": ("https://gsd.readthedocs.io/en/stable/", None), } autodoc_docstring_signature = True -autodoc_typehints_format = 'short' +autodoc_typehints_format = "short" autodoc_mock_imports = [ - 'hoomd._hoomd', - 'hoomd.version_config', - 'hoomd.md._md', - 'hoomd.metal._metal', - 'hoomd.mpcd._mpcd', - 'hoomd.minimize._minimize', - 'hoomd.hpmc._hpmc', + "hoomd._hoomd", + "hoomd.version_config", + "hoomd.md._md", + "hoomd.metal._metal", + "hoomd.mpcd._mpcd", + "hoomd.minimize._minimize", + "hoomd.hpmc._hpmc", ] -templates_path = ['_templates'] -exclude_patterns = ['_build', 'figures', '**/create-figures.ipynb'] +templates_path = ["_templates"] +exclude_patterns = ["_build", "figures", "**/create-figures.ipynb"] -source_suffix = '.rst' +source_suffix = ".rst" -master_doc = 'index' +master_doc = "index" -project = 'HOOMD-blue' +project = "HOOMD-blue" year = datetime.date.today().year -copyright = f'2009-{year} The Regents of the University of Michigan' -author = 'The Regents of the University of Michigan' +copyright = f"2009-{year} The Regents of the University of Michigan" +author = "The Regents of the University of Michigan" -version = '4.9.1' -release = '4.9.1' +version = "4.9.1" +release = "4.9.1" -language = 'en' +language = "en" -default_role = 'any' +default_role = "any" pygments_style = "friendly" pygments_dark_style = "native" todo_include_todos = False -html_theme = 'furo' -html_static_path = ['_static'] -html_logo = 'hoomdblue-logo-vertical.svg' +html_theme = "furo" +html_static_path = ["_static"] +html_logo = "hoomdblue-logo-vertical.svg" html_theme_options = { - 'sidebar_hide_name': True, - 'top_of_page_buttons': [], + "sidebar_hide_name": True, + "top_of_page_buttons": [], "navigation_with_keys": True, "dark_css_variables": { "color-brand-primary": "#5187b2", @@ -95,9 +100,9 @@ "color-brand-content": "#406a8c", 
}, } -html_favicon = 'hoomdblue-logo-favicon.svg' +html_favicon = "hoomdblue-logo-favicon.svg" -IGNORE_MODULES = ['hoomd._hoomd'] +IGNORE_MODULES = ["hoomd._hoomd"] IGNORE_CLASSES = [] copybutton_prompt_text = "$ " @@ -110,8 +115,11 @@ def autodoc_process_bases(app, name, obj, options, bases): # bases must be modified in place. remove_indices = [] for i, base in enumerate(bases): - if (base.__module__ in IGNORE_MODULES or base.__name__.startswith("_") - or base.__name__ in IGNORE_CLASSES): + if ( + base.__module__ in IGNORE_MODULES + or base.__name__.startswith("_") + or base.__name__ in IGNORE_CLASSES + ): remove_indices.append(i) for i in reversed(remove_indices): del bases[i] @@ -119,4 +127,4 @@ def autodoc_process_bases(app, name, obj, options, bases): def setup(app): """Configure the Sphinx app.""" - app.connect('autodoc-process-bases', autodoc_process_bases) + app.connect("autodoc-process-bases", autodoc_process_bases) diff --git a/sphinx-doc/generate-toctree.py b/sphinx-doc/generate-toctree.py index 5d3aa2a916..1180d77eef 100644 --- a/sphinx-doc/generate-toctree.py +++ b/sphinx-doc/generate-toctree.py @@ -13,7 +13,7 @@ import os from pathlib import Path -TOPLEVEL = ['hpmc', 'mpcd', 'md'] +TOPLEVEL = ["hpmc", "mpcd", "md"] exit_value = 0 @@ -24,7 +24,7 @@ def generate_member_rst(path, full_module_name, name, type): and then customized as needed. 80+% of the files should not need customization. """ # Generate the file {name}.rst - underline = '=' * len(name) + underline = "=" * len(name) member_rst = f"{name}\n{underline}\n\n" member_rst += f".. py:currentmodule:: {full_module_name}\n\n" @@ -34,11 +34,11 @@ def generate_member_rst(path, full_module_name, name, type): # set :members: and :show-inheritance: for classes. Developers can remove these # individually when they are not appropriate. Unfortunately, we cannot make these # default in `conf.py` because there is no way to opt out of the default. - if type == 'class': + if type == "class": member_rst += " :members:\n" member_rst += " :show-inheritance:\n" - destination = (path / name.lower()).with_suffix('.rst') + destination = (path / name.lower()).with_suffix(".rst") if destination.exists(): return @@ -55,10 +55,10 @@ def generate_module_rst(path, module): global exit_value full_module_name = module.__name__ - module_name = full_module_name.split('.')[-1] + module_name = full_module_name.split(".")[-1] # Alphabetize the items - module_all = getattr(module, '__all__', None) + module_all = getattr(module, "__all__", None) if module_all is None: exit_value = 1 print(f"Warning: {full_module_name} is missing __all__") @@ -83,17 +83,17 @@ def generate_module_rst(path, module): if inspect.isclass(member): classes.append(member_name) - generate_member_rst(path, full_module_name, member_name, 'class') + generate_member_rst(path, full_module_name, member_name, "class") if inspect.isfunction(member): functions.append(member_name) - generate_member_rst(path, full_module_name, member_name, 'function') + generate_member_rst(path, full_module_name, member_name, "function") # data members should be documented directly in the module's docstring, and # are ignored here. # Generate the file module-{module_name}.rst - module_underline = '=' * len(module_name) + module_underline = "=" * len(module_name) module_rst = f"{module_name}\n{module_underline}\n\n" module_rst += f".. 
automodule:: {full_module_name}\n" @@ -102,39 +102,39 @@ def generate_module_rst(path, module): module_rst += f" :exclude-members: {','.join(classes + functions)}\n\n" if len(submodules) > 0: - module_rst += '.. rubric:: Modules\n\n.. toctree::\n :maxdepth: 1\n\n' + module_rst += ".. rubric:: Modules\n\n.. toctree::\n :maxdepth: 1\n\n" for submodule in submodules: if submodule not in TOPLEVEL: - module_rst += f' {module_name}/module-{submodule}\n' - module_rst += '\n' + module_rst += f" {module_name}/module-{submodule}\n" + module_rst += "\n" if len(classes) > 0: - module_rst += '.. rubric:: Classes\n\n.. toctree::\n :maxdepth: 1\n\n' + module_rst += ".. rubric:: Classes\n\n.. toctree::\n :maxdepth: 1\n\n" for class_name in classes: - module_rst += f' {module_name}/{class_name.lower()}\n' - module_rst += '\n' + module_rst += f" {module_name}/{class_name.lower()}\n" + module_rst += "\n" if len(functions) > 0: - module_rst += '.. rubric:: Functions\n\n.. toctree::\n :maxdepth: 1\n\n' + module_rst += ".. rubric:: Functions\n\n.. toctree::\n :maxdepth: 1\n\n" for function_name in functions: - module_rst += f' {module_name}/{function_name.lower()}\n' - module_rst += '\n' + module_rst += f" {module_name}/{function_name.lower()}\n" + module_rst += "\n" # ensure there is only one newline at the end of the file module_rst = module_rst.rstrip() - module_rst += '\n' - file = (path.parent / ('module-' + module_name)).with_suffix('.rst') + module_rst += "\n" + file = (path.parent / ("module-" + module_name)).with_suffix(".rst") file.write_text(module_rst) -if __name__ == '__main__': +if __name__ == "__main__": doc_dir = Path(__file__).parent repository_dir = doc_dir.parent sys.path.insert(0, str(repository_dir)) - os.environ['SPHINX'] = '1' + os.environ["SPHINX"] = "1" import hoomd - generate_module_rst(doc_dir / 'hoomd', hoomd) + generate_module_rst(doc_dir / "hoomd", hoomd) sys.exit(exit_value) diff --git a/sphinx-doc/howto/choose-the-neighbor-list-buffer-distance.py b/sphinx-doc/howto/choose-the-neighbor-list-buffer-distance.py index 86ee529a7e..34cd674ae5 100644 --- a/sphinx-doc/howto/choose-the-neighbor-list-buffer-distance.py +++ b/sphinx-doc/howto/choose-the-neighbor-list-buffer-distance.py @@ -6,19 +6,21 @@ # Prepare a MD simulation. device = hoomd.device.auto_select() simulation = hoomd.Simulation(device=device) -simulation.create_state_from_gsd(filename='spheres.gsd') +simulation.create_state_from_gsd(filename="spheres.gsd") simulation.state.thermalize_particle_momenta(filter=hoomd.filter.All(), kT=kT) neighbor_list = hoomd.md.nlist.Cell(buffer=0.4) lj = hoomd.md.pair.LJ(nlist=neighbor_list) -lj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) -lj.r_cut[('A', 'A')] = 2.5 +lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) +lj.r_cut[("A", "A")] = 2.5 bussi = hoomd.md.methods.thermostats.Bussi(kT=kT) -constant_volume = hoomd.md.methods.ConstantVolume(filter=hoomd.filter.All(), - thermostat=bussi) +constant_volume = hoomd.md.methods.ConstantVolume( + filter=hoomd.filter.All(), thermostat=bussi +) simulation.operations.integrator = hoomd.md.Integrator( - dt=0.001, methods=[constant_volume], forces=[lj]) + dt=0.001, methods=[constant_volume], forces=[lj] +) # Complete GPU kernel autotuning before making sensitive timing measurements. 
if isinstance(device, hoomd.device.GPU): @@ -29,5 +31,7 @@ for buffer in [0, 0.05, 0.1, 0.2, 0.3]: neighbor_list.buffer = buffer simulation.run(sample_steps) - device.notice(f'buffer={buffer}: TPS={simulation.tps:0.3g}, ' - f'num_builds={neighbor_list.num_builds}') + device.notice( + f"buffer={buffer}: TPS={simulation.tps:0.3g}, " + f"num_builds={neighbor_list.num_builds}" + ) diff --git a/sphinx-doc/howto/continuously-vary-potential-parameters.py b/sphinx-doc/howto/continuously-vary-potential-parameters.py index 50a8b96a46..c2e07145b1 100644 --- a/sphinx-doc/howto/continuously-vary-potential-parameters.py +++ b/sphinx-doc/howto/continuously-vary-potential-parameters.py @@ -4,31 +4,31 @@ simulation = hoomd.util.make_example_simulation() lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(buffer=0.4)) -lj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) -lj.r_cut[('A', 'A')] = 2.5 +lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) +lj.r_cut[("A", "A")] = 2.5 langevin = hoomd.md.methods.Langevin(filter=hoomd.filter.All(), kT=1.5) -simulation.operations.integrator = hoomd.md.Integrator(dt=0.001, - methods=[langevin], - forces=[lj]) +simulation.operations.integrator = hoomd.md.Integrator( + dt=0.001, methods=[langevin], forces=[lj] +) # Step 1: Subclass hoomd.custom.Action. class LJParameterModifer(hoomd.custom.Action): - def __init__(self, lj): super().__init__() self.lj = lj def act(self, timestep): epsilon = 1.0 + 4.0 * timestep / 1e6 - self.lj.params[('A', 'A')] = dict(epsilon=epsilon, sigma=1) + self.lj.params[("A", "A")] = dict(epsilon=epsilon, sigma=1) # Step 2: Create a hoomd.update.CustomUpdater lj_parameter_modifier = LJParameterModifer(lj) lj_parameter_updater = hoomd.update.CustomUpdater( - trigger=hoomd.trigger.Periodic(1), action=lj_parameter_modifier) + trigger=hoomd.trigger.Periodic(1), action=lj_parameter_modifier +) # Step 3: Add the updater to the operations simulation.operations.updaters.append(lj_parameter_updater) diff --git a/sphinx-doc/howto/determine-the-most-efficient-device.py b/sphinx-doc/howto/determine-the-most-efficient-device.py index 1d593a2eda..76cc36eb6b 100644 --- a/sphinx-doc/howto/determine-the-most-efficient-device.py +++ b/sphinx-doc/howto/determine-the-most-efficient-device.py @@ -5,15 +5,15 @@ # Parse command line arguments. 
parser = argparse.ArgumentParser() -parser.add_argument('--device', default='CPU') -parser.add_argument('--replicate', default=1, type=int) -parser.add_argument('--steps', default=10_000, type=int) +parser.add_argument("--device", default="CPU") +parser.add_argument("--replicate", default=1, type=int) +parser.add_argument("--steps", default=10_000, type=int) args = parser.parse_args() # Create WCA MD simulation device = getattr(hoomd.device, args.device)() simulation = hoomd.Simulation(device=device, seed=1) -simulation.create_state_from_gsd(filename='spheres.gsd') +simulation.create_state_from_gsd(filename="spheres.gsd") simulation.state.replicate( nx=args.replicate, ny=args.replicate, @@ -23,18 +23,19 @@ cell = hoomd.md.nlist.Cell(buffer=0.2) lj = hoomd.md.pair.LJ(nlist=cell) -lj.params[('A', 'A')] = dict(sigma=1, epsilon=1) -lj.r_cut[('A', 'A')] = 2**(1 / 6) +lj.params[("A", "A")] = dict(sigma=1, epsilon=1) +lj.r_cut[("A", "A")] = 2 ** (1 / 6) constant_volume = hoomd.md.methods.ConstantVolume( - filter=hoomd.filter.All(), - thermostat=hoomd.md.methods.thermostats.Bussi(kT=kT)) + filter=hoomd.filter.All(), thermostat=hoomd.md.methods.thermostats.Bussi(kT=kT) +) simulation.operations.integrator = hoomd.md.Integrator( - dt=0.001, methods=[constant_volume], forces=[lj]) + dt=0.001, methods=[constant_volume], forces=[lj] +) # Wait until GPU kernel parameter autotuning is complete. -if args.device == 'GPU': +if args.device == "GPU": simulation.run(100) while not simulation.operations.is_tuning_complete: simulation.run(100) @@ -44,4 +45,4 @@ # Run the benchmark and print the performance. simulation.run(args.steps) -device.notice(f'TPS: {simulation.tps:0.5g}') +device.notice(f"TPS: {simulation.tps:0.5g}") diff --git a/sphinx-doc/howto/minimize-potential-energy.py b/sphinx-doc/howto/minimize-potential-energy.py index a8e4847aaf..2cc9685833 100644 --- a/sphinx-doc/howto/minimize-potential-energy.py +++ b/sphinx-doc/howto/minimize-potential-energy.py @@ -5,17 +5,19 @@ # Step 1: Use hoomd.md.minize.FIRE as the integrator. constant_volume = hoomd.md.methods.ConstantVolume(filter=hoomd.filter.All()) -fire = hoomd.md.minimize.FIRE(dt=0.001, - force_tol=1e-3, - angmom_tol=1e-3, - energy_tol=1e-6, - methods=[constant_volume]) +fire = hoomd.md.minimize.FIRE( + dt=0.001, + force_tol=1e-3, + angmom_tol=1e-3, + energy_tol=1e-6, + methods=[constant_volume], +) simulation.operations.integrator = fire # Step 2: Apply forces to the particles. lj = hoomd.md.pair.LJ(nlist=hoomd.md.nlist.Cell(buffer=0.4)) -lj.params[('A', 'A')] = dict(epsilon=1.0, sigma=1.0) -lj.r_cut[('A', 'A')] = 2.5 +lj.params[("A", "A")] = dict(epsilon=1.0, sigma=1.0) +lj.r_cut[("A", "A")] = 2.5 simulation.operations.integrator.forces = [lj] diff --git a/sphinx-doc/howto/molecular.py b/sphinx-doc/howto/molecular.py index 6583039368..c1a7298e85 100644 --- a/sphinx-doc/howto/molecular.py +++ b/sphinx-doc/howto/molecular.py @@ -5,35 +5,32 @@ # Place a polymer in the box. frame.particles.N = 5 -frame.particles.position = [[-2, 0, 0], [-1, 0, 0], [0, 0, 0], [1, 0, 0], - [2, 0, 0]] -frame.particles.types = ['A'] +frame.particles.position = [[-2, 0, 0], [-1, 0, 0], [0, 0, 0], [1, 0, 0], [2, 0, 0]] +frame.particles.types = ["A"] frame.particles.typeid = [0] * 5 frame.configuration.box = [20, 20, 20, 0, 0, 0] # Connect particles with bonds. 
frame.bonds.N = 4 -frame.bonds.types = ['A-A'] +frame.bonds.types = ["A-A"] frame.bonds.typeid = [0] * 4 frame.bonds.group = [[0, 1], [1, 2], [2, 3], [3, 4]] -with gsd.hoomd.open(name='molecular.gsd', mode='x') as f: +with gsd.hoomd.open(name="molecular.gsd", mode="x") as f: f.append(frame) # Apply the harmonic potential on the bonds. harmonic = hoomd.md.bond.Harmonic() -harmonic.params['A-A'] = dict(k=100, r0=1.0) +harmonic.params["A-A"] = dict(k=100, r0=1.0) # Perform the MD simulation. sim = hoomd.Simulation(device=hoomd.device.CPU(), seed=1) -sim.create_state_from_gsd(filename='molecular.gsd') +sim.create_state_from_gsd(filename="molecular.gsd") langevin = hoomd.md.methods.Langevin(filter=hoomd.filter.All(), kT=1.0) -integrator = hoomd.md.Integrator(dt=0.005, - methods=[langevin], - forces=[harmonic]) -gsd_writer = hoomd.write.GSD(filename='molecular_trajectory.gsd', - trigger=hoomd.trigger.Periodic(1000), - mode='xb') +integrator = hoomd.md.Integrator(dt=0.005, methods=[langevin], forces=[harmonic]) +gsd_writer = hoomd.write.GSD( + filename="molecular_trajectory.gsd", trigger=hoomd.trigger.Periodic(1000), mode="xb" +) sim.operations.integrator = integrator sim.operations.writers.append(gsd_writer) sim.run(10e3) diff --git a/sphinx-doc/howto/prevent-particles-from-moving-hpmc.py b/sphinx-doc/howto/prevent-particles-from-moving-hpmc.py index f54301a5ae..b62d91215e 100644 --- a/sphinx-doc/howto/prevent-particles-from-moving-hpmc.py +++ b/sphinx-doc/howto/prevent-particles-from-moving-hpmc.py @@ -1,18 +1,17 @@ import hoomd # Step 1: Use different types for stationary and mobile particles. -simulation = hoomd.util.make_example_simulation( - particle_types=['A', 'A_no_motion']) +simulation = hoomd.util.make_example_simulation(particle_types=["A", "A_no_motion"]) hpmc_ellipsoid = hoomd.hpmc.integrate.Ellipsoid() -hpmc_ellipsoid.shape['A'] = dict(a=0.5, b=0.25, c=0.125) +hpmc_ellipsoid.shape["A"] = dict(a=0.5, b=0.25, c=0.125) # Step 2: Set the move sizes of the stationary type to 0. -hpmc_ellipsoid.d['A_no_motion'] = 0 -hpmc_ellipsoid.a['A_no_motion'] = 0 +hpmc_ellipsoid.d["A_no_motion"] = 0 +hpmc_ellipsoid.a["A_no_motion"] = 0 # Step 3: Set the shape of the stationary type accordingly. -hpmc_ellipsoid.shape['A_no_motion'] = hpmc_ellipsoid.shape['A'] +hpmc_ellipsoid.shape["A_no_motion"] = hpmc_ellipsoid.shape["A"] simulation.operations.integrator = hpmc_ellipsoid diff --git a/sphinx-doc/howto/prevent-particles-from-moving-md.py b/sphinx-doc/howto/prevent-particles-from-moving-md.py index 0baa2deeb0..d06a17dbe3 100644 --- a/sphinx-doc/howto/prevent-particles-from-moving-md.py +++ b/sphinx-doc/howto/prevent-particles-from-moving-md.py @@ -4,12 +4,10 @@ # Select mobile particles with a filter. stationary_particles = hoomd.filter.Tags([0]) -mobile_particles = hoomd.filter.SetDifference(hoomd.filter.All(), - stationary_particles) +mobile_particles = hoomd.filter.SetDifference(hoomd.filter.All(), stationary_particles) # Integrate the equations of motion of the mobile particles. 
langevin = hoomd.md.methods.Langevin(filter=mobile_particles, kT=1.5) -simulation.operations.integrator = hoomd.md.Integrator(dt=0.001, - methods=[langevin]) +simulation.operations.integrator = hoomd.md.Integrator(dt=0.001, methods=[langevin]) simulation.run(100) diff --git a/sphinx-doc/howto/tune-mc-move-sizes-binary-system.py b/sphinx-doc/howto/tune-mc-move-sizes-binary-system.py index 53c5eb4a0f..5d74f26abd 100644 --- a/sphinx-doc/howto/tune-mc-move-sizes-binary-system.py +++ b/sphinx-doc/howto/tune-mc-move-sizes-binary-system.py @@ -1,27 +1,28 @@ import hoomd -simulation = hoomd.util.make_example_simulation(particle_types=['A', 'B']) +simulation = hoomd.util.make_example_simulation(particle_types=["A", "B"]) simulation.state.replicate(nx=3, ny=3, nz=3) mc = hoomd.hpmc.integrate.Ellipsoid() -mc.shape['A'] = dict(a=1.0, b=1.0, c=0.25) -mc.shape['B'] = dict(a=1.0, b=1.0, c=0.5) +mc.shape["A"] = dict(a=1.0, b=1.0, c=0.25) +mc.shape["B"] = dict(a=1.0, b=1.0, c=0.5) simulation.operations.integrator = mc # loop over particle types and set ignore_statistics = True for ignored_type in simulation.state.particle_types: - mc.shape[ignored_type]['ignore_statistics'] = True + mc.shape[ignored_type]["ignore_statistics"] = True # loop over particle types to tune move sizes for for tuned_type in simulation.state.particle_types: move_size_tuner = hoomd.hpmc.tune.MoveSize.scale_solver( - 100, ['a', 'd'], 0.2, [tuned_type]) + 100, ["a", "d"], 0.2, [tuned_type] + ) simulation.operations.add(move_size_tuner) - mc.shape[tuned_type]['ignore_statistics'] = False + mc.shape[tuned_type]["ignore_statistics"] = False simulation.run(1000) - mc.shape[tuned_type]['ignore_statistics'] = True + mc.shape[tuned_type]["ignore_statistics"] = True simulation.operations.remove(move_size_tuner) # stop ignoring statistics after tuning for ignored_type in simulation.state.particle_types: - mc.shape[ignored_type]['ignore_statistics'] = False + mc.shape[ignored_type]["ignore_statistics"] = False diff --git a/sphinx-doc/style.rst b/sphinx-doc/style.rst index f98eea5031..967953320c 100644 --- a/sphinx-doc/style.rst +++ b/sphinx-doc/style.rst @@ -17,26 +17,20 @@ Python ------ Python code in HOOMD-blue should follow `PEP8`_ with the formatting performed by -`yapf`_ (configuration in ``setup.cfg``). Code should pass all **flake8** tests -and formatted by **yapf**. +`ruff`_ (configuration in ``ruff.toml``). Code should pass all **ruff** checks +and be formatted by **ruff**. .. _PEP8: https://www.python.org/dev/peps/pep-0008 -.. _yapf: https://github.com/google/yapf +.. _ruff: https://astral.sh/ruff Tools ^^^^^ -* Linter: `flake8 `_ +* Linter: `ruff`_ - * With these plugins: + * Configure the `ruff`_ language server in your editor to see violations in real time. - * `pep8-naming `_ - * `flake8-docstrings `_ - * `flake8-rst-docstrings `_ - - * Configure flake8 in your editor to see violations on save. - -* Autoformatter: `yapf `_ +* Autoformatter: `ruff`_ * Run: ``pre-commit run --all-files`` to apply style changes to the whole repository. @@ -153,10 +147,3 @@ apply: * 100 character line width. * 4 spaces per indent level. * 4 space indent. - -Editor configuration --------------------- - -`Visual Studio Code `_ users: Open the provided -workspace file (``hoomd.code-workspace``) which provides configuration -settings for these style guidelines.
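As a closing illustration, the two logging writers reformatted in this change compose as below. This is an illustrative sketch rather than part of the change set: the simulation setup and filename are placeholders, ``HDF5Log`` additionally requires the h5py package, and the ``Logger.add`` calls follow the standard HOOMD logging API. Because the HDF5 backend rejects string and object categories while ``Table`` accepts scalar and string categories, each writer gets its own logger:

.. code-block:: python

    import hoomd

    # Placeholder setup, as in the howto scripts above.
    simulation = hoomd.util.make_example_simulation()

    # Table accepts scalar and string categories and writes to stdout by default.
    table_logger = hoomd.logging.Logger(categories=["scalar", "string"])
    table_logger.add(simulation, quantities=["timestep", "tps"])
    table = hoomd.write.Table(
        trigger=hoomd.trigger.Periodic(10_000),
        logger=table_logger,
    )
    simulation.operations.writers.append(table)

    # HDF5Log raises ValueError for string or object categories, so build its
    # logger from the categories it accepts (requires the h5py package).
    hdf5_logger = hoomd.logging.Logger(hoomd.write.HDF5Log.accepted_categories)
    hdf5_logger.add(simulation, quantities=["timestep", "tps"])
    hdf5_log = hoomd.write.HDF5Log(
        trigger=hoomd.trigger.Periodic(10_000),
        filename="log_example.h5",
        logger=hdf5_logger,
    )
    simulation.operations.writers.append(hdf5_log)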