diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4b194ae5c41..eccc6c6c397 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -25,7 +25,7 @@ repos:
       - id: text-unicode-replacement-char
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: v0.9.9
+    rev: v0.11.4
     hooks:
       - id: ruff-format
       - id: ruff
@@ -69,12 +69,12 @@ repos:
       - id: taplo-format
         args: ["--option", "array_auto_collapse=false"]
   - repo: https://github.com/abravalheri/validate-pyproject
-    rev: v0.23
+    rev: v0.24.1
     hooks:
       - id: validate-pyproject
        additional_dependencies: ["validate-pyproject-schema-store[all]"]
   - repo: https://github.com/crate-ci/typos
-    rev: dictgen-v0.3.1
+    rev: v1
    hooks:
      - id: typos
        # https://github.com/crate-ci/typos/issues/347
diff --git a/design_notes/flexible_indexes_notes.md b/design_notes/flexible_indexes_notes.md
index 76c618aa37c..382911c18de 100644
--- a/design_notes/flexible_indexes_notes.md
+++ b/design_notes/flexible_indexes_notes.md
@@ -166,7 +166,7 @@ Besides `pandas.Index`, other indexes currently supported in Xarray like `CFTime

 Like for the indexes, explicit coordinate creation should be preferred over implicit coordinate creation. However, there may be some situations where we would like to keep creating coordinates implicitly for backwards compatibility.

-For example, it is currently possible to pass a `pandas.MulitIndex` object as a coordinate to the Dataset/DataArray constructor:
+For example, it is currently possible to pass a `pandas.MultiIndex` object as a coordinate to the Dataset/DataArray constructor:

 ```python
 >>> midx = pd.MultiIndex.from_arrays([['a', 'b'], [0, 1]], names=['lvl1', 'lvl2'])
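For context, the explicit style the design note advocates exists in current xarray as `Coordinates.from_pandas_multiindex` (whose internals are touched later in this diff). A minimal sketch, assuming a recent xarray:

```python
import pandas as pd
import xarray as xr

midx = pd.MultiIndex.from_arrays([["a", "b"], [0, 1]], names=["lvl1", "lvl2"])

# Build the level coordinates and their index explicitly, instead of
# relying on the constructor to expand the MultiIndex implicitly.
coords = xr.Coordinates.from_pandas_multiindex(midx, dim="x")
ds = xr.Dataset(coords=coords)
```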
diff --git a/doc/getting-started-guide/quick-overview.rst b/doc/getting-started-guide/quick-overview.rst
index 8b908f0cd70..d60156caa5b 100644
--- a/doc/getting-started-guide/quick-overview.rst
+++ b/doc/getting-started-guide/quick-overview.rst
@@ -128,7 +128,7 @@ Operations also align based on index labels:

     data[:-1] - data[:1]

-For more, see :ref:`comput`.
+For more, see :ref:`compute`.

 GroupBy
 -------
diff --git a/doc/user-guide/computation.rst b/doc/user-guide/computation.rst
index 5d7002484c2..9953808e931 100644
--- a/doc/user-guide/computation.rst
+++ b/doc/user-guide/computation.rst
@@ -1,6 +1,6 @@
 .. currentmodule:: xarray

-.. _comput:
+.. _compute:

 ###########
 Computation
 ###########
@@ -236,7 +236,7 @@ These operations automatically skip missing values, like in pandas:
 If desired, you can disable this behavior by invoking the aggregation
 method with ``skipna=False``.

-.. _comput.rolling:
+.. _compute.rolling:

 Rolling window operations
 =========================
@@ -308,7 +308,7 @@ We can also manually iterate through ``Rolling`` objects:
         # arr_window is a view of x
         ...

-.. _comput.rolling_exp:
+.. _compute.rolling_exp:

 While ``rolling`` provides a simple moving average, ``DataArray`` also supports
 an exponential moving average with :py:meth:`~xarray.DataArray.rolling_exp`.
@@ -354,7 +354,7 @@ You can also use ``construct`` to compute a weighted rolling sum:

 To avoid this, use ``skipna=False`` as the above example.

-.. _comput.weighted:
+.. _compute.weighted:

 Weighted array reductions
 =========================
@@ -823,7 +823,7 @@ Arithmetic between two datasets matches data variables of the same name:
 Similarly to index based alignment, the result has the intersection of all
 matching data variables.

-.. _comput.wrapping-custom:
+.. _compute.wrapping-custom:

 Wrapping custom computation
 ===========================
diff --git a/doc/user-guide/dask.rst b/doc/user-guide/dask.rst
index cadb7962f1c..184681aa4c9 100644
--- a/doc/user-guide/dask.rst
+++ b/doc/user-guide/dask.rst
@@ -282,7 +282,7 @@ we use to calculate `Spearman's rank-correlation coefficient `_.
diff --git a/doc/whats-new.rst b/doc/whats-new.rst
--- a/doc/whats-new.rst
+++ b/doc/whats-new.rst
 - The new jupyter notebook repr (``Dataset._repr_html_`` and
   ``DataArray._repr_html_``) (introduced in 0.14.1) is now on by default. To
@@ -6412,7 +6412,7 @@ Enhancements
 - New helper function :py:func:`~xarray.apply_ufunc` for wrapping functions
   written to work on NumPy arrays to support labels on xarray objects
   (:issue:`770`). ``apply_ufunc`` also support automatic parallelization for
-  many functions with dask. See :ref:`comput.wrapping-custom` and
+  many functions with dask. See :ref:`compute.wrapping-custom` and
   :ref:`dask.automatic-parallelization` for details.
   By `Stephan Hoyer `_.
@@ -7434,7 +7434,7 @@ Enhancements
   * x        (x) int64 0 1 2
   * y        (y) int64 0 1 2 3 4

-  See :ref:`comput.rolling` for more details. By
+  See :ref:`compute.rolling` for more details. By
   `Joe Hamman `_.

 Bug fixes
diff --git a/xarray/backends/zarr.py b/xarray/backends/zarr.py
index d59cd8fb174..1a46346dda7 100644
--- a/xarray/backends/zarr.py
+++ b/xarray/backends/zarr.py
@@ -1290,7 +1290,7 @@ def _validate_and_autodetect_region(self, ds: Dataset) -> Dataset:
         region = self._write_region

         if region == "auto":
-            region = {dim: "auto" for dim in ds.dims}
+            region = dict.fromkeys(ds.dims, "auto")

         if not isinstance(region, dict):
             raise TypeError(f"``region`` must be a dict, got {type(region)}")
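Throughout this PR, `{k: v for k in keys}` becomes `dict.fromkeys(keys, v)`. The two spellings build equal dicts, but `fromkeys` stores one shared value object under every key, so the refactor is only behavior-preserving where the value is immutable (strings, ints, tuples) or never mutated afterwards, as in the hunks here. A quick illustration:

```python
dims = ("x", "y", "z")

# Equivalent for an immutable value such as the "auto" sentinel above.
assert dict.fromkeys(dims, "auto") == {d: "auto" for d in dims}

# With a mutable value, every key sees the same shared object.
shared = dict.fromkeys(dims, [])
shared["x"].append(1)
print(shared)  # {'x': [1], 'y': [1], 'z': [1]}
```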
diff --git a/xarray/computation/fit.py b/xarray/computation/fit.py
index ef9332869cc..e9c5179bcb5 100644
--- a/xarray/computation/fit.py
+++ b/xarray/computation/fit.py
@@ -80,8 +80,8 @@ def _initialize_feasible(lb, ub):
         )
         return p0

-    param_defaults = {p: 1 for p in params}
-    bounds_defaults = {p: (-np.inf, np.inf) for p in params}
+    param_defaults = dict.fromkeys(params, 1)
+    bounds_defaults = dict.fromkeys(params, (-np.inf, np.inf))
     for p in params:
         if p in func_args and func_args[p].default is not func_args[p].empty:
             param_defaults[p] = func_args[p].default
diff --git a/xarray/computation/rolling.py b/xarray/computation/rolling.py
index cc54bc6c14c..4a69cf9baa0 100644
--- a/xarray/computation/rolling.py
+++ b/xarray/computation/rolling.py
@@ -1087,7 +1087,7 @@ def __init__(
         if utils.is_dict_like(coord_func):
             coord_func_map = coord_func
         else:
-            coord_func_map = {d: coord_func for d in self.obj.dims}
+            coord_func_map = dict.fromkeys(self.obj.dims, coord_func)
         for c in self.obj.coords:
             if c not in coord_func_map:
                 coord_func_map[c] = duck_array_ops.mean  # type: ignore[index]
diff --git a/xarray/core/common.py b/xarray/core/common.py
index bafe1414ee8..a56c4458716 100644
--- a/xarray/core/common.py
+++ b/xarray/core/common.py
@@ -457,7 +457,7 @@ def squeeze(
         numpy.squeeze
         """
         dims = get_squeeze_dims(self, dim, axis)
-        return self.isel(drop=drop, **{d: 0 for d in dims})
+        return self.isel(drop=drop, **dict.fromkeys(dims, 0))

     def clip(
         self,
@@ -1701,11 +1701,11 @@ def full_like(

     if isinstance(other, Dataset):
         if not isinstance(fill_value, dict):
-            fill_value = {k: fill_value for k in other.data_vars.keys()}
+            fill_value = dict.fromkeys(other.data_vars.keys(), fill_value)

         dtype_: Mapping[Any, DTypeLikeSave]
         if not isinstance(dtype, Mapping):
-            dtype_ = {k: dtype for k in other.data_vars.keys()}
+            dtype_ = dict.fromkeys(other.data_vars.keys(), dtype)
         else:
             dtype_ = dtype
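For reference, the `full_like` hunk above normalizes a scalar `fill_value` into a per-variable mapping, so the two calls below should be interchangeable (the dataset contents are made up for illustration):

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"a": ("x", np.arange(3.0)), "b": ("x", np.ones(3))})

# A scalar fill value is broadcast to every data variable...
xr.full_like(ds, fill_value=0)

# ...which matches the normalized per-variable mapping form.
xr.full_like(ds, fill_value=dict.fromkeys(ds.data_vars, 0))
```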
indexers_kwargs, "tail") for k, v in indexers.items(): if not isinstance(v, int): @@ -3186,7 +3186,7 @@ def thin( ): raise TypeError("indexers must be either dict-like or a single integer") if isinstance(indexers, int): - indexers = {dim: indexers for dim in self.dims} + indexers = dict.fromkeys(self.dims, indexers) indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "thin") for k, v in indexers.items(): if not isinstance(v, int): @@ -4029,7 +4029,7 @@ def _rename_indexes( for index, coord_names in self.xindexes.group_by_index(): new_index = index.rename(name_dict, dims_dict) new_coord_names = [name_dict.get(k, k) for k in coord_names] - indexes.update({k: new_index for k in new_coord_names}) + indexes.update(dict.fromkeys(new_coord_names, new_index)) new_index_vars = new_index.create_variables( { new: self._variables[old] @@ -4315,7 +4315,7 @@ def swap_dims( variables[current_name] = var else: index, index_vars = create_default_index_implicit(var) - indexes.update({name: index for name in index_vars}) + indexes.update(dict.fromkeys(index_vars, index)) variables.update(index_vars) coord_names.update(index_vars) else: @@ -4474,7 +4474,7 @@ def expand_dims( elif isinstance(dim, Sequence): if len(dim) != len(set(dim)): raise ValueError("dims should not contain duplicate values.") - dim = {d: 1 for d in dim} + dim = dict.fromkeys(dim, 1) dim = either_dict_or_kwargs(dim, dim_kwargs, "expand_dims") assert isinstance(dim, MutableMapping) @@ -4700,7 +4700,7 @@ def set_index( for n in idx.index.names: replace_dims[n] = dim - new_indexes.update({k: idx for k in idx_vars}) + new_indexes.update(dict.fromkeys(idx_vars, idx)) new_variables.update(idx_vars) # re-add deindexed coordinates (convert to base variables) @@ -4816,7 +4816,7 @@ def drop_or_convert(var_names): # instead replace it by a new (multi-)index with dropped level(s) idx = index.keep_levels(keep_level_vars) idx_vars = idx.create_variables(keep_level_vars) - new_indexes.update({k: idx for k in idx_vars}) + new_indexes.update(dict.fromkeys(idx_vars, idx)) new_variables.update(idx_vars) if not isinstance(idx, PandasMultiIndex): # multi-index reduced to single index @@ -4996,7 +4996,7 @@ def reorder_levels( level_vars = {k: self._variables[k] for k in order} idx = index.reorder_levels(level_vars) idx_vars = idx.create_variables(level_vars) - new_indexes.update({k: idx for k in idx_vars}) + new_indexes.update(dict.fromkeys(idx_vars, idx)) new_variables.update(idx_vars) indexes = {k: v for k, v in self._indexes.items() if k not in new_indexes} @@ -5104,7 +5104,7 @@ def _stack_once( if len(product_vars) == len(dims): idx = index_cls.stack(product_vars, new_dim) new_indexes[new_dim] = idx - new_indexes.update({k: idx for k in product_vars}) + new_indexes.update(dict.fromkeys(product_vars, idx)) idx_vars = idx.create_variables(product_vars) # keep consistent multi-index coordinate order for k in idx_vars: @@ -5351,7 +5351,7 @@ def _unstack_full_reindex( # TODO: we may depreciate implicit re-indexing with a pandas.MultiIndex xr_full_idx = PandasMultiIndex(full_idx, dim) indexers = Indexes( - {k: xr_full_idx for k in index_vars}, + dict.fromkeys(index_vars, xr_full_idx), xr_full_idx.create_variables(index_vars), ) obj = self._reindex( @@ -10052,7 +10052,7 @@ def weighted(self, weights: DataArray) -> DatasetWeighted: -------- :func:`DataArray.weighted ` - :ref:`comput.weighted` + :ref:`compute.weighted` User guide on weighted array reduction using :py:func:`~xarray.Dataset.weighted` :doc:`xarray-tutorial:fundamentals/03.4_weighted` diff --git 
@@ -4029,7 +4029,7 @@ def _rename_indexes(
         for index, coord_names in self.xindexes.group_by_index():
             new_index = index.rename(name_dict, dims_dict)
             new_coord_names = [name_dict.get(k, k) for k in coord_names]
-            indexes.update({k: new_index for k in new_coord_names})
+            indexes.update(dict.fromkeys(new_coord_names, new_index))
             new_index_vars = new_index.create_variables(
                 {
                     new: self._variables[old]
@@ -4315,7 +4315,7 @@ def swap_dims(
                     variables[current_name] = var
                 else:
                     index, index_vars = create_default_index_implicit(var)
-                    indexes.update({name: index for name in index_vars})
+                    indexes.update(dict.fromkeys(index_vars, index))
                     variables.update(index_vars)
                     coord_names.update(index_vars)
             else:
@@ -4474,7 +4474,7 @@ def expand_dims(
         elif isinstance(dim, Sequence):
             if len(dim) != len(set(dim)):
                 raise ValueError("dims should not contain duplicate values.")
-            dim = {d: 1 for d in dim}
+            dim = dict.fromkeys(dim, 1)

         dim = either_dict_or_kwargs(dim, dim_kwargs, "expand_dims")
         assert isinstance(dim, MutableMapping)
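Likewise, `expand_dims` treats a sequence of names as new size-1 dimensions, which the hunk above now spells with `dict.fromkeys` (dimension names are illustrative):

```python
import numpy as np
import xarray as xr

da = xr.DataArray(np.zeros(3), dims="x")

# A sequence of new dimension names is shorthand for size-1 entries.
assert da.expand_dims(["u", "v"]).identical(
    da.expand_dims(dict.fromkeys(["u", "v"], 1))
)
```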
@@ -4700,7 +4700,7 @@ def set_index(
                 for n in idx.index.names:
                     replace_dims[n] = dim

-            new_indexes.update({k: idx for k in idx_vars})
+            new_indexes.update(dict.fromkeys(idx_vars, idx))
             new_variables.update(idx_vars)

         # re-add deindexed coordinates (convert to base variables)
@@ -4816,7 +4816,7 @@ def drop_or_convert(var_names):
                 # instead replace it by a new (multi-)index with dropped level(s)
                 idx = index.keep_levels(keep_level_vars)
                 idx_vars = idx.create_variables(keep_level_vars)
-                new_indexes.update({k: idx for k in idx_vars})
+                new_indexes.update(dict.fromkeys(idx_vars, idx))
                 new_variables.update(idx_vars)
                 if not isinstance(idx, PandasMultiIndex):
                     # multi-index reduced to single index
@@ -4996,7 +4996,7 @@ def reorder_levels(
             level_vars = {k: self._variables[k] for k in order}
             idx = index.reorder_levels(level_vars)
             idx_vars = idx.create_variables(level_vars)
-            new_indexes.update({k: idx for k in idx_vars})
+            new_indexes.update(dict.fromkeys(idx_vars, idx))
             new_variables.update(idx_vars)

         indexes = {k: v for k, v in self._indexes.items() if k not in new_indexes}
@@ -5104,7 +5104,7 @@ def _stack_once(
         if len(product_vars) == len(dims):
             idx = index_cls.stack(product_vars, new_dim)
             new_indexes[new_dim] = idx
-            new_indexes.update({k: idx for k in product_vars})
+            new_indexes.update(dict.fromkeys(product_vars, idx))
             idx_vars = idx.create_variables(product_vars)
             # keep consistent multi-index coordinate order
             for k in idx_vars:
@@ -5351,7 +5351,7 @@ def _unstack_full_reindex(
         # TODO: we may depreciate implicit re-indexing with a pandas.MultiIndex
         xr_full_idx = PandasMultiIndex(full_idx, dim)
         indexers = Indexes(
-            {k: xr_full_idx for k in index_vars},
+            dict.fromkeys(index_vars, xr_full_idx),
             xr_full_idx.create_variables(index_vars),
         )
         obj = self._reindex(
@@ -10052,7 +10052,7 @@ def weighted(self, weights: DataArray) -> DatasetWeighted:
         --------
         :func:`DataArray.weighted `

-        :ref:`comput.weighted`
+        :ref:`compute.weighted`
             User guide on weighted array reduction using :py:func:`~xarray.Dataset.weighted`

         :doc:`xarray-tutorial:fundamentals/03.4_weighted`
diff --git a/xarray/core/indexes.py b/xarray/core/indexes.py
index 0b4eee7b21c..bc126375218 100644
--- a/xarray/core/indexes.py
+++ b/xarray/core/indexes.py
@@ -1329,7 +1329,7 @@ def sel(self, labels, method=None, tolerance=None) -> IndexSelResult:
             # variable(s) attrs and encoding metadata are propagated
             # when replacing the indexes in the resulting xarray object
             new_vars = new_index.create_variables()
-            indexes = cast(dict[Any, Index], {k: new_index for k in new_vars})
+            indexes = cast(dict[Any, Index], dict.fromkeys(new_vars, new_index))

             # add scalar variable for each dropped level
             variables = new_vars
@@ -1518,7 +1518,7 @@ def create_default_index_implicit(
     if all_variables is None:
         all_variables = {}
     if not isinstance(all_variables, Mapping):
-        all_variables = {k: None for k in all_variables}
+        all_variables = dict.fromkeys(all_variables)

     name = dim_variable.dims[0]
     array = getattr(dim_variable._data, "array", None)
@@ -1809,7 +1809,7 @@ def copy_indexes(
             if convert_new_idx:
                 new_idx = new_idx.index  # type: ignore[attr-defined]

-            new_indexes.update({k: new_idx for k in coords})
+            new_indexes.update(dict.fromkeys(coords, new_idx))
             new_index_vars.update(idx_vars)

         return new_indexes, new_index_vars
@@ -1855,7 +1855,7 @@ def default_indexes(
         if name in dims and var.ndim == 1:
             index, index_vars = create_default_index_implicit(var, coords)
             if set(index_vars) <= coord_names:
-                indexes.update({k: index for k in index_vars})
+                indexes.update(dict.fromkeys(index_vars, index))

     return indexes
@@ -1958,7 +1958,7 @@ def _apply_indexes_fast(indexes: Indexes[Index], args: Mapping[Any, Any], func:
         if index_args:
             new_index = getattr(index, func)(index_args)
             if new_index is not None:
-                new_indexes.update({k: new_index for k in index_vars})
+                new_indexes.update(dict.fromkeys(index_vars, new_index))
                 new_index_vars = new_index.create_variables(index_vars)
                 new_index_variables.update(new_index_vars)
             else:
@@ -1981,7 +1981,7 @@ def _apply_indexes(
         if index_args:
             new_index = getattr(index, func)(index_args)
             if new_index is not None:
-                new_indexes.update({k: new_index for k in index_vars})
+                new_indexes.update(dict.fromkeys(index_vars, new_index))
                 new_index_vars = new_index.create_variables(index_vars)
                 new_index_variables.update(new_index_vars)
             else:
diff --git a/xarray/core/variable.py b/xarray/core/variable.py
index 3d41a48644f..f59680dd7df 100644
--- a/xarray/core/variable.py
+++ b/xarray/core/variable.py
@@ -1043,7 +1043,7 @@ def squeeze(self, dim=None):
         numpy.squeeze
         """
         dims = common.get_squeeze_dims(self, dim)
-        return self.isel({d: 0 for d in dims})
+        return self.isel(dict.fromkeys(dims, 0))

     def _shift_one_dim(self, dim, count, fill_value=dtypes.NA):
         axis = self.get_axis_num(dim)
@@ -2152,10 +2152,10 @@ def coarsen_reshape(self, windows, boundary, side):
         Construct a reshaped-array for coarsen
         """
         if not is_dict_like(boundary):
-            boundary = {d: boundary for d in windows.keys()}
+            boundary = dict.fromkeys(windows.keys(), boundary)

         if not is_dict_like(side):
-            side = {d: side for d in windows.keys()}
+            side = dict.fromkeys(windows.keys(), side)

         # remove unrelated dimensions
         boundary = {k: v for k, v in boundary.items() if k in windows}
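`coarsen_reshape` applies the same normalization to `boundary` and `side`: a single value fans out to every coarsened dimension. At the user level that looks like the following (a sketch; the window size and boundary choice are arbitrary):

```python
import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(7.0), dims="x")

# One boundary setting is applied to each coarsened dimension,
# mirroring the dict.fromkeys fan-out above.
da.coarsen(x=2, boundary="trim").mean()
```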
diff --git a/xarray/structure/concat.py b/xarray/structure/concat.py
index d1d64b4e1e3..81269320e1c 100644
--- a/xarray/structure/concat.py
+++ b/xarray/structure/concat.py
@@ -657,7 +657,7 @@ def get_indexes(name):
             else:
                 # index created from a scalar coordinate
                 idx_vars = {name: datasets[0][name].variable}

-            result_indexes.update({k: combined_idx for k in idx_vars})
+            result_indexes.update(dict.fromkeys(idx_vars, combined_idx))
             combined_idx_vars = combined_idx.create_variables(idx_vars)
             for k, v in combined_idx_vars.items():
                 v.attrs = merge_attrs(
diff --git a/xarray/structure/merge.py b/xarray/structure/merge.py
index a06fbfc6d81..8f9835aaaa1 100644
--- a/xarray/structure/merge.py
+++ b/xarray/structure/merge.py
@@ -365,7 +365,7 @@ def append_all(variables, indexes):
                 append(name, variable, indexes[name])
             elif variable.dims == (name,):
                 idx, idx_vars = create_default_index_implicit(variable)
-                append_all(idx_vars, {k: idx for k in idx_vars})
+                append_all(idx_vars, dict.fromkeys(idx_vars, idx))
             else:
                 append(name, variable, None)
diff --git a/xarray/tests/test_backends_datatree.py b/xarray/tests/test_backends_datatree.py
index 579d1f7314b..2ff41adde0c 100644
--- a/xarray/tests/test_backends_datatree.py
+++ b/xarray/tests/test_backends_datatree.py
@@ -235,7 +235,7 @@ def test_netcdf_encoding(self, tmpdir, simple_datatree):
         # add compression
         comp = dict(zlib=True, complevel=9)
-        enc = {"/set2": {var: comp for var in original_dt["/set2"].dataset.data_vars}}
+        enc = {"/set2": dict.fromkeys(original_dt["/set2"].dataset.data_vars, comp)}
         original_dt.to_netcdf(filepath, encoding=enc, engine=self.engine)

         with open_datatree(filepath, engine=self.engine) as roundtrip_dt:
@@ -474,7 +474,7 @@ def test_zarr_encoding(self, tmpdir, simple_datatree, zarr_format):
             comp = {"compressors": (numcodecs.zarr3.Blosc(cname="zstd", clevel=3),)}

-        enc = {"/set2": {var: comp for var in original_dt["/set2"].dataset.data_vars}}
+        enc = {"/set2": dict.fromkeys(original_dt["/set2"].dataset.data_vars, comp)}
         original_dt.to_zarr(filepath, encoding=enc, zarr_format=zarr_format)

         with open_datatree(filepath, engine="zarr") as roundtrip_dt:
diff --git a/xarray/tests/test_concat.py b/xarray/tests/test_concat.py
index b4b02e25aee..5f484ec6d07 100644
--- a/xarray/tests/test_concat.py
+++ b/xarray/tests/test_concat.py
@@ -43,7 +43,7 @@ def create_concat_datasets(
                 ["x", "y", "day"],
                 rng.standard_normal(size=(1, 4, 2)),
             )
-            data_vars = {v: data_tuple for v in variables}
+            data_vars = dict.fromkeys(variables, data_tuple)
             result.append(
                 Dataset(
                     data_vars=data_vars,
@@ -59,7 +59,7 @@ def create_concat_datasets(
                 ["x", "y"],
                 rng.standard_normal(size=(1, 4)),
             )
-            data_vars = {v: data_tuple for v in variables}
+            data_vars = dict.fromkeys(variables, data_tuple)
             result.append(
                 Dataset(
                     data_vars=data_vars,
diff --git a/xarray/tests/test_datatree_mapping.py b/xarray/tests/test_datatree_mapping.py
index 6cb4455b739..277a19887eb 100644
--- a/xarray/tests/test_datatree_mapping.py
+++ b/xarray/tests/test_datatree_mapping.py
@@ -32,7 +32,7 @@ def test_not_isomorphic(self, create_test_datatree):
     def test_no_trees_returned(self, create_test_datatree):
         dt1 = create_test_datatree()
         dt2 = create_test_datatree()
-        expected = xr.DataTree.from_dict({k: None for k in dt1.to_dict()})
+        expected = xr.DataTree.from_dict(dict.fromkeys(dt1.to_dict()))
         actual = map_over_datasets(lambda x, y: None, dt1, dt2)
         assert_equal(expected, actual)
diff --git a/xarray/tests/test_duck_array_ops.py b/xarray/tests/test_duck_array_ops.py
index c0ecfe638dc..dcf8349aba4 100644
--- a/xarray/tests/test_duck_array_ops.py
+++ b/xarray/tests/test_duck_array_ops.py
@@ -367,7 +367,7 @@ def construct_dataarray(dim_num, dtype, contains_nan, dask):
     da = DataArray(array, dims=dims, coords={"x": np.arange(16)}, name="da")

     if dask and has_dask:
-        chunks = {d: 4 for d in dims}
+        chunks = dict.fromkeys(dims, 4)
         da = da.chunk(chunks)

     return da
diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py
index 9b658fa0d66..2e0925c1b9a 100644
--- a/xarray/tests/test_formatting.py
+++ b/xarray/tests/test_formatting.py
@@ -833,7 +833,7 @@ def test__mapping_repr(display_max_rows, n_vars, n_attr) -> None:
     a = np.char.add(long_name, np.arange(0, n_vars).astype(str))
     b = np.char.add("attr_", np.arange(0, n_attr).astype(str))
     c = np.char.add("coord", np.arange(0, n_vars).astype(str))
-    attrs = {k: 2 for k in b}
+    attrs = dict.fromkeys(b, 2)
     coords = {_c: np.array([0, 1], dtype=np.uint64) for _c in c}
     data_vars = dict()
     for v, _c in zip(a, coords.items(), strict=True):
diff --git a/xarray/tests/test_indexing.py b/xarray/tests/test_indexing.py
index c39062b8419..6dd75b58c6a 100644
--- a/xarray/tests/test_indexing.py
+++ b/xarray/tests/test_indexing.py
@@ -129,7 +129,7 @@ def create_sel_results(
 ):
     dim_indexers = {"x": x_indexer}
     index_vars = x_index.create_variables()
-    indexes = {k: x_index for k in index_vars}
+    indexes = dict.fromkeys(index_vars, x_index)
     variables = {}
     variables.update(index_vars)
     variables.update(other_vars)
diff --git a/xarray/tests/test_units.py b/xarray/tests/test_units.py
index 3999ae1a57c..ede065eac37 100644
--- a/xarray/tests/test_units.py
+++ b/xarray/tests/test_units.py
@@ -1385,7 +1385,7 @@ def test_replication_full_like_dataset(variant, dtype):

     units = {
         **extract_units(ds),
-        **{name: unit_registry.degK for name in ds.data_vars},
+        **dict.fromkeys(ds.data_vars, unit_registry.degK),
     }
     expected = attach_units(
         xr.full_like(strip_units(ds), fill_value=strip_units(fill_value)), units
     )
@@ -1871,7 +1871,7 @@ def test_broadcast_equals(self, unit, dtype):
     )
     def test_isel(self, variable, indexers, dask, dtype):
         if dask:
-            variable = variable.chunk({dim: 2 for dim in variable.dims})
+            variable = variable.chunk(dict.fromkeys(variable.dims, 2))
             quantified = xr.Variable(
                 variable.dims, variable.data.astype(dtype) * unit_registry.s
             )
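One detail worth noting from the `test_datatree_mapping` hunk above: `dict.fromkeys` called without a value maps every key to `None`, which `DataTree.from_dict` treats as an empty node (a sketch based on that test):

```python
import xarray as xr

paths = ["/", "/set1", "/set2"]
assert dict.fromkeys(paths) == {p: None for p in paths}

# None values yield empty nodes in the resulting tree.
tree = xr.DataTree.from_dict(dict.fromkeys(paths))
```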