Group :: Development/Python3
Package: pytables
Patch: pytables-3.8.0-Fix-compatibility-with-numpu-v1.25.patch
From 337792561e5924124efd20d6fea6bbbd2428b2aa Mon Sep 17 00:00:00 2001
From: Antonio Valentino <antonio.valentino@tiscali.it>
Date: Sat, 17 Jun 2023 19:51:00 +0200
Subject: [PATCH] Fix compatibility with numpy v1.25
---
bench/chunkshape-testing.py | 4 +-
bench/deep-tree.py | 2 +-
bench/indexed_search.py | 4 +-
bench/keysort.py | 6 +-
bench/optimal-chunksize.py | 2 +-
doc/source/cookbook/hints_for_sql_users.rst | 4 +-
doc/source/cookbook/inmemory_hdf5_files.rst | 4 +-
doc/source/usersguide/tutorials.rst | 10 ++--
tables/filters.py | 2 +-
tables/hdf5extension.pyx | 66 ++++++++++-----------
tables/indexesextension.pyx | 6 +-
tables/lrucacheextension.pyx | 12 ++--
tables/table.py | 15 ++---
tables/tableextension.pyx | 24 ++++----
tables/tests/test_array.py | 24 ++++----
tables/tests/test_attributes.py | 4 +-
tables/tests/test_carray.py | 4 +-
tables/tests/test_earray.py | 4 +-
tables/tests/test_indexvalues.py | 6 +-
tables/tests/test_queries.py | 8 +--
tables/tests/test_timetype.py | 36 +++++++----
tables/tests/test_types.py | 3 +-
tables/tests/test_vlarray.py | 6 +-
tables/utils.py | 7 ++-
tables/utilsextension.pyx | 20 +++----
tables/vlarray.py | 4 +-
26 files changed, 153 insertions(+), 134 deletions(-)
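
Most of the churn summarized above follows two mechanical patterns: the modules switch to the conventional `import numpy as np` alias, and the aggregate aliases that NumPy 1.25 deprecates (and NumPy 2.0 removes) are replaced by their canonical spellings. A minimal sketch of the second pattern, written so it runs on both old and new NumPy:

    import warnings
    import numpy as np

    a = np.array([1, 2, 3])

    # np.alltrue / np.sometrue emit a DeprecationWarning on NumPy 1.25 and are
    # gone entirely in NumPy 2.x; np.all / np.any are the supported
    # replacements used throughout this patch.
    with warnings.catch_warnings():
        warnings.simplefilter("error", DeprecationWarning)
        try:
            np.alltrue(a == a)
        except (DeprecationWarning, AttributeError):
            pass

    assert np.all(a == a)
    assert not np.any(a != a)
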
diff --git a/bench/chunkshape-testing.py b/bench/chunkshape-testing.py
index 84e75875..41e0565a 100644
--- a/bench/chunkshape-testing.py
+++ b/bench/chunkshape-testing.py
@@ -83,7 +83,7 @@ print("earray2 populate time:", clock() - t1)
f2.close()
# t1=time()
-# c2[:] = numpy.empty(shape=(M, N), dtype="int32")
+# c2[:] = np.empty(shape=(M, N), dtype="int32")
# print "carray populate time:", time()-t1
# f3 = f.create_carray(f.root, 'cfield3',
@@ -94,7 +94,7 @@ f2.close()
# tables.Int32Atom(), (0, M),
# "scalar int32 carray", expectedrows=N)
# t1=time()
-# e2.append(numpy.empty(shape=(N, M), dtype="int32"))
+# e2.append(np.empty(shape=(N, M), dtype="int32"))
# print "earray populate time:", time()-t1
# t1=time()
diff --git a/bench/deep-tree.py b/bench/deep-tree.py
index b2a43ec7..ee43577a 100644
--- a/bench/deep-tree.py
+++ b/bench/deep-tree.py
@@ -35,7 +35,7 @@ def show_stats(explain, tref):
def populate(f, nlevels):
g = f.root
- #arr = numpy.zeros((10,), "f4")
+ #arr = np.zeros((10,), "f4")
#descr = {'f0': tables.Int32Col(), 'f1': tables.Float32Col()}
for i in range(nlevels):
#dset = f.create_array(g, "DS1", arr)
diff --git a/bench/indexed_search.py b/bench/indexed_search.py
index 0109cc07..d9e33c59 100644
--- a/bench/indexed_search.py
+++ b/bench/indexed_search.py
@@ -207,11 +207,11 @@ class DB:
# print "Results len:", results
# self.print_qtime_idx(colname, ltimes, True, verbose)
# Print internal PyTables index tprof statistics
- #tprof = numpy.array(tprof)
+ #tprof = np.array(tprof)
#tmean, tstd = self.norm_times(tprof)
# print "tprof-->", round(tmean, prec), "+-", round(tstd, prec)
# print "tprof hist-->", \
- # numpy.histogram(tprof)
+ # np.histogram(tprof)
# print "tprof raw-->", tprof
# Always reopen the file after *every* query loop.
# Necessary to make the benchmark to run correctly.
diff --git a/bench/keysort.py b/bench/keysort.py
index 30db7ef0..6c006d12 100644
--- a/bench/keysort.py
+++ b/bench/keysort.py
@@ -28,6 +28,6 @@ for dtype1 in ('S6', 'b1',
tb.indexesextension.keysort(a, b)
tks = clock() - t1
print("keysort time-->", tks, " {:.2f}x".format(tref / tks))
- assert np.alltrue(a == e)
- #assert numpy.alltrue(b == d)
- assert np.alltrue(f == d)
+ assert np.all(a == e)
+ #assert np.all(b == d)
+ assert np.all(f == d)
diff --git a/bench/optimal-chunksize.py b/bench/optimal-chunksize.py
index fc071d47..1c86317a 100644
--- a/bench/optimal-chunksize.py
+++ b/bench/optimal-chunksize.py
@@ -61,7 +61,7 @@ def bench(chunkshape, filters):
# Fill the array
t1 = clock()
for i in range(N):
- # e.append([numpy.random.rand(M)]) # use this for less compressibility
+ # e.append([np.random.rand(M)]) # use this for less compressibility
e.append([quantize(np.random.rand(M), 6)])
# os.system("sync")
print(f"Creation time: {clock() - t1:.3f}", end=' ')
diff --git a/doc/source/cookbook/hints_for_sql_users.rst b/doc/source/cookbook/hints_for_sql_users.rst
index 866357ee..81ed3fd5 100644
--- a/doc/source/cookbook/hints_for_sql_users.rst
+++ b/doc/source/cookbook/hints_for_sql_users.rst
@@ -311,8 +311,8 @@ structure described above::
tbl.append(rows)
# Using a NumPy container.
- import numpy
- rows = numpy.rec.array(rows)
+ import numpy as np
+ rows = np.rec.array(rows)
tbl.append(rows)
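
The documented pattern above builds a record array from plain Python tuples before appending it to the table. A small standalone sketch of the same idea; the 'i4,S3,f8' field layout and the sample rows are illustrative only:

    import numpy as np

    # Hypothetical rows matching an (int32, 3-byte string, float64) table layout.
    rows = [(457, b"db1", 1.2), (6, b"de2", 1.3)]
    rec = np.rec.array(rows, formats="i4,S3,f8")

    print(rec.f0)   # first (int32) column
    print(rec.f2)   # third (float64) column
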
diff --git a/doc/source/cookbook/inmemory_hdf5_files.rst b/doc/source/cookbook/inmemory_hdf5_files.rst
index 9797338e..c35afcc8 100644
--- a/doc/source/cookbook/inmemory_hdf5_files.rst
+++ b/doc/source/cookbook/inmemory_hdf5_files.rst
@@ -46,8 +46,8 @@ one needs to specify to use the CORE driver::
>>> import tables
>>> h5file = tables.open_file("new_sample.h5", "w", driver="H5FD_CORE")
- >>> import numpy
- >>> a = h5file.create_array(h5file.root, "array", numpy.zeros((300, 300)))
+ >>> import numpy as np
+ >>> a = h5file.create_array(h5file.root, "array", np.zeros((300, 300)))
>>> h5file.close()
diff --git a/doc/source/usersguide/tutorials.rst b/doc/source/usersguide/tutorials.rst
index 5cbaa8ba..042ef9e2 100644
--- a/doc/source/usersguide/tutorials.rst
+++ b/doc/source/usersguide/tutorials.rst
@@ -2228,20 +2228,20 @@ object is returned.
It is possible to create arrays that immitate nested table-like structure with _v_nested_descr attribute::
- >>> import numpy
+ >>> import numpy as np
>>> table.description._v_nested_descr
[('info2', [('info3', [('x', '()f8'), ('y', '()u1')]), ('name', '()S10'),
('value', '()f8')]), ('info1', [('name', '()S10'), ('value', '()f8')]),
('color', '()u4')]
- >>> numpy.rec.array(None, shape=0,
- dtype=table.description._v_nested_descr)
+ >>> np.rec.array(None, shape=0, dtype=table.description._v_nested_descr)
recarray([],
dtype=[('info2', [('info3', [('x', '>f8'), ('y', '|u1')]),
('name', '|S10'), ('value', '>f8')]),
('info1', [('name', '|S10'), ('value', '>f8')]),
('color', '>u4')])
- >>> numpy.rec.array(None, shape=0,
- dtype=table.description.info2._v_nested_descr)
+ >>> np.rec.array(
+ None, shape=0, dtype=table.description.info2._v_nested_descr
+ )
recarray([],
dtype=[('info3', [('x', '>f8'), ('y', '|u1')]), ('name', '|S10'),
('value', '>f8')])
diff --git a/tables/filters.py b/tables/filters.py
index f27bedc2..15dcc926 100644
--- a/tables/filters.py
+++ b/tables/filters.py
@@ -278,7 +278,7 @@ class Filters:
# Byte 3: least significant digit.
if self.least_significant_digit is not None:
- # assert isinstance(self.least_significant_digit, numpy.int8)
+ # assert isinstance(self.least_significant_digit, np.int8)
packed |= self.least_significant_digit
packed <<= 8
diff --git a/tables/hdf5extension.pyx b/tables/hdf5extension.pyx
index 7c0b79e6..d90bc07c 100644
--- a/tables/hdf5extension.pyx
+++ b/tables/hdf5extension.pyx
@@ -37,7 +37,7 @@ ObjTimestamps = namedtuple('ObjTimestamps', ['atime', 'mtime',
import pickle
-import numpy
+import numpy as np
from .exceptions import HDF5ExtError, DataTypeWarning
@@ -225,12 +225,12 @@ cdef object get_attribute_string_or_none(hid_t node_id, char* attr_name):
size = H5ATTRget_attribute_string(node_id, attr_name, &attr_value, &cset)
if size == 0:
if cset == H5T_CSET_UTF8:
- retvalue = numpy.unicode_('')
+ retvalue = np.unicode_('')
else:
- retvalue = numpy.bytes_(b'')
+ retvalue = np.bytes_(b'')
elif cset == H5T_CSET_UTF8:
retvalue = PyUnicode_DecodeUTF8(attr_value, size, NULL)
- retvalue = numpy.unicode_(retvalue)
+ retvalue = np.unicode_(retvalue)
else:
retvalue = PyBytes_FromStringAndSize(attr_value, size)
# AV: oct 2012
@@ -239,9 +239,9 @@ cdef object get_attribute_string_or_none(hid_t node_id, char* attr_name):
# The entire process is quite odd but due to a bug (??) in the way
# numpy arrays are pickled in python 3 we can't assume that
# strlen(attr_value) is the actual length of the attribute
- # and numpy.bytes_(attr_value) can give a truncated pickle string
+ # and np.bytes_(attr_value) can give a truncated pickle string
retvalue = retvalue.rstrip(b'\x00')
- retvalue = numpy.bytes_(retvalue)
+ retvalue = np.bytes_(retvalue)
# Important to release attr_value, because it has been malloc'ed!
if attr_value:
@@ -274,7 +274,7 @@ cdef object get_dtype_scalar(hid_t type_id, H5T_class_t class_id,
# Try to get a NumPy type. If this can't be done, return None.
try:
- ntype = numpy.dtype(stype)
+ ntype = np.dtype(stype)
except TypeError:
ntype = None
return ntype
@@ -661,14 +661,14 @@ cdef class AttributeSet:
dset_id = node._v_objectid
# Convert a NumPy scalar into a NumPy 0-dim ndarray
- if isinstance(value, numpy.generic):
- value = numpy.array(value)
+ if isinstance(value, np.generic):
+ value = np.array(value)
# Check if value is a NumPy ndarray and of a supported type
- if (isinstance(value, numpy.ndarray) and
+ if (isinstance(value, np.ndarray) and
value.dtype.kind in ('V', 'S', 'b', 'i', 'u', 'f', 'c')):
# get a contiguous array: fixes #270 and gh-176
- #value = numpy.ascontiguousarray(value)
+ #value = np.ascontiguousarray(value)
value = value.copy()
if value.dtype.kind == 'V':
description, rabyteorder = descr_from_dtype(value.dtype, ptparams=node._v_file.params)
@@ -695,7 +695,7 @@ cdef class AttributeSet:
H5Tclose(type_id)
else:
# Object cannot be natively represented in HDF5.
- if (isinstance(value, numpy.ndarray) and
+ if (isinstance(value, np.ndarray) and
value.dtype.kind == 'U' and
value.shape == ()):
value = value[()].encode('utf-8')
@@ -755,13 +755,13 @@ cdef class AttributeSet:
&cset)
if type_size == 0:
if cset == H5T_CSET_UTF8:
- retvalue = numpy.unicode_('')
+ retvalue = np.unicode_('')
else:
- retvalue = numpy.bytes_(b'')
+ retvalue = np.bytes_(b'')
elif cset == H5T_CSET_UTF8:
retvalue = PyUnicode_DecodeUTF8(str_value, type_size, NULL)
- retvalue = numpy.unicode_(retvalue)
+ retvalue = np.unicode_(retvalue)
else:
retvalue = PyBytes_FromStringAndSize(str_value, type_size)
# AV: oct 2012
@@ -770,9 +770,9 @@ cdef class AttributeSet:
# The entire process is quite odd but due to a bug (??) in the way
# numpy arrays are pickled in python 3 we can't assume that
# strlen(attr_value) is the actual length of the attibute
- # and numpy.bytes_(attr_value) can give a truncated pickle sting
+ # and np.bytes_(attr_value) can give a truncated pickle sting
retvalue = retvalue.rstrip(b'\x00')
- retvalue = numpy.bytes_(retvalue) # bytes
+ retvalue = np.bytes_(retvalue) # bytes
# Important to release attr_value, because it has been malloc'ed!
if str_value:
free(str_value)
@@ -803,7 +803,7 @@ cdef class AttributeSet:
# Get the NumPy dtype from the type_id
try:
stype_, shape_ = hdf5_to_np_ext_type(type_id, pure_numpy_types=True, ptparams=node._v_file.params)
- dtype_ = numpy.dtype(stype_, shape_)
+ dtype_ = np.dtype(stype_, shape_)
except TypeError:
if class_id == H5T_STRING and H5Tis_variable_str(type_id):
nelements = H5ATTRget_attribute_vlen_string_array(dset_id, cattrname,
@@ -814,21 +814,21 @@ cdef class AttributeSet:
# The following generator expressions do not work with Cython 0.15.1
if cset == H5T_CSET_UTF8:
- #retvalue = numpy.fromiter(
+ #retvalue = np.fromiter(
# PyUnicode_DecodeUTF8(<char*>str_values[i],
# strlen(<char*>str_values[i]),
# NULL)
# for i in range(nelements), "O8")
- retvalue = numpy.array([
+ retvalue = np.array([
PyUnicode_DecodeUTF8(<char*>str_values[i],
strlen(<char*>str_values[i]),
NULL)
for i in range(nelements)], "O8")
else:
- #retvalue = numpy.fromiter(
+ #retvalue = np.fromiter(
# <char*>str_values[i] for i in range(nelements), "O8")
- retvalue = numpy.array(
+ retvalue = np.array(
[<char*>str_values[i] for i in range(nelements)], "O8")
retvalue.shape = shape
@@ -849,7 +849,7 @@ cdef class AttributeSet:
return None
# Get the container for data
- ndvalue = numpy.empty(dtype=dtype_, shape=shape)
+ ndvalue = np.empty(dtype=dtype_, shape=shape)
# Get the pointer to the buffer data area
rbuf = PyArray_DATA(ndvalue)
# Actually read the attribute from disk
@@ -1273,7 +1273,7 @@ cdef class Array(Leaf):
self.__class__.__name__, atom_))
# Allocate space for the dimension axis info and fill it
- dims = numpy.array(shape, dtype=numpy.intp)
+ dims = np.array(shape, dtype=np.intp)
self.rank = len(shape)
self.dims = npy_malloc_dims(self.rank, <npy_intp *>PyArray_DATA(dims))
rbuf = _array_data(nparr)
@@ -1339,11 +1339,11 @@ cdef class Array(Leaf):
class_ = self._c_classid.encode('utf-8')
# Get the fill values
- if isinstance(atom.dflt, numpy.ndarray) or atom.dflt:
- dflts = numpy.array(atom.dflt, dtype=atom.dtype)
+ if isinstance(atom.dflt, np.ndarray) or atom.dflt:
+ dflts = np.array(atom.dflt, dtype=atom.dtype)
fill_data = PyArray_DATA(dflts)
else:
- dflts = numpy.zeros((), dtype=atom.dtype)
+ dflts = np.zeros((), dtype=atom.dtype)
fill_data = NULL
if atom.shape == ():
# The default is preferred as a scalar value instead of 0-dim array
@@ -1372,7 +1372,7 @@ cdef class Array(Leaf):
H5ATTRset_attribute_string(self.dataset_id, "TITLE", encoded_title,
len(encoded_title), H5T_CSET_ASCII)
if self.extdim >= 0:
- extdim = <ndarray>numpy.array([self.extdim], dtype="int32")
+ extdim = <ndarray>np.array([self.extdim], dtype="int32")
# Attach the EXTDIM attribute in case of enlargeable arrays
H5ATTRset_attribute(self.dataset_id, "EXTDIM", H5T_NATIVE_INT,
0, NULL, PyArray_BYTES(extdim))
@@ -1447,7 +1447,7 @@ cdef class Array(Leaf):
# object arrays should not be read directly into memory
if atom.dtype != object:
# Get the fill value
- dflts = numpy.zeros((), dtype=atom.dtype)
+ dflts = np.zeros((), dtype=atom.dtype)
fill_data = PyArray_DATA(dflts)
H5ARRAYget_fill_value(self.dataset_id, self.type_id,
&fill_status, fill_data);
@@ -1692,9 +1692,9 @@ cdef class Array(Leaf):
startl.append(start)
countl.append(count)
stepl.append(step)
- start_ = numpy.array(startl, dtype="i8")
- count_ = numpy.array(countl, dtype="i8")
- step_ = numpy.array(stepl, dtype="i8")
+ start_ = np.array(startl, dtype="i8")
+ count_ = np.array(countl, dtype="i8")
+ step_ = np.array(stepl, dtype="i8")
# Get the pointers to array data
startp = <hsize_t *>PyArray_DATA(start_)
@@ -2146,7 +2146,7 @@ cdef class VLArray(Leaf):
# Compute the shape for the read array
shape = list(self._atomicshape)
shape.insert(0, vllen) # put the length at the beginning of the shape
- nparr = numpy.ndarray(
+ nparr = np.ndarray(
buffer=buf, dtype=self._atomicdtype.base, shape=shape)
# Set the writeable flag for this ndarray object
nparr.flags.writeable = True
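
The attribute-reading paths above keep the same logic and only switch the scalar constructors to the np alias; the essential pattern is stripping the NUL padding that the HDF5 C layer may return before wrapping the value, since a plain strlen() cannot be trusted for pickled payloads. A minimal pure-Python sketch of that pattern (the sample bytes are illustrative only):

    import numpy as np

    # Value as it might come back from the C layer: payload plus NUL padding.
    attr_value = b"pickled-or-plain-payload\x00\x00\x00"

    retvalue = attr_value.rstrip(b"\x00")   # drop trailing padding only
    retvalue = np.bytes_(retvalue)          # same wrapping the extension applies

    assert isinstance(retvalue, bytes) and not retvalue.endswith(b"\x00")
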
diff --git a/tables/indexesextension.pyx b/tables/indexesextension.pyx
index 8c7e1ba7..d52ea91c 100644
--- a/tables/indexesextension.pyx
+++ b/tables/indexesextension.pyx
@@ -25,7 +25,7 @@ Misc variables:
"""
import cython
-import numpy
+import numpy as np
cimport numpy as cnp
from .exceptions import HDF5ExtError
@@ -628,7 +628,7 @@ cdef class IndexArray(Array):
# Create the buffer for reading sorted data chunks if not created yet
if <object>self.bufferlb is None:
# Internal buffers
- self.bufferlb = numpy.empty(dtype=dtype, shape=self.chunksize)
+ self.bufferlb = np.empty(dtype=dtype, shape=self.chunksize)
# Get the pointers to the different buffer data areas
self.rbuflb = PyArray_DATA(self.bufferlb)
# Init structures for accelerating sorted array reads
@@ -663,7 +663,7 @@ cdef class IndexArray(Array):
maxslots = params['BOUNDS_MAX_SIZE'] // rowsize
self.boundscache = <NumCache>NumCache(
(maxslots, self.nbounds), dtype, 'non-opt types bounds')
- self.bufferbc = numpy.empty(dtype=dtype, shape=self.nbounds)
+ self.bufferbc = np.empty(dtype=dtype, shape=self.nbounds)
# Get the pointer for the internal buffer for 2nd level cache
self.rbufbc = PyArray_DATA(self.bufferbc)
# Another NumCache for the sorted values
diff --git a/tables/lrucacheextension.pyx b/tables/lrucacheextension.pyx
index 6f58c526..16463c4e 100644
--- a/tables/lrucacheextension.pyx
+++ b/tables/lrucacheextension.pyx
@@ -27,7 +27,7 @@ cdef extern from "Python.h":
import sys
-import numpy
+import numpy as np
from libc.string cimport memcpy, strcmp
from cpython.unicode cimport PyUnicode_Check
from numpy cimport import_array, ndarray, PyArray_DATA
@@ -202,7 +202,7 @@ cdef class BaseCache:
self.name = name
self.incsetcount = False
# The array for keeping the access times (using long ints here)
- self.atimes = <ndarray>numpy.zeros(shape=nslots, dtype=numpy.int_)
+ self.atimes = <ndarray>np.zeros(shape=nslots, dtype=np.int_)
self.ratimes = <long *>PyArray_DATA(self.atimes)
def __len__(self):
@@ -331,7 +331,7 @@ cdef class ObjectCache(BaseCache):
self.__dict = {}
self.mrunode = <ObjectNode>None # Most Recent Used node
# The array for keeping the object size (using long ints here)
- self.sizes = <ndarray>numpy.zeros(shape=nslots, dtype=numpy.int_)
+ self.sizes = <ndarray>np.zeros(shape=nslots, dtype=np.int_)
self.rsizes = <long *>PyArray_DATA(self.sizes)
# Clear cache
@@ -505,11 +505,11 @@ cdef class NumCache(BaseCache):
# The cache object where all data will go
# The last slot is to allow the setitem1_ method to still return
# a valid scratch area for writing purposes
- self.cacheobj = <ndarray>numpy.empty(shape=(nslots+1, self.slotsize),
+ self.cacheobj = <ndarray>np.empty(shape=(nslots+1, self.slotsize),
dtype=dtype)
self.rcache = PyArray_DATA(self.cacheobj)
# The array for keeping the keys of slots
- self.keys = <ndarray>(-numpy.ones(shape=nslots, dtype=numpy.int64))
+ self.keys = <ndarray>(-np.ones(shape=nslots, dtype=np.int64))
self.rkeys = <long long *>PyArray_DATA(self.keys)
# Returns the address of nslot
@@ -623,7 +623,7 @@ cdef class NumCache(BaseCache):
elif self.containscount > 0:
hitratio = <double>self.getcount / self.containscount
else:
- hitratio = numpy.nan
+ hitratio = np.nan
return """<%s(%s)
(%d maxslots, %d slots used, %.3f KB cachesize,
hit ratio: %.3f, disabled? %s)>
diff --git a/tables/table.py b/tables/table.py
index be60de68..ce8e5d96 100644
--- a/tables/table.py
+++ b/tables/table.py
@@ -1532,8 +1532,7 @@ very small/large chunksize, you may want to increase/decrease it."""
cstart, cstop = coords[0], coords[-1] + 1
if cstop - cstart == len(coords):
# Chances for monotonically increasing row values. Refine.
- inc_seq = np.alltrue(
- np.arange(cstart, cstop) == np.array(coords))
+ inc_seq = np.all(np.arange(cstart, cstop) == np.array(coords))
if inc_seq:
return self.read(cstart, cstop, field=field)
return self.read_coordinates(coords, field)
@@ -2094,8 +2093,9 @@ very small/large chunksize, you may want to increase/decrease it."""
table[2] = [456,'db2',1.2]
# Modify two existing rows
- rows = numpy.rec.array([[457,'db1',1.2],[6,'de2',1.3]],
- formats='i4,a3,f8')
+ rows = np.rec.array(
+ [[457,'db1',1.2],[6,'de2',1.3]], formats='i4,a3,f8'
+ )
table[1:30:2] = rows # modify a table slice
table[[1,3]] = rows # only modifies rows 1 and 3
table[[True,False,True]] = rows # only modifies rows 0 and 2
@@ -2103,8 +2103,9 @@ very small/large chunksize, you may want to increase/decrease it."""
Which is equivalent to::
table.modify_rows(start=2, rows=[456,'db2',1.2])
- rows = numpy.rec.array([[457,'db1',1.2],[6,'de2',1.3]],
- formats='i4,a3,f8')
+ rows = np.rec.array(
+ [[457,'db1',1.2],[6,'de2',1.3]], formats='i4,a3,f8'
+ )
table.modify_rows(start=1, stop=3, step=2, rows=rows)
table.modify_coordinates([1,3,2], rows)
table.modify_coordinates([True, False, True], rows)
@@ -3489,7 +3490,7 @@ class Column:
table.modify_columns(start=1, columns=[[-1]], names=['col1'])
# Modify rows 1 and 3
- columns = numpy.rec.fromarrays([[2,3]], formats='i4')
+ columns = np.rec.fromarrays([[2,3]], formats='i4')
table.modify_columns(start=1, step=2, columns=columns,
names=['col1'])
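
Besides the docstring updates, the table.py hunk above replaces np.alltrue in the check that decides whether a coordinate selection is really a dense, monotonically increasing run, in which case a plain range read can be used instead of a coordinate read. A small sketch of that check:

    import numpy as np

    coords = np.array([3, 4, 5, 6])
    cstart, cstop = coords[0], coords[-1] + 1

    # True only for a dense increasing run such as [3, 4, 5, 6];
    # gaps or reordering fall back to read_coordinates().
    inc_seq = (cstop - cstart == len(coords)) and np.all(
        np.arange(cstart, cstop) == coords
    )
    assert inc_seq
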
diff --git a/tables/tableextension.pyx b/tables/tableextension.pyx
index 4a50c6ab..c90b4b93 100644
--- a/tables/tableextension.pyx
+++ b/tables/tableextension.pyx
@@ -22,7 +22,7 @@ Misc variables:
"""
import math
import sys
-import numpy
+import numpy as np
from time import time
from .description import Col
@@ -378,13 +378,13 @@ cdef class Table(Leaf):
if colpath == "":
# Compute a byteorder for the entire table
if len(field_byteorders) > 0:
- field_byteorders = numpy.array(field_byteorders)
+ field_byteorders = np.array(field_byteorders)
# Cython doesn't interpret well the extended comparison
# operators so this: field_byteorders == "little" doesn't work
# as expected
- if numpy.alltrue(field_byteorders.__eq__("little")):
+ if np.all(field_byteorders.__eq__("little")):
byteorder = "little"
- elif numpy.alltrue(field_byteorders.__eq__("big")):
+ elif np.all(field_byteorders.__eq__("big")):
byteorder = "big"
else: # Yes! someone has done it!
byteorder = "mixed"
@@ -869,7 +869,7 @@ cdef class Row:
wdflts = table._v_wdflts
if wdflts is None:
- self.wrec = numpy.zeros(1, dtype=self.dtype) # Defaults are zero
+ self.wrec = np.zeros(1, dtype=self.dtype) # Defaults are zero
else:
self.wrec = table._v_wdflts.copy()
self.wreccpy = self.wrec.copy() # A copy of the defaults
@@ -1009,8 +1009,8 @@ cdef class Row:
iobuf = self.iobuf
j = 0; recout = 0; cs = self.chunksize
nchunksread = self.nrowsread // cs
- tmp_range = numpy.arange(0, cs, dtype='int64')
- self.bufcoords = numpy.empty(self.nrowsinbuf, dtype='int64')
+ tmp_range = np.arange(0, cs, dtype='int64')
+ self.bufcoords = np.empty(self.nrowsinbuf, dtype='int64')
# Fetch valid chunks until the I/O buffer is full
while nchunksread < self.totalchunks:
if self.chunkmap_data[nchunksread]:
@@ -1100,7 +1100,7 @@ cdef class Row:
lenbuf = self.nrowsinbuf
tmp = self.coords[self.nrowsread:self.nrowsread+lenbuf:self.step]
# We have to get a contiguous buffer, so numpy.array is the way to go
- self.bufcoords = numpy.array(tmp, dtype="uint64")
+ self.bufcoords = np.array(tmp, dtype="uint64")
self._row = -1
if self.bufcoords.size > 0:
recout = self.table._read_elements(self.bufcoords, self.iobuf)
@@ -1126,7 +1126,7 @@ cdef class Row:
tmp = self.coords[0:self.nextelement + 1]
else:
tmp = self.coords[self.nextelement - (<long long> self.nrowsinbuf) + 1:self.nextelement + 1]
- self.bufcoords = numpy.array(tmp, dtype="uint64")
+ self.bufcoords = np.array(tmp, dtype="uint64")
recout = self.table._read_elements(self.bufcoords, self.iobuf)
self.bufcoords_data = <hsize_t*>PyArray_DATA(self.bufcoords)
self.nrowsread = self.nrowsread + self.nrowsinbuf
@@ -1172,7 +1172,7 @@ cdef class Row:
self.index_valid_data = PyArray_BYTES(self.indexvalid)
# Is there any interesting information in this buffer?
- if not numpy.sometrue(self.indexvalid):
+ if not np.any(self.indexvalid):
# No, so take the next one
if self.step >= self.nrowsinbuf:
self.nextelement = self.nextelement + self.step
@@ -1477,7 +1477,7 @@ cdef class Row:
if self.mod_elements is None:
# Initialize an array for keeping the modified elements
# (just in case Row.update() would be used)
- self.mod_elements = numpy.empty(shape=self.nrowsinbuf, dtype=SizeType)
+ self.mod_elements = np.empty(shape=self.nrowsinbuf, dtype=SizeType)
# We need a different copy for self.iobuf here
self.iobufcpy = self.iobuf.copy()
@@ -1649,7 +1649,7 @@ cdef class Row:
if self.exist_enum_cols:
if key in self.colenums:
enum = self.colenums[key]
- for cenval in numpy.asarray(value).flat:
+ for cenval in np.asarray(value).flat:
enum(cenval) # raises ``ValueError`` on invalid values
# Get the field to be modified
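
The byteorder consolidation above keeps the explicit __eq__ call because Cython handles the extended comparison operators poorly; only numpy.alltrue becomes np.all. In plain Python the same decision reads as below, with field_byteorders as a hypothetical example value:

    import numpy as np

    field_byteorders = np.array(["little", "little", "big"])

    if np.all(field_byteorders == "little"):
        byteorder = "little"
    elif np.all(field_byteorders == "big"):
        byteorder = "big"
    else:
        byteorder = "mixed"   # someone really did mix byteorders in one table

    assert byteorder == "mixed"
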
diff --git a/tables/tests/test_array.py b/tables/tests/test_array.py
index 00e827b1..e0e6b4ee 100644
--- a/tables/tests/test_array.py
+++ b/tables/tests/test_array.py
@@ -2031,8 +2031,9 @@ class PointSelectionTestCase(common.TempFileMixin, common.PyTablesTestCase):
a = nparr[key]
b = tbarr[key]
self.assertTrue(
- np.alltrue(a == b),
- "NumPy array and PyTables selections does not match.")
+ np.all(a == b),
+ "NumPy array and PyTables selections does not match."
+ )
def test01b_read(self):
"""Test for point-selections (read, integer keys)."""
@@ -2046,7 +2047,7 @@ class PointSelectionTestCase(common.TempFileMixin, common.PyTablesTestCase):
a = nparr[key]
b = tbarr[key]
self.assertTrue(
- np.alltrue(a == b),
+ np.all(a == b),
"NumPy array and PyTables selections does not match.")
def test01c_read(self):
@@ -2100,7 +2101,7 @@ class PointSelectionTestCase(common.TempFileMixin, common.PyTablesTestCase):
a = nparr[:]
b = tbarr[:]
self.assertTrue(
- np.alltrue(a == b),
+ np.all(a == b),
"NumPy array and PyTables modifications does not match.")
def test02b_write(self):
@@ -2118,7 +2119,7 @@ class PointSelectionTestCase(common.TempFileMixin, common.PyTablesTestCase):
a = nparr[:]
b = tbarr[:]
self.assertTrue(
- np.alltrue(a == b),
+ np.all(a == b),
"NumPy array and PyTables modifications does not match.")
def test02c_write(self):
@@ -2136,7 +2137,7 @@ class PointSelectionTestCase(common.TempFileMixin, common.PyTablesTestCase):
a = nparr[:]
b = tbarr[:]
self.assertTrue(
- np.alltrue(a == b),
+ np.all(a == b),
"NumPy array and PyTables modifications does not match.")
@@ -2281,7 +2282,7 @@ class FancySelectionTestCase(common.TempFileMixin, common.PyTablesTestCase):
a = nparr[key]
b = tbarr[key]
self.assertTrue(
- np.alltrue(a == b),
+ np.all(a == b),
"NumPy array and PyTables selections does not match.")
def test01b_read(self):
@@ -2333,8 +2334,9 @@ class FancySelectionTestCase(common.TempFileMixin, common.PyTablesTestCase):
a = nparr[:]
b = tbarr[:]
self.assertTrue(
- np.alltrue(a == b),
- "NumPy array and PyTables modifications does not match.")
+ np.all(a == b),
+ "NumPy array and PyTables modifications does not match."
+ )
def test02b_write(self):
"""Test for fancy-selections (working selections, write, broadcast)."""
@@ -2353,7 +2355,7 @@ class FancySelectionTestCase(common.TempFileMixin, common.PyTablesTestCase):
# print("NumPy modified array:", a)
# print("PyTables modifyied array:", b)
self.assertTrue(
- np.alltrue(a == b),
+ np.all(a == b),
"NumPy array and PyTables modifications does not match.")
@@ -2603,7 +2605,7 @@ class TestCreateArrayArgs(common.TempFileMixin, common.PyTablesTestCase):
atom=atom)
def test_kwargs_obj_shape_error(self):
- # atom = Atom.from_dtype(numpy.dtype('complex'))
+ # atom = Atom.from_dtype(np.dtype('complex'))
shape = self.shape + self.shape
self.assertRaises(TypeError,
self.h5file.create_array,
diff --git a/tables/tests/test_attributes.py b/tables/tests/test_attributes.py
index 64d725e5..51d218d5 100644
--- a/tables/tests/test_attributes.py
+++ b/tables/tests/test_attributes.py
@@ -578,7 +578,7 @@ class CreateTestCase(common.TempFileMixin, common.PyTablesTestCase):
# In the views old implementation PyTAbles performa a copy of the
# array:
#
- # value = numpy.array(value)
+ # value = np.array(value)
#
# in order to get a contiguous array.
# Unfortunately array with swapped axis are copyed as they are so
@@ -1374,7 +1374,7 @@ class TypesTestCase(common.TempFileMixin, common.PyTablesTestCase):
# The next raises a `TypeError` when unpickled. See:
# http://projects.scipy.org/numpy/ticket/1037
- # self.array.attrs.pq = numpy.array([''])
+ # self.array.attrs.pq = np.array([''])
self.array.attrs.pq = np.array([''], dtype="U1")
# Check the results
diff --git a/tables/tests/test_carray.py b/tables/tests/test_carray.py
index b35697f0..182f92b5 100644
--- a/tables/tests/test_carray.py
+++ b/tables/tests/test_carray.py
@@ -2674,7 +2674,7 @@ class TestCreateCArrayArgs(common.TempFileMixin, common.PyTablesTestCase):
atom=atom)
def test_kwargs_obj_shape_error(self):
- # atom = Atom.from_dtype(numpy.dtype('complex'))
+ # atom = Atom.from_dtype(np.dtype('complex'))
shape = self.shape + self.shape
self.assertRaises(TypeError,
self.h5file.create_carray,
@@ -2697,7 +2697,7 @@ class TestCreateCArrayArgs(common.TempFileMixin, common.PyTablesTestCase):
shape=self.shape)
def test_kwargs_obj_atom_shape_error_02(self):
- # atom = Atom.from_dtype(numpy.dtype('complex'))
+ # atom = Atom.from_dtype(np.dtype('complex'))
shape = self.shape + self.shape
self.assertRaises(TypeError,
self.h5file.create_carray,
diff --git a/tables/tests/test_earray.py b/tables/tests/test_earray.py
index 8e400e2c..aa3a66de 100644
--- a/tables/tests/test_earray.py
+++ b/tables/tests/test_earray.py
@@ -2756,7 +2756,7 @@ class TestCreateEArrayArgs(common.TempFileMixin, common.PyTablesTestCase):
atom=atom)
def test_kwargs_obj_shape_error(self):
- # atom = tables.Atom.from_dtype(numpy.dtype('complex'))
+ # atom = tables.Atom.from_dtype(np.dtype('complex'))
shape = self.shape + self.shape
self.assertRaises(TypeError,
self.h5file.create_earray,
@@ -2779,7 +2779,7 @@ class TestCreateEArrayArgs(common.TempFileMixin, common.PyTablesTestCase):
shape=self.shape)
def test_kwargs_obj_atom_shape_error_02(self):
- # atom = tables.Atom.from_dtype(numpy.dtype('complex'))
+ # atom = tables.Atom.from_dtype(np.dtype('complex'))
shape = self.shape + self.shape
self.assertRaises(TypeError,
self.h5file.create_earray,
diff --git a/tables/tests/test_indexvalues.py b/tables/tests/test_indexvalues.py
index 47503d80..66312e2d 100644
--- a/tables/tests/test_indexvalues.py
+++ b/tables/tests/test_indexvalues.py
@@ -868,7 +868,7 @@ class SelectValuesTestCase(common.TempFileMixin, common.PyTablesTestCase):
table2 = self.h5file.root.table2
# Convert the limits to the appropriate type
- # il = numpy.string_(self.il)
+ # il = np.string_(self.il)
sl = np.string_(self.sl)
# Do some selections and check the results
@@ -1119,7 +1119,7 @@ class SelectValuesTestCase(common.TempFileMixin, common.PyTablesTestCase):
table2 = self.h5file.root.table2
# Convert the limits to the appropriate type
- # il = numpy.int32(self.il)
+ # il = np.int32(self.il)
sl = np.uint16(self.sl)
# Do some selections and check the results
@@ -1315,7 +1315,7 @@ class SelectValuesTestCase(common.TempFileMixin, common.PyTablesTestCase):
table2 = self.h5file.root.table2
# Convert the limits to the appropriate type
- # il = numpy.float32(self.il)
+ # il = np.float32(self.il)
sl = np.float64(self.sl)
# Do some selections and check the results
diff --git a/tables/tests/test_queries.py b/tables/tests/test_queries.py
index b9406a3e..f27c263a 100644
--- a/tables/tests/test_queries.py
+++ b/tables/tests/test_queries.py
@@ -63,13 +63,13 @@ func_info = {'log10': np.log10, 'log': np.log, 'exp': np.exp,
if hasattr(np, 'float16'):
type_info['float16'] = (np.float16, float)
# if hasattr(numpy, 'float96'):
-# type_info['float96'] = (numpy.float96, float)
+# type_info['float96'] = (np.float96, float)
# if hasattr(numpy, 'float128'):
-# type_info['float128'] = (numpy.float128, float)
+# type_info['float128'] = (np.float128, float)
# if hasattr(numpy, 'complex192'):
-# type_info['complex192'] = (numpy.complex192, complex)
+# type_info['complex192'] = (np.complex192, complex)
# if hasattr(numpy, 'complex256'):
-# type_info['complex256'] = (numpy.complex256, complex)
+# type_info['complex256'] = (np.complex256, complex)
sctype_from_type = {type_: info[0] for (type_, info) in type_info.items()}
"""Maps PyTables types to NumPy scalar types."""
diff --git a/tables/tests/test_timetype.py b/tables/tests/test_timetype.py
index 3d4d7a99..d8ac3b18 100644
--- a/tables/tests/test_timetype.py
+++ b/tables/tests/test_timetype.py
@@ -229,8 +229,10 @@ class CompareTestCase(common.TempFileMixin, common.PyTablesTestCase):
"Stored and retrieved values do not match.")
comp = (recarr['t64col'][0] == np.array((wtime, wtime)))
- self.assertTrue(np.alltrue(comp),
- "Stored and retrieved values do not match.")
+ self.assertTrue(
+ np.all(comp),
+ "Stored and retrieved values do not match."
+ )
def test02b_CompareTable(self):
"""Comparing several written and read time values in a Table."""
@@ -262,8 +264,10 @@ class CompareTestCase(common.TempFileMixin, common.PyTablesTestCase):
if common.verbose:
print("Original values:", orig_val)
print("Retrieved values:", recarr['t32col'][:])
- self.assertTrue(np.alltrue(recarr['t32col'][:] == orig_val),
- "Stored and retrieved values do not match.")
+ self.assertTrue(
+ np.all(recarr['t32col'][:] == orig_val),
+ "Stored and retrieved values do not match."
+ )
# Time64 column.
orig_val = np.arange(0, nrows * 2, dtype=np.int32) + 0.012
@@ -365,16 +369,20 @@ class UnalignedTestCase(common.TempFileMixin, common.PyTablesTestCase):
if common.verbose:
print("Original values:", orig_val)
print("Retrieved values:", recarr['i8col'][:])
- self.assertTrue(np.alltrue(recarr['i8col'][:] == orig_val),
- "Stored and retrieved values do not match.")
+ self.assertTrue(
+ np.all(recarr['i8col'][:] == orig_val),
+ "Stored and retrieved values do not match."
+ )
# Time32 column.
orig_val = np.arange(nrows, dtype=np.int32)
if common.verbose:
print("Original values:", orig_val)
print("Retrieved values:", recarr['t32col'][:])
- self.assertTrue(np.alltrue(recarr['t32col'][:] == orig_val),
- "Stored and retrieved values do not match.")
+ self.assertTrue(
+ np.all(recarr['t32col'][:] == orig_val),
+ "Stored and retrieved values do not match."
+ )
# Time64 column.
orig_val = np.arange(0, nrows * 2, dtype=np.int32) + 0.012
@@ -413,8 +421,10 @@ class BigEndianTestCase(common.PyTablesTestCase):
if common.verbose:
print("Retrieved values:", earr)
print("Should look like:", orig_val)
- self.assertTrue(np.alltrue(earr == orig_val),
- "Retrieved values do not match the expected values.")
+ self.assertTrue(
+ np.all(earr == orig_val),
+ "Retrieved values do not match the expected values."
+ )
def test00b_Read64Array(self):
"""Checking Time64 type in arrays."""
@@ -448,8 +458,10 @@ class BigEndianTestCase(common.PyTablesTestCase):
if common.verbose:
print("Retrieved values:", t32)
print("Should look like:", orig_val)
- self.assertTrue(np.alltrue(t32 == orig_val),
- "Retrieved values do not match the expected values.")
+ self.assertTrue(
+ np.all(t32 == orig_val),
+ "Retrieved values do not match the expected values."
+ )
def test01b_ReadNestedColumn(self):
"""Checking Time64 type in nested columns."""
diff --git a/tables/tests/test_types.py b/tables/tests/test_types.py
index 11c9af4b..c41449db 100644
--- a/tables/tests/test_types.py
+++ b/tables/tests/test_types.py
@@ -86,7 +86,8 @@ class RangeTestCase(common.TempFileMixin, common.PyTablesTestCase):
rec['var3'] = np.array(i % self.maxshort).astype('i2')
rec['var5'] = float(i)
- with self.assertRaises(TypeError):
+ # Numpy 1.25 -> ValueError
+ with self.assertRaises((TypeError, ValueError)):
rec['var4'] = "124c"
rec['var6'] = float(i)
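
The test change above is the one behavioural accommodation in the patch: assigning a non-numeric string to a numeric field used to surface as TypeError, while NumPy 1.25 reports it as ValueError, so the assertion now accepts either. A standalone illustration, with a plain structured array standing in for the PyTables Row object:

    import numpy as np

    rec = np.zeros(1, dtype=[("var4", "f8")])   # hypothetical numeric field

    try:
        rec[0]["var4"] = "124c"                 # not a valid float literal
    except (TypeError, ValueError) as exc:      # exception type depends on the NumPy version
        print(type(exc).__name__)
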
diff --git a/tables/tests/test_vlarray.py b/tables/tests/test_vlarray.py
index e7dab188..44185338 100644
--- a/tables/tests/test_vlarray.py
+++ b/tables/tests/test_vlarray.py
@@ -697,7 +697,7 @@ class TypesTestCase(common.TempFileMixin, common.PyTablesTestCase):
"int32": np.int32,
"uint32": np.uint32,
"int64": np.int64,
- # "UInt64": numpy.int64, # Unavailable in some platforms
+ # "uint64": np.int64, # Unavailable in some platforms
}
if common.verbose:
print('\n', '-=' * 30)
@@ -795,7 +795,7 @@ class TypesTestCase(common.TempFileMixin, common.PyTablesTestCase):
"int32": np.int32,
"uint32": np.uint32,
"int64": np.int64,
- # "UInt64": numpy.int64, # Unavailable in some platforms
+ # "uint64": np.int64, # Unavailable in some platforms
}
if common.verbose:
print('\n', '-=' * 30)
@@ -851,7 +851,7 @@ class TypesTestCase(common.TempFileMixin, common.PyTablesTestCase):
"int32": np.int32,
"uint32": np.uint32,
"int64": np.int64,
- # "UInt64": numpy.int64, # Unavailable in some platforms
+ # "uint64": np.int64, # Unavailable in some platforms
}
if common.verbose:
print('\n', '-=' * 30)
diff --git a/tables/utils.py b/tables/utils.py
index 8b5d0267..e11e5ba7 100644
--- a/tables/utils.py
+++ b/tables/utils.py
@@ -66,7 +66,10 @@ def idx2long(index):
"""Convert a possible index into a long int."""
try:
- return int(index)
+ if hasattr(index, "item"):
+ return index.item()
+ else:
+ return int(index)
except Exception:
raise TypeError("not an integer type.")
@@ -207,7 +210,7 @@ def lazyattr(fget):
>>> del obj.attribute
Traceback (most recent call last):
...
- AttributeError: can't delete attribute 'attribute'
+ AttributeError: ...
.. warning::
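
Two small utils.py fixes: idx2long now asks NumPy objects for a native Python scalar via .item(), which avoids the NumPy 1.25 deprecation warning for converting size-1 arrays with ndim > 0, and the lazyattr doctest stops pinning the exact AttributeError message, which changed across recent Python releases. A sketch of the patched helper plus a quick check:

    import numpy as np

    def idx2long(index):
        """Convert a possible index into a Python scalar (sketch of the patched helper)."""
        try:
            if hasattr(index, "item"):
                return index.item()   # NumPy scalars and size-1 arrays, no deprecation warning
            return int(index)
        except Exception:
            raise TypeError("not an integer type.")

    assert idx2long(7) == idx2long(np.int64(7)) == idx2long(np.array([7])) == 7
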
diff --git a/tables/utilsextension.pyx b/tables/utilsextension.pyx
index 664e1ea5..6515a9a7 100644
--- a/tables/utilsextension.pyx
+++ b/tables/utilsextension.pyx
@@ -20,7 +20,7 @@ try:
except ImportError:
zlib_imported = False
-import numpy
+import numpy as np
from .description import Description, Col
from .misc.enum import Enum
@@ -528,7 +528,7 @@ def encode_filename(object filename):
if hasattr(os, 'fspath'):
filename = os.fspath(filename)
- if isinstance(filename, (unicode, numpy.str_)):
+ if isinstance(filename, (unicode, np.str_)):
# if type(filename) is unicode:
encoding = sys.getfilesystemencoding()
encname = filename.encode(encoding, 'replace')
@@ -949,16 +949,16 @@ def read_f_attr(hid_t file_id, str attr_name):
size = H5ATTRget_attribute_string(file_id, c_attr_name, &attr_value, &cset)
if size == 0:
if cset == H5T_CSET_UTF8:
- retvalue = numpy.unicode_('')
+ retvalue = np.unicode_('')
else:
- retvalue = numpy.bytes_(b'')
+ retvalue = np.bytes_(b'')
else:
retvalue = <bytes>(attr_value).rstrip(b'\x00')
if cset == H5T_CSET_UTF8:
retvalue = retvalue.decode('utf-8')
- retvalue = numpy.str_(retvalue)
+ retvalue = np.str_(retvalue)
else:
- retvalue = numpy.bytes_(retvalue) # bytes
+ retvalue = np.bytes_(retvalue) # bytes
# Important to release attr_value, because it has been malloc'ed!
if attr_value:
@@ -1041,7 +1041,7 @@ def enum_from_hdf5(hid_t enumId, str byteorder):
"supported at this moment")
dtype = atom.dtype
- npvalue = numpy.array((0,), dtype=dtype)
+ npvalue = np.array((0,), dtype=dtype)
rbuf = PyArray_DATA(npvalue)
# Get the name and value of each of the members
@@ -1485,15 +1485,15 @@ cdef int load_reference(hid_t dataset_id, hobj_ref_t *refbuf, size_t item_size,
# read entire dataset as numpy array
stype_, shape_ = hdf5_to_np_ext_type(reftype_id, pure_numpy_types=True, atom=True)
if stype_ == "_ref_":
- dtype_ = numpy.dtype("O", shape_)
+ dtype_ = np.dtype("O", shape_)
else:
- dtype_ = numpy.dtype(stype_, shape_)
+ dtype_ = np.dtype(stype_, shape_)
shape = []
for j in range(rank):
shape.append(<int>dims[j])
shape = tuple(shape)
- nprefarr = numpy.empty(dtype=dtype_, shape=shape)
+ nprefarr = np.empty(dtype=dtype_, shape=shape)
nparr[i] = [nprefarr] # box the array in a list to store it as one object
if stype_ == "_ref_":
newrefbuf = <hobj_ref_t *>malloc(nprefarr.size * item_size)
diff --git a/tables/vlarray.py b/tables/vlarray.py
index e1b4b2c0..caa66728 100644
--- a/tables/vlarray.py
+++ b/tables/vlarray.py
@@ -644,7 +644,7 @@ class VLArray(hdf5extension.VLArray, Leaf):
a_list = vlarray[4:1000:2]
a_list2 = vlarray[[0,2]] # get list of coords
a_list3 = vlarray[[0,-2]] # negative values accepted
- a_list4 = vlarray[numpy.array([True,...,False])] # array of bools
+ a_list4 = vlarray[np.array([True,...,False])] # array of bools
"""
@@ -814,7 +814,7 @@ class VLArray(hdf5extension.VLArray, Leaf):
"""Read rows specified in `coords`."""
rows = []
for coord in coords:
- rows.append(self.read(int(coord), int(coord) + 1, 1)[0])
+ rows.append(self.read(idx2long(coord), idx2long(coord) + 1, 1)[0])
return rows
def _g_copy_with_stats(self, group, name, start, stop, step,
--
2.33.8