Commit ccd2f0a3 authored by Sandro Tosi

New upstream version 1.16.0~rc1

parent e322a887
[run]
branch = True
include = */numpy/*
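# Branch coverage, restricted to the installed numpy sources; presumably
# picked up when the test suite is run with coverage enabled (e.g. via
# ``python runtests.py --coverage``).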
Copyright (c) 2005-2018, NumPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,7 @@ License: 2-clause BSD
Name: scipy-sphinx-theme
Files: doc/scipy-sphinx-theme/*
License: 3-clause BSD, PSF and Apache 2.0
For details, see doc/scipy-sphinx-theme/LICENSE.txt
Name: lapack-lite
Files: numpy/linalg/lapack_lite/*
...
#
# Use the .add_data_files and .add_data_dir methods in appropriate
# setup.py files to include non-python files such as documentation,
# data, etc. files in the distribution (*for installation*).
# Avoid using MANIFEST.in for that.
#
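# For illustration, a hypothetical numpy.distutils-style setup.py fragment
# (package and file names invented) using those methods:
#
#     def configuration(parent_package='', top_path=None):
#         from numpy.distutils.misc_util import Configuration
#         config = Configuration('mypkg', parent_package, top_path)
#         config.add_data_files('docs/usage.txt')  # install a single file
#         config.add_data_dir('tests/data')        # install a directory tree
#         return config
#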
include MANIFEST.in
include pytest.ini
@@ -12,21 +13,27 @@ recursive-include numpy/random/mtrand *.pyx *.pxd
# Add build support that should go in sdist, but not go in bdist/be installed
recursive-include numpy/_build_utils *
recursive-include numpy/linalg/lapack_lite *.c *.h
include runtests.py
include tox.ini pytest.ini .coveragerc
recursive-include tools *
# Add sdist files whose use depends on local configuration.
include numpy/core/src/common/cblasfuncs.c
include numpy/core/src/common/python_xerbla.c
# Adding scons build related files not found by distutils
recursive-include numpy/core/code_generators *.py *.txt
recursive-include numpy/core *.in *.h
# Add documentation and benchmarks: we don't use add_data_dir since we do not
# want to include this at installation, only for sdist-generated tarballs
# Docs:
recursive-include doc *
prune doc/build
prune doc/source/generated
# Benchmarks:
recursive-include benchmarks *
prune benchmarks/env
prune benchmarks/results
prune benchmarks/html
prune benchmarks/numpy
# Exclude generated files
prune */__pycache__
global-exclude *.pyc *.pyo *.pyd *.swp *.bak *~
Metadata-Version: 1.2
Name: numpy
Version: 1.16.0rc1
Summary: NumPy is the fundamental package for array computing with Python.
Home-page: https://www.numpy.org
Author: NumPy Developers
Author-email: numpy-discussion@python.org
License: BSD
Download-URL: https://pypi.python.org/pypi/numpy
Description: It provides:
- a powerful N-dimensional array object
- sophisticated (broadcasting) functions
- tools for integrating C/C++ and Fortran code
- useful linear algebra, Fourier transform, and random number capabilities
- and much more
Besides its obvious scientific uses, NumPy can also be used as an efficient
multi-dimensional container of generic data. Arbitrary data-types can be
defined. This allows NumPy to seamlessly and speedily integrate with a wide
variety of databases.
All NumPy wheels distributed on PyPI are BSD licensed.
Platform: Windows
...
.. -*- rst -*-
================
NumPy benchmarks
================
Benchmarking NumPy with Airspeed Velocity.
Usage
-----
Airspeed Velocity manages building and Python virtualenvs by itself,
unless told otherwise. Some of the benchmarking features in
``runtests.py`` also tell ASV to use the NumPy compiled by
``runtests.py``. To run the benchmarks, you do not need to install a
development version of NumPy into your current Python environment.
Run a benchmark against the currently checked-out NumPy version (don't
record the result)::
python runtests.py --bench bench_core
Compare change in benchmark results to another version::
python runtests.py --bench-compare v1.6.2 bench_core
Run ASV commands (record results and generate HTML)::
cd benchmarks
asv run --skip-existing-commits --steps 10 ALL
asv publish
asv preview
More on how to use ``asv`` can be found in the `ASV documentation`_.
Command-line help is available as usual via ``asv --help`` and
``asv run --help``.
.. _ASV documentation: https://asv.readthedocs.io/
Writing benchmarks
------------------
See `ASV documentation`_ for basics on how to write benchmarks.
Some things to consider:
- The benchmark suite should be importable with any NumPy version.
- The benchmark parameters etc. should not depend on which NumPy version
is installed.
- Try to keep the runtime of the benchmark reasonable.
- Prefer ASV's ``time_`` methods for benchmarking times rather than cooking up
time measurements via ``time.clock``, even if it requires some juggling when
writing the benchmark.
- Preparing arrays etc. should generally be put in the ``setup`` method rather
than the ``time_`` methods, to avoid counting preparation time together with
the time of the benchmarked operation.
- Be mindful that large arrays created with ``np.empty`` or ``np.zeros`` might
not be allocated in physical memory until the memory is accessed. If this is
desired behaviour, make sure to comment it in your setup function. If
you are benchmarking an algorithm, it is unlikely that a user will be
executing said algorithm on a newly created empty/zero array. One can force
pagefaults to occur in the setup phase either by calling ``np.ones`` or
``arr.fill(value)`` after creating the array, as in the sketch below.
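
A minimal sketch of a benchmark class following these guidelines (the class
name, array size, and operation are invented for illustration; only the ASV
``setup``/``time_`` convention and the pagefault-forcing trick are the
point)::

    from .common import Benchmark

    import numpy as np

    class TimeSum(Benchmark):
        def setup(self):
            # Allocate in setup so allocation cost is not timed, and fill
            # the array so its pages are faulted in before the timed region.
            self.a = np.empty(1000000)
            self.a.fill(0.5)

        def time_sum(self):
            np.sum(self.a)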
{
// The version of the config file format. Do not change, unless
// you know what you are doing.
"version": 1,
// The name of the project being benchmarked
"project": "numpy",
// The project's homepage
"project_url": "https://www.numpy.org/",
// The URL or local path of the source code repository for the
// project being benchmarked
"repo": "..",
// List of branches to benchmark. If not provided, defaults to "master"
// (for git) or "tip" (for mercurial).
"branches": ["master"],
// The DVCS being used. If not set, it will be automatically
// determined from "repo" by looking at the protocol in the URL
// (if remote), or by looking for special directories, such as
// ".git" (if local).
"dvcs": "git",
// The tool to use to create environments. May be "conda",
// "virtualenv" or other value depending on the plugins in use.
// If missing or the empty string, the tool will be automatically
// determined by looking for tools on the PATH environment
// variable.
"environment_type": "virtualenv",
// the base URL to show a commit for the project.
"show_commit_url": "https://github.com/numpy/numpy/commit/",
// The Pythons you'd like to test against. If not provided, defaults
// to the current version of Python used to run `asv`.
"pythons": ["3.6"],
// The matrix of dependencies to test. Each key is the name of a
// package (in PyPI) and the values are version numbers. An empty
// list indicates to just test against the default (latest)
// version.
"matrix": {
"six": [],
},
// The directory (relative to the current directory) that benchmarks are
// stored in. If not provided, defaults to "benchmarks"
"benchmark_dir": "benchmarks",
// The directory (relative to the current directory) to cache the Python
// environments in. If not provided, defaults to "env"
"env_dir": "env",
// The directory (relative to the current directory) that raw benchmark
// results are stored in. If not provided, defaults to "results".
"results_dir": "results",
// The directory (relative to the current directory) that the html tree
// should be written to. If not provided, defaults to "html".
"html_dir": "html",
// The number of characters to retain in the commit hashes.
// "hash_length": 8,
// `asv` will cache wheels of the recent builds in each
// environment, making them faster to install next time. This is
// number of builds to keep, per environment.
"wheel_cache_size": 2,
// The commits after which the regression search in `asv publish`
// should start looking for regressions. Dictionary whose keys are
// regexps matching to benchmark names, and values corresponding to
// the commit (exclusive) after which to start looking for
// regressions. The default is to start from the first commit
// with results. If the commit is `null`, regression detection is
// skipped for the matching benchmark.
//
// "regressions_first_commits": {
// "some_benchmark": "352cdf", // Consider regressions only after this commit
// "another_benchmark": null, // Skip regression detection altogether
// }
}
from __future__ import absolute_import, division, print_function
from . import common
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
from six.moves import xrange
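# LaplaceInplace benchmarks one Jacobi-style 2-D Laplace stencil update,
# comparing an "in-place" formulation that chains ufuncs through explicit
# ``out=`` buffers against the plain slice expression, which allocates a
# temporary array for every intermediate result.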
class LaplaceInplace(Benchmark):
params = ['inplace', 'normal']
param_names = ['update']
def setup(self, update):
N = 150
Niter = 1000
dx = 0.1
dy = 0.1
dx2 = (dx * dx)
dy2 = (dy * dy)
def num_update(u, dx2, dy2):
u[1:(-1), 1:(-1)] = ((((u[2:, 1:(-1)] + u[:(-2), 1:(-1)]) * dy2) +
((u[1:(-1), 2:] + u[1:(-1), :(-2)]) * dx2))
/ (2 * (dx2 + dy2)))
def num_inplace(u, dx2, dy2):
tmp = u[:(-2), 1:(-1)].copy()
np.add(tmp, u[2:, 1:(-1)], out=tmp)
np.multiply(tmp, dy2, out=tmp)
tmp2 = u[1:(-1), 2:].copy()
np.add(tmp2, u[1:(-1), :(-2)], out=tmp2)
np.multiply(tmp2, dx2, out=tmp2)
np.add(tmp, tmp2, out=tmp)
np.multiply(tmp, (1.0 / (2.0 * (dx2 + dy2))),
out=u[1:(-1), 1:(-1)])
def laplace(N, Niter=100, func=num_update, args=()):
u = np.zeros([N, N], order='C')
u[0] = 1
for i in range(Niter):
func(u, *args)
return u
func = {'inplace': num_inplace, 'normal': num_update}[update]
def run():
laplace(N, Niter, func, args=(dx2, dy2))
self.run = run
def time_it(self, update):
self.run()
class MaxesOfDots(Benchmark):
def setup(self):
np.random.seed(1)
nsubj = 5
nfeat = 100
ntime = 200
self.arrays = [np.random.normal(size=(ntime, nfeat))
for i in xrange(nsubj)]
def maxes_of_dots(self, arrays):
"""
A magical feature score for each feature in each dataset
:ref:`Haxby et al., Neuron (2011) <HGC+11>`.
If the arrays are z-scored column-wise before the computation, the
result characterizes each column of each array by the sum of its
maximal correlations with columns of the other arrays.
Arrays must agree only on the first dimension.
For numpy this is a joint benchmark of dot products and max()
on a set of arrays.
"""
feature_scores = ([0] * len(arrays))
for (i, sd) in enumerate(arrays):
for (j, sd2) in enumerate(arrays[(i + 1):]):
corr_temp = np.dot(sd.T, sd2)
feature_scores[i] += np.max(corr_temp, axis=1)
feature_scores[((j + i) + 1)] += np.max(corr_temp, axis=0)
return feature_scores
def time_it(self):
self.maxes_of_dots(self.arrays)
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
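# Micro-benchmarks for basic array constructors and creation helpers.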
class Core(Benchmark):
def setup(self):
self.l100 = range(100)
self.l50 = range(50)
self.l = [np.arange(1000), np.arange(1000)]
self.l10x10 = np.ones((10, 10))
def time_array_1(self):
np.array(1)
def time_array_empty(self):
np.array([])
def time_array_l1(self):
np.array([1])
def time_array_l100(self):
np.array(self.l100)
def time_array_l(self):
np.array(self.l)
def time_vstack_l(self):
np.vstack(self.l)
def time_hstack_l(self):
np.hstack(self.l)
def time_dstack_l(self):
np.dstack(self.l)
def time_arange_100(self):
np.arange(100)
def time_zeros_100(self):
np.zeros(100)
def time_ones_100(self):
np.ones(100)
def time_empty_100(self):
np.empty(100)
def time_eye_100(self):
np.eye(100)
def time_identity_100(self):
np.identity(100)
def time_eye_3000(self):
np.eye(3000)
def time_identity_3000(self):
np.identity(3000)
def time_diag_l100(self):
np.diag(self.l100)
def time_diagflat_l100(self):
np.diagflat(self.l100)
def time_diagflat_l50_l50(self):
np.diagflat([self.l50, self.l50])
def time_triu_l10x10(self):
np.triu(self.l10x10)
def time_tril_l10x10(self):
np.tril(self.l10x10)
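# Temporaries times whole expressions whose intermediates would normally be
# materialized as temporary arrays; recent NumPy can elide such temporaries
# for sufficiently large operands, which the mid vs. large sizes probe.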
class Temporaries(Benchmark):
def setup(self):
self.amid = np.ones(50000)
self.bmid = np.ones(50000)
self.alarge = np.ones(1000000)
self.blarge = np.ones(1000000)
def time_mid(self):
(self.amid * 2) + self.bmid
def time_mid2(self):
(self.amid + self.bmid) - 2
def time_large(self):
(self.alarge * 2) + self.blarge
def time_large2(self):
(self.alarge + self.blarge) - 2
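# CorrConv benchmarks np.correlate and np.convolve across pairs of input
# sizes and the three boundary modes.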
class CorrConv(Benchmark):
params = [[50, 1000, 1e5],
[10, 100, 1000, 1e4],
['valid', 'same', 'full']]
param_names = ['size1', 'size2', 'mode']
def setup(self, size1, size2, mode):
self.x1 = np.linspace(0, 1, num=size1)
self.x2 = np.cos(np.linspace(0, 2*np.pi, num=size2))
def time_correlate(self, size1, size2, mode):
np.correlate(self.x1, self.x2, mode=mode)
def time_convolve(self, size1, size2, mode):
np.convolve(self.x1, self.x2, mode=mode)
class CountNonzero(Benchmark):
param_names = ['numaxes', 'size', 'dtype']
params = [
[1, 2, 3],
[100, 10000, 1000000],
[bool, int, str, object]